3 #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
4 #define CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
8 #include <cds/intrusive/details/split_list_base.h>
9 #include <cds/details/binary_functor_wrapper.h>
11 namespace cds { namespace intrusive {
13 /// Split-ordered list RCU specialization
14 /** @ingroup cds_intrusive_map
15 \anchor cds_intrusive_SplitListSet_rcu
17 Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see
18 - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables"
19 - [2008] Nir Shavit "The Art of Multiprocessor Programming"
21 The split-ordered list is a lock-free implementation of an extensible unbounded hash table. It uses original
22 recursive split-ordering algorithm discovered by Ori Shalev and Nir Shavit that allows to split buckets
23 without moving an item on resizing, see \ref cds_SplitList_algo_desc "short algo description".
27 Template parameters are:
28 - \p RCU - one of \ref cds_urcu_gc "RCU type"
29 - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList, LazyList.
30 The intrusive ordered list implementation specifies the type \p T stored in the hash-set,
31 the comparing functor for the type \p T and other features specific for the ordered list.
32 - \p Traits - set traits, default is \p split_list::traits.
33 Instead of defining \p Traits struct you may use option-based syntax with \p split_list::make_traits metafunction.
35 @note About required features of the hash functor see \ref cds_SplitList_hash_functor "SplitList general description".
38 Before including <tt><cds/intrusive/split_list_rcu.h></tt> you should include appropriate RCU header file,
39 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
40 For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" and
41 MichaelList-based split-list you should include:
43 #include <cds/urcu/general_buffered.h>
44 #include <cds/intrusive/michael_list_rcu.h>
45 #include <cds/intrusive/split_list_rcu.h>
47 // Declare Michael's list for type Foo and default traits:
48 typedef cds::intrusive::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, Foo > rcu_michael_list;
50 // Declare split-list based on rcu_michael_list
51 typedef cds::intrusive::SplitListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, rcu_michael_list > rcu_split_list;
58 # ifdef CDS_DOXYGEN_INVOKED
59 class Traits = split_list::traits
// Partial specialization of SplitListSet for an RCU-based garbage collector.
64 class SplitListSet< cds::urcu::gc< RCU >, OrderedList, Traits >
67 typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector
68 typedef Traits traits; ///< Traits template parameters
70 /// Hash functor for \ref value_type and all its derivatives that you use
71 typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
74 typedef cds::intrusive::split_list::implementation_tag implementation_tag;
// Rebinds the user-supplied OrderedList with split-list-specific traits.
79 typedef split_list::details::rebind_list_traits<OrderedList, traits> wrapped_ordered_list;
83 # ifdef CDS_DOXYGEN_INVOKED
84 typedef OrderedList ordered_list; ///< type of ordered list used as base for split-list
86 typedef typename wrapped_ordered_list::result ordered_list;
88 typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list
89 typedef typename ordered_list::key_comparator key_comparator; ///< key compare functor
90 typedef typename ordered_list::disposer disposer; ///< Node disposer functor
91 typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock
92 typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node
93 /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that
94 static CDS_CONSTEXPR const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal;
96 typedef typename traits::item_counter item_counter; ///< Item counter type
97 typedef typename traits::back_off back_off; ///< back-off strategy for spinning
98 typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
99 typedef typename traits::stat stat; ///< Internal statistics
102 typedef typename ordered_list::node_type list_node_type; ///< Node type as declared in ordered list
103 typedef split_list::node<list_node_type> node_type; ///< split-list node type
104 typedef node_type dummy_node_type; ///< dummy node type
106 /// Split-list node traits
108 This traits is intended for converting between underlying ordered list node type \ref list_node_type
109 and split-list node type \ref node_type
111 typedef split_list::node_traits<typename ordered_list::node_traits> node_traits;
114 /// Bucket table implementation
// Selects static vs. expandable bucket table from traits::dynamic_bucket_table.
115 typedef typename split_list::details::bucket_table_selector<
116 traits::dynamic_bucket_table
119 , opt::allocator< typename traits::allocator >
120 , opt::memory_model< memory_model >
121 >::type bucket_table;
127 /// Ordered list wrapper to access protected members of OrderedList
// Each xxx_at() overload wraps the dummy (bucket head) node into the list's
// auxiliary_head handle and forwards to the corresponding protected member
// of the underlying ordered list.
128 class ordered_list_wrapper: public ordered_list
130 typedef ordered_list base_class;
131 typedef typename base_class::auxiliary_head bucket_head_type;
// Inserts \p val into the bucket starting at \p pHead
134 bool insert_at( dummy_node_type * pHead, value_type& val )
136 assert( pHead != nullptr );
137 bucket_head_type h(pHead);
138 return base_class::insert_at( h, val );
// Inserts \p val; on success calls \p f with the inserted item
141 template <typename Func>
142 bool insert_at( dummy_node_type * pHead, value_type& val, Func f )
144 assert( pHead != nullptr );
145 bucket_head_type h(pHead);
146 return base_class::insert_at( h, val, f );
// Insert-or-update in the bucket; returns (success, inserted) pair
149 template <typename Func>
150 std::pair<bool, bool> ensure_at( dummy_node_type * pHead, value_type& val, Func func )
152 assert( pHead != nullptr );
153 bucket_head_type h(pHead);
154 return base_class::ensure_at( h, val, func );
// Unlinks \p val only if the bucket contains that very item (pointer equality)
157 bool unlink_at( dummy_node_type * pHead, value_type& val )
159 assert( pHead != nullptr );
160 bucket_head_type h(pHead);
161 return base_class::unlink_at( h, val );
// Erases the item equal to \p val (per \p cmp), calling \p f on the erased item
164 template <typename Q, typename Compare, typename Func>
165 bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp, Func f )
167 assert( pHead != nullptr );
168 bucket_head_type h(pHead);
169 return base_class::erase_at( h, val, cmp, f );
172 template <typename Q, typename Compare>
173 bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
175 assert( pHead != nullptr );
176 bucket_head_type h(pHead);
177 return base_class::erase_at( h, val, cmp );
// Unlinks the matching item and returns it without disposing (RCU extract)
180 template <typename Q, typename Compare>
181 value_type * extract_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
183 assert( pHead != nullptr );
184 bucket_head_type h(pHead);
185 return base_class::extract_at( h, val, cmp );
188 template <typename Q, typename Compare, typename Func>
189 bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f ) const
191 assert( pHead != nullptr );
192 bucket_head_type h(pHead);
193 return base_class::find_at( h, val, cmp, f );
196 template <typename Q, typename Compare>
197 bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const & val, Compare cmp ) const
199 assert( pHead != nullptr );
200 bucket_head_type h(pHead);
201 return base_class::find_at( h, val, cmp );
// Returns a raw pointer to the matching item; caller must hold the RCU lock
204 template <typename Q, typename Compare>
205 value_type * get_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp ) const
207 assert( pHead != nullptr );
208 bucket_head_type h(pHead);
209 return base_class::get_at( h, val, cmp );
// Inserts a dummy (bucket head) node at list front (used for bucket 0)
212 bool insert_aux_node( dummy_node_type * pNode )
214 return base_class::insert_aux_node( pNode );
// Inserts a dummy node after the parent bucket's head
216 bool insert_aux_node( dummy_node_type * pHead, dummy_node_type * pNode )
218 bucket_head_type h(pHead);
219 return base_class::insert_aux_node( h, pNode );
// Adapts a Less predicate to a 3-way comparator that also unwraps
// split_list::details::search_value_type on either side.
223 template <typename Less>
224 struct less_wrapper: public cds::opt::details::make_comparator_from_less<Less>
226 typedef cds::opt::details::make_comparator_from_less<Less> base_wrapper;
228 template <typename Q1, typename Q2>
229 int operator()( split_list::details::search_value_type<Q1> const& v1, Q2 const& v2 ) const
231 return base_wrapper::operator()( v1.val, v2 );
234 template <typename Q1, typename Q2>
235 int operator()( Q1 const& v1, split_list::details::search_value_type<Q2> const& v2 ) const
237 return base_wrapper::operator()( v1, v2.val );
243 ordered_list_wrapper m_List; ///< Ordered list containing split-list items
244 bucket_table m_Buckets; ///< bucket table
245 atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
246 atomics::atomic<size_t> m_nMaxItemCount; ///< number of items container can hold, before we have to resize
247 item_counter m_ItemCounter; ///< Item counter
248 hash m_HashFunctor; ///< Hash functor
249 stat m_Stat; ///< Internal statistics accumulator
// Allocator for dummy (bucket head) nodes
253 typedef cds::details::Allocator< dummy_node_type, typename traits::allocator > dummy_node_allocator;
// Allocates a dummy node carrying the (already split-ordered) hash \p nHash
255 dummy_node_type * alloc_dummy_node( size_t nHash )
257 m_Stat.onHeadNodeAllocated();
258 return dummy_node_allocator().New( nHash );
// Frees a dummy node previously created by alloc_dummy_node()
260 void free_dummy_node( dummy_node_type * p )
262 dummy_node_allocator().Delete( p );
263 m_Stat.onHeadNodeFreed();
266 /// Calculates hash value of \p key
267 template <typename Q>
268 size_t hash_value( Q const& key ) const
270 return m_HashFunctor( key );
273 size_t bucket_no( size_t nHash ) const
275 return nHash & ( (1 << m_nBucketCountLog2.load(memory_model::memory_order_relaxed)) - 1 );
278 static size_t parent_bucket( size_t nBucket )
280 assert( nBucket > 0 );
281 return nBucket & ~( 1 << bitop::MSBnz( nBucket ) );
// Lazily initializes bucket \p nBucket: recursively ensures the parent bucket
// exists, then links a new dummy node after the parent's head. On a lost race
// the freshly allocated dummy node is freed and we spin-wait until the winning
// thread publishes its node in the bucket table.
284 dummy_node_type * init_bucket( size_t nBucket )
286 assert( nBucket > 0 );
287 size_t nParent = parent_bucket( nBucket );
289 dummy_node_type * pParentBucket = m_Buckets.bucket( nParent );
290 if ( pParentBucket == nullptr ) {
291 pParentBucket = init_bucket( nParent );
292 m_Stat.onRecursiveInitBucket();
295 assert( pParentBucket != nullptr );
297 // Allocate a dummy node for new bucket
299 dummy_node_type * pBucket = alloc_dummy_node( split_list::dummy_hash( nBucket ) );
300 if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
301 m_Buckets.bucket( nBucket, pBucket );
302 m_Stat.onNewBucket();
305 free_dummy_node( pBucket );
308 // Another thread has set the bucket. Wait until it is done.
310 // At this point, we must wait while the nBucket entry is empty.
311 // The compiler can decide that waiting loop can be "optimized" (stripped)
312 // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
314 m_Stat.onBucketInitContenton();
317 dummy_node_type volatile * p = m_Buckets.bucket( nBucket );
319 return const_cast<dummy_node_type *>( p );
321 m_Stat.onBusyWaitBucketInit();
// Returns the head dummy node of the bucket for \p nHash,
// initializing the bucket on first access.
325 dummy_node_type * get_bucket( size_t nHash )
327 size_t nBucket = bucket_no( nHash );
329 dummy_node_type * pHead = m_Buckets.bucket( nBucket );
330 if ( pHead == nullptr )
331 pHead = init_bucket( nBucket );
// The bucket head must always be a dummy (auxiliary) node
333 assert( pHead->is_dummy() );
// Compile-time checks plus creation of bucket 0, which anchors the list.
340 // GC and OrderedList::gc must be the same
341 static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
343 // atomicity::empty_item_counter is not allowed as an item counter:
// the resize heuristic in inc_item_count() depends on a real counter
344 static_assert( !std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
345 "cds::atomicity::empty_item_counter is not allowed as a item counter");
347 // Initialize bucket 0
348 dummy_node_type * pNode = alloc_dummy_node( 0 /*split_list::dummy_hash(0)*/ );
350 // insert_aux_node cannot return false for empty list
351 CDS_VERIFY( m_List.insert_aux_node( pNode ));
353 m_Buckets.bucket( 0, pNode );
/// Resize threshold: the set should grow when the item count exceeds
/// bucket count multiplied by the load factor.
static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor )
{
    size_t const nThreshold = nLoadFactor * nBucketCount;
    return nThreshold;
}
// Bumps the item counter; when it crosses the current threshold, tries to
// double the bucket count and raise the threshold (both via relaxed CAS —
// losing a race is harmless, another thread did the same update).
361 void inc_item_count()
363 size_t nMaxCount = m_nMaxItemCount.load(memory_model::memory_order_relaxed);
364 if ( ++m_ItemCounter <= nMaxCount )
367 size_t sz = m_nBucketCountLog2.load(memory_model::memory_order_relaxed);
368 const size_t nBucketCount = static_cast<size_t>(1) << sz;
369 if ( nBucketCount < m_Buckets.capacity() ) {
370 // we may grow the bucket table
371 const size_t nLoadFactor = m_Buckets.load_factor();
372 if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor ))
373 return; // someone has already updated m_nBucketCountLog2, so stop here
375 m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ),
376 memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
377 m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
// Bucket table is at capacity: disable further resizing
380 m_nMaxItemCount.store( std::numeric_limits<size_t>::max(), memory_model::memory_order_relaxed );
// --- Internal implementations shared by the public find/erase/extract API ---
// Each routine computes the hash, wraps the key together with its
// split-ordered ("regular") hash into search_value_type, locates the bucket
// head, then delegates to the ordered-list wrapper.

// Finds the item equal to \p val and applies \p f( item, val )
383 template <typename Q, typename Compare, typename Func>
384 bool find_( Q& val, Compare cmp, Func f )
386 size_t nHash = hash_value( val );
387 split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
388 dummy_node_type * pHead = get_bucket( nHash );
389 assert( pHead != nullptr );
// Unwrap search_value_type before handing the key back to the user functor
391 return m_Stat.onFind( m_List.find_at( pHead, sv, cmp,
392 [&f](value_type& item, split_list::details::search_value_type<Q>& val){ f(item, val.val ); }));
// Presence check only — no functor
395 template <typename Q, typename Compare>
396 bool find_value( Q const& val, Compare cmp )
398 size_t nHash = hash_value( val );
399 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
400 dummy_node_type * pHead = get_bucket( nHash );
401 assert( pHead != nullptr );
403 return m_Stat.onFind( m_List.find_at( pHead, sv, cmp ));
// Returns a raw pointer to the matching item (caller must hold the RCU lock)
406 template <typename Q, typename Compare>
407 value_type * get_( Q const& val, Compare cmp )
409 size_t nHash = hash_value( val );
410 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
411 dummy_node_type * pHead = get_bucket( nHash );
412 assert( pHead != nullptr );
414 value_type * p = m_List.get_at( pHead, sv, cmp );
415 m_Stat.onFind( p != nullptr );
// Unlinks the matching item and returns it without disposing
419 template <typename Q, typename Compare>
420 value_type * extract_( Q const& val, Compare cmp )
422 size_t nHash = hash_value( val );
423 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
424 dummy_node_type * pHead = get_bucket( nHash );
425 assert( pHead != nullptr );
427 value_type * pNode = m_List.extract_at( pHead, sv, cmp );
430 m_Stat.onExtractSuccess();
433 m_Stat.onExtractFailed();
// extract_() with a comparator built from the user-supplied Less predicate
437 template <typename Q, typename Less>
438 value_type * extract_with_( Q const& val, Less pred )
441 return extract_( val, typename wrapped_ordered_list::template make_compare_from_less<Less>());
// Erases the matching item; the ordered list's disposer reclaims it
444 template <typename Q, typename Compare>
445 bool erase_( const Q& val, Compare cmp )
447 size_t nHash = hash_value( val );
448 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
449 dummy_node_type * pHead = get_bucket( nHash );
450 assert( pHead != nullptr );
452 if ( m_List.erase_at( pHead, sv, cmp ) ) {
454 m_Stat.onEraseSuccess();
457 m_Stat.onEraseFailed();
// Erase overload that calls \p f with the item before it is unlinked
461 template <typename Q, typename Compare, typename Func>
462 bool erase_( Q const& val, Compare cmp, Func f )
464 size_t nHash = hash_value( val );
465 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
466 dummy_node_type * pHead = get_bucket( nHash );
467 assert( pHead != nullptr );
469 if ( m_List.erase_at( pHead, sv, cmp, f )) {
471 m_Stat.onEraseSuccess();
474 m_Stat.onEraseFailed();
481 /// Initialize split-ordered list of default capacity
483 The default capacity is defined in bucket table constructor.
484 See split_list::expandable_bucket_table, split_list::static_bucket_table
485 which selects by split_list::dynamic_bucket_table option.
// Initial state: 2 buckets (log2 == 1); threshold = 2 * load factor
488 : m_nBucketCountLog2(1)
489 , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
494 /// Initialize split-ordered list
496 size_t nItemCount ///< estimate average of item count
497 , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1.
499 : m_Buckets( nItemCount, nLoadFactor )
500 , m_nBucketCountLog2(1)
501 , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
509 The function inserts \p val in the set if it does not contain
510 an item with key equal to \p val.
512 The function makes RCU lock internally.
514 Returns \p true if \p val is placed into the set, \p false otherwise.
516 bool insert( value_type& val )
518 size_t nHash = hash_value( val );
519 dummy_node_type * pHead = get_bucket( nHash );
520 assert( pHead != nullptr );
// Store the split-ordered hash into the node before linking it
522 // TSan false positive: hash is read-only, will be ordered when we insert a node
523 CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
524 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
525 CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
527 if ( m_List.insert_at( pHead, val )) {
529 m_Stat.onInsertSuccess();
532 m_Stat.onInsertFailed();
538 This function is intended for derived non-intrusive containers.
540 The function allows splitting the creation of a new item into two parts:
541 - create item with key only
542 - insert new item into the set
543 - if inserting is successful, calls \p f functor to initialize value-field of \p val.
545 The functor signature is:
547 void func( value_type& val );
549 where \p val is the item inserted.
551 The function makes RCU lock internally.
553 @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
554 \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level
557 template <typename Func>
558 bool insert( value_type& val, Func f )
560 size_t nHash = hash_value( val );
561 dummy_node_type * pHead = get_bucket( nHash );
562 assert( pHead != nullptr );
564 // TSan false positive: hash is read-only, will be ordered when we insert a node
565 CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
566 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
567 CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
569 if ( m_List.insert_at( pHead, val, f )) {
571 m_Stat.onInsertSuccess();
574 m_Stat.onInsertFailed();
578 /// Ensures that the \p val exists in the set
580 The operation performs inserting or changing data in a lock-free manner.
582 If the item \p val is not found in the set, then \p val is inserted into the set.
583 Otherwise, the functor \p func is called with item found.
584 The functor signature is:
586 void func( bool bNew, value_type& item, value_type& val );
589 - \p bNew - \p true if the item has been inserted, \p false otherwise
590 - \p item - item of the set
591 - \p val - argument \p val passed into the \p ensure function
592 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
593 refer to the same thing.
595 The function makes RCU lock internally.
597 Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
598 \p second is \p true if new item has been added or \p false if the item with \p key
599 already is in the set.
601 @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
602 \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level
605 template <typename Func>
606 std::pair<bool, bool> ensure( value_type& val, Func func )
608 size_t nHash = hash_value( val );
609 dummy_node_type * pHead = get_bucket( nHash );
610 assert( pHead != nullptr );
// Store the split-ordered hash into the node before linking it
612 // TSan false positive: hash is read-only, will be ordered when we insert a node
613 CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
614 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
615 CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
617 std::pair<bool, bool> bRet = m_List.ensure_at( pHead, val, func );
// Count the item only when a brand-new node has been inserted
618 if ( bRet.first && bRet.second ) {
620 m_Stat.onEnsureNew();
623 m_Stat.onEnsureExist();
627 /// Unlinks the item \p val from the set
629 The function searches the item \p val in the set and unlinks it from the set
630 if it is found and is equal to \p val.
632 Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
633 and deletes the item found. \p unlink finds an item by key and deletes it
634 only if \p val is an item of that set, i.e. the pointer to item found
635 is equal to <tt> &val </tt>.
637 RCU \p synchronize method can be called, therefore, RCU should not be locked.
639 The function returns \p true if success and \p false otherwise.
641 bool unlink( value_type& val )
643 size_t nHash = hash_value( val );
644 dummy_node_type * pHead = get_bucket( nHash );
645 assert( pHead != nullptr );
647 if ( m_List.unlink_at( pHead, val ) ) {
649 m_Stat.onEraseSuccess();
652 m_Stat.onEraseFailed();
656 /// Deletes the item from the set
657 /** \anchor cds_intrusive_SplitListSet_rcu_erase
658 The function searches an item with key equal to \p key in the set,
659 unlinks it from the set, and returns \p true.
660 If the item with key equal to \p key is not found the function returns \p false.
662 Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
663 and deletes the item found. \p unlink finds an item by key and deletes it
664 only if \p key is an item of that set, i.e. the pointer to item found
665 is equal to <tt> &key </tt>.
667 RCU \p synchronize method can be called, therefore, RCU should not be locked.
669 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
671 template <typename Q>
672 bool erase( Q const& key )
674 return erase_( key, key_comparator() );
677 /// Deletes the item from the set using \p pred for searching
679 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase "erase(Q const&)"
680 but \p pred is used for key compare.
681 \p Less has the interface like \p std::less.
682 \p pred must imply the same element order as the comparator used for building the set.
684 template <typename Q, typename Less>
685 bool erase_with( Q const& key, Less pred )
688 return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
691 /// Deletes the item from the set
692 /** \anchor cds_intrusive_SplitListSet_rcu_erase_func
693 The function searches an item with key equal to \p key in the set,
694 calls \p f functor with item found, unlinks it from the set, and returns \p true.
695 The \ref disposer specified by \p OrderedList class template parameter is called
696 by garbage collector \p GC asynchronously.
698 The \p Func interface is
701 void operator()( value_type const& item );
705 If the item with key equal to \p key is not found the function returns \p false.
707 RCU \p synchronize method can be called, therefore, RCU should not be locked.
709 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
711 template <typename Q, typename Func>
712 bool erase( Q const& key, Func f )
714 return erase_( key, key_comparator(), f );
717 /// Deletes the item from the set using \p pred for searching
719 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase_func "erase(Q const&, Func)"
720 but \p pred is used for key compare.
721 \p Less has the interface like \p std::less.
722 \p pred must imply the same element order as the comparator used for building the set.
724 template <typename Q, typename Less, typename Func>
725 bool erase_with( Q const& key, Less pred, Func f )
728 return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
731 /// Extracts an item from the set
732 /** \anchor cds_intrusive_SplitListSet_rcu_extract
733 The function searches an item with key equal to \p key in the set,
734 unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
735 If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr.
737 @note The function does NOT call RCU read-side lock or synchronization,
738 and does NOT dispose the item found. It just excludes the item from the set
739 and returns a pointer to item found.
740 You should lock RCU before calling the function, and you should synchronize RCU
741 outside the RCU lock before reusing returned pointer.
744 typedef cds::urcu::gc< general_buffered<> > rcu;
745 typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list;
746 typedef cds::intrusive::SplitListSet< rcu, rcu_michael_list, foo_traits > rcu_splitlist_set;
748 rcu_splitlist_set theSet;
751 rcu_splitlist_set::exempt_ptr p;
753 // first, we should lock RCU
754 rcu_splitlist_set::rcu_lock lock;
756 // Now, you can apply extract function
757 // Note that you must not delete the item found inside the RCU lock
758 p = theSet.extract( 10 );
760 // do something with p
765 // We may safely release p here
766 // release() passes the pointer to RCU reclamation cycle:
767 // it invokes RCU retire_ptr function with the disposer you provided for rcu_michael_list.
771 template <typename Q>
772 exempt_ptr extract( Q const& key )
774 return exempt_ptr(extract_( key, key_comparator() ));
777 /// Extracts an item from the set using \p pred for searching
779 The function is an analog of \p extract(Q const&) but \p pred is used for key compare.
780 \p Less functor has the interface like \p std::less.
781 \p pred must imply the same element order as the comparator used for building the set.
783 template <typename Q, typename Less>
784 exempt_ptr extract_with( Q const& key, Less pred )
786 return exempt_ptr( extract_with_( key, pred ));
789 /// Finds the key \p key
790 /** \anchor cds_intrusive_SplitListSet_rcu_find_func
791 The function searches the item with key equal to \p key and calls the functor \p f for item found.
792 The interface of \p Func functor is:
795 void operator()( value_type& item, Q& key );
798 where \p item is the item found, \p key is the <tt>find</tt> function argument.
800 The functor can change non-key fields of \p item. Note that the functor is only guaranteed
801 that \p item cannot be disposed while the functor is executing.
802 The functor does not serialize simultaneous access to the set \p item. If such access is
803 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
805 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
806 can modify both arguments.
808 Note the hash functor specified for class \p Traits template parameter
809 should accept a parameter of type \p Q that can be not the same as \p value_type.
811 The function applies RCU lock internally.
813 The function returns \p true if \p key is found, \p false otherwise.
815 template <typename Q, typename Func>
816 bool find( Q& key, Func f )
818 return find_( key, key_comparator(), f );
// Const-key overload; identical behavior
821 template <typename Q, typename Func>
822 bool find( Q const& key, Func f )
824 return find_( key, key_comparator(), f );
828 /// Finds the key \p key with \p pred predicate for comparing
830 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_func "find(Q&, Func)"
831 but \p pred is used for key compare.
832 \p Less has the interface like \p std::less.
833 \p pred must imply the same element order as the comparator used for building the set.
835 template <typename Q, typename Less, typename Func>
836 bool find_with( Q& key, Less pred, Func f )
839 return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
842 template <typename Q, typename Less, typename Func>
843 bool find_with( Q const& key, Less pred, Func f )
846 return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
850 /// Finds the key \p key
851 /** \anchor cds_intrusive_SplitListSet_rcu_find_val
852 The function searches the item with key equal to \p key
853 and returns \p true if \p key found or \p false otherwise.
855 template <typename Q>
856 bool find( Q const& key )
858 return find_value( key, key_comparator() );
861 /// Finds the key \p key with \p pred predicate for comparing
863 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_val "find(Q const&)"
864 but \p pred is used for key compare.
865 \p Less has the interface like \p std::less.
866 \p pred must imply the same element order as the comparator used for building the set.
868 template <typename Q, typename Less>
869 bool find_with( Q const& key, Less pred )
872 return find_value( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
875 /// Finds the key \p key and returns the item found
876 /** \anchor cds_intrusive_SplitListSet_rcu_get
877 The function searches the item with key equal to \p key and returns the pointer to item found.
878 If \p key is not found it returns \p nullptr.
880 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
882 RCU should be locked before call of this function.
883 Returned item is valid only while RCU is locked:
885 cds::intrusive::SplitListSet< your_template_parameters > theSet;
889 hash_set::rcu_lock lock;
891 foo * pVal = theSet.get( 5 );
896 // Unlock RCU by rcu_lock destructor
897 // pVal can be retired by disposer at any time after RCU has been unlocked
901 template <typename Q>
902 value_type * get( Q const& key )
904 return get_( key, key_comparator() );
907 /// Finds the key \p key and returns the item found
909 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_get "get(Q const&)"
910 but \p pred is used for comparing the keys.
912 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
914 \p pred must imply the same element order as the comparator used for building the set.
916 template <typename Q, typename Less>
917 value_type * get_with( Q const& key, Less pred )
920 return get_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>());
924 /// Returns item count in the set
927 return m_ItemCounter;
930 /// Checks if the set is empty
932 Emptiness is checked by item counting: if item count is zero then the set is empty.
933 Thus, the correct item counting feature is an important part of split-list set implementation.
940 /// Clears the set (not atomic)
// Iterates over the whole list unlinking regular (non-dummy) items;
// concurrent modifications may make the result approximate.
943 iterator it = begin();
944 while ( it != end() ) {
952 /// Returns internal statistics
953 stat const& statistics() const
// Forward iterator over the split-list; skips dummy (bucket head) nodes.
// IsConst selects const vs. mutable access.
960 template <bool IsConst>
962 :public split_list::details::iterator_type<node_traits, ordered_list, IsConst>
964 typedef split_list::details::iterator_type<node_traits, ordered_list, IsConst> iterator_base_class;
965 typedef typename iterator_base_class::list_iterator list_iterator;
// Default ctor: end-of-list iterator
968 : iterator_base_class()
971 iterator_type( iterator_type const& src )
972 : iterator_base_class( src )
975 // This ctor should be protected...
976 iterator_type( list_iterator itCur, list_iterator itEnd )
977 : iterator_base_class( itCur, itEnd )
984 The forward iterator for a split-list has some features:
985 - it has no post-increment operator
986 - it depends on iterator of underlying \p OrderedList
987 - The iterator cannot be moved across thread boundary since it may contain GC's guard that is thread-private GC data.
988 - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent
989 deleting operations there is no guarantee that you iterate over all items in the split-list.
991 Therefore, the use of iterators in concurrent environment is not a good idea. Use the iterator on the concurrent container
992 for debug purpose only.
994 typedef iterator_type<false> iterator;
995 /// Const forward iterator
997 For iterator's features and requirements see \ref iterator
999 typedef iterator_type<true> const_iterator;
1001 /// Returns a forward iterator addressing the first element in a split-list
1003 For empty list \code begin() == end() \endcode
1007 return iterator( m_List.begin(), m_List.end() );
1010 /// Returns an iterator that addresses the location succeeding the last element in a split-list
1012 Do not use the value returned by <tt>end</tt> function to access any item.
1014 The returned value can be used only to control reaching the end of the split-list.
1015 For empty list \code begin() == end() \endcode
1019 return iterator( m_List.end(), m_List.end() );
1022 /// Returns a forward const iterator addressing the first element in a split-list
1023 const_iterator begin() const
1027 /// Returns a forward const iterator addressing the first element in a split-list
1028 const_iterator cbegin() const
1030 return const_iterator( m_List.cbegin(), m_List.cend() );
1033 /// Returns a const iterator that addresses the location succeeding the last element in a split-list
1034 const_iterator end() const
1038 /// Returns a const iterator that addresses the location succeeding the last element in a split-list
1039 const_iterator cend() const
1041 return const_iterator( m_List.cend(), m_List.cend() );
1046 }} // namespace cds::intrusive
1048 #endif // #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H