2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
32 #define CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
36 #include <cds/intrusive/details/split_list_base.h>
37 #include <cds/details/binary_functor_wrapper.h>
39 namespace cds { namespace intrusive {
41 /// Split-ordered list RCU specialization
42 /** @ingroup cds_intrusive_map
43 \anchor cds_intrusive_SplitListSet_rcu
45 Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see
46 - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables"
47 - [2008] Nir Shavit "The Art of Multiprocessor Programming"
49 The split-ordered list is a lock-free implementation of an extensible unbounded hash table. It uses original
50 recursive split-ordering algorithm discovered by Ori Shalev and Nir Shavit that allows to split buckets
51 without moving an item on resizing, see \ref cds_SplitList_algo_desc "short algo description".
55 Template parameters are:
56 - \p RCU - one of \ref cds_urcu_gc "RCU type"
57 - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList, LazyList.
58 The intrusive ordered list implementation specifies the type \p T stored in the hash-set,
59 the comparing functor for the type \p T and other features specific for the ordered list.
60 - \p Traits - set traits, default is \p split_list::traits.
61 Instead of defining \p Traits struct you may use option-based syntax with \p split_list::make_traits metafunction.
63 @note About required features of hash functor see \ref cds_SplitList_hash_functor "SplitList general description".
66 Before including <tt><cds/intrusive/split_list_rcu.h></tt> you should include appropriate RCU header file,
67 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
68 For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" and
69 MichaelList-based split-list you should include:
71 #include <cds/urcu/general_buffered.h>
72 #include <cds/intrusive/michael_list_rcu.h>
73 #include <cds/intrusive/split_list_rcu.h>
75 // Declare Michael's list for type Foo and default traits:
76 typedef cds::intrusive::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, Foo > rcu_michael_list;
78 // Declare split-list based on rcu_michael_list
79 typedef cds::intrusive::SplitListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, rcu_michael_list > rcu_split_list;
// NOTE(review): this listing has blank and brace-only lines elided and each line
// prefixed with its original file line number; tokens below are kept verbatim.
// Class head of the RCU specialization of SplitListSet: public typedefs re-exported
// from the wrapped ordered list and from Traits.
86 # ifdef CDS_DOXYGEN_INVOKED
87 class Traits = split_list::traits
92 class SplitListSet< cds::urcu::gc< RCU >, OrderedList, Traits >
95 typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector
96 typedef Traits traits; ///< Traits template parameters
98 /// Hash functor for \ref value_type and all its derivatives that you use
99 typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
// Rebinds the user-supplied ordered list to the node/traits types split-list needs.
103 typedef split_list::details::rebind_list_traits<OrderedList, traits> wrapped_ordered_list;
107 # ifdef CDS_DOXYGEN_INVOKED
108 typedef OrderedList ordered_list; ///< type of ordered list used as base for split-list
110 typedef typename wrapped_ordered_list::result ordered_list;
112 typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list
113 typedef typename ordered_list::key_comparator key_comparator; ///< key compare functor
114 typedef typename ordered_list::disposer disposer; ///< Node disposer functor
115 typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock
116 typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node
117 typedef typename ordered_list::raw_ptr raw_ptr; ///< pointer to the node for \p get() function
118 /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that
119 static CDS_CONSTEXPR const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal;
121 typedef typename traits::item_counter item_counter; ///< Item counter type
122 typedef typename traits::back_off back_off; ///< back-off strategy for spinning
123 typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
124 typedef typename traits::stat stat; ///< Internal statistics
127 typedef typename ordered_list::node_type list_node_type; ///< Node type as declared in ordered list
128 typedef split_list::node<list_node_type> node_type; ///< split-list node type
129 typedef node_type dummy_node_type; ///< dummy node type
131 /// Split-list node traits
133 This traits is intended for converting between underlying ordered list node type \ref list_node_type
134 and split-list node type \ref node_type
136 typedef split_list::node_traits<typename ordered_list::node_traits> node_traits;
139 /// Bucket table implementation
// Selected at compile time: expandable (dynamic) or static table, per traits::dynamic_bucket_table.
140 typedef typename split_list::details::bucket_table_selector<
141 traits::dynamic_bucket_table
144 , opt::allocator< typename traits::allocator >
145 , opt::memory_model< memory_model >
146 >::type bucket_table;
152 /// Ordered list wrapper to access protected members of OrderedList
153 class ordered_list_wrapper: public ordered_list
155 typedef ordered_list base_class;
156 typedef typename base_class::auxiliary_head bucket_head_type;
159 bool insert_at( dummy_node_type * pHead, value_type& val )
161 assert( pHead != nullptr );
162 bucket_head_type h(pHead);
163 return base_class::insert_at( h, val );
166 template <typename Func>
167 bool insert_at( dummy_node_type * pHead, value_type& val, Func f )
169 assert( pHead != nullptr );
170 bucket_head_type h(pHead);
171 return base_class::insert_at( h, val, f );
174 template <typename Func>
175 std::pair<bool, bool> update_at( dummy_node_type * pHead, value_type& val, Func func, bool bAllowInsert )
177 assert( pHead != nullptr );
178 bucket_head_type h(pHead);
179 return base_class::update_at( h, val, func, bAllowInsert );
182 bool unlink_at( dummy_node_type * pHead, value_type& val )
184 assert( pHead != nullptr );
185 bucket_head_type h(pHead);
186 return base_class::unlink_at( h, val );
189 template <typename Q, typename Compare, typename Func>
190 bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp, Func f )
192 assert( pHead != nullptr );
193 bucket_head_type h(pHead);
194 return base_class::erase_at( h, val, cmp, f );
197 template <typename Q, typename Compare>
198 bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
200 assert( pHead != nullptr );
201 bucket_head_type h(pHead);
202 return base_class::erase_at( h, val, cmp );
205 template <typename Q, typename Compare>
206 value_type * extract_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
208 assert( pHead != nullptr );
209 bucket_head_type h(pHead);
210 return base_class::extract_at( h, val, cmp );
213 template <typename Q, typename Compare, typename Func>
214 bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f )
216 assert( pHead != nullptr );
217 bucket_head_type h(pHead);
218 return base_class::find_at( h, val, cmp, f );
221 template <typename Q, typename Compare>
222 bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const & val, Compare cmp )
224 assert( pHead != nullptr );
225 bucket_head_type h(pHead);
226 return base_class::find_at( h, val, cmp );
229 template <typename Q, typename Compare>
230 raw_ptr get_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
232 assert( pHead != nullptr );
233 bucket_head_type h(pHead);
234 return base_class::get_at( h, val, cmp );
237 bool insert_aux_node( dummy_node_type * pNode )
239 return base_class::insert_aux_node( pNode );
241 bool insert_aux_node( dummy_node_type * pHead, dummy_node_type * pNode )
243 bucket_head_type h(pHead);
244 return base_class::insert_aux_node( h, pNode );
248 template <typename Less>
249 struct less_wrapper: public cds::opt::details::make_comparator_from_less<Less>
251 typedef cds::opt::details::make_comparator_from_less<Less> base_wrapper;
253 template <typename Q1, typename Q2>
254 int operator()( split_list::details::search_value_type<Q1> const& v1, Q2 const& v2 ) const
256 return base_wrapper::operator()( v1.val, v2 );
259 template <typename Q1, typename Q2>
260 int operator()( Q1 const& v1, split_list::details::search_value_type<Q2> const& v2 ) const
262 return base_wrapper::operator()( v1, v2.val );
// Data members. m_List holds both regular and dummy (bucket head) nodes in
// split-order; m_Buckets maps bucket index -> dummy node pointer.
268 ordered_list_wrapper m_List; ///< Ordered list containing split-list items
269 bucket_table m_Buckets; ///< bucket table
270 atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
271 atomics::atomic<size_t> m_nMaxItemCount; ///< number of items container can hold, before we have to resize
272 item_counter m_ItemCounter; ///< Item counter
273 hash m_HashFunctor; ///< Hash functor
274 stat m_Stat; ///< Internal statistics accumulator
278 typedef cds::details::Allocator< dummy_node_type, typename traits::allocator > dummy_node_allocator;
280 dummy_node_type * alloc_dummy_node( size_t nHash )
282 m_Stat.onHeadNodeAllocated();
283 return dummy_node_allocator().New( nHash );
285 void free_dummy_node( dummy_node_type * p )
287 dummy_node_allocator().Delete( p );
288 m_Stat.onHeadNodeFreed();
291 /// Calculates hash value of \p key
292 template <typename Q>
293 size_t hash_value( Q const& key ) const
295 return m_HashFunctor( key );
298 size_t bucket_no( size_t nHash ) const
300 return nHash & ( (1 << m_nBucketCountLog2.load(memory_model::memory_order_relaxed)) - 1 );
303 static size_t parent_bucket( size_t nBucket )
305 assert( nBucket > 0 );
306 return nBucket & ~( 1 << bitop::MSBnz( nBucket ) );
// init_bucket(): lazily creates the dummy head node for bucket nBucket.
// Recursive split-ordering: the parent bucket (MSB cleared) is initialized first,
// then the new dummy node is linked into the list after the parent's head.
// NOTE(review): brace-only and loop/return lines are elided in this listing
// (the contention path presumably spins with back_off until another thread
// publishes the bucket, then returns it — confirm against the full source).
309 dummy_node_type * init_bucket( size_t nBucket )
311 assert( nBucket > 0 );
312 size_t nParent = parent_bucket( nBucket );
314 dummy_node_type * pParentBucket = m_Buckets.bucket( nParent );
315 if ( pParentBucket == nullptr ) {
316 pParentBucket = init_bucket( nParent );
317 m_Stat.onRecursiveInitBucket();
320 assert( pParentBucket != nullptr );
322 // Allocate a dummy node for new bucket
324 dummy_node_type * pBucket = alloc_dummy_node( split_list::dummy_hash( nBucket ) );
325 if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
326 m_Buckets.bucket( nBucket, pBucket );
327 m_Stat.onNewBucket();
// Lost the race: another thread linked this bucket's dummy node first.
330 free_dummy_node( pBucket );
333 // Another thread set the bucket. Wait while it done
335 // In this point, we must wait while nBucket is empty.
336 // The compiler can decide that waiting loop can be "optimized" (stripped)
337 // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
339 m_Stat.onBucketInitContenton();
342 dummy_node_type volatile * p = m_Buckets.bucket( nBucket );
344 return const_cast<dummy_node_type *>( p );
346 m_Stat.onBusyWaitBucketInit();
// get_bucket(): maps a raw hash to its bucket's dummy head, creating the bucket on demand.
350 dummy_node_type * get_bucket( size_t nHash )
352 size_t nBucket = bucket_no( nHash );
354 dummy_node_type * pHead = m_Buckets.bucket( nBucket );
355 if ( pHead == nullptr )
356 pHead = init_bucket( nBucket );
// Dummy nodes have the "dummy" bit set in their split-ordered hash.
358 assert( pHead->is_dummy() );
// Shared constructor body (the enclosing function signature line is elided in
// this listing): compile-time sanity checks, then creation of bucket 0.
365 // GC and OrderedList::gc must be the same
366 static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
// empty_item_counter would break resizing, which relies on real item counts.
368 // atomicity::empty_item_counter is not allowed as a item counter
369 static_assert( !std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
370 "cds::atomicity::empty_item_counter is not allowed as a item counter");
372 // Initialize bucket 0
373 dummy_node_type * pNode = alloc_dummy_node( 0 /*split_list::dummy_hash(0)*/ );
375 // insert_aux_node cannot return false for empty list
376 CDS_VERIFY( m_List.insert_aux_node( pNode ));
378 m_Buckets.bucket( 0, pNode );
// Threshold for growing the bucket table: items-per-bucket * bucket count.
381 static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor )
383 return nBucketCount * nLoadFactor;
// Bumps the item counter and, when the threshold is crossed, doubles the bucket
// count (lock-free: both CAS calls may fail harmlessly if another thread resized).
386 void inc_item_count()
388 size_t nMaxCount = m_nMaxItemCount.load(memory_model::memory_order_relaxed);
// Fast path: still under threshold, nothing to resize (early return elided in listing).
389 if ( ++m_ItemCounter <= nMaxCount )
392 size_t sz = m_nBucketCountLog2.load(memory_model::memory_order_relaxed);
393 const size_t nBucketCount = static_cast<size_t>(1) << sz;
394 if ( nBucketCount < m_Buckets.capacity() ) {
395 // we may grow the bucket table
396 const size_t nLoadFactor = m_Buckets.load_factor();
397 if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor ))
398 return; // someone already have updated m_nBucketCountLog2, so stop here
// Publish the new threshold first, then the new bucket-count log2.
400 m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ),
401 memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
402 m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
// Table is at capacity: disable further resizing by making the threshold unreachable.
405 m_nMaxItemCount.store( std::numeric_limits<size_t>::max(), memory_model::memory_order_relaxed );
408 template <typename Q, typename Compare, typename Func>
409 bool find_( Q& val, Compare cmp, Func f )
411 size_t nHash = hash_value( val );
412 split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
413 dummy_node_type * pHead = get_bucket( nHash );
414 assert( pHead != nullptr );
416 return m_Stat.onFind( m_List.find_at( pHead, sv, cmp,
417 [&f](value_type& item, split_list::details::search_value_type<Q>& val){ f(item, val.val ); }));
420 template <typename Q, typename Compare>
421 bool find_value( Q const& val, Compare cmp )
423 size_t nHash = hash_value( val );
424 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
425 dummy_node_type * pHead = get_bucket( nHash );
426 assert( pHead != nullptr );
428 return m_Stat.onFind( m_List.find_at( pHead, sv, cmp ));
// Internal get: returns an RCU-protected raw pointer to the item, or empty.
// NOTE(review): trailing "return p;" and the if/else/return lines of extract_/erase_
// below are elided in this listing; statistics calls mark the success/failure arms.
431 template <typename Q, typename Compare>
432 raw_ptr get_( Q const& val, Compare cmp )
434 size_t nHash = hash_value( val );
435 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
436 dummy_node_type * pHead = get_bucket( nHash );
437 assert( pHead != nullptr );
439 raw_ptr p = m_List.get_at( pHead, sv, cmp );
440 m_Stat.onFind( !!p );
// Internal extract: unlinks the matching item and returns it (nullptr when absent).
444 template <typename Q, typename Compare>
445 value_type * extract_( Q const& val, Compare cmp )
447 size_t nHash = hash_value( val );
448 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
449 dummy_node_type * pHead = get_bucket( nHash );
450 assert( pHead != nullptr );
452 value_type * pNode = m_List.extract_at( pHead, sv, cmp );
455 m_Stat.onExtractSuccess();
458 m_Stat.onExtractFailed();
// Internal extract with a Less predicate converted to a three-way comparator.
462 template <typename Q, typename Less>
463 value_type * extract_with_( Q const& val, Less pred )
466 return extract_( val, typename wrapped_ordered_list::template make_compare_from_less<Less>());
// Internal erase by key.
469 template <typename Q, typename Compare>
470 bool erase_( const Q& val, Compare cmp )
472 size_t nHash = hash_value( val );
473 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
474 dummy_node_type * pHead = get_bucket( nHash );
475 assert( pHead != nullptr );
477 if ( m_List.erase_at( pHead, sv, cmp ) ) {
479 m_Stat.onEraseSuccess();
482 m_Stat.onEraseFailed();
// Internal erase by key, applying functor f to the erased item.
486 template <typename Q, typename Compare, typename Func>
487 bool erase_( Q const& val, Compare cmp, Func f )
489 size_t nHash = hash_value( val );
490 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
491 dummy_node_type * pHead = get_bucket( nHash );
492 assert( pHead != nullptr );
494 if ( m_List.erase_at( pHead, sv, cmp, f )) {
496 m_Stat.onEraseSuccess();
499 m_Stat.onEraseFailed();
// Constructors (the constructor name lines are elided in this listing; what
// remains are the member-initializer lists). Both start with 2 buckets
// (log2 == 1) and a matching initial resize threshold.
506 /// Initialize split-ordered list of default capacity
508 The default capacity is defined in bucket table constructor.
509 See split_list::expandable_bucket_table, split_list::static_bucket_table
510 which selects by split_list::dynamic_bucket_table option.
513 : m_nBucketCountLog2(1)
514 , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
519 /// Initialize split-ordered list
521 size_t nItemCount ///< estimate average of item count
522 , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1.
524 : m_Buckets( nItemCount, nLoadFactor )
525 , m_nBucketCountLog2(1)
526 , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
534 The function inserts \p val in the set if it does not contain
535 an item with key equal to \p val.
537 The function makes RCU lock internally.
539 Returns \p true if \p val is placed into the set, \p false otherwise.
// Stamps the node with the split-ordered (bit-reversed) hash before linking.
// NOTE(review): the success branch's item-count increment and the return
// statements are elided in this listing.
541 bool insert( value_type& val )
543 size_t nHash = hash_value( val );
544 dummy_node_type * pHead = get_bucket( nHash );
545 assert( pHead != nullptr );
547 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
549 if ( m_List.insert_at( pHead, val )) {
551 m_Stat.onInsertSuccess();
554 m_Stat.onInsertFailed();
560 This function is intended for derived non-intrusive containers.
562 The function allows to split creating of new item into two part:
563 - create item with key only
564 - insert new item into the set
565 - if inserting is success, calls \p f functor to initialize value-field of \p val.
567 The functor signature is:
569 void func( value_type& val );
571 where \p val is the item inserted.
573 The function makes RCU lock internally.
575 @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
576 \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level
// Same as insert(val) but applies functor f to the item on successful insertion.
579 template <typename Func>
580 bool insert( value_type& val, Func f )
582 size_t nHash = hash_value( val );
583 dummy_node_type * pHead = get_bucket( nHash );
584 assert( pHead != nullptr );
586 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
588 if ( m_List.insert_at( pHead, val, f )) {
590 m_Stat.onInsertSuccess();
593 m_Stat.onInsertFailed();
599 The operation performs inserting or changing data with lock-free manner.
601 If the item \p val is not found in the set, then \p val is inserted
602 iff \p bAllowInsert is \p true.
603 Otherwise, the functor \p func is called with item found.
604 The functor signature is:
606 void func( bool bNew, value_type& item, value_type& val );
609 - \p bNew - \p true if the item has been inserted, \p false otherwise
610 - \p item - item of the set
611 - \p val - argument \p val passed into the \p update() function
612 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
613 refers to the same stuff.
615 The functor may change non-key fields of the \p item.
617 The function applies RCU lock internally.
619 Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
620 \p second is \p true if new item has been added or \p false if the item with \p key
621 already is in the list.
623 @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
624 \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level
// NOTE(review): the item-count increment on the "inserted" branch and the final
// "return bRet;" are elided in this listing.
627 template <typename Func>
628 std::pair<bool, bool> update( value_type& val, Func func, bool bAllowInsert = true )
630 size_t nHash = hash_value( val );
631 dummy_node_type * pHead = get_bucket( nHash );
632 assert( pHead != nullptr );
634 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
636 std::pair<bool, bool> bRet = m_List.update_at( pHead, val, func, bAllowInsert );
637 if ( bRet.first && bRet.second ) {
639 m_Stat.onUpdateNew();
642 m_Stat.onUpdateExist();
// Deprecated alias of update(val, func, true).
646 template <typename Func>
647 CDS_DEPRECATED("ensure() is deprecated, use update()")
648 std::pair<bool, bool> ensure( value_type& val, Func func )
650 return update( val, func, true );
654 /// Unlinks the item \p val from the set
656 The function searches the item \p val in the set and unlinks it from the set
657 if it is found and is equal to \p val.
659 Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
660 and deletes the item found. \p unlink finds an item by key and deletes it
661 only if \p val is an item of that set, i.e. the pointer to item found
662 is equal to <tt> &val </tt>.
664 RCU \p synchronize method can be called, therefore, RCU should not be locked.
666 The function returns \p true if success and \p false otherwise.
// Unlinks val only when the item found is exactly &val (pointer identity).
// NOTE(review): the item-count decrement and return statements on both branches
// are elided in this listing.
668 bool unlink( value_type& val )
670 size_t nHash = hash_value( val );
671 dummy_node_type * pHead = get_bucket( nHash );
672 assert( pHead != nullptr );
674 if ( m_List.unlink_at( pHead, val ) ) {
676 m_Stat.onEraseSuccess();
679 m_Stat.onEraseFailed();
683 /// Deletes the item from the set
684 /** \anchor cds_intrusive_SplitListSet_rcu_erase
685 The function searches an item with key equal to \p key in the set,
686 unlinks it from the set, and returns \p true.
687 If the item with key equal to \p key is not found the function return \p false.
689 Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
690 and deletes the item found. \p unlink finds an item by key and deletes it
691 only if \p key is an item of that set, i.e. the pointer to item found
692 is equal to <tt> &key </tt>.
694 RCU \p synchronize method can be called, therefore, RCU should not be locked.
696 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
698 template <typename Q>
699 bool erase( Q const& key )
701 return erase_( key, key_comparator() );
704 /// Deletes the item from the set using \p pred for searching
706 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase "erase(Q const&)"
707 but \p pred is used for key compare.
708 \p Less has the interface like \p std::less.
709 \p pred must imply the same element order as the comparator used for building the set.
711 template <typename Q, typename Less>
712 bool erase_with( Q const& key, Less pred )
715 return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
718 /// Deletes the item from the set
719 /** \anchor cds_intrusive_SplitListSet_rcu_erase_func
720 The function searches an item with key equal to \p key in the set,
721 call \p f functor with item found, unlinks it from the set, and returns \p true.
722 The \ref disposer specified by \p OrderedList class template parameter is called
723 by garbage collector \p GC asynchronously.
725 The \p Func interface is
728 void operator()( value_type const& item );
732 If the item with key equal to \p key is not found the function return \p false.
734 RCU \p synchronize method can be called, therefore, RCU should not be locked.
736 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
738 template <typename Q, typename Func>
739 bool erase( Q const& key, Func f )
741 return erase_( key, key_comparator(), f );
744 /// Deletes the item from the set using \p pred for searching
746 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase_func "erase(Q const&, Func)"
747 but \p pred is used for key compare.
748 \p Less has the interface like \p std::less.
749 \p pred must imply the same element order as the comparator used for building the set.
751 template <typename Q, typename Less, typename Func>
752 bool erase_with( Q const& key, Less pred, Func f )
755 return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
758 /// Extracts an item from the set
759 /** \anchor cds_intrusive_SplitListSet_rcu_extract
760 The function searches an item with key equal to \p key in the set,
761 unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
762 If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr.
764 Depends on \p bucket_type you should or should not lock RCU before calling of this function:
765 - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked
766 - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked
767 See ordered list implementation for details.
770 typedef cds::urcu::gc< general_buffered<> > rcu;
771 typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list;
772 typedef cds::intrusive::SplitListSet< rcu, rcu_michael_list, foo_traits > rcu_splitlist_set;
774 rcu_splitlist_set theSet;
777 rcu_splitlist_set::exempt_ptr p;
779 // For MichaelList we should not lock RCU
781 // Now, you can apply extract function
782 // Note that you must not delete the item found inside the RCU lock
783 p = theList.extract( 10 );
785 // do something with p
789 // We may safely release p here
790 // release() passes the pointer to RCU reclamation cycle:
791 // it invokes RCU retire_ptr function with the disposer you provided for rcu_michael_list.
795 template <typename Q>
796 exempt_ptr extract( Q const& key )
798 return exempt_ptr(extract_( key, key_comparator() ));
801 /// Extracts an item from the set using \p pred for searching
803 The function is an analog of \p extract(Q const&) but \p pred is used for key compare.
804 \p Less functor has the interface like \p std::less.
805 \p pred must imply the same element order as the comparator used for building the set.
807 template <typename Q, typename Less>
808 exempt_ptr extract_with( Q const& key, Less pred )
810 return exempt_ptr( extract_with_( key, pred ));
813 /// Finds the key \p key
814 /** \anchor cds_intrusive_SplitListSet_rcu_find_func
815 The function searches the item with key equal to \p key and calls the functor \p f for item found.
816 The interface of \p Func functor is:
819 void operator()( value_type& item, Q& key );
822 where \p item is the item found, \p key is the <tt>find</tt> function argument.
824 The functor can change non-key fields of \p item. Note that the functor is only guarantee
825 that \p item cannot be disposed during functor is executing.
826 The functor does not serialize simultaneous access to the set \p item. If such access is
827 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
829 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
830 can modify both arguments.
832 Note the hash functor specified for class \p Traits template parameter
833 should accept a parameter of type \p Q that can be not the same as \p value_type.
835 The function applies RCU lock internally.
837 The function returns \p true if \p key is found, \p false otherwise.
839 template <typename Q, typename Func>
840 bool find( Q& key, Func f )
842 return find_( key, key_comparator(), f );
845 template <typename Q, typename Func>
846 bool find( Q const& key, Func f )
848 return find_( key, key_comparator(), f );
852 /// Finds the key \p key with \p pred predicate for comparing
854 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_func "find(Q&, Func)"
855 but \p pred is used for key compare.
856 \p Less has the interface like \p std::less.
857 \p pred must imply the same element order as the comparator used for building the set.
859 template <typename Q, typename Less, typename Func>
860 bool find_with( Q& key, Less pred, Func f )
863 return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
866 template <typename Q, typename Less, typename Func>
867 bool find_with( Q const& key, Less pred, Func f )
870 return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
874 /// Checks whether the set contains \p key
876 The function searches the item with key equal to \p key
877 and returns \p true if it is found, and \p false otherwise.
879 Note the hash functor specified for class \p Traits template parameter
880 should accept a parameter of type \p Q that can be not the same as \p value_type.
881 Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing.
883 template <typename Q>
884 bool contains( Q const& key )
886 return find_value( key, key_comparator() );
889 template <typename Q>
890 CDS_DEPRECATED("deprecated, use contains()")
891 bool find( Q const& key )
893 return contains( key );
897 /// Checks whether the set contains \p key using \p pred predicate for searching
899 The function is an analog of <tt>contains( key )</tt> but \p pred is used for key comparing.
900 \p Less functor has the interface like \p std::less.
901 \p Less must imply the same element order as the comparator used for building the list.
903 template <typename Q, typename Less>
904 bool contains( Q const& key, Less pred )
907 return find_value( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
910 template <typename Q, typename Less>
911 CDS_DEPRECATED("deprecated, use contains()")
912 bool find_with( Q const& key, Less pred )
914 return contains( key, pred );
918 /// Finds the key \p key and return the item found
919 /** \anchor cds_intrusive_SplitListSet_rcu_get
920 The function searches the item with key equal to \p key and returns the pointer to item found.
921 If \p key is not found it returns \p nullptr.
923 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
925 RCU should be locked before call of this function.
926 Returned item is valid only while RCU is locked:
928 typedef cds::intrusive::SplitListSet< your_template_parameters > set_class;
931 typename set_class::raw_ptr rp;
934 hash_set::rcu_lock lock;
936 rp = theSet.get( 5 );
941 // Unlock RCU by rcu_lock destructor
942 // rp can be retired by disposer at any time after RCU has been unlocked
946 template <typename Q>
947 raw_ptr get( Q const& key )
949 return get_( key, key_comparator() );
952 /// Finds the key \p key and return the item found
954 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_get "get(Q const&)"
955 but \p pred is used for comparing the keys.
957 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
959 \p pred must imply the same element order as the comparator used for building the set.
961 template <typename Q, typename Less>
962 raw_ptr get_with( Q const& key, Less pred )
965 return get_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>());
// NOTE(review): the signatures of size()/empty() and the bodies of empty(),
// clear()'s loop and statistics() are elided in this listing.
969 /// Returns item count in the set
972 return m_ItemCounter;
975 /// Checks if the set is empty
977 Emptiness is checked by item counting: if item count is zero then the set is empty.
978 Thus, the correct item counting feature is an important part of split-list set implementation.
// clear() iterates and unlinks items one by one; not atomic w.r.t. concurrent updates.
985 /// Clears the set (not atomic)
988 iterator it = begin();
989 while ( it != end() ) {
997 /// Returns internal statistics
998 stat const& statistics() const
// Iterator over the split-list: thin wrapper around the shared split_list
// iterator, which skips dummy (bucket head) nodes of the underlying list.
// NOTE(review): the class-name line and constructor braces are elided in this listing.
1005 template <bool IsConst>
1007 :public split_list::details::iterator_type<node_traits, ordered_list, IsConst>
1009 typedef split_list::details::iterator_type<node_traits, ordered_list, IsConst> iterator_base_class;
1010 typedef typename iterator_base_class::list_iterator list_iterator;
// Default constructor: singular iterator.
1013 : iterator_base_class()
// Copy constructor.
1016 iterator_type( iterator_type const& src )
1017 : iterator_base_class( src )
// Constructs from an underlying-list range [itCur, itEnd).
1020 // This ctor should be protected...
1021 iterator_type( list_iterator itCur, list_iterator itEnd )
1022 : iterator_base_class( itCur, itEnd )
// NOTE(review): the signature lines of begin()/end() and the bodies of the
// const begin()/end() overloads are elided in this listing; cbegin()/cend()
// show the canonical construction from the underlying list's iterators.
1028 ///@name Forward iterators (thread-safe under RCU lock)
1030 /// Forward iterator
1032 The forward iterator for a split-list has some features:
1033 - it has no post-increment operator
1034 - it depends on iterator of underlying \p OrderedList
1036 You may safely use iterators in multi-threaded environment only under RCU lock.
1037 Otherwise, a crash is possible if another thread deletes the element the iterator points to.
1039 typedef iterator_type<false> iterator;
1041 /// Const forward iterator
1043 For iterator's features and requirements see \ref iterator
1045 typedef iterator_type<true> const_iterator;
1047 /// Returns a forward iterator addressing the first element in a split-list
1049 For empty list \code begin() == end() \endcode
1053 return iterator( m_List.begin(), m_List.end() );
1056 /// Returns an iterator that addresses the location succeeding the last element in a split-list
1058 Do not use the value returned by <tt>end</tt> function to access any item.
1060 The returned value can be used only to control reaching the end of the split-list.
1061 For empty list \code begin() == end() \endcode
1065 return iterator( m_List.end(), m_List.end() );
1068 /// Returns a forward const iterator addressing the first element in a split-list
1069 const_iterator begin() const
1073 /// Returns a forward const iterator addressing the first element in a split-list
1074 const_iterator cbegin() const
1076 return const_iterator( m_List.cbegin(), m_List.cend() );
1079 /// Returns an const iterator that addresses the location succeeding the last element in a split-list
1080 const_iterator end() const
1084 /// Returns an const iterator that addresses the location succeeding the last element in a split-list
1085 const_iterator cend() const
1087 return const_iterator( m_List.cend(), m_List.cend() );
1092 }} // namespace cds::intrusive
1094 #endif // #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H