/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
31 #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
32 #define CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
36 #include <cds/intrusive/details/split_list_base.h>
37 #include <cds/details/binary_functor_wrapper.h>
38 #include <cds/details/type_padding.h>
40 namespace cds { namespace intrusive {
42 /// Split-ordered list RCU specialization
43 /** @ingroup cds_intrusive_map
44 \anchor cds_intrusive_SplitListSet_rcu
46 Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see
47 - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables"
48 - [2008] Nir Shavit "The Art of Multiprocessor Programming"
50 The split-ordered list is a lock-free implementation of an extensible unbounded hash table. It uses original
51 recursive split-ordering algorithm discovered by Ori Shalev and Nir Shavit that allows to split buckets
52 without moving an item on resizing, see \ref cds_SplitList_algo_desc "short algo description".
56 Template parameters are:
57 - \p RCU - one of \ref cds_urcu_gc "RCU type"
58 - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList, LazyList.
59 The intrusive ordered list implementation specifies the type \p T stored in the hash-set,
60 the comparing functor for the type \p T and other features specific for the ordered list.
61 - \p Traits - set traits, default isd \p split_list::traits.
62 Instead of defining \p Traits struct you may use option-based syntax with \p split_list::make_traits metafunction.
64 @note About required features of hash functor see \ref cds_SplitList_hash_functor "SplitList general description".
67 Before including <tt><cds/intrusive/split_list_rcu.h></tt> you should include appropriate RCU header file,
68 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
69 For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" and
70 MichaelList-based split-list you should include:
72 #include <cds/urcu/general_buffered.h>
73 #include <cds/intrusive/michael_list_rcu.h>
74 #include <cds/intrusive/split_list_rcu.h>
76 // Declare Michael's list for type Foo and default traits:
77 typedef cds::intrusive::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, Foo > rcu_michael_list;
79 // Declare split-list based on rcu_michael_list
80 typedef cds::intrusive::SplitListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, rcu_michael_list > rcu_split_list;
87 # ifdef CDS_DOXYGEN_INVOKED
88 class Traits = split_list::traits
93 class SplitListSet< cds::urcu::gc< RCU >, OrderedList, Traits >
96 typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector
97 typedef Traits traits; ///< Traits template parameters
99 /// Hash functor for \ref value_type and all its derivatives that you use
100 typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
104 typedef split_list::details::rebind_list_traits<OrderedList, traits> wrapped_ordered_list;
108 # ifdef CDS_DOXYGEN_INVOKED
109 typedef OrderedList ordered_list; ///< type of ordered list used as base for split-list
111 typedef typename wrapped_ordered_list::result ordered_list;
113 typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list
114 typedef typename ordered_list::key_comparator key_comparator; ///< key compare functor
115 typedef typename ordered_list::disposer disposer; ///< Node disposer functor
116 typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock
117 typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node
118 typedef typename ordered_list::raw_ptr raw_ptr; ///< pointer to the node for \p get() function
119 /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that
120 static CDS_CONSTEXPR const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal;
122 typedef typename traits::item_counter item_counter; ///< Item counter type
123 typedef typename traits::back_off back_off; ///< back-off strategy for spinning
124 typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
125 typedef typename traits::stat stat; ///< Internal statistics
127 // GC and OrderedList::gc must be the same
128 static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
130 // atomicity::empty_item_counter is not allowed as a item counter
131 static_assert( !std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
132 "cds::atomicity::empty_item_counter is not allowed as a item counter");
135 typedef typename ordered_list::node_type list_node_type; ///< Node type as declared in ordered list
136 typedef split_list::node<list_node_type> node_type; ///< split-list node type
137 typedef node_type aux_node_type; ///< dummy node type
139 /// Split-list node traits
141 This traits is intended for converting between underlying ordered list node type \ref list_node_type
142 and split-list node type \ref node_type
144 typedef split_list::node_traits<typename ordered_list::node_traits> node_traits;
147 /// Bucket table implementation
148 typedef typename split_list::details::bucket_table_selector<
149 traits::dynamic_bucket_table
152 , opt::allocator< typename traits::allocator >
153 , opt::memory_model< memory_model >
154 >::type bucket_table;
160 /// Ordered list wrapper to access protected members of OrderedList
161 class ordered_list_wrapper: public ordered_list
163 typedef ordered_list base_class;
164 typedef typename base_class::auxiliary_head bucket_head_type;
167 bool insert_at( aux_node_type * pHead, value_type& val )
169 assert( pHead != nullptr );
170 bucket_head_type h(pHead);
171 return base_class::insert_at( h, val );
174 template <typename Func>
175 bool insert_at( aux_node_type * pHead, value_type& val, Func f )
177 assert( pHead != nullptr );
178 bucket_head_type h(pHead);
179 return base_class::insert_at( h, val, f );
182 template <typename Func>
183 std::pair<bool, bool> update_at( aux_node_type * pHead, value_type& val, Func func, bool bAllowInsert )
185 assert( pHead != nullptr );
186 bucket_head_type h(pHead);
187 return base_class::update_at( h, val, func, bAllowInsert );
190 bool unlink_at( aux_node_type * pHead, value_type& val )
192 assert( pHead != nullptr );
193 bucket_head_type h(pHead);
194 return base_class::unlink_at( h, val );
197 template <typename Q, typename Compare, typename Func>
198 bool erase_at( aux_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp, Func f )
200 assert( pHead != nullptr );
201 bucket_head_type h(pHead);
202 return base_class::erase_at( h, val, cmp, f );
205 template <typename Q, typename Compare>
206 bool erase_at( aux_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
208 assert( pHead != nullptr );
209 bucket_head_type h(pHead);
210 return base_class::erase_at( h, val, cmp );
213 template <typename Q, typename Compare>
214 value_type * extract_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
216 assert( pHead != nullptr );
217 bucket_head_type h(pHead);
218 return base_class::extract_at( h, val, cmp );
221 template <typename Q, typename Compare, typename Func>
222 bool find_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f )
224 assert( pHead != nullptr );
225 bucket_head_type h(pHead);
226 return base_class::find_at( h, val, cmp, f );
229 template <typename Q, typename Compare>
230 bool find_at( aux_node_type * pHead, split_list::details::search_value_type<Q> const & val, Compare cmp )
232 assert( pHead != nullptr );
233 bucket_head_type h(pHead);
234 return base_class::find_at( h, val, cmp );
237 template <typename Q, typename Compare>
238 raw_ptr get_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
240 assert( pHead != nullptr );
241 bucket_head_type h(pHead);
242 return base_class::get_at( h, val, cmp );
245 bool insert_aux_node( aux_node_type * pNode )
247 return base_class::insert_aux_node( pNode );
249 bool insert_aux_node( aux_node_type * pHead, aux_node_type * pNode )
251 bucket_head_type h(pHead);
252 return base_class::insert_aux_node( h, pNode );
256 template <typename Less>
257 struct less_wrapper: public cds::opt::details::make_comparator_from_less<Less>
259 typedef cds::opt::details::make_comparator_from_less<Less> base_wrapper;
261 template <typename Q1, typename Q2>
262 int operator()( split_list::details::search_value_type<Q1> const& v1, Q2 const& v2 ) const
264 return base_wrapper::operator()( v1.val, v2 );
267 template <typename Q1, typename Q2>
268 int operator()( Q1 const& v1, split_list::details::search_value_type<Q2> const& v2 ) const
270 return base_wrapper::operator()( v1, v2.val );
276 /// Initialize split-ordered list of default capacity
278 The default capacity is defined in bucket table constructor.
279 See split_list::expandable_bucket_table, split_list::static_ducket_table
280 which selects by split_list::dynamic_bucket_table option.
283 : m_nBucketCountLog2(1)
284 , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
289 /// Initialize split-ordered list
291 size_t nItemCount ///< estimate average of item count
292 , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1.
294 : m_Buckets( nItemCount, nLoadFactor )
295 , m_nBucketCountLog2(1)
296 , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
304 The function inserts \p val in the set if it does not contain
305 an item with key equal to \p val.
307 The function makes RCU lock internally.
309 Returns \p true if \p val is placed into the set, \p false otherwise.
311 bool insert( value_type& val )
313 size_t nHash = hash_value( val );
314 aux_node_type * pHead = get_bucket( nHash );
315 assert( pHead != nullptr );
317 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
319 if ( m_List.insert_at( pHead, val )) {
321 m_Stat.onInsertSuccess();
324 m_Stat.onInsertFailed();
330 This function is intended for derived non-intrusive containers.
332 The function allows to split creating of new item into two part:
333 - create item with key only
334 - insert new item into the set
335 - if inserting is success, calls \p f functor to initialize value-field of \p val.
337 The functor signature is:
339 void func( value_type& val );
341 where \p val is the item inserted.
343 The function makes RCU lock internally.
345 @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
346 \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level
349 template <typename Func>
350 bool insert( value_type& val, Func f )
352 size_t nHash = hash_value( val );
353 aux_node_type * pHead = get_bucket( nHash );
354 assert( pHead != nullptr );
356 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
358 if ( m_List.insert_at( pHead, val, f )) {
360 m_Stat.onInsertSuccess();
363 m_Stat.onInsertFailed();
369 The operation performs inserting or changing data with lock-free manner.
371 If the item \p val is not found in the set, then \p val is inserted
372 iff \p bAllowInsert is \p true.
373 Otherwise, the functor \p func is called with item found.
374 The functor signature is:
376 void func( bool bNew, value_type& item, value_type& val );
379 - \p bNew - \p true if the item has been inserted, \p false otherwise
380 - \p item - item of the set
381 - \p val - argument \p val passed into the \p update() function
382 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
383 refers to the same stuff.
385 The functor may change non-key fields of the \p item.
387 The function applies RCU lock internally.
389 Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
390 \p second is \p true if new item has been added or \p false if the item with \p key
391 already is in the list.
393 @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
394 \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level
397 template <typename Func>
398 std::pair<bool, bool> update( value_type& val, Func func, bool bAllowInsert = true )
400 size_t nHash = hash_value( val );
401 aux_node_type * pHead = get_bucket( nHash );
402 assert( pHead != nullptr );
404 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
406 std::pair<bool, bool> bRet = m_List.update_at( pHead, val, func, bAllowInsert );
407 if ( bRet.first && bRet.second ) {
409 m_Stat.onUpdateNew();
412 m_Stat.onUpdateExist();
416 template <typename Func>
417 CDS_DEPRECATED("ensure() is deprecated, use update()")
418 std::pair<bool, bool> ensure( value_type& val, Func func )
420 return update( val, func, true );
424 /// Unlinks the item \p val from the set
426 The function searches the item \p val in the set and unlinks it from the set
427 if it is found and is equal to \p val.
429 Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
430 and deletes the item found. \p unlink finds an item by key and deletes it
431 only if \p val is an item of that set, i.e. the pointer to item found
432 is equal to <tt> &val </tt>.
434 RCU \p synchronize method can be called, therefore, RCU should not be locked.
436 The function returns \p true if success and \p false otherwise.
438 bool unlink( value_type& val )
440 size_t nHash = hash_value( val );
441 aux_node_type * pHead = get_bucket( nHash );
442 assert( pHead != nullptr );
444 if ( m_List.unlink_at( pHead, val ) ) {
446 m_Stat.onEraseSuccess();
449 m_Stat.onEraseFailed();
453 /// Deletes the item from the set
454 /** \anchor cds_intrusive_SplitListSet_rcu_erase
455 The function searches an item with key equal to \p key in the set,
456 unlinks it from the set, and returns \p true.
457 If the item with key equal to \p key is not found the function return \p false.
459 Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
460 and deletes the item found. \p unlink finds an item by key and deletes it
461 only if \p key is an item of that set, i.e. the pointer to item found
462 is equal to <tt> &key </tt>.
464 RCU \p synchronize method can be called, therefore, RCU should not be locked.
466 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
468 template <typename Q>
469 bool erase( Q const& key )
471 return erase_( key, key_comparator() );
474 /// Deletes the item from the set using \p pred for searching
476 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase "erase(Q const&)"
477 but \p cmp is used for key compare.
478 \p Less has the interface like \p std::less.
479 \p pred must imply the same element order as the comparator used for building the set.
481 template <typename Q, typename Less>
482 bool erase_with( Q const& key, Less pred )
485 return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
488 /// Deletes the item from the set
489 /** \anchor cds_intrusive_SplitListSet_rcu_erase_func
490 The function searches an item with key equal to \p key in the set,
491 call \p f functor with item found, unlinks it from the set, and returns \p true.
492 The \ref disposer specified by \p OrderedList class template parameter is called
493 by garbage collector \p GC asynchronously.
495 The \p Func interface is
498 void operator()( value_type const& item );
502 If the item with key equal to \p key is not found the function return \p false.
504 RCU \p synchronize method can be called, therefore, RCU should not be locked.
506 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
508 template <typename Q, typename Func>
509 bool erase( Q const& key, Func f )
511 return erase_( key, key_comparator(), f );
514 /// Deletes the item from the set using \p pred for searching
516 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase_func "erase(Q const&, Func)"
517 but \p cmp is used for key compare.
518 \p Less has the interface like \p std::less.
519 \p pred must imply the same element order as the comparator used for building the set.
521 template <typename Q, typename Less, typename Func>
522 bool erase_with( Q const& key, Less pred, Func f )
525 return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
528 /// Extracts an item from the set
529 /** \anchor cds_intrusive_SplitListSet_rcu_extract
530 The function searches an item with key equal to \p key in the set,
531 unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
532 If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr.
534 Depends on \p bucket_type you should or should not lock RCU before calling of this function:
535 - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked
536 - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked
537 See ordered list implementation for details.
540 typedef cds::urcu::gc< general_buffered<> > rcu;
541 typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list;
542 typedef cds::intrusive::SplitListSet< rcu, rcu_michael_list, foo_traits > rcu_splitlist_set;
544 rcu_splitlist_set theSet;
547 rcu_splitlist_set::exempt_ptr p;
549 // For MichaelList we should not lock RCU
551 // Now, you can apply extract function
552 // Note that you must not delete the item found inside the RCU lock
553 p = theList.extract( 10 );
555 // do something with p
559 // We may safely release p here
560 // release() passes the pointer to RCU reclamation cycle:
561 // it invokes RCU retire_ptr function with the disposer you provided for rcu_michael_list.
565 template <typename Q>
566 exempt_ptr extract( Q const& key )
568 return exempt_ptr(extract_( key, key_comparator() ));
571 /// Extracts an item from the set using \p pred for searching
573 The function is an analog of \p extract(Q const&) but \p pred is used for key compare.
574 \p Less functor has the interface like \p std::less.
575 \p pred must imply the same element order as the comparator used for building the set.
577 template <typename Q, typename Less>
578 exempt_ptr extract_with( Q const& key, Less pred )
580 return exempt_ptr( extract_with_( key, pred ));
583 /// Finds the key \p key
584 /** \anchor cds_intrusive_SplitListSet_rcu_find_func
585 The function searches the item with key equal to \p key and calls the functor \p f for item found.
586 The interface of \p Func functor is:
589 void operator()( value_type& item, Q& key );
592 where \p item is the item found, \p key is the <tt>find</tt> function argument.
594 The functor can change non-key fields of \p item. Note that the functor is only guarantee
595 that \p item cannot be disposed during functor is executing.
596 The functor does not serialize simultaneous access to the set \p item. If such access is
597 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
599 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
600 can modify both arguments.
602 Note the hash functor specified for class \p Traits template parameter
603 should accept a parameter of type \p Q that can be not the same as \p value_type.
605 The function applies RCU lock internally.
607 The function returns \p true if \p key is found, \p false otherwise.
609 template <typename Q, typename Func>
610 bool find( Q& key, Func f )
612 return find_( key, key_comparator(), f );
615 template <typename Q, typename Func>
616 bool find( Q const& key, Func f )
618 return find_( key, key_comparator(), f );
622 /// Finds the key \p key with \p pred predicate for comparing
624 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_func "find(Q&, Func)"
625 but \p cmp is used for key compare.
626 \p Less has the interface like \p std::less.
627 \p cmp must imply the same element order as the comparator used for building the set.
629 template <typename Q, typename Less, typename Func>
630 bool find_with( Q& key, Less pred, Func f )
633 return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
636 template <typename Q, typename Less, typename Func>
637 bool find_with( Q const& key, Less pred, Func f )
640 return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
644 /// Checks whether the set contains \p key
646 The function searches the item with key equal to \p key
647 and returns \p true if it is found, and \p false otherwise.
649 Note the hash functor specified for class \p Traits template parameter
650 should accept a parameter of type \p Q that can be not the same as \p value_type.
651 Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing.
653 template <typename Q>
654 bool contains( Q const& key )
656 return find_value( key, key_comparator() );
659 template <typename Q>
660 CDS_DEPRECATED("deprecated, use contains()")
661 bool find( Q const& key )
663 return contains( key );
667 /// Checks whether the set contains \p key using \p pred predicate for searching
669 The function is an analog of <tt>contains( key )</tt> but \p pred is used for key comparing.
670 \p Less functor has the interface like \p std::less.
671 \p Less must imply the same element order as the comparator used for building the list.
673 template <typename Q, typename Less>
674 bool contains( Q const& key, Less pred )
677 return find_value( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
680 template <typename Q, typename Less>
681 CDS_DEPRECATED("deprecated, use contains()")
682 bool find_with( Q const& key, Less pred )
684 return contains( key, pred );
688 /// Finds the key \p key and return the item found
689 /** \anchor cds_intrusive_SplitListSet_rcu_get
690 The function searches the item with key equal to \p key and returns the pointer to item found.
691 If \p key is not found it returns \p nullptr.
693 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
695 RCU should be locked before call of this function.
696 Returned item is valid only while RCU is locked:
698 typedef cds::intrusive::SplitListSet< your_template_parameters > set_class;
701 typename set_class::raw_ptr rp;
704 hash_set::rcu_lock lock;
706 rp = theSet.get( 5 );
711 // Unlock RCU by rcu_lock destructor
712 // rp can be retired by disposer at any time after RCU has been unlocked
716 template <typename Q>
717 raw_ptr get( Q const& key )
719 return get_( key, key_comparator() );
722 /// Finds the key \p key and return the item found
724 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_get "get(Q const&)"
725 but \p pred is used for comparing the keys.
727 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
729 \p pred must imply the same element order as the comparator used for building the set.
731 template <typename Q, typename Less>
732 raw_ptr get_with( Q const& key, Less pred )
735 return get_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>());
739 /// Returns item count in the set
742 return m_ItemCounter;
745 /// Checks if the set is empty
747 Emptiness is checked by item counting: if item count is zero then the set is empty.
748 Thus, the correct item counting feature is an important part of split-list set implementation.
755 /// Clears the set (not atomic)
758 iterator it = begin();
759 while ( it != end() ) {
767 /// Returns internal statistics
768 stat const& statistics() const
775 template <bool IsConst>
777 :public split_list::details::iterator_type<node_traits, ordered_list, IsConst>
779 typedef split_list::details::iterator_type<node_traits, ordered_list, IsConst> iterator_base_class;
780 typedef typename iterator_base_class::list_iterator list_iterator;
783 : iterator_base_class()
786 iterator_type( iterator_type const& src )
787 : iterator_base_class( src )
790 // This ctor should be protected...
791 iterator_type( list_iterator itCur, list_iterator itEnd )
792 : iterator_base_class( itCur, itEnd )
798 ///@name Forward iterators (thread-safe under RCU lock)
802 The forward iterator for a split-list has some features:
803 - it has no post-increment operator
804 - it depends on iterator of underlying \p OrderedList
806 You may safely use iterators in multi-threaded environment only under RCU lock.
807 Otherwise, a crash is possible if another thread deletes the element the iterator points to.
809 typedef iterator_type<false> iterator;
811 /// Const forward iterator
813 For iterator's features and requirements see \ref iterator
815 typedef iterator_type<true> const_iterator;
817 /// Returns a forward iterator addressing the first element in a split-list
819 For empty list \code begin() == end() \endcode
823 return iterator( m_List.begin(), m_List.end() );
826 /// Returns an iterator that addresses the location succeeding the last element in a split-list
828 Do not use the value returned by <tt>end</tt> function to access any item.
830 The returned value can be used only to control reaching the end of the split-list.
831 For empty list \code begin() == end() \endcode
835 return iterator( m_List.end(), m_List.end() );
838 /// Returns a forward const iterator addressing the first element in a split-list
839 const_iterator begin() const
843 /// Returns a forward const iterator addressing the first element in a split-list
844 const_iterator cbegin() const
846 return const_iterator( m_List.cbegin(), m_List.cend() );
849 /// Returns an const iterator that addresses the location succeeding the last element in a split-list
850 const_iterator end() const
854 /// Returns an const iterator that addresses the location succeeding the last element in a split-list
855 const_iterator cend() const
857 return const_iterator( m_List.cend(), m_List.cend() );
863 aux_node_type * alloc_aux_node( size_t nHash )
865 m_Stat.onHeadNodeAllocated();
866 aux_node_type* p = m_Buckets.alloc_aux_node();
872 void free_aux_node( aux_node_type * p )
874 m_Buckets.free_aux_node( p );
875 m_Stat.onHeadNodeFreed();
878 /// Calculates hash value of \p key
879 template <typename Q>
880 size_t hash_value( Q const& key ) const
882 return m_HashFunctor( key );
885 size_t bucket_no( size_t nHash ) const
887 return nHash & ( (1 << m_nBucketCountLog2.load(memory_model::memory_order_relaxed)) - 1 );
890 static size_t parent_bucket( size_t nBucket )
892 assert( nBucket > 0 );
893 return nBucket & ~( 1 << bitop::MSBnz( nBucket ) );
896 aux_node_type * init_bucket( size_t nBucket )
898 assert( nBucket > 0 );
899 size_t nParent = parent_bucket( nBucket );
901 aux_node_type * pParentBucket = m_Buckets.bucket( nParent );
902 if ( pParentBucket == nullptr ) {
903 pParentBucket = init_bucket( nParent );
904 m_Stat.onRecursiveInitBucket();
907 assert( pParentBucket != nullptr );
909 // Allocate a dummy node for new bucket
911 aux_node_type * pBucket = alloc_aux_node( split_list::dummy_hash( nBucket ) );
912 if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
913 m_Buckets.bucket( nBucket, pBucket );
914 m_Stat.onNewBucket();
917 free_aux_node( pBucket );
920 // Another thread set the bucket. Wait while it done
922 // In this point, we must wait while nBucket is empty.
923 // The compiler can decide that waiting loop can be "optimized" (stripped)
924 // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
926 m_Stat.onBucketInitContenton();
929 aux_node_type volatile * p = m_Buckets.bucket( nBucket );
931 return const_cast<aux_node_type *>( p );
933 m_Stat.onBusyWaitBucketInit();
937 aux_node_type * get_bucket( size_t nHash )
939 size_t nBucket = bucket_no( nHash );
941 aux_node_type * pHead = m_Buckets.bucket( nBucket );
942 if ( pHead == nullptr )
943 pHead = init_bucket( nBucket );
945 assert( pHead->is_dummy() );
952 // Initialize bucket 0
953 aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash(0)*/ );
955 // insert_aux_node cannot return false for empty list
956 CDS_VERIFY( m_List.insert_aux_node( pNode ));
958 m_Buckets.bucket( 0, pNode );
961 static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor )
963 return nBucketCount * nLoadFactor;
966 void inc_item_count()
968 size_t nMaxCount = m_nMaxItemCount.load(memory_model::memory_order_relaxed);
969 if ( ++m_ItemCounter <= nMaxCount )
972 size_t sz = m_nBucketCountLog2.load(memory_model::memory_order_relaxed);
973 const size_t nBucketCount = static_cast<size_t>(1) << sz;
974 if ( nBucketCount < m_Buckets.capacity() ) {
975 // we may grow the bucket table
976 const size_t nLoadFactor = m_Buckets.load_factor();
977 if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor ))
978 return; // someone already have updated m_nBucketCountLog2, so stop here
980 m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ),
981 memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
982 m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
985 m_nMaxItemCount.store( std::numeric_limits<size_t>::max(), memory_model::memory_order_relaxed );
988 template <typename Q, typename Compare, typename Func>
989 bool find_( Q& val, Compare cmp, Func f )
991 size_t nHash = hash_value( val );
992 split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
993 aux_node_type * pHead = get_bucket( nHash );
994 assert( pHead != nullptr );
996 return m_Stat.onFind( m_List.find_at( pHead, sv, cmp,
997 [&f](value_type& item, split_list::details::search_value_type<Q>& val){ f(item, val.val ); }));
1000 template <typename Q, typename Compare>
1001 bool find_value( Q const& val, Compare cmp )
1003 size_t nHash = hash_value( val );
1004 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
1005 aux_node_type * pHead = get_bucket( nHash );
1006 assert( pHead != nullptr );
1008 return m_Stat.onFind( m_List.find_at( pHead, sv, cmp ));
1011 template <typename Q, typename Compare>
1012 raw_ptr get_( Q const& val, Compare cmp )
1014 size_t nHash = hash_value( val );
1015 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
1016 aux_node_type * pHead = get_bucket( nHash );
1017 assert( pHead != nullptr );
1019 raw_ptr p = m_List.get_at( pHead, sv, cmp );
1020 m_Stat.onFind( !!p );
1024 template <typename Q, typename Compare>
1025 value_type * extract_( Q const& val, Compare cmp )
1027 size_t nHash = hash_value( val );
1028 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
1029 aux_node_type * pHead = get_bucket( nHash );
1030 assert( pHead != nullptr );
1032 value_type * pNode = m_List.extract_at( pHead, sv, cmp );
1035 m_Stat.onExtractSuccess();
1038 m_Stat.onExtractFailed();
1042 template <typename Q, typename Less>
1043 value_type * extract_with_( Q const& val, Less pred )
1046 return extract_( val, typename wrapped_ordered_list::template make_compare_from_less<Less>());
1049 template <typename Q, typename Compare>
1050 bool erase_( const Q& val, Compare cmp )
1052 size_t nHash = hash_value( val );
1053 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
1054 aux_node_type * pHead = get_bucket( nHash );
1055 assert( pHead != nullptr );
1057 if ( m_List.erase_at( pHead, sv, cmp ) ) {
1059 m_Stat.onEraseSuccess();
1062 m_Stat.onEraseFailed();
1066 template <typename Q, typename Compare, typename Func>
1067 bool erase_( Q const& val, Compare cmp, Func f )
1069 size_t nHash = hash_value( val );
1070 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
1071 aux_node_type * pHead = get_bucket( nHash );
1072 assert( pHead != nullptr );
1074 if ( m_List.erase_at( pHead, sv, cmp, f )) {
1076 m_Stat.onEraseSuccess();
1079 m_Stat.onEraseFailed();
1086 typedef typename cds::details::type_padding< bucket_table, traits::padding >::type padded_bucket_table;
1087 padded_bucket_table m_Buckets; ///< bucket table
1089 typedef typename cds::details::type_padding< ordered_list_wrapper, traits::padding>::type padded_ordered_list;
1090 padded_ordered_list m_List; ///< Ordered list containing split-list items
1092 atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
1093 atomics::atomic<size_t> m_nMaxItemCount; ///< number of items container can hold, before we have to resize
1094 item_counter m_ItemCounter; ///< Item counter
1095 hash m_HashFunctor; ///< Hash functor
1096 stat m_Stat; ///< Internal statistics accumulator
1100 }} // namespace cds::intrusive
1102 #endif // #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H