2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
32 #define CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
36 #include <cds/intrusive/details/split_list_base.h>
37 #include <cds/details/binary_functor_wrapper.h>
38 #include <cds/details/type_padding.h>
40 namespace cds { namespace intrusive {
42 /// Split-ordered list RCU specialization
43 /** @ingroup cds_intrusive_map
44 \anchor cds_intrusive_SplitListSet_rcu
46 Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see
47 - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables"
48 - [2008] Nir Shavit "The Art of Multiprocessor Programming"
50 The split-ordered list is a lock-free implementation of an extensible unbounded hash table. It uses original
51 recursive split-ordering algorithm discovered by Ori Shalev and Nir Shavit that allows to split buckets
52 without moving an item on resizing, see \ref cds_SplitList_algo_desc "short algo description".
56 Template parameters are:
57 - \p RCU - one of \ref cds_urcu_gc "RCU type"
58 - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList, LazyList.
59 The intrusive ordered list implementation specifies the type \p T stored in the hash-set,
60 the comparing functor for the type \p T and other features specific for the ordered list.
61 - \p Traits - set traits, default is \p split_list::traits.
62 Instead of defining \p Traits struct you may use option-based syntax with \p split_list::make_traits metafunction.
64 @note About required features of hash functor see \ref cds_SplitList_hash_functor "SplitList general description".
67 Before including <tt><cds/intrusive/split_list_rcu.h></tt> you should include appropriate RCU header file,
68 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
69 For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" and
70 MichaelList-based split-list you should include:
72 #include <cds/urcu/general_buffered.h>
73 #include <cds/intrusive/michael_list_rcu.h>
74 #include <cds/intrusive/split_list_rcu.h>
76 // Declare Michael's list for type Foo and default traits:
77 typedef cds::intrusive::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, Foo > rcu_michael_list;
79 // Declare split-list based on rcu_michael_list
80 typedef cds::intrusive::SplitListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, rcu_michael_list > rcu_split_list;
87 # ifdef CDS_DOXYGEN_INVOKED
88 class Traits = split_list::traits
93 class SplitListSet< cds::urcu::gc< RCU >, OrderedList, Traits >
96 typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector
97 typedef Traits traits; ///< Traits template parameters
99 /// Hash functor for \ref value_type and all its derivatives that you use
100 typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
104 typedef split_list::details::rebind_list_traits<OrderedList, traits> wrapped_ordered_list;
108 # ifdef CDS_DOXYGEN_INVOKED
109 typedef OrderedList ordered_list; ///< type of ordered list used as base for split-list
111 typedef typename wrapped_ordered_list::result ordered_list;
113 typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list
114 typedef typename ordered_list::key_comparator key_comparator; ///< key compare functor
115 typedef typename ordered_list::disposer disposer; ///< Node disposer functor
116 typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock
117 typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node
118 typedef typename ordered_list::raw_ptr raw_ptr; ///< pointer to the node for \p get() function
119 /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that
120 static CDS_CONSTEXPR const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal;
122 typedef typename traits::item_counter item_counter; ///< Item counter type
123 typedef typename traits::back_off back_off; ///< back-off strategy for spinning
124 typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
125 typedef typename traits::stat stat; ///< Internal statistics
127 // GC and OrderedList::gc must be the same
128 static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
130 // atomicity::empty_item_counter is not allowed as a item counter
131 static_assert( !std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
132 "cds::atomicity::empty_item_counter is not allowed as a item counter");
135 typedef typename ordered_list::node_type list_node_type; ///< Node type as declared in ordered list
136 typedef split_list::node<list_node_type> node_type; ///< split-list node type
137 typedef node_type aux_node_type; ///< dummy node type
139 /// Split-list node traits
141 This traits is intended for converting between underlying ordered list node type \ref list_node_type
142 and split-list node type \ref node_type
144 typedef split_list::node_traits<typename ordered_list::node_traits> node_traits;
147 /// Bucket table implementation
148 typedef typename split_list::details::bucket_table_selector<
149 traits::dynamic_bucket_table
152 , opt::allocator< typename traits::allocator >
153 , opt::memory_model< memory_model >
154 >::type bucket_table;
160 /// Ordered list wrapper to access protected members of OrderedList
161 class ordered_list_wrapper: public ordered_list
163 typedef ordered_list base_class;
164 typedef typename base_class::auxiliary_head bucket_head_type;
167 bool insert_at( aux_node_type * pHead, value_type& val )
169 assert( pHead != nullptr );
170 bucket_head_type h(pHead);
171 return base_class::insert_at( h, val );
174 template <typename Func>
175 bool insert_at( aux_node_type * pHead, value_type& val, Func f )
177 assert( pHead != nullptr );
178 bucket_head_type h(pHead);
179 return base_class::insert_at( h, val, f );
182 template <typename Func>
183 std::pair<bool, bool> update_at( aux_node_type * pHead, value_type& val, Func func, bool bAllowInsert )
185 assert( pHead != nullptr );
186 bucket_head_type h(pHead);
187 return base_class::update_at( h, val, func, bAllowInsert );
190 bool unlink_at( aux_node_type * pHead, value_type& val )
192 assert( pHead != nullptr );
193 bucket_head_type h(pHead);
194 return base_class::unlink_at( h, val );
197 template <typename Q, typename Compare, typename Func>
198 bool erase_at( aux_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp, Func f )
200 assert( pHead != nullptr );
201 bucket_head_type h(pHead);
202 return base_class::erase_at( h, val, cmp, f );
205 template <typename Q, typename Compare>
206 bool erase_at( aux_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
208 assert( pHead != nullptr );
209 bucket_head_type h(pHead);
210 return base_class::erase_at( h, val, cmp );
213 template <typename Q, typename Compare>
214 value_type * extract_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
216 assert( pHead != nullptr );
217 bucket_head_type h(pHead);
218 return base_class::extract_at( h, val, cmp );
221 template <typename Q, typename Compare, typename Func>
222 bool find_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f )
224 assert( pHead != nullptr );
225 bucket_head_type h(pHead);
226 return base_class::find_at( h, val, cmp, f );
229 template <typename Q, typename Compare>
230 bool find_at( aux_node_type * pHead, split_list::details::search_value_type<Q> const & val, Compare cmp )
232 assert( pHead != nullptr );
233 bucket_head_type h(pHead);
234 return base_class::find_at( h, val, cmp );
237 template <typename Q, typename Compare>
238 raw_ptr get_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
240 assert( pHead != nullptr );
241 bucket_head_type h(pHead);
242 return base_class::get_at( h, val, cmp );
245 bool insert_aux_node( aux_node_type * pNode )
247 return base_class::insert_aux_node( pNode );
249 bool insert_aux_node( aux_node_type * pHead, aux_node_type * pNode )
251 bucket_head_type h(pHead);
252 return base_class::insert_aux_node( h, pNode );
256 template <typename Less>
257 struct less_wrapper: public cds::opt::details::make_comparator_from_less<Less>
259 typedef cds::opt::details::make_comparator_from_less<Less> base_wrapper;
261 template <typename Q1, typename Q2>
262 int operator()( split_list::details::search_value_type<Q1> const& v1, Q2 const& v2 ) const
264 return base_wrapper::operator()( v1.val, v2 );
267 template <typename Q1, typename Q2>
268 int operator()( Q1 const& v1, split_list::details::search_value_type<Q2> const& v2 ) const
270 return base_wrapper::operator()( v1, v2.val );
276 /// Initialize split-ordered list of default capacity
278 The default capacity is defined in bucket table constructor.
279 See split_list::expandable_bucket_table, split_list::static_ducket_table
280 which selects by split_list::dynamic_bucket_table option.
283 : m_nBucketCountLog2(1)
284 , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
289 /// Initialize split-ordered list
291 size_t nItemCount ///< estimate average of item count
292 , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1.
294 : m_Buckets( nItemCount, nLoadFactor )
295 , m_nBucketCountLog2(1)
296 , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
301 /// Destroys split-list
311 The function inserts \p val in the set if it does not contain
312 an item with key equal to \p val.
314 The function makes RCU lock internally.
316 Returns \p true if \p val is placed into the set, \p false otherwise.
318 bool insert( value_type& val )
320 size_t nHash = hash_value( val );
321 aux_node_type * pHead = get_bucket( nHash );
322 assert( pHead != nullptr );
324 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
326 if ( m_List.insert_at( pHead, val )) {
328 m_Stat.onInsertSuccess();
331 m_Stat.onInsertFailed();
337 This function is intended for derived non-intrusive containers.
339 The function allows to split creating of new item into two part:
340 - create item with key only
341 - insert new item into the set
342 - if inserting is success, calls \p f functor to initialize value-field of \p val.
344 The functor signature is:
346 void func( value_type& val );
348 where \p val is the item inserted.
350 The function makes RCU lock internally.
352 @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
353 \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level
356 template <typename Func>
357 bool insert( value_type& val, Func f )
359 size_t nHash = hash_value( val );
360 aux_node_type * pHead = get_bucket( nHash );
361 assert( pHead != nullptr );
363 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
365 if ( m_List.insert_at( pHead, val, f )) {
367 m_Stat.onInsertSuccess();
370 m_Stat.onInsertFailed();
376 The operation performs inserting or changing data with lock-free manner.
378 If the item \p val is not found in the set, then \p val is inserted
379 iff \p bAllowInsert is \p true.
380 Otherwise, the functor \p func is called with item found.
381 The functor signature is:
383 void func( bool bNew, value_type& item, value_type& val );
386 - \p bNew - \p true if the item has been inserted, \p false otherwise
387 - \p item - item of the set
388 - \p val - argument \p val passed into the \p update() function
389 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
390 refers to the same stuff.
392 The functor may change non-key fields of the \p item.
394 The function applies RCU lock internally.
396 Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
397 \p second is \p true if new item has been added or \p false if the item with \p key
398 already is in the list.
400 @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
401 \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level
404 template <typename Func>
405 std::pair<bool, bool> update( value_type& val, Func func, bool bAllowInsert = true )
407 size_t nHash = hash_value( val );
408 aux_node_type * pHead = get_bucket( nHash );
409 assert( pHead != nullptr );
411 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
413 std::pair<bool, bool> bRet = m_List.update_at( pHead, val, func, bAllowInsert );
414 if ( bRet.first && bRet.second ) {
416 m_Stat.onUpdateNew();
419 m_Stat.onUpdateExist();
423 template <typename Func>
424 CDS_DEPRECATED("ensure() is deprecated, use update()")
425 std::pair<bool, bool> ensure( value_type& val, Func func )
427 return update( val, func, true );
431 /// Unlinks the item \p val from the set
433 The function searches the item \p val in the set and unlinks it from the set
434 if it is found and is equal to \p val.
436 Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
437 and deletes the item found. \p unlink finds an item by key and deletes it
438 only if \p val is an item of that set, i.e. the pointer to item found
439 is equal to <tt> &val </tt>.
441 RCU \p synchronize method can be called, therefore, RCU should not be locked.
443 The function returns \p true if success and \p false otherwise.
445 bool unlink( value_type& val )
447 size_t nHash = hash_value( val );
448 aux_node_type * pHead = get_bucket( nHash );
449 assert( pHead != nullptr );
451 if ( m_List.unlink_at( pHead, val ) ) {
453 m_Stat.onEraseSuccess();
456 m_Stat.onEraseFailed();
460 /// Deletes the item from the set
461 /** \anchor cds_intrusive_SplitListSet_rcu_erase
462 The function searches an item with key equal to \p key in the set,
463 unlinks it from the set, and returns \p true.
464 If the item with key equal to \p key is not found the function return \p false.
466 Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
467 and deletes the item found. \p unlink finds an item by key and deletes it
468 only if \p key is an item of that set, i.e. the pointer to item found
469 is equal to <tt> &key </tt>.
471 RCU \p synchronize method can be called, therefore, RCU should not be locked.
473 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
475 template <typename Q>
476 bool erase( Q const& key )
478 return erase_( key, key_comparator() );
481 /// Deletes the item from the set using \p pred for searching
483 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase "erase(Q const&)"
484 but \p cmp is used for key compare.
485 \p Less has the interface like \p std::less.
486 \p pred must imply the same element order as the comparator used for building the set.
488 template <typename Q, typename Less>
489 bool erase_with( Q const& key, Less pred )
492 return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
495 /// Deletes the item from the set
496 /** \anchor cds_intrusive_SplitListSet_rcu_erase_func
497 The function searches an item with key equal to \p key in the set,
498 call \p f functor with item found, unlinks it from the set, and returns \p true.
499 The \ref disposer specified by \p OrderedList class template parameter is called
500 by garbage collector \p GC asynchronously.
502 The \p Func interface is
505 void operator()( value_type const& item );
509 If the item with key equal to \p key is not found the function return \p false.
511 RCU \p synchronize method can be called, therefore, RCU should not be locked.
513 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
515 template <typename Q, typename Func>
516 bool erase( Q const& key, Func f )
518 return erase_( key, key_comparator(), f );
521 /// Deletes the item from the set using \p pred for searching
523 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase_func "erase(Q const&, Func)"
524 but \p cmp is used for key compare.
525 \p Less has the interface like \p std::less.
526 \p pred must imply the same element order as the comparator used for building the set.
528 template <typename Q, typename Less, typename Func>
529 bool erase_with( Q const& key, Less pred, Func f )
532 return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
535 /// Extracts an item from the set
536 /** \anchor cds_intrusive_SplitListSet_rcu_extract
537 The function searches an item with key equal to \p key in the set,
538 unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
539 If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr.
541 Depends on \p bucket_type you should or should not lock RCU before calling of this function:
542 - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked
543 - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked
544 See ordered list implementation for details.
547 typedef cds::urcu::gc< general_buffered<> > rcu;
548 typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list;
549 typedef cds::intrusive::SplitListSet< rcu, rcu_michael_list, foo_traits > rcu_splitlist_set;
551 rcu_splitlist_set theSet;
554 rcu_splitlist_set::exempt_ptr p;
556 // For MichaelList we should not lock RCU
558 // Now, you can apply extract function
559 // Note that you must not delete the item found inside the RCU lock
560 p = theList.extract( 10 );
562 // do something with p
566 // We may safely release p here
567 // release() passes the pointer to RCU reclamation cycle:
568 // it invokes RCU retire_ptr function with the disposer you provided for rcu_michael_list.
572 template <typename Q>
573 exempt_ptr extract( Q const& key )
575 return exempt_ptr(extract_( key, key_comparator() ));
578 /// Extracts an item from the set using \p pred for searching
580 The function is an analog of \p extract(Q const&) but \p pred is used for key compare.
581 \p Less functor has the interface like \p std::less.
582 \p pred must imply the same element order as the comparator used for building the set.
584 template <typename Q, typename Less>
585 exempt_ptr extract_with( Q const& key, Less pred )
587 return exempt_ptr( extract_with_( key, pred ));
590 /// Finds the key \p key
591 /** \anchor cds_intrusive_SplitListSet_rcu_find_func
592 The function searches the item with key equal to \p key and calls the functor \p f for item found.
593 The interface of \p Func functor is:
596 void operator()( value_type& item, Q& key );
599 where \p item is the item found, \p key is the <tt>find</tt> function argument.
601 The functor can change non-key fields of \p item. Note that the functor is only guarantee
602 that \p item cannot be disposed during functor is executing.
603 The functor does not serialize simultaneous access to the set \p item. If such access is
604 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
606 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
607 can modify both arguments.
609 Note the hash functor specified for class \p Traits template parameter
610 should accept a parameter of type \p Q that can be not the same as \p value_type.
612 The function applies RCU lock internally.
614 The function returns \p true if \p key is found, \p false otherwise.
616 template <typename Q, typename Func>
617 bool find( Q& key, Func f )
619 return find_( key, key_comparator(), f );
622 template <typename Q, typename Func>
623 bool find( Q const& key, Func f )
625 return find_( key, key_comparator(), f );
629 /// Finds the key \p key with \p pred predicate for comparing
631 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_func "find(Q&, Func)"
632 but \p cmp is used for key compare.
633 \p Less has the interface like \p std::less.
634 \p cmp must imply the same element order as the comparator used for building the set.
636 template <typename Q, typename Less, typename Func>
637 bool find_with( Q& key, Less pred, Func f )
640 return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
643 template <typename Q, typename Less, typename Func>
644 bool find_with( Q const& key, Less pred, Func f )
647 return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
651 /// Checks whether the set contains \p key
653 The function searches the item with key equal to \p key
654 and returns \p true if it is found, and \p false otherwise.
656 Note the hash functor specified for class \p Traits template parameter
657 should accept a parameter of type \p Q that can be not the same as \p value_type.
658 Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing.
660 template <typename Q>
661 bool contains( Q const& key )
663 return find_value( key, key_comparator() );
666 template <typename Q>
667 CDS_DEPRECATED("deprecated, use contains()")
668 bool find( Q const& key )
670 return contains( key );
674 /// Checks whether the set contains \p key using \p pred predicate for searching
676 The function is an analog of <tt>contains( key )</tt> but \p pred is used for key comparing.
677 \p Less functor has the interface like \p std::less.
678 \p Less must imply the same element order as the comparator used for building the list.
680 template <typename Q, typename Less>
681 bool contains( Q const& key, Less pred )
684 return find_value( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
687 template <typename Q, typename Less>
688 CDS_DEPRECATED("deprecated, use contains()")
689 bool find_with( Q const& key, Less pred )
691 return contains( key, pred );
695 /// Finds the key \p key and return the item found
696 /** \anchor cds_intrusive_SplitListSet_rcu_get
697 The function searches the item with key equal to \p key and returns the pointer to item found.
698 If \p key is not found it returns \p nullptr.
700 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
702 RCU should be locked before call of this function.
703 Returned item is valid only while RCU is locked:
705 typedef cds::intrusive::SplitListSet< your_template_parameters > set_class;
708 typename set_class::raw_ptr rp;
711 hash_set::rcu_lock lock;
713 rp = theSet.get( 5 );
718 // Unlock RCU by rcu_lock destructor
719 // rp can be retired by disposer at any time after RCU has been unlocked
723 template <typename Q>
724 raw_ptr get( Q const& key )
726 return get_( key, key_comparator() );
729 /// Finds the key \p key and return the item found
731 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_get "get(Q const&)"
732 but \p pred is used for comparing the keys.
734 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
736 \p pred must imply the same element order as the comparator used for building the set.
738 template <typename Q, typename Less>
739 raw_ptr get_with( Q const& key, Less pred )
742 return get_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>());
746 /// Returns item count in the set
749 return m_ItemCounter;
752 /// Checks if the set is empty
754 Emptiness is checked by item counting: if item count is zero then the set is empty.
755 Thus, the correct item counting feature is an important part of split-list set implementation.
762 /// Clears the set (not atomic)
765 iterator it = begin();
766 while ( it != end() ) {
774 /// Returns internal statistics
775 stat const& statistics() const
782 template <bool IsConst>
784 :public split_list::details::iterator_type<node_traits, ordered_list, IsConst>
786 typedef split_list::details::iterator_type<node_traits, ordered_list, IsConst> iterator_base_class;
787 typedef typename iterator_base_class::list_iterator list_iterator;
790 : iterator_base_class()
793 iterator_type( iterator_type const& src )
794 : iterator_base_class( src )
797 // This ctor should be protected...
798 iterator_type( list_iterator itCur, list_iterator itEnd )
799 : iterator_base_class( itCur, itEnd )
805 ///@name Forward iterators (thread-safe under RCU lock)
809 The forward iterator for a split-list has some features:
810 - it has no post-increment operator
811 - it depends on iterator of underlying \p OrderedList
813 You may safely use iterators in multi-threaded environment only under RCU lock.
814 Otherwise, a crash is possible if another thread deletes the element the iterator points to.
816 typedef iterator_type<false> iterator;
818 /// Const forward iterator
820 For iterator's features and requirements see \ref iterator
822 typedef iterator_type<true> const_iterator;
824 /// Returns a forward iterator addressing the first element in a split-list
826 For empty list \code begin() == end() \endcode
830 return iterator( m_List.begin(), m_List.end() );
833 /// Returns an iterator that addresses the location succeeding the last element in a split-list
835 Do not use the value returned by <tt>end</tt> function to access any item.
837 The returned value can be used only to control reaching the end of the split-list.
838 For empty list \code begin() == end() \endcode
842 return iterator( m_List.end(), m_List.end() );
845 /// Returns a forward const iterator addressing the first element in a split-list
846 const_iterator begin() const
850 /// Returns a forward const iterator addressing the first element in a split-list
851 const_iterator cbegin() const
853 return const_iterator( m_List.cbegin(), m_List.cend() );
856 /// Returns an const iterator that addresses the location succeeding the last element in a split-list
857 const_iterator end() const
861 /// Returns an const iterator that addresses the location succeeding the last element in a split-list
862 const_iterator cend() const
864 return const_iterator( m_List.cend(), m_List.cend() );
870 aux_node_type * alloc_aux_node( size_t nHash )
872 m_Stat.onHeadNodeAllocated();
873 aux_node_type* p = m_Buckets.alloc_aux_node();
879 void free_aux_node( aux_node_type * p )
881 m_Buckets.free_aux_node( p );
882 m_Stat.onHeadNodeFreed();
885 /// Calculates hash value of \p key
886 template <typename Q>
887 size_t hash_value( Q const& key ) const
889 return m_HashFunctor( key );
892 size_t bucket_no( size_t nHash ) const
894 return nHash & ( (1 << m_nBucketCountLog2.load(memory_model::memory_order_relaxed)) - 1 );
897 static size_t parent_bucket( size_t nBucket )
899 assert( nBucket > 0 );
900 return nBucket & ~( 1 << bitop::MSBnz( nBucket ) );
903 aux_node_type * init_bucket( size_t nBucket )
905 assert( nBucket > 0 );
906 size_t nParent = parent_bucket( nBucket );
908 aux_node_type * pParentBucket = m_Buckets.bucket( nParent );
909 if ( pParentBucket == nullptr ) {
910 pParentBucket = init_bucket( nParent );
911 m_Stat.onRecursiveInitBucket();
914 assert( pParentBucket != nullptr );
916 // Allocate a dummy node for new bucket
918 aux_node_type * pBucket = alloc_aux_node( split_list::dummy_hash( nBucket ) );
919 if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
920 m_Buckets.bucket( nBucket, pBucket );
921 m_Stat.onNewBucket();
924 free_aux_node( pBucket );
927 // Another thread set the bucket. Wait while it done
929 // In this point, we must wait while nBucket is empty.
930 // The compiler can decide that waiting loop can be "optimized" (stripped)
931 // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
933 m_Stat.onBucketInitContenton();
936 aux_node_type volatile * p = m_Buckets.bucket( nBucket );
938 return const_cast<aux_node_type *>( p );
940 m_Stat.onBusyWaitBucketInit();
944 aux_node_type * get_bucket( size_t nHash )
946 size_t nBucket = bucket_no( nHash );
948 aux_node_type * pHead = m_Buckets.bucket( nBucket );
949 if ( pHead == nullptr )
950 pHead = init_bucket( nBucket );
952 assert( pHead->is_dummy() );
959 // Initialize bucket 0
960 aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash(0)*/ );
962 // insert_aux_node cannot return false for empty list
963 CDS_VERIFY( m_List.insert_aux_node( pNode ));
965 m_Buckets.bucket( 0, pNode );
968 static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor )
970 return nBucketCount * nLoadFactor;
973 void inc_item_count()
975 size_t nMaxCount = m_nMaxItemCount.load(memory_model::memory_order_relaxed);
976 if ( ++m_ItemCounter <= nMaxCount )
979 size_t sz = m_nBucketCountLog2.load(memory_model::memory_order_relaxed);
980 const size_t nBucketCount = static_cast<size_t>(1) << sz;
981 if ( nBucketCount < m_Buckets.capacity() ) {
982 // we may grow the bucket table
983 const size_t nLoadFactor = m_Buckets.load_factor();
984 if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor ))
985 return; // someone already have updated m_nBucketCountLog2, so stop here
987 m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ),
988 memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
989 m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
992 m_nMaxItemCount.store( std::numeric_limits<size_t>::max(), memory_model::memory_order_relaxed );
995 template <typename Q, typename Compare, typename Func>
996 bool find_( Q& val, Compare cmp, Func f )
998 size_t nHash = hash_value( val );
999 split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
1000 aux_node_type * pHead = get_bucket( nHash );
1001 assert( pHead != nullptr );
1003 return m_Stat.onFind( m_List.find_at( pHead, sv, cmp,
1004 [&f](value_type& item, split_list::details::search_value_type<Q>& val){ f(item, val.val ); }));
1007 template <typename Q, typename Compare>
1008 bool find_value( Q const& val, Compare cmp )
1010 size_t nHash = hash_value( val );
1011 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
1012 aux_node_type * pHead = get_bucket( nHash );
1013 assert( pHead != nullptr );
1015 return m_Stat.onFind( m_List.find_at( pHead, sv, cmp ));
1018 template <typename Q, typename Compare>
1019 raw_ptr get_( Q const& val, Compare cmp )
1021 size_t nHash = hash_value( val );
1022 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
1023 aux_node_type * pHead = get_bucket( nHash );
1024 assert( pHead != nullptr );
1026 raw_ptr p = m_List.get_at( pHead, sv, cmp );
1027 m_Stat.onFind( !!p );
1031 template <typename Q, typename Compare>
1032 value_type * extract_( Q const& val, Compare cmp )
1034 size_t nHash = hash_value( val );
1035 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
1036 aux_node_type * pHead = get_bucket( nHash );
1037 assert( pHead != nullptr );
1039 value_type * pNode = m_List.extract_at( pHead, sv, cmp );
1042 m_Stat.onExtractSuccess();
1045 m_Stat.onExtractFailed();
1049 template <typename Q, typename Less>
1050 value_type * extract_with_( Q const& val, Less pred )
1053 return extract_( val, typename wrapped_ordered_list::template make_compare_from_less<Less>());
1056 template <typename Q, typename Compare>
1057 bool erase_( const Q& val, Compare cmp )
1059 size_t nHash = hash_value( val );
1060 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
1061 aux_node_type * pHead = get_bucket( nHash );
1062 assert( pHead != nullptr );
1064 if ( m_List.erase_at( pHead, sv, cmp ) ) {
1066 m_Stat.onEraseSuccess();
1069 m_Stat.onEraseFailed();
1073 template <typename Q, typename Compare, typename Func>
1074 bool erase_( Q const& val, Compare cmp, Func f )
1076 size_t nHash = hash_value( val );
1077 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
1078 aux_node_type * pHead = get_bucket( nHash );
1079 assert( pHead != nullptr );
1081 if ( m_List.erase_at( pHead, sv, cmp, f )) {
1083 m_Stat.onEraseSuccess();
1086 m_Stat.onEraseFailed();
1093 typedef typename cds::details::type_padding< bucket_table, traits::padding >::type padded_bucket_table;
1094 padded_bucket_table m_Buckets; ///< bucket table
1096 typedef typename cds::details::type_padding< ordered_list_wrapper, traits::padding>::type padded_ordered_list;
1097 padded_ordered_list m_List; ///< Ordered list containing split-list items
1099 atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
1100 atomics::atomic<size_t> m_nMaxItemCount; ///< number of items container can hold, before we have to resize
1101 item_counter m_ItemCounter; ///< Item counter
1102 hash m_HashFunctor; ///< Hash functor
1103 stat m_Stat; ///< Internal statistics accumulator
1107 }} // namespace cds::intrusive
1109 #endif // #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H