2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
32 #define CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
36 #include <cds/intrusive/details/split_list_base.h>
37 #include <cds/details/binary_functor_wrapper.h>
38 #include <cds/details/type_padding.h>
40 namespace cds { namespace intrusive {
42 /// Split-ordered list RCU specialization
43 /** @ingroup cds_intrusive_map
44 \anchor cds_intrusive_SplitListSet_rcu
46 Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see
47 - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables"
48 - [2008] Nir Shavit "The Art of Multiprocessor Programming"
50 The split-ordered list is a lock-free implementation of an extensible unbounded hash table. It uses original
51 recursive split-ordering algorithm discovered by Ori Shalev and Nir Shavit that allows to split buckets
52 without moving an item on resizing, see \ref cds_SplitList_algo_desc "short algo description".
56 Template parameters are:
57 - \p RCU - one of \ref cds_urcu_gc "RCU type"
58 - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList, LazyList.
59 The intrusive ordered list implementation specifies the type \p T stored in the hash-set,
60 the comparing functor for the type \p T and other features specific for the ordered list.
61 - \p Traits - set traits, default is \p split_list::traits.
62 Instead of defining \p Traits struct you may use option-based syntax with \p split_list::make_traits metafunction.
64 @note About required features of hash functor see \ref cds_SplitList_hash_functor "SplitList general description".
67 Before including <tt><cds/intrusive/split_list_rcu.h></tt> you should include appropriate RCU header file,
68 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
69 For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" and
70 MichaelList-based split-list you should include:
72 #include <cds/urcu/general_buffered.h>
73 #include <cds/intrusive/michael_list_rcu.h>
74 #include <cds/intrusive/split_list_rcu.h>
76 // Declare Michael's list for type Foo and default traits:
77 typedef cds::intrusive::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, Foo > rcu_michael_list;
79 // Declare split-list based on rcu_michael_list
80 typedef cds::intrusive::SplitListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, rcu_michael_list > rcu_split_list;
// NOTE(review): this view is a lossy line-sampled extraction — each line carries its
// original file line number and some lines (braces, access specifiers) are missing;
// the comments below are added without altering any visible token.
87 # ifdef CDS_DOXYGEN_INVOKED
88 class Traits = split_list::traits
93 class SplitListSet< cds::urcu::gc< RCU >, OrderedList, Traits >
96 typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector
97 typedef Traits traits; ///< Traits template parameters
99 /// Hash functor for \ref value_type and all its derivatives that you use
100 typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
// Rebinds the ordered list so its node type is wrapped for split-list use
// (see node_type / node_traits below) — TODO confirm against split_list_base.h
104 typedef split_list::details::rebind_list_traits<OrderedList, traits> wrapped_ordered_list;
108 # ifdef CDS_DOXYGEN_INVOKED
109 typedef OrderedList ordered_list; ///< type of ordered list used as base for split-list
111 typedef typename wrapped_ordered_list::result ordered_list;
113 typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list
114 typedef typename ordered_list::key_comparator key_comparator; ///< key compare functor
115 typedef typename ordered_list::disposer disposer; ///< Node disposer functor
116 typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock
117 typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node
118 typedef typename ordered_list::raw_ptr raw_ptr; ///< pointer to the node for \p get() function
119 /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that
120 static CDS_CONSTEXPR const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal;
122 typedef typename traits::item_counter item_counter; ///< Item counter type
123 typedef typename traits::back_off back_off; ///< back-off strategy for spinning
124 typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
125 typedef typename traits::stat stat; ///< Internal statistics
127 // GC and OrderedList::gc must be the same
128 static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
130 // atomicity::empty_item_counter is not allowed as a item counter
// Exact item counting is mandatory here: inc_item_count() compares the real
// element count against m_nMaxItemCount to trigger bucket-table growth.
131 static_assert( !std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
132 "cds::atomicity::empty_item_counter is not allowed as a item counter");
136 typedef typename ordered_list::node_type list_node_type; ///< Node type as declared in ordered list
137 typedef split_list::node<list_node_type> node_type; ///< split-list node type
139 /// Split-list node traits
141 This traits is intended for converting between underlying ordered list node type \ref list_node_type
142 and split-list node type \ref node_type
144 typedef split_list::node_traits<typename ordered_list::node_traits> node_traits;
146 /// Bucket table implementation
147 typedef typename split_list::details::bucket_table_selector<
148 traits::dynamic_bucket_table
151 , opt::allocator< typename traits::allocator >
152 , opt::memory_model< memory_model >
153 >::type bucket_table;
155 typedef typename bucket_table::aux_node_type aux_node_type; ///< auxiliary node type
161 /// Ordered list wrapper to access protected members of OrderedList
162 class ordered_list_wrapper: public ordered_list
164 typedef ordered_list base_class;
165 typedef typename base_class::auxiliary_head bucket_head_type;
// Each *_at() forwarder below converts the raw aux_node_type bucket head into
// the base list's auxiliary_head handle, then delegates to the protected
// base-class implementation of the same name.
168 bool insert_at( aux_node_type * pHead, value_type& val )
170 assert( pHead != nullptr );
171 bucket_head_type h(pHead);
172 return base_class::insert_at( h, val );
175 template <typename Func>
176 bool insert_at( aux_node_type * pHead, value_type& val, Func f )
178 assert( pHead != nullptr );
179 bucket_head_type h(pHead);
180 return base_class::insert_at( h, val, f );
183 template <typename Func>
184 std::pair<bool, bool> update_at( aux_node_type * pHead, value_type& val, Func func, bool bAllowInsert )
186 assert( pHead != nullptr );
187 bucket_head_type h(pHead);
188 return base_class::update_at( h, val, func, bAllowInsert );
191 bool unlink_at( aux_node_type * pHead, value_type& val )
193 assert( pHead != nullptr );
194 bucket_head_type h(pHead);
195 return base_class::unlink_at( h, val );
198 template <typename Q, typename Compare, typename Func>
199 bool erase_at( aux_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp, Func f )
201 assert( pHead != nullptr );
202 bucket_head_type h(pHead);
203 return base_class::erase_at( h, val, cmp, f );
206 template <typename Q, typename Compare>
207 bool erase_at( aux_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
209 assert( pHead != nullptr );
210 bucket_head_type h(pHead);
211 return base_class::erase_at( h, val, cmp );
214 template <typename Q, typename Compare>
215 value_type * extract_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
217 assert( pHead != nullptr );
218 bucket_head_type h(pHead);
219 return base_class::extract_at( h, val, cmp );
222 template <typename Q, typename Compare, typename Func>
223 bool find_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f )
225 assert( pHead != nullptr );
226 bucket_head_type h(pHead);
227 return base_class::find_at( h, val, cmp, f );
230 template <typename Q, typename Compare>
231 bool find_at( aux_node_type * pHead, split_list::details::search_value_type<Q> const & val, Compare cmp )
233 assert( pHead != nullptr );
234 bucket_head_type h(pHead);
235 return base_class::find_at( h, val, cmp );
238 template <typename Q, typename Compare>
239 raw_ptr get_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
241 assert( pHead != nullptr );
242 bucket_head_type h(pHead);
243 return base_class::get_at( h, val, cmp );
246 bool insert_aux_node( aux_node_type * pNode )
248 return base_class::insert_aux_node( pNode );
250 bool insert_aux_node( aux_node_type * pHead, aux_node_type * pNode )
252 bucket_head_type h(pHead);
253 return base_class::insert_aux_node( h, pNode );
// Adapts a user Less predicate to the comparator interface; the two overloads
// unwrap search_value_type on either side by comparing its .val member.
257 template <typename Less>
258 struct less_wrapper: public cds::opt::details::make_comparator_from_less<Less>
260 typedef cds::opt::details::make_comparator_from_less<Less> base_wrapper;
262 template <typename Q1, typename Q2>
263 int operator()( split_list::details::search_value_type<Q1> const& v1, Q2 const& v2 ) const
265 return base_wrapper::operator()( v1.val, v2 );
268 template <typename Q1, typename Q2>
269 int operator()( Q1 const& v1, split_list::details::search_value_type<Q2> const& v2 ) const
271 return base_wrapper::operator()( v1, v2.val );
277 /// Initialize split-ordered list of default capacity
279 The default capacity is defined in bucket table constructor.
280 See split_list::expandable_bucket_table, split_list::static_bucket_table
281 which selects by split_list::dynamic_bucket_table option.
// Both ctors start with 2 buckets (log2 == 1); the initial resize threshold
// is 2 * load_factor (see inc_item_count()).
284 : m_nBucketCountLog2(1)
285 , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
290 /// Initialize split-ordered list
292 size_t nItemCount ///< estimate average of item count
293 , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1.
295 : m_Buckets( nItemCount, nLoadFactor )
296 , m_nBucketCountLog2(1)
297 , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
302 /// Destroys split-list
312 The function inserts \p val in the set if it does not contain
313 an item with key equal to \p val.
315 The function makes RCU lock internally.
317 Returns \p true if \p val is placed into the set, \p false otherwise.
319 bool insert( value_type& val )
321 size_t nHash = hash_value( val );
322 aux_node_type * pHead = get_bucket( nHash );
323 assert( pHead != nullptr );
// Store the split-order regular hash into the node before linking it into
// the bucket's ordered list.
325 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
327 if ( m_List.insert_at( pHead, val )) {
329 m_Stat.onInsertSuccess();
332 m_Stat.onInsertFailed();
338 This function is intended for derived non-intrusive containers.
340 The function allows to split creating of new item into two part:
341 - create item with key only
342 - insert new item into the set
343 - if inserting is success, calls \p f functor to initialize value-field of \p val.
345 The functor signature is:
347 void func( value_type& val );
349 where \p val is the item inserted.
351 The function makes RCU lock internally.
353 @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
354 \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level
357 template <typename Func>
358 bool insert( value_type& val, Func f )
360 size_t nHash = hash_value( val );
361 aux_node_type * pHead = get_bucket( nHash );
362 assert( pHead != nullptr );
364 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
366 if ( m_List.insert_at( pHead, val, f )) {
368 m_Stat.onInsertSuccess();
371 m_Stat.onInsertFailed();
377 The operation performs inserting or changing data with lock-free manner.
379 If the item \p val is not found in the set, then \p val is inserted
380 iff \p bAllowInsert is \p true.
381 Otherwise, the functor \p func is called with item found.
382 The functor signature is:
384 void func( bool bNew, value_type& item, value_type& val );
387 - \p bNew - \p true if the item has been inserted, \p false otherwise
388 - \p item - item of the set
389 - \p val - argument \p val passed into the \p update() function
390 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
391 refers to the same stuff.
393 The functor may change non-key fields of the \p item.
395 The function applies RCU lock internally.
397 Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
398 \p second is \p true if new item has been added or \p false if the item with \p key
399 already is in the list.
401 @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
402 \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level
405 template <typename Func>
406 std::pair<bool, bool> update( value_type& val, Func func, bool bAllowInsert = true )
408 size_t nHash = hash_value( val );
409 aux_node_type * pHead = get_bucket( nHash );
410 assert( pHead != nullptr );
412 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
// bRet.first == operation succeeded, bRet.second == a new item was inserted;
// only the "both true" path accounts a newly added element.
414 std::pair<bool, bool> bRet = m_List.update_at( pHead, val, func, bAllowInsert );
415 if ( bRet.first && bRet.second ) {
417 m_Stat.onUpdateNew();
420 m_Stat.onUpdateExist();
// Deprecated alias kept for backward compatibility; forwards to update().
424 template <typename Func>
425 CDS_DEPRECATED("ensure() is deprecated, use update()")
426 std::pair<bool, bool> ensure( value_type& val, Func func )
428 return update( val, func, true );
432 /// Unlinks the item \p val from the set
434 The function searches the item \p val in the set and unlinks it from the set
435 if it is found and is equal to \p val.
437 Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
438 and deletes the item found. \p unlink finds an item by key and deletes it
439 only if \p val is an item of that set, i.e. the pointer to item found
440 is equal to <tt> &val </tt>.
442 RCU \p synchronize method can be called, therefore, RCU should not be locked.
444 The function returns \p true if success and \p false otherwise.
446 bool unlink( value_type& val )
448 size_t nHash = hash_value( val );
449 aux_node_type * pHead = get_bucket( nHash );
450 assert( pHead != nullptr );
// unlink_at succeeds only for the exact item &val (pointer identity), per the
// contract documented above.
452 if ( m_List.unlink_at( pHead, val ) ) {
454 m_Stat.onEraseSuccess();
457 m_Stat.onEraseFailed();
461 /// Deletes the item from the set
462 /** \anchor cds_intrusive_SplitListSet_rcu_erase
463 The function searches an item with key equal to \p key in the set,
464 unlinks it from the set, and returns \p true.
465 If the item with key equal to \p key is not found the function return \p false.
467 Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
468 and deletes the item found. \p unlink finds an item by key and deletes it
469 only if \p key is an item of that set, i.e. the pointer to item found
470 is equal to <tt> &key </tt>.
472 RCU \p synchronize method can be called, therefore, RCU should not be locked.
474 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
476 template <typename Q>
477 bool erase( Q const& key )
479 return erase_( key, key_comparator() );
482 /// Deletes the item from the set using \p pred for searching
484 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase "erase(Q const&)"
485 but \p cmp is used for key compare.
486 \p Less has the interface like \p std::less.
487 \p pred must imply the same element order as the comparator used for building the set.
489 template <typename Q, typename Less>
490 bool erase_with( Q const& key, Less pred )
493 return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
496 /// Deletes the item from the set
497 /** \anchor cds_intrusive_SplitListSet_rcu_erase_func
498 The function searches an item with key equal to \p key in the set,
499 call \p f functor with item found, unlinks it from the set, and returns \p true.
500 The \ref disposer specified by \p OrderedList class template parameter is called
501 by garbage collector \p GC asynchronously.
503 The \p Func interface is
506 void operator()( value_type const& item );
510 If the item with key equal to \p key is not found the function return \p false.
512 RCU \p synchronize method can be called, therefore, RCU should not be locked.
514 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
516 template <typename Q, typename Func>
517 bool erase( Q const& key, Func f )
519 return erase_( key, key_comparator(), f );
522 /// Deletes the item from the set using \p pred for searching
524 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase_func "erase(Q const&, Func)"
525 but \p cmp is used for key compare.
526 \p Less has the interface like \p std::less.
527 \p pred must imply the same element order as the comparator used for building the set.
529 template <typename Q, typename Less, typename Func>
530 bool erase_with( Q const& key, Less pred, Func f )
533 return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
536 /// Extracts an item from the set
537 /** \anchor cds_intrusive_SplitListSet_rcu_extract
538 The function searches an item with key equal to \p key in the set,
539 unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
540 If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr.
542 Depends on \p bucket_type you should or should not lock RCU before calling of this function:
543 - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked
544 - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked
545 See ordered list implementation for details.
548 typedef cds::urcu::gc< general_buffered<> > rcu;
549 typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list;
550 typedef cds::intrusive::SplitListSet< rcu, rcu_michael_list, foo_traits > rcu_splitlist_set;
552 rcu_splitlist_set theSet;
555 rcu_splitlist_set::exempt_ptr p;
557 // For MichaelList we should not lock RCU
559 // Now, you can apply extract function
560 // Note that you must not delete the item found inside the RCU lock
561 p = theSet.extract( 10 );
563 // do something with p
567 // We may safely release p here
568 // release() passes the pointer to RCU reclamation cycle:
569 // it invokes RCU retire_ptr function with the disposer you provided for rcu_michael_list.
573 template <typename Q>
574 exempt_ptr extract( Q const& key )
576 return exempt_ptr(extract_( key, key_comparator() ));
579 /// Extracts an item from the set using \p pred for searching
581 The function is an analog of \p extract(Q const&) but \p pred is used for key compare.
582 \p Less functor has the interface like \p std::less.
583 \p pred must imply the same element order as the comparator used for building the set.
585 template <typename Q, typename Less>
586 exempt_ptr extract_with( Q const& key, Less pred )
588 return exempt_ptr( extract_with_( key, pred ));
591 /// Finds the key \p key
592 /** \anchor cds_intrusive_SplitListSet_rcu_find_func
593 The function searches the item with key equal to \p key and calls the functor \p f for item found.
594 The interface of \p Func functor is:
597 void operator()( value_type& item, Q& key );
600 where \p item is the item found, \p key is the <tt>find</tt> function argument.
602 The functor can change non-key fields of \p item. Note that the functor is only guarantee
603 that \p item cannot be disposed during functor is executing.
604 The functor does not serialize simultaneous access to the set \p item. If such access is
605 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
607 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
608 can modify both arguments.
610 Note the hash functor specified for class \p Traits template parameter
611 should accept a parameter of type \p Q that can be not the same as \p value_type.
613 The function applies RCU lock internally.
615 The function returns \p true if \p key is found, \p false otherwise.
617 template <typename Q, typename Func>
618 bool find( Q& key, Func f )
620 return find_( key, key_comparator(), f );
// Const-key overload: same behavior, the functor receives the key as given.
623 template <typename Q, typename Func>
624 bool find( Q const& key, Func f )
626 return find_( key, key_comparator(), f );
630 /// Finds the key \p key with \p pred predicate for comparing
632 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_func "find(Q&, Func)"
633 but \p cmp is used for key compare.
634 \p Less has the interface like \p std::less.
635 \p cmp must imply the same element order as the comparator used for building the set.
637 template <typename Q, typename Less, typename Func>
638 bool find_with( Q& key, Less pred, Func f )
641 return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
644 template <typename Q, typename Less, typename Func>
645 bool find_with( Q const& key, Less pred, Func f )
648 return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
652 /// Checks whether the set contains \p key
654 The function searches the item with key equal to \p key
655 and returns \p true if it is found, and \p false otherwise.
657 Note the hash functor specified for class \p Traits template parameter
658 should accept a parameter of type \p Q that can be not the same as \p value_type.
659 Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing.
661 template <typename Q>
662 bool contains( Q const& key )
664 return find_value( key, key_comparator() );
// Deprecated alias of contains(); kept for backward compatibility.
667 template <typename Q>
668 CDS_DEPRECATED("deprecated, use contains()")
669 bool find( Q const& key )
671 return contains( key );
675 /// Checks whether the set contains \p key using \p pred predicate for searching
677 The function is an analog of <tt>contains( key )</tt> but \p pred is used for key comparing.
678 \p Less functor has the interface like \p std::less.
679 \p Less must imply the same element order as the comparator used for building the list.
681 template <typename Q, typename Less>
682 bool contains( Q const& key, Less pred )
685 return find_value( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
// Deprecated alias of contains(key, pred); kept for backward compatibility.
688 template <typename Q, typename Less>
689 CDS_DEPRECATED("deprecated, use contains()")
690 bool find_with( Q const& key, Less pred )
692 return contains( key, pred );
696 /// Finds the key \p key and return the item found
697 /** \anchor cds_intrusive_SplitListSet_rcu_get
698 The function searches the item with key equal to \p key and returns the pointer to item found.
699 If \p key is not found it returns \p nullptr.
701 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
703 RCU should be locked before call of this function.
704 Returned item is valid only while RCU is locked:
706 typedef cds::intrusive::SplitListSet< your_template_parameters > set_class;
709 typename set_class::raw_ptr rp;
712 set_class::rcu_lock lock;
714 rp = theSet.get( 5 );
719 // Unlock RCU by rcu_lock destructor
720 // rp can be retired by disposer at any time after RCU has been unlocked
724 template <typename Q>
725 raw_ptr get( Q const& key )
727 return get_( key, key_comparator() );
730 /// Finds the key \p key and return the item found
732 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_get "get(Q const&)"
733 but \p pred is used for comparing the keys.
735 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
737 \p pred must imply the same element order as the comparator used for building the set.
739 template <typename Q, typename Less>
740 raw_ptr get_with( Q const& key, Less pred )
743 return get_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>());
747 /// Returns item count in the set
750 return m_ItemCounter;
753 /// Checks if the set is empty
755 Emptiness is checked by item counting: if item count is zero then the set is empty.
756 Thus, the correct item counting feature is an important part of split-list set implementation.
763 /// Clears the set (not atomic)
// clear() walks the set via iterators and erases elements one by one; it is
// not atomic with respect to concurrent insert/erase (see doc above).
766 iterator it = begin();
767 while ( it != end() ) {
775 /// Returns internal statistics
776 stat const& statistics() const
// Thin wrapper over the split-list iterator helper; IsConst selects the
// const/non-const flavor used for iterator / const_iterator below.
783 template <bool IsConst>
785 :public split_list::details::iterator_type<node_traits, ordered_list, IsConst>
787 typedef split_list::details::iterator_type<node_traits, ordered_list, IsConst> iterator_base_class;
788 typedef typename iterator_base_class::list_iterator list_iterator;
// Default and copy ctors simply delegate to the base iterator.
791 : iterator_base_class()
794 iterator_type( iterator_type const& src )
795 : iterator_base_class( src )
798 // This ctor should be protected...
799 iterator_type( list_iterator itCur, list_iterator itEnd )
800 : iterator_base_class( itCur, itEnd )
806 ///@name Forward iterators (thread-safe under RCU lock)
810 The forward iterator for a split-list has some features:
811 - it has no post-increment operator
812 - it depends on iterator of underlying \p OrderedList
814 You may safely use iterators in multi-threaded environment only under RCU lock.
815 Otherwise, a crash is possible if another thread deletes the element the iterator points to.
817 typedef iterator_type<false> iterator;
819 /// Const forward iterator
821 For iterator's features and requirements see \ref iterator
823 typedef iterator_type<true> const_iterator;
825 /// Returns a forward iterator addressing the first element in a split-list
827 For empty list \code begin() == end() \endcode
831 return iterator( m_List.begin(), m_List.end() );
834 /// Returns an iterator that addresses the location succeeding the last element in a split-list
836 Do not use the value returned by <tt>end</tt> function to access any item.
838 The returned value can be used only to control reaching the end of the split-list.
839 For empty list \code begin() == end() \endcode
843 return iterator( m_List.end(), m_List.end() );
846 /// Returns a forward const iterator addressing the first element in a split-list
847 const_iterator begin() const
851 /// Returns a forward const iterator addressing the first element in a split-list
852 const_iterator cbegin() const
854 return const_iterator( m_List.cbegin(), m_List.cend() );
857 /// Returns an const iterator that addresses the location succeeding the last element in a split-list
858 const_iterator end() const
862 /// Returns an const iterator that addresses the location succeeding the last element in a split-list
863 const_iterator cend() const
865 return const_iterator( m_List.cend(), m_List.cend() );
// Allocates an auxiliary (dummy) node from the bucket table and records the
// allocation in the statistics.
871 aux_node_type * alloc_aux_node( size_t nHash )
873 m_Stat.onHeadNodeAllocated();
874 aux_node_type* p = m_Buckets.alloc_aux_node();
880 void free_aux_node( aux_node_type * p )
882 m_Buckets.free_aux_node( p );
883 m_Stat.onHeadNodeFreed();
886 /// Calculates hash value of \p key
887 template <typename Q>
888 size_t hash_value( Q const& key ) const
890 return m_HashFunctor( key );
// Maps a hash onto the current bucket range: mask by (2^log2(count) - 1).
893 size_t bucket_no( size_t nHash ) const
895 return nHash & ( (1 << m_nBucketCountLog2.load(memory_model::memory_order_relaxed)) - 1 );
// Parent bucket index = bucket index with its most-significant set bit
// cleared (the recursive split-ordering parent relation).
898 static size_t parent_bucket( size_t nBucket )
900 assert( nBucket > 0 );
901 return nBucket & ~( 1 << bitop::MSBnz( nBucket ) );
// Lazily initializes bucket nBucket: first ensures its parent bucket exists
// (recursively), then races to link a new dummy node after the parent head.
// The loser of the race frees its node and busy-waits for the winner.
904 aux_node_type * init_bucket( size_t nBucket )
906 assert( nBucket > 0 );
907 size_t nParent = parent_bucket( nBucket );
909 aux_node_type * pParentBucket = m_Buckets.bucket( nParent );
910 if ( pParentBucket == nullptr ) {
911 pParentBucket = init_bucket( nParent );
912 m_Stat.onRecursiveInitBucket();
915 assert( pParentBucket != nullptr );
917 // Allocate a dummy node for new bucket
919 aux_node_type * pBucket = alloc_aux_node( split_list::dummy_hash( nBucket ) );
920 if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
921 m_Buckets.bucket( nBucket, pBucket );
922 m_Stat.onNewBucket();
925 free_aux_node( pBucket );
928 // Another thread set the bucket. Wait while it done
930 // In this point, we must wait while nBucket is empty.
931 // The compiler can decide that waiting loop can be "optimized" (stripped)
932 // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
934 m_Stat.onBucketInitContenton();
// The volatile-qualified read keeps the compiler from hoisting this load out
// of the busy-wait loop (see note above).
937 aux_node_type volatile * p = m_Buckets.bucket( nBucket );
939 return const_cast<aux_node_type *>( p );
941 m_Stat.onBusyWaitBucketInit();
// Returns the (lazily created) dummy head node for the bucket of nHash.
945 aux_node_type * get_bucket( size_t nHash )
947 size_t nBucket = bucket_no( nHash );
949 aux_node_type * pHead = m_Buckets.bucket( nBucket );
950 if ( pHead == nullptr )
951 pHead = init_bucket( nBucket );
953 assert( pHead->is_dummy() );
960 // Initialize bucket 0
961 aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash(0)*/ );
963 // insert_aux_node cannot return false for empty list
964 CDS_VERIFY( m_List.insert_aux_node( pNode ));
966 m_Buckets.bucket( 0, pNode );
// Resize threshold = bucket count * load factor.
969 static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor )
971 return nBucketCount * nLoadFactor;
// Accounts a newly inserted element; when the count exceeds the current
// threshold, doubles the bucket count (lock-free, via two relaxed CAS ops —
// losing a CAS race is benign, some other thread did the same update).
974 void inc_item_count()
976 size_t nMaxCount = m_nMaxItemCount.load(memory_model::memory_order_relaxed);
977 if ( ++m_ItemCounter <= nMaxCount )
980 size_t sz = m_nBucketCountLog2.load(memory_model::memory_order_relaxed);
981 const size_t nBucketCount = static_cast<size_t>(1) << sz;
982 if ( nBucketCount < m_Buckets.capacity() ) {
983 // we may grow the bucket table
984 const size_t nLoadFactor = m_Buckets.load_factor();
985 if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor ))
986 return; // someone already have updated m_nBucketCountLog2, so stop here
988 m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ),
989 memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
990 m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
// Bucket table is at full capacity: saturate the threshold so the growth
// path is never attempted again.
993 m_nMaxItemCount.store( std::numeric_limits<size_t>::max(), memory_model::memory_order_relaxed );
// Internal implementations: each computes the hash once, wraps the key with
// its split-order regular hash in a search_value_type, resolves the bucket
// head, then delegates to the corresponding m_List operation and records the
// outcome in the statistics.
996 template <typename Q, typename Compare, typename Func>
997 bool find_( Q& val, Compare cmp, Func f )
999 size_t nHash = hash_value( val );
1000 split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
1001 aux_node_type * pHead = get_bucket( nHash );
1002 assert( pHead != nullptr );
// The lambda unwraps search_value_type so the user functor sees the plain key.
1004 return m_Stat.onFind( m_List.find_at( pHead, sv, cmp,
1005 [&f](value_type& item, split_list::details::search_value_type<Q>& val){ f(item, val.val ); }));
1008 template <typename Q, typename Compare>
1009 bool find_value( Q const& val, Compare cmp )
1011 size_t nHash = hash_value( val );
1012 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
1013 aux_node_type * pHead = get_bucket( nHash );
1014 assert( pHead != nullptr );
1016 return m_Stat.onFind( m_List.find_at( pHead, sv, cmp ));
1019 template <typename Q, typename Compare>
1020 raw_ptr get_( Q const& val, Compare cmp )
1022 size_t nHash = hash_value( val );
1023 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
1024 aux_node_type * pHead = get_bucket( nHash );
1025 assert( pHead != nullptr );
1027 raw_ptr p = m_List.get_at( pHead, sv, cmp );
1028 m_Stat.onFind( !!p );
1032 template <typename Q, typename Compare>
1033 value_type * extract_( Q const& val, Compare cmp )
1035 size_t nHash = hash_value( val );
1036 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
1037 aux_node_type * pHead = get_bucket( nHash );
1038 assert( pHead != nullptr );
1040 value_type * pNode = m_List.extract_at( pHead, sv, cmp );
1043 m_Stat.onExtractSuccess();
1046 m_Stat.onExtractFailed();
1050 template <typename Q, typename Less>
1051 value_type * extract_with_( Q const& val, Less pred )
1054 return extract_( val, typename wrapped_ordered_list::template make_compare_from_less<Less>());
1057 template <typename Q, typename Compare>
1058 bool erase_( const Q& val, Compare cmp )
1060 size_t nHash = hash_value( val );
1061 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
1062 aux_node_type * pHead = get_bucket( nHash );
1063 assert( pHead != nullptr );
1065 if ( m_List.erase_at( pHead, sv, cmp ) ) {
1067 m_Stat.onEraseSuccess();
1070 m_Stat.onEraseFailed();
1074 template <typename Q, typename Compare, typename Func>
1075 bool erase_( Q const& val, Compare cmp, Func f )
1077 size_t nHash = hash_value( val );
1078 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
1079 aux_node_type * pHead = get_bucket( nHash );
1080 assert( pHead != nullptr );
1082 if ( m_List.erase_at( pHead, sv, cmp, f )) {
1084 m_Stat.onEraseSuccess();
1087 m_Stat.onEraseFailed();
// Hot members are separated with traits::padding — presumably to avoid false
// sharing between the bucket table and the list; confirm against the
// cds::opt::padding documentation.
1094 static unsigned const c_padding = cds::opt::actual_padding< traits::padding >::value;
1096 typedef typename cds::details::type_padding< bucket_table, c_padding >::type padded_bucket_table;
1097 padded_bucket_table m_Buckets; ///< bucket table
1099 typedef typename cds::details::type_padding< ordered_list_wrapper, c_padding >::type padded_ordered_list;
1100 padded_ordered_list m_List; ///< Ordered list containing split-list items
1102 atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
1103 atomics::atomic<size_t> m_nMaxItemCount; ///< number of items container can hold, before we have to resize
1104 item_counter m_ItemCounter; ///< Item counter
1105 hash m_HashFunctor; ///< Hash functor
1106 stat m_Stat; ///< Internal statistics accumulator
1110 }} // namespace cds::intrusive
1112 #endif // #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H