2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H
32 #define CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H
34 #include <functional> // std::ref
35 #include <iterator> // std::iterator_traits
38 #include <cds/intrusive/details/feldman_hashset_base.h>
39 #include <cds/details/allocator.h>
40 #include <cds/urcu/details/check_deadlock.h>
41 #include <cds/urcu/exempt_ptr.h>
42 #include <cds/intrusive/details/raw_ptr_disposer.h>
45 namespace cds { namespace intrusive {
46 /// Intrusive hash set based on multi-level array, \ref cds_urcu_desc "RCU" specialization
47 /** @ingroup cds_intrusive_map
48 @anchor cds_intrusive_FeldmanHashSet_rcu
51 - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays:
52 Wait-free Extensible Hash Maps"
54 See algorithm short description @ref cds_intrusive_FeldmanHashSet_hp "here"
56 @note Two important things you should keep in mind when you're using \p %FeldmanHashSet:
57 - all keys must be fixed-size. It means that you cannot use \p std::string as a key for \p %FeldmanHashSet.
58 Instead, for the strings you should use well-known hashing algorithms like <a href="https://en.wikipedia.org/wiki/Secure_Hash_Algorithm">SHA1, SHA2</a>,
59 <a href="https://en.wikipedia.org/wiki/MurmurHash">MurmurHash</a>, <a href="https://en.wikipedia.org/wiki/CityHash">CityHash</a>
60 or its successor <a href="https://code.google.com/p/farmhash/">FarmHash</a> and so on, which
61 convert variable-length strings to fixed-length bit-strings, and use that hash as a key in \p %FeldmanHashSet.
62 - \p %FeldmanHashSet uses a perfect hashing. It means that if two different keys, for example, of type \p std::string,
63 have identical hash then you cannot insert both of those keys in the set. \p %FeldmanHashSet does not maintain the key,
64 it maintains its fixed-size hash value.
67 - \p RCU - one of \ref cds_urcu_gc "RCU type"
68 - \p T - a value type to be stored in the set
69 - \p Traits - type traits, the structure based on \p feldman_hashset::traits or result of \p feldman_hashset::make_traits metafunction.
70 \p Traits is the mandatory argument because it has one mandatory type - an @ref feldman_hashset::traits::hash_accessor "accessor"
71 to hash value of \p T. The set algorithm does not calculate that hash value.
73 @note Before including <tt><cds/intrusive/feldman_hashset_rcu.h></tt> you should include appropriate RCU header file,
74 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
76 The set supports @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional thread-safe iterators"
78 with some restrictions.
83 #ifdef CDS_DOXYGEN_INVOKED
84 class Traits = feldman_hashset::traits
89 class FeldmanHashSet< cds::urcu::gc< RCU >, T, Traits >: protected feldman_hashset::multilevel_array<T, Traits>
92 typedef feldman_hashset::multilevel_array<T, Traits> base_class;
96 typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector
97 typedef T value_type; ///< type of value stored in the set
98 typedef Traits traits; ///< Traits template parameter
100 typedef typename traits::hash_accessor hash_accessor; ///< Hash accessor functor
101 typedef typename base_class::hash_type hash_type; ///< Hash type deduced from \p hash_accessor return type
102 typedef typename traits::disposer disposer; ///< data node disposer
103 typedef typename base_class::hash_comparator hash_comparator; ///< hash compare functor based on \p traits::compare and \p traits::less options
105 typedef typename traits::item_counter item_counter; ///< Item counter type
106 typedef typename traits::node_allocator node_allocator; ///< Array node allocator
107 typedef typename traits::memory_model memory_model; ///< Memory model
108 typedef typename traits::back_off back_off; ///< Backoff strategy
109 typedef typename traits::stat stat; ///< Internal statistics type
110 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
111 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
112 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
114 using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, disposer, void >; ///< pointer to extracted node
118 typedef typename base_class::node_ptr node_ptr;
119 typedef typename base_class::atomic_node_ptr atomic_node_ptr;
120 typedef typename base_class::array_node array_node;
121 typedef typename base_class::traverse_data traverse_data;
123 using base_class::to_array;
124 using base_class::to_node;
125 using base_class::stats;
126 using base_class::head;
127 using base_class::metrics;
129 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
134 item_counter m_ItemCounter; ///< Item counter
138 /// Creates empty set
140 @param head_bits: 2<sup>head_bits</sup> specifies the size of head array, minimum is 4.
141 @param array_bits: 2<sup>array_bits</sup> specifies the size of array node, minimum is 2.
143 Equation for \p head_bits and \p array_bits:
145 sizeof(hash_type) * 8 == head_bits + N * array_bits
147 where \p N is multi-level array depth.
149 FeldmanHashSet(size_t head_bits = 8, size_t array_bits = 4)
150 : base_class(head_bits, array_bits)
153 /// Destructs the set and frees all data
161 The function inserts \p val in the set if it does not contain
162 an item with that hash.
164 Returns \p true if \p val is placed into the set, \p false otherwise.
166 The function locks RCU internally.
168 bool insert( value_type& val )
170 return insert( val, [](value_type&) {} );
175 This function is intended for derived non-intrusive containers.
177 The function allows splitting the creation of a new item into two parts:
178 - create item with key only
179 - insert new item into the set
180 - if inserting is success, calls \p f functor to initialize \p val.
182 The functor signature is:
184 void func( value_type& val );
186 where \p val is the item inserted.
188 The user-defined functor is called only if the inserting is successful.
190 The function locks RCU internally.
191 @warning See \ref cds_intrusive_item_creating "insert item troubleshooting".
193 template <typename Func>
194 bool insert( value_type& val, Func f )
196 hash_type const& hash = hash_accessor()( val );
197 traverse_data pos( hash, *this );
203 node_ptr slot = base_class::traverse( pos );
204 assert(slot.bits() == 0);
206 if ( pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire) == slot) {
208 if ( cmp( hash, hash_accessor()(*slot.ptr())) == 0 ) {
209 // the item with that hash value already exists
210 stats().onInsertFailed();
214 // the slot must be expanded
215 base_class::expand_slot( pos, slot );
218 // the slot is empty, try to insert data node
220 if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong(pNull, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed))
222 // the new data node has been inserted
225 stats().onInsertSuccess();
226 stats().height( pos.nHeight );
230 // insert failed - slot has been changed by another thread
232 stats().onInsertRetry();
236 stats().onSlotChanged();
242 Performs inserting or updating the item with hash value equal to \p val.
243 - If hash value is found then existing item is replaced with \p val, old item is disposed
244 with \p Traits::disposer. Note that the disposer is called by \p GC asynchronously.
245 The function returns <tt> std::pair<true, false> </tt>
246 - If hash value is not found and \p bInsert is \p true then \p val is inserted,
247 the function returns <tt> std::pair<true, true> </tt>
248 - If hash value is not found and \p bInsert is \p false then the set is unchanged,
249 the function returns <tt> std::pair<false, false> </tt>
251 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful
252 (i.e. the item has been inserted or updated),
253 \p second is \p true if new item has been added or \p false if the set contains that hash.
255 The function locks RCU internally.
257 std::pair<bool, bool> update( value_type& val, bool bInsert = true )
259 return do_update(val, [](value_type&, value_type *) {}, bInsert );
262 /// Unlinks the item \p val from the set
264 The function searches the item \p val in the set and unlink it
265 if it is found and its address is equal to <tt>&val</tt>.
267 The function returns \p true if success and \p false otherwise.
269 RCU should not be locked. The function locks RCU internally.
271 bool unlink( value_type const& val )
273 check_deadlock_policy::check();
275 auto pred = [&val](value_type const& item) -> bool { return &item == &val; };
279 p = do_erase( hash_accessor()( val ), std::ref( pred ));
282 gc::template retire_ptr<disposer>( p );
288 /// Deletes the item from the set
290 The function searches \p hash in the set,
291 unlinks the item found, and returns \p true.
292 If that item is not found the function returns \p false.
294 The \ref disposer specified in \p Traits is called by garbage collector \p GC asynchronously.
296 RCU should not be locked. The function locks RCU internally.
298 bool erase( hash_type const& hash )
300 return erase(hash, [](value_type const&) {} );
303 /// Deletes the item from the set
305 The function searches \p hash in the set,
306 calls \p f functor with the item found, and unlinks it from the set.
307 The \ref disposer specified in \p Traits is called
308 by garbage collector \p GC asynchronously.
310 The \p Func interface is
313 void operator()( value_type& item );
317 If \p hash is not found the function returns \p false.
319 RCU should not be locked. The function locks RCU internally.
321 template <typename Func>
322 bool erase( hash_type const& hash, Func f )
324 check_deadlock_policy::check();
330 p = do_erase( hash, []( value_type const&) -> bool { return true; } );
333 // p is guarded by HP
336 gc::template retire_ptr<disposer>(p);
342 /// Extracts the item with specified \p hash
344 The function searches \p hash in the set,
345 unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
346 If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr.
348 RCU \p synchronize method can be called. RCU should NOT be locked.
349 The function does not call the disposer for the item found.
350 The disposer will be implicitly invoked when the returned object is destroyed or when
351 its \p release() member function is called.
354 typedef cds::intrusive::FeldmanHashSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > set_type;
358 typename set_type::exempt_ptr ep( theSet.extract( 5 ));
363 // Dispose returned item.
368 exempt_ptr extract( hash_type const& hash )
370 check_deadlock_policy::check();
375 p = do_erase( hash, []( value_type const&) -> bool {return true;} );
377 return exempt_ptr( p );
380 /// Finds an item by its \p hash
382 The function searches the item by \p hash and calls the functor \p f for item found.
383 The interface of \p Func functor is:
386 void operator()( value_type& item );
389 where \p item is the item found.
391 The functor may change non-key fields of \p item. Note that the functor is only guaranteed
392 that \p item cannot be disposed while the functor is executing.
393 The functor does not serialize simultaneous access to the set's \p item. If such access is
394 possible you must provide your own synchronization schema on item level to prevent unsafe item modifications.
396 The function returns \p true if \p hash is found, \p false otherwise.
398 The function applies RCU lock internally.
400 template <typename Func>
401 bool find( hash_type const& hash, Func f )
405 value_type * p = search( hash );
413 /// Checks whether the set contains \p hash
415 The function searches the item by its \p hash
416 and returns \p true if it is found, or \p false otherwise.
418 The function applies RCU lock internally.
420 bool contains( hash_type const& hash )
422 return find( hash, [](value_type&) {} );
425 /// Finds an item by its \p hash and returns the item found
427 The function searches the item by its \p hash
428 and returns the pointer to the item found.
429 If \p hash is not found the function returns \p nullptr.
431 RCU should be locked before the function invocation.
432 Returned pointer is valid only while RCU is locked.
436 typedef cds::intrusive::FeldmanHashSet< your_template_params > my_set;
443 foo * p = theSet.get( 5 );
451 value_type * get( hash_type const& hash )
453 assert( gc::is_locked());
454 return search( hash );
457 /// Clears the set (non-atomic)
459 The function unlinks all data nodes from the set.
460 The function is not atomic but is thread-safe.
461 After \p %clear() the set may not be empty because another threads may insert items.
463 For each item the \p disposer is called after unlinking.
467 clear_array( head(), head_size());
470 /// Checks if the set is empty
472 Emptiness is checked by item counting: if item count is zero then the set is empty.
473 Thus, the correct item counting feature is an important part of the set implementation.
480 /// Returns item count in the set
483 return m_ItemCounter;
486 /// Returns const reference to internal statistics
487 stat const& statistics() const
492 /// Returns the size of head node
493 using base_class::head_size;
495 /// Returns the size of the array node
496 using base_class::array_node_size;
498 /// Collects tree level statistics into \p stat
500 The function traverses the set and collects statistics for each level of the tree
501 into \p feldman_hashset::level_statistics struct. The element of \p stat[i]
502 represents statistics for level \p i, level 0 is head array.
503 The function is thread-safe and may be called in multi-threaded environment.
505 Result can be useful for estimating efficiency of hash functor you use.
507 void get_level_statistics(std::vector<feldman_hashset::level_statistics>& stat) const
509 base_class::get_level_statistics(stat);
516 friend class FeldmanHashSet;
519 array_node * m_pNode; ///< current array node
520 size_t m_idx; ///< current position in m_pNode
521 value_type * m_pValue; ///< current value
522 FeldmanHashSet const* m_set; ///< Hash set
525 iterator_base() CDS_NOEXCEPT
532 iterator_base(iterator_base const& rhs) CDS_NOEXCEPT
533 : m_pNode(rhs.m_pNode)
535 , m_pValue(rhs.m_pValue)
539 iterator_base& operator=(iterator_base const& rhs) CDS_NOEXCEPT
541 m_pNode = rhs.m_pNode;
543 m_pValue = rhs.m_pValue;
548 iterator_base& operator++()
554 iterator_base& operator--()
560 bool operator ==(iterator_base const& rhs) const CDS_NOEXCEPT
562 return m_pNode == rhs.m_pNode && m_idx == rhs.m_idx && m_set == rhs.m_set;
565 bool operator !=(iterator_base const& rhs) const CDS_NOEXCEPT
567 return !(*this == rhs);
571 iterator_base(FeldmanHashSet const& set, array_node * pNode, size_t idx, bool)
578 iterator_base(FeldmanHashSet const& set, array_node * pNode, size_t idx)
587 value_type * pointer() const CDS_NOEXCEPT
589 assert(gc::is_locked());
595 assert( gc::is_locked());
596 assert(m_set != nullptr);
597 assert(m_pNode != nullptr);
599 size_t const arrayNodeSize = m_set->array_node_size();
600 size_t const headSize = m_set->head_size();
601 array_node * pNode = m_pNode;
602 size_t idx = m_idx + 1;
603 size_t nodeSize = m_pNode->pParent ? arrayNodeSize : headSize;
606 if (idx < nodeSize) {
607 node_ptr slot = pNode->nodes[idx].load(memory_model::memory_order_acquire);
608 if (slot.bits() == base_class::flag_array_node ) {
609 // array node, go down the tree
610 assert(slot.ptr() != nullptr);
611 pNode = to_array(slot.ptr());
613 nodeSize = arrayNodeSize;
615 else if (slot.bits() == base_class::flag_array_converting ) {
616 // the slot is converting to array node right now - skip the node
624 m_pValue = slot.ptr();
632 if (pNode->pParent) {
633 idx = pNode->idxParent + 1;
634 pNode = pNode->pParent;
635 nodeSize = pNode->pParent ? arrayNodeSize : headSize;
639 assert(pNode == m_set->head());
640 assert(idx == headSize);
652 assert(gc::is_locked());
653 assert(m_set != nullptr);
654 assert(m_pNode != nullptr);
656 size_t const arrayNodeSize = m_set->array_node_size();
657 size_t const headSize = m_set->head_size();
658 size_t const endIdx = size_t(0) - 1;
660 array_node * pNode = m_pNode;
661 size_t idx = m_idx - 1;
662 size_t nodeSize = m_pNode->pParent ? arrayNodeSize : headSize;
666 node_ptr slot = pNode->nodes[idx].load(memory_model::memory_order_acquire);
667 if (slot.bits() == base_class::flag_array_node ) {
668 // array node, go down the tree
669 assert(slot.ptr() != nullptr);
670 pNode = to_array(slot.ptr());
671 nodeSize = arrayNodeSize;
674 else if (slot.bits() == base_class::flag_array_converting ) {
675 // the slot is converting to array node right now - skip the node
683 m_pValue = slot.ptr();
691 if (pNode->pParent) {
692 idx = pNode->idxParent - 1;
693 pNode = pNode->pParent;
694 nodeSize = pNode->pParent ? arrayNodeSize : headSize;
698 assert(pNode == m_set->head());
699 assert(idx == endIdx);
710 template <class Iterator>
711 Iterator init_begin() const
713 return Iterator(*this, head(), size_t(0) - 1);
716 template <class Iterator>
717 Iterator init_end() const
719 return Iterator(*this, head(), head_size(), false);
722 template <class Iterator>
723 Iterator init_rbegin() const
725 return Iterator(*this, head(), head_size());
728 template <class Iterator>
729 Iterator init_rend() const
731 return Iterator(*this, head(), size_t(0) - 1, false);
734 /// Bidirectional iterator class
735 template <bool IsConst>
736 class bidirectional_iterator : protected iterator_base
738 friend class FeldmanHashSet;
741 static CDS_CONSTEXPR bool const c_bConstantIterator = IsConst;
744 typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer
745 typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference
748 bidirectional_iterator() CDS_NOEXCEPT
751 bidirectional_iterator(bidirectional_iterator const& rhs) CDS_NOEXCEPT
755 bidirectional_iterator& operator=(bidirectional_iterator const& rhs) CDS_NOEXCEPT
757 iterator_base::operator=(rhs);
761 bidirectional_iterator& operator++()
763 iterator_base::operator++();
767 bidirectional_iterator& operator--()
769 iterator_base::operator--();
773 value_ptr operator ->() const CDS_NOEXCEPT
775 return iterator_base::pointer();
778 value_ref operator *() const CDS_NOEXCEPT
780 value_ptr p = iterator_base::pointer();
785 template <bool IsConst2>
786 bool operator ==(bidirectional_iterator<IsConst2> const& rhs) const CDS_NOEXCEPT
788 return iterator_base::operator==(rhs);
791 template <bool IsConst2>
792 bool operator !=(bidirectional_iterator<IsConst2> const& rhs) const CDS_NOEXCEPT
794 return !(*this == rhs);
798 bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx, bool)
799 : iterator_base(set, pNode, idx, false)
802 bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx)
803 : iterator_base(set, pNode, idx)
807 /// Reverse bidirectional iterator
808 template <bool IsConst>
809 class reverse_bidirectional_iterator : public iterator_base
811 friend class FeldmanHashSet;
814 typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer
815 typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference
818 reverse_bidirectional_iterator() CDS_NOEXCEPT
822 reverse_bidirectional_iterator(reverse_bidirectional_iterator const& rhs) CDS_NOEXCEPT
826 reverse_bidirectional_iterator& operator=(reverse_bidirectional_iterator const& rhs) CDS_NOEXCEPT
828 iterator_base::operator=(rhs);
832 reverse_bidirectional_iterator& operator++()
834 iterator_base::operator--();
838 reverse_bidirectional_iterator& operator--()
840 iterator_base::operator++();
844 value_ptr operator ->() const CDS_NOEXCEPT
846 return iterator_base::pointer();
849 value_ref operator *() const CDS_NOEXCEPT
851 value_ptr p = iterator_base::pointer();
856 template <bool IsConst2>
857 bool operator ==(reverse_bidirectional_iterator<IsConst2> const& rhs) const
859 return iterator_base::operator==(rhs);
862 template <bool IsConst2>
863 bool operator !=(reverse_bidirectional_iterator<IsConst2> const& rhs)
865 return !(*this == rhs);
869 reverse_bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx, bool)
870 : iterator_base(set, pNode, idx, false)
873 reverse_bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx)
874 : iterator_base(set, pNode, idx, false)
876 iterator_base::backward();
882 #ifdef CDS_DOXYGEN_INVOKED
883 typedef implementation_defined iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional iterator" type
884 typedef implementation_defined const_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional const iterator" type
885 typedef implementation_defined reverse_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional reverse iterator" type
886 typedef implementation_defined const_reverse_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional reverse const iterator" type
888 typedef bidirectional_iterator<false> iterator;
889 typedef bidirectional_iterator<true> const_iterator;
890 typedef reverse_bidirectional_iterator<false> reverse_iterator;
891 typedef reverse_bidirectional_iterator<true> const_reverse_iterator;
894 ///@name Thread-safe iterators
895 /** @anchor cds_intrusive_FeldmanHashSet_rcu_iterators
896 The set supports thread-safe iterators: you may iterate over the set in multi-threaded environment
897 under explicit RCU lock.
898 RCU lock requirement means that inserting or searching is allowed but you must not erase the items from the set
899 since erasing under RCU lock can lead to a deadlock. However, another thread can call \p erase() safely
900 while your thread is iterating.
902 A typical example is:
907 uint32_t payload; // only for example
909 struct set_traits: cds::intrusive::feldman_hashset::traits
911 struct hash_accessor {
912 uint32_t operator()( foo const& src ) const
919 typedef cds::urcu::gc< cds::urcu::general_buffered<>> rcu;
920 typedef cds::intrusive::FeldmanHashSet< rcu, foo, set_traits > set_type;
926 // iterate over the set
929 typename set_type::rcu_lock l; // scoped RCU lock
932 for ( auto i = s.begin(); i != s.end(); ++i ) {
933 // deal with i. Remember, erasing is prohibited here!
936 } // at this point RCU lock is released
939 Each iterator object supports the common interface:
940 - dereference operators:
942 value_type [const] * operator ->() noexcept
943 value_type [const] & operator *() noexcept
945 - pre-increment and pre-decrement. Post-operators are not supported
946 - equality operators <tt>==</tt> and <tt>!=</tt>.
947 Iterators are equal iff they point to the same cell of the same array node.
948 Note that for two iterators \p it1 and \p it2 the condition <tt> it1 == it2 </tt>
949 does not entail <tt> &(*it1) == &(*it2) </tt>: welcome to concurrent containers
951 @note It is possible the item can be iterated more than once, for example, if an iterator points to the item
952 in an array node that is being split.
956 /// Returns an iterator to the beginning of the set
959 return iterator(*this, head(), size_t(0) - 1);
962 /// Returns an const iterator to the beginning of the set
963 const_iterator begin() const
965 return const_iterator(*this, head(), size_t(0) - 1);
968 /// Returns an const iterator to the beginning of the set
969 const_iterator cbegin()
971 return const_iterator(*this, head(), size_t(0) - 1);
974 /// Returns an iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
977 return iterator(*this, head(), head_size(), false);
980 /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
981 const_iterator end() const
983 return const_iterator(*this, head(), head_size(), false);
986 /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
987 const_iterator cend()
989 return const_iterator(*this, head(), head_size(), false);
992 /// Returns a reverse iterator to the first element of the reversed set
993 reverse_iterator rbegin()
995 return reverse_iterator(*this, head(), head_size());
998 /// Returns a const reverse iterator to the first element of the reversed set
999 const_reverse_iterator rbegin() const
1001 return const_reverse_iterator(*this, head(), head_size());
1004 /// Returns a const reverse iterator to the first element of the reversed set
1005 const_reverse_iterator crbegin()
1007 return const_reverse_iterator(*this, head(), head_size());
1010 /// Returns a reverse iterator to the element following the last element of the reversed set
1012 It corresponds to the element preceding the first element of the non-reversed container.
1013 This element acts as a placeholder, attempting to access it results in undefined behavior.
1015 reverse_iterator rend()
1017 return reverse_iterator(*this, head(), size_t(0) - 1, false);
1020 /// Returns a const reverse iterator to the element following the last element of the reversed set
1022 It corresponds to the element preceding the first element of the non-reversed container.
1023 This element acts as a placeholder, attempting to access it results in undefined behavior.
1025 const_reverse_iterator rend() const
1027 return const_reverse_iterator(*this, head(), size_t(0) - 1, false);
1030 /// Returns a const reverse iterator to the element following the last element of the reversed set
1032 It corresponds to the element preceding the first element of the non-reversed container.
1033 This element acts as a placeholder, attempting to access it results in undefined behavior.
1035 const_reverse_iterator crend()
1037 return const_reverse_iterator(*this, head(), size_t(0) - 1, false);
1043 template <typename Func>
1044 std::pair<bool, bool> do_update(value_type& val, Func f, bool bInsert = true)
1046 hash_type const& hash = hash_accessor()(val);
1047 traverse_data pos( hash, *this );
1048 hash_comparator cmp;
1054 node_ptr slot = base_class::traverse( pos );
1055 assert(slot.bits() == 0);
1058 if ( pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire) == slot) {
1060 if ( cmp( hash, hash_accessor()(*slot.ptr())) == 0 ) {
1061 // the item with that hash value already exists
1062 // Replace it with val
1063 if ( slot.ptr() == &val ) {
1064 stats().onUpdateExisting();
1065 return std::make_pair(true, false);
1068 if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong(slot, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed)) {
1069 // slot can be disposed
1070 f( val, slot.ptr());
1072 stats().onUpdateExisting();
1073 goto update_existing_done;
1076 stats().onUpdateRetry();
1080 // the slot must be expanded
1081 base_class::expand_slot( pos, slot );
1084 stats().onUpdateFailed();
1085 return std::make_pair(false, false);
1090 // the slot is empty, try to insert data node
1093 if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong(pNull, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed))
1095 // the new data node has been inserted
1098 stats().onUpdateNew();
1099 stats().height( pos.nHeight );
1100 return std::make_pair(true, true);
1104 stats().onUpdateFailed();
1105 return std::make_pair(false, false);
1108 // insert failed - slot has been changed by another thread
1110 stats().onUpdateRetry();
1114 stats().onSlotChanged();
1118 // retire_ptr must be called only outside of RCU lock
1119 update_existing_done:
1121 gc::template retire_ptr<disposer>(pOld);
1122 return std::make_pair(true, false);
1125 template <typename Predicate>
1126 value_type * do_erase( hash_type const& hash, Predicate pred)
1128 assert(gc::is_locked());
1129 traverse_data pos( hash, *this );
1130 hash_comparator cmp;
1133 node_ptr slot = base_class::traverse( pos );
1134 assert( slot.bits() == 0 );
1136 if ( pos.pArr->nodes[pos.nSlot].load( memory_model::memory_order_acquire ) == slot ) {
1138 if ( cmp( hash, hash_accessor()(*slot.ptr())) == 0 && pred( *slot.ptr())) {
1139 // item found - replace it with nullptr
1140 if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong( slot, node_ptr( nullptr ), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
1142 stats().onEraseSuccess();
1146 stats().onEraseRetry();
1149 stats().onEraseFailed();
1153 // the slot is empty
1154 stats().onEraseFailed();
1159 stats().onSlotChanged();
1163 value_type * search(hash_type const& hash )
1165 assert( gc::is_locked());
1166 traverse_data pos( hash, *this );
1167 hash_comparator cmp;
1170 node_ptr slot = base_class::traverse( pos );
1171 assert( slot.bits() == 0 );
1173 if ( pos.pArr->nodes[pos.nSlot].load( memory_model::memory_order_acquire ) != slot ) {
1174 // slot value has been changed - retry
1175 stats().onSlotChanged();
1178 else if ( slot.ptr() && cmp( hash, hash_accessor()(*slot.ptr())) == 0 ) {
1180 stats().onFindSuccess();
1183 stats().onFindFailed();
1192 void clear_array(array_node * pArrNode, size_t nSize)
1197 for (atomic_node_ptr * pArr = pArrNode->nodes, *pLast = pArr + nSize; pArr != pLast; ++pArr) {
1199 node_ptr slot = pArr->load(memory_model::memory_order_acquire);
1200 if (slot.bits() == base_class::flag_array_node ) {
1201 // array node, go down the tree
1202 assert(slot.ptr() != nullptr);
1203 clear_array(to_array(slot.ptr()), array_node_size());
1206 else if (slot.bits() == base_class::flag_array_converting ) {
1207 // the slot is converting to array node right now
1208 while ((slot = pArr->load(memory_model::memory_order_acquire)).bits() == base_class::flag_array_converting ) {
1210 stats().onSlotConverting();
1214 assert(slot.ptr() != nullptr);
1215 assert(slot.bits() == base_class::flag_array_node );
1216 clear_array(to_array(slot.ptr()), array_node_size());
1221 if (pArr->compare_exchange_strong(slot, node_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed)) {
1223 gc::template retire_ptr<disposer>(slot.ptr());
1225 stats().onEraseSuccess();
1236 }} // namespace cds::intrusive
1238 #endif // #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H