3 #ifndef CDSLIB_INTRUSIVE_MULTILEVEL_HASHSET_RCU_H
4 #define CDSLIB_INTRUSIVE_MULTILEVEL_HASHSET_RCU_H
6 #include <functional> // std::ref
7 #include <iterator> // std::iterator_traits
9 #include <cds/intrusive/details/multilevel_hashset_base.h>
10 #include <cds/details/allocator.h>
11 #include <cds/urcu/details/check_deadlock.h>
12 #include <cds/urcu/exempt_ptr.h>
13 #include <cds/intrusive/details/raw_ptr_disposer.h>
16 namespace cds { namespace intrusive {
17 /// Intrusive hash set based on multi-level array, \ref cds_urcu_desc "RCU" specialization
18 /** @ingroup cds_intrusive_map
19 @anchor cds_intrusive_MultilevelHashSet_rcu
22 - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays:
23 Wait-free Extensible Hash Maps"
25 See algorithm short description @ref cds_intrusive_MultilevelHashSet_hp "here"
27 @note Two important things you should keep in mind when you're using \p %MultiLevelHashSet:
28 - all keys must be fixed-size. It means that you cannot use \p std::string as a key for \p %MultiLevelHashSet.
29 Instead, for the strings you should use well-known hashing algorithms like <a href="https://en.wikipedia.org/wiki/Secure_Hash_Algorithm">SHA1, SHA2</a>,
30 <a href="https://en.wikipedia.org/wiki/MurmurHash">MurmurHash</a>, <a href="https://en.wikipedia.org/wiki/CityHash">CityHash</a>
31 or its successor <a href="https://code.google.com/p/farmhash/">FarmHash</a> and so on, which
32 converts variable-length strings to fixed-length bit-strings, and use that hash as a key in \p %MultiLevelHashSet.
33 - \p %MultiLevelHashSet uses a perfect hashing. It means that if two different keys, for example, of type \p std::string,
34 have identical hash then you cannot insert both of those keys in the set. \p %MultiLevelHashSet does not maintain the key,
35 it maintains its fixed-size hash value.
38 - \p RCU - one of \ref cds_urcu_gc "RCU type"
39 - \p T - a value type to be stored in the set
40 - \p Traits - type traits, the structure based on \p multilevel_hashset::traits or result of \p multilevel_hashset::make_traits metafunction.
41 \p Traits is the mandatory argument because it has one mandatory type - an @ref multilevel_hashset::traits::hash_accessor "accessor"
42 to hash value of \p T. The set algorithm does not calculate that hash value.
44 @note Before including <tt><cds/intrusive/multilevel_hashset_rcu.h></tt> you should include appropriate RCU header file,
45 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
47 The set supports @ref cds_intrusive_MultilevelHashSet_rcu_iterators "bidirectional thread-safe iterators"
48 with some restrictions.
53 #ifdef CDS_DOXYGEN_INVOKED
54 class Traits = multilevel_hashset::traits
59 class MultiLevelHashSet< cds::urcu::gc< RCU >, T, Traits >
62 typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector
63 typedef T value_type; ///< type of value stored in the set
64 typedef Traits traits; ///< Traits template parameter
66 typedef typename traits::hash_accessor hash_accessor; ///< Hash accessor functor
67 static_assert(!std::is_same< hash_accessor, cds::opt::none >::value, "hash_accessor functor must be specified in Traits");
69 /// Hash type deduced from \p hash_accessor return type
// decay + remove_reference strips cv-qualifiers and reference so that
// hash_type is the plain value type returned by hash_accessor
70 typedef typename std::decay<
71 typename std::remove_reference<
72 decltype(hash_accessor()(std::declval<T>()))
75 //typedef typename std::result_of< hash_accessor( std::declval<T>()) >::type hash_type;
76 static_assert(!std::is_pointer<hash_type>::value, "hash_accessor should return a reference to hash value");
78 typedef typename traits::disposer disposer; ///< data node disposer
80 # ifdef CDS_DOXYGEN_INVOKED
81 typedef implementation_defined hash_comparator; ///< hash compare functor based on opt::compare and opt::less option setter
// Falls back to bitwise comparison of the hash bytes when neither
// opt::compare nor opt::less is provided in Traits
83 typedef typename cds::opt::details::make_comparator_from<
86 multilevel_hashset::bitwise_compare< hash_type >
87 >::type hash_comparator;
90 typedef typename traits::item_counter item_counter; ///< Item counter type
91 typedef typename traits::node_allocator node_allocator; ///< Array node allocator
92 typedef typename traits::memory_model memory_model; ///< Memory model
93 typedef typename traits::back_off back_off; ///< Backoff strategy
94 typedef typename traits::stat stat; ///< Internal statistics type
95 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
96 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
97 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
99 using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, disposer, void >; ///< pointer to extracted node
101 /// Node marked pointer; the two low-order bits carry the slot state flags (see flag_array_converting / flag_array_node)
102 typedef cds::details::marked_ptr< value_type, 3 > node_ptr;
103 /// Array node element
104 typedef atomics::atomic< node_ptr > atomic_node_ptr;
// Slot state flags stored in the low-order bits of node_ptr.
// A slot with bits()==0 holds a plain data node pointer (or nullptr).
109 flag_array_converting = 1, ///< the cell is converting from data node to an array node
110 flag_array_node = 2 ///< the cell is a pointer to an array node
114 array_node * const pParent; ///< parent array node
115 size_t const idxParent; ///< index in parent array node
116 atomic_node_ptr nodes[1]; ///< node array; C-style flexible-array idiom — real size is allocated past the struct (see alloc_array_node)
118 array_node(array_node * parent, size_t idx)
// array nodes are only created via the allocator helpers; copying/moving
// them would break parent/child links, so those operations are deleted
123 array_node() = delete;
124 array_node(array_node const&) = delete;
125 array_node(array_node&&) = delete;
128 typedef cds::details::Allocator< array_node, node_allocator > cxx_array_node_allocator;
129 typedef multilevel_hashset::details::hash_splitter< hash_type > hash_splitter;
130 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
136 multilevel_hashset::details::metrics const m_Metrics; ///< Metrics (head/array node sizes and their log2), fixed at construction
137 array_node * m_Head; ///< Head array
138 item_counter m_ItemCounter; ///< Item counter
139 stat m_Stat; ///< Internal statistics
143 /// Creates empty set
145 @param head_bits: 2<sup>head_bits</sup> specifies the size of head array, minimum is 4.
146 @param array_bits: 2<sup>array_bits</sup> specifies the size of array node, minimum is 2.
148 Equation for \p head_bits and \p array_bits:
150 sizeof(hash_type) * 8 == head_bits + N * array_bits
152 where \p N is multi-level array depth.
// metrics::make() derives node sizes from the bit counts and the hash width;
// the head array is allocated eagerly, child array nodes are created lazily on demand
154 MultiLevelHashSet(size_t head_bits = 8, size_t array_bits = 4)
155 : m_Metrics(multilevel_hashset::details::metrics::make(head_bits, array_bits, sizeof(hash_type)))
156 , m_Head(alloc_head_node())
159 /// Destructs the set and frees all data
// Not thread-safe: assumes no concurrent access; releases the head array last
163 free_array_node(m_Head);
168 The function inserts \p val in the set if it does not contain
169 an item with that hash.
171 Returns \p true if \p val is placed into the set, \p false otherwise.
173 The function locks RCU internally.
175 bool insert( value_type& val )
// delegate to the functor overload with a no-op initializer
177 return insert( val, [](value_type&) {} );
182 This function is intended for derived non-intrusive containers.
184 The function allows to split creating of new item into two parts:
185 - create item with key only
186 - insert new item into the set
187 - if inserting is success, calls \p f functor to initialize \p val.
189 The functor signature is:
191 void func( value_type& val );
193 where \p val is the item inserted.
195 The user-defined functor is called only if the inserting is successful.
197 The function locks RCU internally.
198 @warning See \ref cds_intrusive_item_creating "insert item troubleshooting".
200 template <typename Func>
201 bool insert( value_type& val, Func f )
203 hash_type const& hash = hash_accessor()( val );
204 hash_splitter splitter( hash );
// Descend the multi-level array, consuming head_node_size_log bits of the
// hash for the head array and array_node_size_log bits per level below it.
208 size_t nOffset = m_Metrics.head_node_size_log;
209 array_node * pArr = m_Head;
210 size_t nSlot = splitter.cut( m_Metrics.head_node_size_log );
211 assert( nSlot < m_Metrics.head_node_size );
215 node_ptr slot = pArr->nodes[nSlot].load( memory_model::memory_order_acquire );
216 if ( slot.bits() == flag_array_node ) {
217 // array node, go down the tree
218 assert( slot.ptr() != nullptr );
219 nSlot = splitter.cut( m_Metrics.array_node_size_log );
220 assert( nSlot < m_Metrics.array_node_size );
221 pArr = to_array( slot.ptr() );
222 nOffset += m_Metrics.array_node_size_log;
225 else if ( slot.bits() == flag_array_converting ) {
226 // the slot is converting to array node right now
// another thread is expanding this slot: back off and re-read
228 m_Stat.onSlotConverting();
232 assert(slot.bits() == 0 );
// re-check the slot after taking its snapshot: if it changed we must retry
235 if ( pArr->nodes[nSlot].load(memory_model::memory_order_acquire) == slot ) {
237 if ( cmp( hash, hash_accessor()( *slot.ptr() )) == 0 ) {
238 // the item with that hash value already exists
239 m_Stat.onInsertFailed();
243 // the slot must be expanded: hashes differ, push the occupant one level down
244 expand_slot( pArr, nSlot, slot, nOffset );
247 // the slot is empty, try to insert data node
249 if ( pArr->nodes[nSlot].compare_exchange_strong( pNull, node_ptr( &val ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
251 // the new data node has been inserted
254 m_Stat.onInsertSuccess();
255 m_Stat.height( nHeight );
259 // insert failed - slot has been changed by another thread
261 m_Stat.onInsertRetry();
265 m_Stat.onSlotChanged();
272 Performs inserting or updating the item with hash value equal to \p val.
273 - If hash value is found then existing item is replaced with \p val, old item is disposed
274 with \p Traits::disposer. Note that the disposer is called by \p GC asynchronously.
275 The function returns <tt> std::pair<true, false> </tt>
276 - If hash value is not found and \p bInsert is \p true then \p val is inserted,
277 the function returns <tt> std::pair<true, true> </tt>
278 - If hash value is not found and \p bInsert is \p false then the set is unchanged,
279 the function returns <tt> std::pair<false, false> </tt>
281 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful
282 (i.e. the item has been inserted or updated),
283 \p second is \p true if new item has been added or \p false if the set contains that hash.
285 The function locks RCU internally.
287 std::pair<bool, bool> update( value_type& val, bool bInsert = true )
// delegate to do_update with a no-op functor; all replace/insert logic lives there
289 return do_update(val, [](value_type&, value_type *) {}, bInsert );
292 /// Unlinks the item \p val from the set
294 The function searches the item \p val in the set and unlink it
295 if it is found and its address is equal to <tt>&val</tt>.
297 The function returns \p true if success and \p false otherwise.
299 RCU should not be locked. The function locks RCU internally.
301 bool unlink( value_type const& val )
303 check_deadlock_policy::check();
// erase only if the found item is this exact object (identity, not just equal hash)
305 auto pred = [&val](value_type const& item) -> bool { return &item == &val; };
309 p = do_erase( hash_accessor()( val ), std::ref( pred ));
// disposal is deferred to the RCU reclamation machinery
312 gc::template retire_ptr<disposer>( p );
318 /// Deletes the item from the set
320 The function searches \p hash in the set,
321 unlinks the item found, and returns \p true.
322 If that item is not found the function returns \p false.
324 The \ref disposer specified in \p Traits is called by garbage collector \p GC asynchronously.
326 RCU should not be locked. The function locks RCU internally.
328 bool erase( hash_type const& hash )
// delegate to the functor overload with a no-op callback
330 return erase(hash, [](value_type const&) {} );
333 /// Deletes the item from the set
335 The function searches \p hash in the set,
336 call \p f functor with item found, and unlinks it from the set.
337 The \ref disposer specified in \p Traits is called
338 by garbage collector \p GC asynchronously.
340 The \p Func interface is
343 void operator()( value_type& item );
347 If \p hash is not found the function returns \p false.
349 RCU should not be locked. The function locks RCU internally.
351 template <typename Func>
352 bool erase( hash_type const& hash, Func f )
354 check_deadlock_policy::check();
// erase unconditionally: any item with a matching hash qualifies
360 p = do_erase( hash, []( value_type const&) -> bool { return true; } );
363 // p is protected by the enclosing RCU critical section
// NOTE(review): the original comment said "guarded by HP" — a leftover from the
// hazard-pointer specialization; this file is the RCU specialization.
366 gc::template retire_ptr<disposer>(p);
372 /// Extracts the item with specified \p hash
374 The function searches \p hash in the set,
375 unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
376 If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr.
378 RCU \p synchronize method can be called. RCU should NOT be locked.
379 The function does not call the disposer for the item found.
380 The disposer will be implicitly invoked when the returned object is destroyed or when
381 its \p release() member function is called.
384 typedef cds::intrusive::MultiLevelHashSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > set_type;
388 typename set_type::exempt_ptr ep( theSet.extract( 5 ));
393 // Dispose returned item.
398 exempt_ptr extract( hash_type const& hash )
400 check_deadlock_policy::check();
// unconditional erase; ownership of the unlinked item passes to the exempt_ptr
405 p = do_erase( hash, []( value_type const&) -> bool {return true;} );
407 return exempt_ptr( p );
410 /// Finds an item by its \p hash
412 The function searches the item by \p hash and calls the functor \p f for item found.
413 The interface of \p Func functor is:
416 void operator()( value_type& item );
419 where \p item is the item found.
421 The functor may change non-key fields of \p item. Note that the functor is only guarantee
422 that \p item cannot be disposed during the functor is executing.
423 The functor does not serialize simultaneous access to the set's \p item. If such access is
424 possible you must provide your own synchronization schema on item level to prevent unsafe item modifications.
426 The function returns \p true if \p hash is found, \p false otherwise.
428 The function applies RCU lock internally.
430 template <typename Func>
431 bool find( hash_type const& hash, Func f )
// search() requires the RCU lock, taken in the (elided) scope above
435 value_type * p = search( hash );
443 /// Checks whether the set contains \p hash
445 The function searches the item by its \p hash
446 and returns \p true if it is found, or \p false otherwise.
448 The function applies RCU lock internally.
450 bool contains( hash_type const& hash )
// find() with a no-op functor — only the found/not-found result matters
452 return find( hash, [](value_type&) {} );
455 /// Finds an item by its \p hash and returns the item found
457 The function searches the item by its \p hash
458 and returns the pointer to the item found.
459 If \p hash is not found the function returns \p nullptr.
461 RCU should be locked before the function invocation.
462 Returned pointer is valid only while RCU is locked.
466 typedef cds::intrusive::MultiLevelHashSet< your_template_params > my_set;
473 foo * p = theSet.get( 5 );
// Unlike find()/contains(), the caller owns the RCU critical section here
481 value_type * get( hash_type const& hash )
483 assert( gc::is_locked());
484 return search( hash );
487 /// Clears the set (non-atomic)
489 The function unlink all data node from the set.
490 The function is not atomic but is thread-safe.
491 After \p %clear() the set may not be empty because another threads may insert items.
493 For each item the \p disposer is called after unlinking.
// recursively unlinks every data node starting from the head array
497 clear_array( m_Head, head_size() );
500 /// Checks if the set is empty
502 Emptiness is checked by item counting: if item count is zero then the set is empty.
503 Thus, the correct item counting feature is an important part of the set implementation.
510 /// Returns item count in the set
// accuracy depends on the item_counter type chosen in Traits
513 return m_ItemCounter;
516 /// Returns const reference to internal statistics
517 stat const& statistics() const
522 /// Returns the size of head node
523 size_t head_size() const
525 return m_Metrics.head_node_size;
528 /// Returns the size of the array node
529 size_t array_node_size() const
531 return m_Metrics.array_node_size;
// Common implementation for all iterator flavors: walks the multi-level
// array keeping (node, index, value) as its position. Must be used under RCU lock.
538 friend class MultiLevelHashSet;
541 array_node * m_pNode; ///< current array node
542 size_t m_idx; ///< current position in m_pNode
543 value_type * m_pValue; ///< current value
544 MultiLevelHashSet const* m_set; ///< Hash set
547 iterator_base() CDS_NOEXCEPT
554 iterator_base(iterator_base const& rhs) CDS_NOEXCEPT
555 : m_pNode(rhs.m_pNode)
557 , m_pValue(rhs.m_pValue)
561 iterator_base& operator=(iterator_base const& rhs) CDS_NOEXCEPT
563 m_pNode = rhs.m_pNode;
565 m_pValue = rhs.m_pValue;
570 iterator_base& operator++()
576 iterator_base& operator--()
// equality is positional (same cell of same array node), not value-based
582 bool operator ==(iterator_base const& rhs) const CDS_NOEXCEPT
584 return m_pNode == rhs.m_pNode && m_idx == rhs.m_idx && m_set == rhs.m_set;
587 bool operator !=(iterator_base const& rhs) const CDS_NOEXCEPT
589 return !(*this == rhs);
// the extra bool parameter tags the "no initial scan" constructor used for end()/rend()
593 iterator_base(MultiLevelHashSet const& set, array_node * pNode, size_t idx, bool)
600 iterator_base(MultiLevelHashSet const& set, array_node * pNode, size_t idx)
609 value_type * pointer() const CDS_NOEXCEPT
611 assert(gc::is_locked());
// Advances the iterator to the next data node, descending into array nodes
// and climbing back to the parent when a node is exhausted.
617 assert( gc::is_locked());
618 assert(m_set != nullptr);
619 assert(m_pNode != nullptr);
621 size_t const arrayNodeSize = m_set->array_node_size();
622 size_t const headSize = m_set->head_size();
623 array_node * pNode = m_pNode;
624 size_t idx = m_idx + 1;
// only the head array has no parent; its size differs from inner nodes
625 size_t nodeSize = m_pNode->pParent ? arrayNodeSize : headSize;
628 if (idx < nodeSize) {
629 node_ptr slot = pNode->nodes[idx].load(memory_model::memory_order_acquire);
630 if (slot.bits() == flag_array_node) {
631 // array node, go down the tree
632 assert(slot.ptr() != nullptr);
633 pNode = to_array(slot.ptr());
635 nodeSize = arrayNodeSize;
637 else if (slot.bits() == flag_array_converting) {
638 // the slot is converting to array node right now - skip the node
646 m_pValue = slot.ptr();
// node exhausted: resume scanning in the parent just past our slot
654 if (pNode->pParent) {
655 idx = pNode->idxParent + 1;
656 pNode = pNode->pParent;
657 nodeSize = pNode->pParent ? arrayNodeSize : headSize;
// reached past the head array: this is the end() position
661 assert(pNode == m_set->m_Head);
662 assert(idx == headSize);
// Mirror of forward(): moves the iterator to the previous data node.
674 assert(gc::is_locked());
675 assert(m_set != nullptr);
676 assert(m_pNode != nullptr);
678 size_t const arrayNodeSize = m_set->array_node_size();
679 size_t const headSize = m_set->head_size();
// endIdx is size_t(-1): the before-the-beginning sentinel index (rend())
680 size_t const endIdx = size_t(0) - 1;
682 array_node * pNode = m_pNode;
683 size_t idx = m_idx - 1;
684 size_t nodeSize = m_pNode->pParent ? arrayNodeSize : headSize;
688 node_ptr slot = pNode->nodes[idx].load(memory_model::memory_order_acquire);
689 if (slot.bits() == flag_array_node) {
690 // array node, go down the tree
691 assert(slot.ptr() != nullptr);
692 pNode = to_array(slot.ptr());
693 nodeSize = arrayNodeSize;
696 else if (slot.bits() == flag_array_converting) {
697 // the slot is converting to array node right now - skip the node
705 m_pValue = slot.ptr();
// node exhausted: resume scanning in the parent just before our slot
713 if (pNode->pParent) {
714 idx = pNode->idxParent - 1;
715 pNode = pNode->pParent;
716 nodeSize = pNode->pParent ? arrayNodeSize : headSize;
720 assert(pNode == m_set->m_Head);
721 assert(idx == endIdx);
// Iterator factory helpers shared by begin()/end()/rbegin()/rend().
// size_t(0) - 1 is the before-the-beginning sentinel index; the trailing
// bool selects the non-scanning iterator_base constructor (end positions).
732 template <class Iterator>
733 Iterator init_begin() const
735 return Iterator(*this, m_Head, size_t(0) - 1);
738 template <class Iterator>
739 Iterator init_end() const
741 return Iterator(*this, m_Head, head_size(), false);
744 template <class Iterator>
745 Iterator init_rbegin() const
747 return Iterator(*this, m_Head, head_size());
750 template <class Iterator>
751 Iterator init_rend() const
753 return Iterator(*this, m_Head, size_t(0) - 1, false);
756 /// Bidirectional iterator class
757 template <bool IsConst>
758 class bidirectional_iterator : protected iterator_base
760 friend class MultiLevelHashSet;
763 static CDS_CONSTEXPR bool const c_bConstantIterator = IsConst;
766 typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer
767 typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference
770 bidirectional_iterator() CDS_NOEXCEPT
773 bidirectional_iterator(bidirectional_iterator const& rhs) CDS_NOEXCEPT
777 bidirectional_iterator& operator=(bidirectional_iterator const& rhs) CDS_NOEXCEPT
779 iterator_base::operator=(rhs);
// pre-increment/decrement only; post-forms are intentionally not provided
783 bidirectional_iterator& operator++()
785 iterator_base::operator++();
789 bidirectional_iterator& operator--()
791 iterator_base::operator--();
795 value_ptr operator ->() const CDS_NOEXCEPT
797 return iterator_base::pointer();
800 value_ref operator *() const CDS_NOEXCEPT
802 value_ptr p = iterator_base::pointer();
// comparison is allowed across const/non-const iterator flavors
807 template <bool IsConst2>
808 bool operator ==(bidirectional_iterator<IsConst2> const& rhs) const CDS_NOEXCEPT
810 return iterator_base::operator==(rhs);
813 template <bool IsConst2>
814 bool operator !=(bidirectional_iterator<IsConst2> const& rhs) const CDS_NOEXCEPT
816 return !(*this == rhs);
// private constructors used by the init_xxx() factory helpers
820 bidirectional_iterator(MultiLevelHashSet& set, array_node * pNode, size_t idx, bool)
821 : iterator_base(set, pNode, idx, false)
824 bidirectional_iterator(MultiLevelHashSet& set, array_node * pNode, size_t idx)
825 : iterator_base(set, pNode, idx)
829 /// Reverse bidirectional iterator
830 template <bool IsConst>
831 class reverse_bidirectional_iterator : public iterator_base
833 friend class MultiLevelHashSet;
836 typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer
837 typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference
840 reverse_bidirectional_iterator() CDS_NOEXCEPT
844 reverse_bidirectional_iterator(reverse_bidirectional_iterator const& rhs) CDS_NOEXCEPT
848 reverse_bidirectional_iterator& operator=(reverse_bidirectional_iterator const& rhs) CDS_NOEXCEPT
850 iterator_base::operator=(rhs);
// reverse semantics: ++ walks backward, -- walks forward
854 reverse_bidirectional_iterator& operator++()
856 iterator_base::operator--();
860 reverse_bidirectional_iterator& operator--()
862 iterator_base::operator++();
866 value_ptr operator ->() const CDS_NOEXCEPT
868 return iterator_base::pointer();
871 value_ref operator *() const CDS_NOEXCEPT
873 value_ptr p = iterator_base::pointer();
878 template <bool IsConst2>
879 bool operator ==(reverse_bidirectional_iterator<IsConst2> const& rhs) const
881 return iterator_base::operator==(rhs);
884 template <bool IsConst2>
885 bool operator !=(reverse_bidirectional_iterator<IsConst2> const& rhs)
887 return !(*this == rhs);
891 reverse_bidirectional_iterator(MultiLevelHashSet& set, array_node * pNode, size_t idx, bool)
892 : iterator_base(set, pNode, idx, false)
// positions on the previous data node immediately (rbegin() semantics)
895 reverse_bidirectional_iterator(MultiLevelHashSet& set, array_node * pNode, size_t idx)
896 : iterator_base(set, pNode, idx, false)
898 iterator_base::backward();
904 #ifdef CDS_DOXYGEN_INVOKED
905 typedef implementation_defined iterator; ///< @ref cds_intrusive_MultilevelHashSet_rcu_iterators "bidirectional iterator" type
906 typedef implementation_defined const_iterator; ///< @ref cds_intrusive_MultilevelHashSet_rcu_iterators "bidirectional const iterator" type
907 typedef implementation_defined reverse_iterator; ///< @ref cds_intrusive_MultilevelHashSet_rcu_iterators "bidirectional reverse iterator" type
908 typedef implementation_defined const_reverse_iterator; ///< @ref cds_intrusive_MultilevelHashSet_rcu_iterators "bidirectional reverse const iterator" type
910 typedef bidirectional_iterator<false> iterator;
911 typedef bidirectional_iterator<true> const_iterator;
912 typedef reverse_bidirectional_iterator<false> reverse_iterator;
913 typedef reverse_bidirectional_iterator<true> const_reverse_iterator;
916 ///@name Thread-safe iterators
917 /** @anchor cds_intrusive_MultilevelHashSet_rcu_iterators
918 The set supports thread-safe iterators: you may iterate over the set in multi-threaded environment
919 under explicit RCU lock.
920 RCU lock requirement means that inserting or searching is allowed but you must not erase the items from the set
921 since erasing under RCU lock can lead to a deadlock. However, another thread can call \p erase() safely
922 while your thread is iterating.
924 A typical example is:
929 uint32_t payload; // only for example
931 struct set_traits: cds::intrusive::multilevel_hashset::traits
933 struct hash_accessor {
934 uint32_t operator()( foo const& src ) const
941 typedef cds::urcu::gc< cds::urcu::general_buffered<>> rcu;
942 typedef cds::intrusive::MultiLevelHashSet< rcu, foo, set_traits > set_type;
948 // iterate over the set
951 typename set_type::rcu_lock l; // scoped RCU lock
954 for ( auto i = s.begin(); i != s.end(); ++i ) {
955 // deal with i. Remember, erasing is prohibited here!
958 } // at this point RCU lock is released
961 Each iterator object supports the common interface:
962 - dereference operators:
964 value_type [const] * operator ->() noexcept
965 value_type [const] & operator *() noexcept
967 - pre-increment and pre-decrement. Post-operators is not supported
968 - equality operators <tt>==</tt> and <tt>!=</tt>.
969 Iterators are equal iff they point to the same cell of the same array node.
970 Note that for two iterators \p it1 and \p it2 the condition <tt> it1 == it2 </tt>
971 does not entail <tt> &(*it1) == &(*it2) </tt>: welcome to concurrent containers
973 @note It is possible the item can be iterated more than once, for example, if an iterator points to the item
974 in an array node that is being split.
// All begin/end variants must be used under an explicit RCU lock (see the
// iterator documentation above); size_t(0) - 1 is the pre-begin sentinel index.
978 /// Returns an iterator to the beginning of the set
981 return iterator(*this, m_Head, size_t(0) - 1);
984 /// Returns an const iterator to the beginning of the set
985 const_iterator begin() const
987 return const_iterator(*this, m_Head, size_t(0) - 1);
990 /// Returns an const iterator to the beginning of the set
991 const_iterator cbegin()
993 return const_iterator(*this, m_Head, size_t(0) - 1);
996 /// Returns an iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
999 return iterator(*this, m_Head, head_size(), false);
1002 /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
1003 const_iterator end() const
1005 return const_iterator(*this, m_Head, head_size(), false);
1008 /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
1009 const_iterator cend()
1011 return const_iterator(*this, m_Head, head_size(), false);
1014 /// Returns a reverse iterator to the first element of the reversed set
1015 reverse_iterator rbegin()
1017 return reverse_iterator(*this, m_Head, head_size());
1020 /// Returns a const reverse iterator to the first element of the reversed set
1021 const_reverse_iterator rbegin() const
1023 return const_reverse_iterator(*this, m_Head, head_size());
1026 /// Returns a const reverse iterator to the first element of the reversed set
1027 const_reverse_iterator crbegin()
1029 return const_reverse_iterator(*this, m_Head, head_size());
1032 /// Returns a reverse iterator to the element following the last element of the reversed set
1034 It corresponds to the element preceding the first element of the non-reversed container.
1035 This element acts as a placeholder, attempting to access it results in undefined behavior.
1037 reverse_iterator rend()
1039 return reverse_iterator(*this, m_Head, size_t(0) - 1, false);
1042 /// Returns a const reverse iterator to the element following the last element of the reversed set
1044 It corresponds to the element preceding the first element of the non-reversed container.
1045 This element acts as a placeholder, attempting to access it results in undefined behavior.
1047 const_reverse_iterator rend() const
1049 return const_reverse_iterator(*this, m_Head, size_t(0) - 1, false);
1052 /// Returns a const reverse iterator to the element following the last element of the reversed set
1054 It corresponds to the element preceding the first element of the non-reversed container.
1055 This element acts as a placeholder, attempting to access it results in undefined behavior.
1057 const_reverse_iterator crend()
1059 return const_reverse_iterator(*this, m_Head, size_t(0) - 1, false);
// Implements update(): insert-or-replace by hash. Traversal mirrors insert();
// on hash match the occupant is swapped for val via CAS and retired through RCU.
1065 template <typename Func>
1066 std::pair<bool, bool> do_update(value_type& val, Func f, bool bInsert = true)
1068 hash_type const& hash = hash_accessor()(val);
1069 hash_splitter splitter(hash);
1070 hash_comparator cmp;
1073 array_node * pArr = m_Head;
1074 size_t nSlot = splitter.cut(m_Metrics.head_node_size_log);
1075 assert(nSlot < m_Metrics.head_node_size);
1076 size_t nOffset = m_Metrics.head_node_size_log;
1080 node_ptr slot = pArr->nodes[nSlot].load(memory_model::memory_order_acquire);
1081 if (slot.bits() == flag_array_node) {
1082 // array node, go down the tree
1083 assert(slot.ptr() != nullptr);
1084 nSlot = splitter.cut(m_Metrics.array_node_size_log);
1085 assert(nSlot < m_Metrics.array_node_size);
1086 pArr = to_array(slot.ptr());
1087 nOffset += m_Metrics.array_node_size_log;
1090 else if (slot.bits() == flag_array_converting) {
1091 // the slot is converting to array node right now
1093 m_Stat.onSlotConverting();
1097 assert(slot.bits() == 0);
1099 value_type * pOld = nullptr;
// re-check the snapshot before acting on it; a mismatch means retry
1103 if ( pArr->nodes[nSlot].load(memory_model::memory_order_acquire) == slot ) {
1105 if (cmp(hash, hash_accessor()(*slot.ptr())) == 0) {
1106 // the item with that hash value already exists
1107 // Replace it with val
1108 if (slot.ptr() == &val) {
// already the same object: nothing to replace
1109 m_Stat.onUpdateExisting();
1110 return std::make_pair(true, false);
1113 if (pArr->nodes[nSlot].compare_exchange_strong(slot, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed)) {
1114 // slot can be disposed
1117 m_Stat.onUpdateExisting();
1118 goto update_existing_done;
1121 m_Stat.onUpdateRetry();
1125 // the slot must be expanded
1126 expand_slot(pArr, nSlot, slot, nOffset);
1129 // the slot is empty, try to insert data node
1132 if (pArr->nodes[nSlot].compare_exchange_strong(pNull, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed))
1134 // the new data node has been inserted
1137 m_Stat.onUpdateNew();
1138 m_Stat.height(nHeight);
1139 return std::make_pair(true, true);
// bInsert == false and hash not found: leave the set unchanged
1143 m_Stat.onUpdateFailed();
1144 return std::make_pair(false, false);
1147 // insert failed - slot has been changed by another thread
1149 m_Stat.onUpdateRetry();
1153 m_Stat.onSlotChanged();
// the replaced item is retired; the disposer runs asynchronously via RCU
1158 update_existing_done:
1160 gc::template retire_ptr<disposer>( pOld );
1161 return std::make_pair(true, false);
// Unlinks the item matching hash for which pred returns true and returns it;
// returns nullptr if not found. Caller must hold the RCU lock and is
// responsible for retiring the returned pointer.
1166 template <typename Predicate>
1167 value_type * do_erase( hash_type const& hash, Predicate pred)
1169 assert(gc::is_locked());
1171 hash_splitter splitter(hash);
1172 hash_comparator cmp;
1175 array_node * pArr = m_Head;
1176 size_t nSlot = splitter.cut(m_Metrics.head_node_size_log);
1177 assert(nSlot < m_Metrics.head_node_size);
1180 node_ptr slot = pArr->nodes[nSlot].load(memory_model::memory_order_acquire);
1181 if (slot.bits() == flag_array_node) {
1182 // array node, go down the tree
1183 assert(slot.ptr() != nullptr);
1184 nSlot = splitter.cut(m_Metrics.array_node_size_log);
1185 assert(nSlot < m_Metrics.array_node_size);
1186 pArr = to_array(slot.ptr());
1188 else if (slot.bits() == flag_array_converting) {
1189 // the slot is converting to array node right now
1191 m_Stat.onSlotConverting();
1195 assert(slot.bits() == 0);
// re-check the snapshot; a concurrent change forces a retry
1197 if ( pArr->nodes[nSlot].load(memory_model::memory_order_acquire) == slot ) {
1199 if (cmp(hash, hash_accessor()(*slot.ptr())) == 0 && pred(*slot.ptr())) {
1200 // item found - replace it with nullptr
1201 if (pArr->nodes[nSlot].compare_exchange_strong(slot, node_ptr(nullptr), memory_model::memory_order_acquire, atomics::memory_order_relaxed)) {
1203 m_Stat.onEraseSuccess();
// CAS lost: slot changed between the check and the exchange
1207 m_Stat.onEraseRetry();
1210 m_Stat.onEraseFailed();
1214 // the slot is empty
1215 m_Stat.onEraseFailed();
1220 m_Stat.onSlotChanged();
// Looks up hash and returns a pointer to the data node, or nullptr.
// Requires the RCU lock; the pointer is valid only within that critical section.
1225 value_type * search(hash_type const& hash )
1227 assert( gc::is_locked() );
1229 hash_splitter splitter(hash);
1230 hash_comparator cmp;
1233 array_node * pArr = m_Head;
1234 size_t nSlot = splitter.cut(m_Metrics.head_node_size_log);
1235 assert(nSlot < m_Metrics.head_node_size);
1238 node_ptr slot = pArr->nodes[nSlot].load(memory_model::memory_order_acquire);
1239 if (slot.bits() == flag_array_node) {
1240 // array node, go down the tree
1241 assert(slot.ptr() != nullptr);
1242 nSlot = splitter.cut(m_Metrics.array_node_size_log);
1243 assert(nSlot < m_Metrics.array_node_size);
1244 pArr = to_array(slot.ptr());
1246 else if (slot.bits() == flag_array_converting) {
1247 // the slot is converting to array node right now
1249 m_Stat.onSlotConverting();
1253 assert(slot.bits() == 0);
1255 // protect data node by hazard pointer
// NOTE(review): the comment above looks like a leftover from the HP
// specialization — in this RCU file the node is protected by the RCU lock.
1256 if ( pArr->nodes[nSlot].load(memory_model::memory_order_acquire) != slot) {
1257 // slot value has been changed - retry
1258 m_Stat.onSlotChanged();
1260 else if (slot.ptr() && cmp(hash, hash_accessor()(*slot.ptr())) == 0) {
1262 m_Stat.onFindSuccess();
1265 m_Stat.onFindFailed();
// Array-node allocation helpers. Nodes are allocated with a variable-length
// tail (nodes[1] flexible-array idiom), hence the explicit size computation.
1275 array_node * alloc_head_node() const
1277 return alloc_array_node(head_size(), nullptr, 0);
1280 array_node * alloc_array_node(array_node * pParent, size_t idxParent) const
1282 return alloc_array_node(array_node_size(), pParent, idxParent);
1285 static array_node * alloc_array_node(size_t nSize, array_node * pParent, size_t idxParent)
// nSize - 1 because one atomic_node_ptr is already counted in sizeof(array_node)
1287 array_node * pNode = cxx_array_node_allocator().NewBlock(sizeof(array_node) + sizeof(atomic_node_ptr) * (nSize - 1), pParent, idxParent);
// initialize every slot to an empty (null, unflagged) node_ptr
1288 for (atomic_node_ptr * p = pNode->nodes, *pEnd = pNode->nodes + nSize; p < pEnd; ++p)
1289 p->store(node_ptr(), memory_model::memory_order_release);
1293 static void free_array_node(array_node * parr)
1295 cxx_array_node_allocator().Delete(parr);
1300 // The function is not thread-safe. For use in dtor only
1304 // Destroy all array nodes
1305 destroy_array_nodes(m_Head, head_size());
// Recursively frees child array nodes; data nodes themselves are NOT disposed
// here — only the array structure is released (single-threaded, dtor path).
1308 void destroy_array_nodes(array_node * pArr, size_t nSize)
1310 for (atomic_node_ptr * p = pArr->nodes, *pLast = pArr->nodes + nSize; p != pLast; ++p) {
1311 node_ptr slot = p->load(memory_model::memory_order_acquire);
1312 if (slot.bits() == flag_array_node) {
1313 destroy_array_nodes(to_array(slot.ptr()), array_node_size());
1314 free_array_node(to_array(slot.ptr()));
1315 p->store(node_ptr(), memory_model::memory_order_relaxed);
// Unlinks every data node reachable from pArrNode (recursively), retiring each
// unlinked item through RCU so the disposer runs asynchronously. Thread-safe
// but not atomic: concurrent inserts may repopulate cleared slots.
1320 void clear_array(array_node * pArrNode, size_t nSize)
1325 for (atomic_node_ptr * pArr = pArrNode->nodes, *pLast = pArr + nSize; pArr != pLast; ++pArr) {
1327 node_ptr slot = pArr->load(memory_model::memory_order_acquire);
1328 if (slot.bits() == flag_array_node) {
1329 // array node, go down the tree
1330 assert(slot.ptr() != nullptr);
1331 clear_array(to_array(slot.ptr()), array_node_size());
1334 else if (slot.bits() == flag_array_converting) {
1335 // the slot is converting to array node right now
// spin until the conversion finishes, then descend into the new array node
1336 while ((slot = pArr->load(memory_model::memory_order_acquire)).bits() == flag_array_converting) {
1338 m_Stat.onSlotConverting();
1342 assert(slot.ptr() != nullptr);
1343 assert(slot.bits() == flag_array_node);
1344 clear_array(to_array(slot.ptr()), array_node_size());
// plain data node: unlink it via CAS and retire it through RCU
1349 if (pArr->compare_exchange_strong(slot, node_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed)) {
1351 gc::template retire_ptr<disposer>(slot.ptr());
1353 m_Stat.onEraseSuccess();
// Converts a data-node slot into an array-node slot: allocates a child array,
// moves the current occupant into it (at the index given by its next hash
// bits), then publishes the child. Two-phase CAS protocol:
// 1) mark the slot flag_array_converting, 2) swap in the array-node pointer.
1362 bool expand_slot(array_node * pParent, size_t idxParent, node_ptr current, size_t nOffset)
1364 assert(current.bits() == 0);
1365 assert(current.ptr());
// nOffset = number of hash bits already consumed on the path to this slot
1367 size_t idx = hash_splitter(hash_accessor()(*current.ptr()), nOffset).cut(m_Metrics.array_node_size_log);
1368 array_node * pArr = alloc_array_node(pParent, idxParent);
1370 node_ptr cur(current.ptr());
1371 atomic_node_ptr& slot = pParent->nodes[idxParent];
1372 if (!slot.compare_exchange_strong(cur, cur | flag_array_converting, memory_model::memory_order_release, atomics::memory_order_relaxed))
// lost the race: another thread changed the slot; roll back our allocation
1374 m_Stat.onExpandNodeFailed();
1375 free_array_node(pArr);
// place the old occupant into the child before publishing the child itself
1379 pArr->nodes[idx].store(current, memory_model::memory_order_release);
1381 cur = cur | flag_array_converting;
1383 slot.compare_exchange_strong(cur, node_ptr(to_node(pArr), flag_array_node), memory_model::memory_order_release, atomics::memory_order_relaxed)
1386 m_Stat.onExpandNodeSuccess();
1387 m_Stat.onArrayNodeCreated();
// Union-based punning between value_type* and array_node*: slots store both
// kinds of pointer in the same atomic cell, discriminated by the flag bits.
1395 converter(value_type * p)
1399 converter(array_node * p)
1404 static array_node * to_array(value_type * p)
1406 return converter(p).pArr;
1408 static value_type * to_node(array_node * p)
1410 return converter(p).pData;
1415 }} // namespace cds::intrusive
1417 #endif // #ifndef CDSLIB_INTRUSIVE_MULTILEVEL_HASHSET_RCU_H