#ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H
#define CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H

#include <functional> // std::ref
#include <iterator>   // std::iterator_traits

#include <cds/intrusive/details/feldman_hashset_base.h>
#include <cds/details/allocator.h>
#include <cds/urcu/details/check_deadlock.h>
#include <cds/urcu/exempt_ptr.h>
#include <cds/intrusive/details/raw_ptr_disposer.h>
namespace cds { namespace intrusive {

    /// Intrusive hash set based on multi-level array, \ref cds_urcu_desc "RCU" specialization
    /** @ingroup cds_intrusive_map
        @anchor cds_intrusive_FeldmanHashSet_rcu

        Source:
        - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays:
          Wait-free Extensible Hash Maps"

        See a brief description of the algorithm @ref cds_intrusive_FeldmanHashSet_hp "here".
        @note Two important things you should keep in mind when you're using \p %FeldmanHashSet:
        - all keys must be fixed-size. This means that you cannot use \p std::string as a key for \p %FeldmanHashSet.
          Instead, for strings you should use one of the well-known hashing algorithms like <a href="https://en.wikipedia.org/wiki/Secure_Hash_Algorithm">SHA1, SHA2</a>,
          <a href="https://en.wikipedia.org/wiki/MurmurHash">MurmurHash</a>, <a href="https://en.wikipedia.org/wiki/CityHash">CityHash</a>
          or its successor <a href="https://code.google.com/p/farmhash/">FarmHash</a>, which
          convert a variable-length string to a fixed-length bit-string, and use that hash as the key in \p %FeldmanHashSet.
        - \p %FeldmanHashSet uses perfect hashing. This means that if two different keys, for example, of type \p std::string,
          have identical hash values, then you cannot insert both keys into the set. \p %FeldmanHashSet does not maintain the key,
          it maintains its fixed-size hash value.
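
        For example, a minimal sketch of such a node (illustrative only; the \p string_node type
        and the SHA-1 helper are hypothetical, not part of the library):
        \code
        struct string_node {
            std::array<uint8_t, 20> hash; // fixed-size SHA-1 digest of the key
            std::string key;              // kept for the application only; the set never reads it
            // ... payload ...
        };
        // Before inserting, the application computes hash = SHA1( key ).
        // The set compares only string_node::hash; two distinct strings
        // with the same digest cannot both be stored.
        \endcode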
        Template parameters:
        - \p RCU - one of \ref cds_urcu_gc "RCU type"
        - \p T - a value type to be stored in the set
        - \p Traits - type traits, the structure based on \p feldman_hashset::traits or the result of the \p feldman_hashset::make_traits metafunction.
          \p Traits is a mandatory argument because it has one mandatory type: an @ref feldman_hashset::traits::hash_accessor "accessor"
          to the hash value of \p T. The set algorithm does not calculate that hash value.
        @note Before including <tt><cds/intrusive/feldman_hashset_rcu.h></tt> you should include the appropriate RCU header file,
        see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding header files.

        The set supports @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional thread-safe iterators"
        with some restrictions.
    */
    template <
        class RCU,
        class T,
#ifdef CDS_DOXYGEN_INVOKED
        class Traits = feldman_hashset::traits
#else
        class Traits
#endif
    >
    class FeldmanHashSet< cds::urcu::gc< RCU >, T, Traits >
    {
    public:
        typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector
        typedef T       value_type;      ///< type of value stored in the set
        typedef Traits  traits;          ///< Traits template parameter

        typedef typename traits::hash_accessor hash_accessor; ///< Hash accessor functor
        static_assert(!std::is_same< hash_accessor, cds::opt::none >::value, "hash_accessor functor must be specified in Traits");

        /// Hash type deduced from \p hash_accessor return type
        typedef typename std::decay<
            typename std::remove_reference<
                decltype(hash_accessor()(std::declval<T>()))
            >::type
        >::type hash_type;
        //typedef typename std::result_of< hash_accessor( std::declval<T>()) >::type hash_type;
        static_assert(!std::is_pointer<hash_type>::value, "hash_accessor should return a reference to hash value");

        typedef typename traits::disposer disposer; ///< data node disposer
# ifdef CDS_DOXYGEN_INVOKED
        typedef implementation_defined hash_comparator; ///< hash compare functor based on \p opt::compare and \p opt::less option setter
# else
        typedef typename cds::opt::details::make_comparator_from<
            hash_type,
            traits,
            feldman_hashset::bitwise_compare< hash_type >
        >::type hash_comparator;
# endif
        typedef typename traits::item_counter   item_counter;   ///< Item counter type
        typedef typename traits::node_allocator node_allocator; ///< Array node allocator
        typedef typename traits::memory_model   memory_model;   ///< Memory model
        typedef typename traits::back_off       back_off;       ///< Backoff strategy
        typedef typename traits::stat           stat;           ///< Internal statistics type
        typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
        typedef typename gc::scoped_lock        rcu_lock;       ///< RCU scoped lock
        static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking

        using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, disposer, void >; ///< pointer to extracted node
        /// Node marked pointer
        typedef cds::details::marked_ptr< value_type, 3 > node_ptr;
        /// Array node element
        typedef atomics::atomic< node_ptr > atomic_node_ptr;

    protected:
        enum node_flags {
            flag_array_converting = 1, ///< the cell is converting from data node to an array node
            flag_array_node = 2        ///< the cell is a pointer to an array node
        };

        struct array_node {
            array_node * const  pParent;   ///< parent array node
            size_t const        idxParent; ///< index in parent array node
            atomic_node_ptr     nodes[1];  ///< node array

            array_node(array_node * parent, size_t idx)
                : pParent(parent)
                , idxParent(idx)
            {}

            array_node() = delete;
            array_node(array_node const&) = delete;
            array_node(array_node&&) = delete;
        };

        typedef cds::details::Allocator< array_node, node_allocator > cxx_array_node_allocator;
        typedef feldman_hashset::details::hash_splitter< hash_type > hash_splitter;
        typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
        feldman_hashset::details::metrics const m_Metrics; ///< Metrics
        array_node *    m_Head;        ///< Head array
        item_counter    m_ItemCounter; ///< Item counter
        stat            m_Stat;        ///< Internal statistics
    public:
        /// Creates an empty set
        /**
            @param head_bits 2<sup>head_bits</sup> specifies the size of the head array, minimum is 4.
            @param array_bits 2<sup>array_bits</sup> specifies the size of an array node, minimum is 2.

            Equation for \p head_bits and \p array_bits:
            \code
            sizeof(hash_type) * 8 == head_bits + N * array_bits
            \endcode
            where \p N is the multi-level array depth.
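
            For example, with the default parameters and a 32-bit hash the equation reads
            <tt>32 == 8 + 6 * 4</tt>, i.e. the multi-level array has depth \p N = 6
            (this numeric example is for illustration only; \p set_type is an assumed typedef):
            \code
            // 32-bit hash: head array of 2^8 slots, array nodes of 2^4 slots, depth 6
            set_type theSet( 8, 4 );
            \endcode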
        */
        FeldmanHashSet(size_t head_bits = 8, size_t array_bits = 4)
            : m_Metrics(feldman_hashset::details::metrics::make(head_bits, array_bits, sizeof(hash_type)))
            , m_Head(alloc_head_node())
        {}

        /// Destructs the set and frees all data
        ~FeldmanHashSet()
        {
            destroy_tree();
            free_array_node(m_Head);
        }
        /// Inserts new node
        /**
            The function inserts \p val in the set if it does not contain
            an item with that hash.

            Returns \p true if \p val is placed into the set, \p false otherwise.

            The function locks RCU internally.
        */
        bool insert( value_type& val )
        {
            return insert( val, [](value_type&) {} );
        }
        /// Inserts new node
        /**
            This function is intended for derived non-intrusive containers.

            The function allows splitting the creation of a new item into two parts:
            - create an item with the key only
            - insert the new item into the set
            - if the insertion succeeds, call the functor \p f to initialize \p val.

            The functor signature is:
            \code
            void func( value_type& val );
            \endcode
            where \p val is the item inserted.

            The user-defined functor is called only if the insertion succeeds.

            The function locks RCU internally.
            @warning See \ref cds_intrusive_item_creating "insert item troubleshooting".
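
            A usage sketch (the \p foo struct and \p set_type typedef are assumed to be declared as in the
            iterator example below; \p theSet is an instance of \p set_type and the payload initialization
            is illustrative):
            \code
            foo * p = new foo;
            p->hash = 42; // fixed-size hash of the key, computed by the application
            if ( theSet.insert( *p, []( foo& item ) { item.payload = 0; } )) {
                // p is linked into the set; the functor has initialized its payload
            }
            else {
                // an item with the same hash already exists; p was not linked
                delete p;
            }
            \endcode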
        */
        template <typename Func>
        bool insert( value_type& val, Func f )
        {
            hash_type const& hash = hash_accessor()( val );
            hash_splitter splitter( hash );
            hash_comparator cmp;
            back_off bkoff;

            size_t nOffset = m_Metrics.head_node_size_log;
            array_node * pArr = m_Head;
            size_t nSlot = splitter.cut( m_Metrics.head_node_size_log );
            assert( nSlot < m_Metrics.head_node_size );
            size_t nHeight = 1;

            rcu_lock rcuLock;

            while ( true ) {
                node_ptr slot = pArr->nodes[nSlot].load( memory_model::memory_order_acquire );
                if ( slot.bits() == flag_array_node ) {
                    // array node, go down the tree
                    assert( slot.ptr() != nullptr );
                    nSlot = splitter.cut( m_Metrics.array_node_size_log );
                    assert( nSlot < m_Metrics.array_node_size );
                    pArr = to_array( slot.ptr() );
                    nOffset += m_Metrics.array_node_size_log;
                    ++nHeight;
                }
                else if ( slot.bits() == flag_array_converting ) {
                    // the slot is converting to an array node right now
                    bkoff();
                    m_Stat.onSlotConverting();
                }
                else {
                    // data node
                    assert( slot.bits() == 0 );

                    // re-read the slot to check that it has not been changed
                    if ( pArr->nodes[nSlot].load(memory_model::memory_order_acquire) == slot ) {
                        if ( slot.ptr() ) {
                            if ( cmp( hash, hash_accessor()( *slot.ptr() )) == 0 ) {
                                // the item with that hash value already exists
                                m_Stat.onInsertFailed();
                                return false;
                            }

                            // the slot must be expanded
                            expand_slot( pArr, nSlot, slot, nOffset );
                        }
                        else {
                            // the slot is empty, try to insert data node
                            node_ptr pNull;
                            if ( pArr->nodes[nSlot].compare_exchange_strong( pNull, node_ptr( &val ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
                            {
                                // the new data node has been inserted
                                f( val );
                                ++m_ItemCounter;
                                m_Stat.onInsertSuccess();
                                m_Stat.height( nHeight );
                                return true;
                            }

                            // insert failed - the slot has been changed by another thread
                            // retry inserting
                            m_Stat.onInsertRetry();
                        }
                    }
                    else
                        m_Stat.onSlotChanged();
                }
            } // while
        }
        /// Updates the node
        /**
            Performs inserting or updating the item with the hash value equal to that of \p val.
            - If the hash value is found then the existing item is replaced with \p val and the old item is disposed
              with \p Traits::disposer. Note that the disposer is called by \p GC asynchronously.
              The function returns <tt> std::pair<true, false> </tt>
            - If the hash value is not found and \p bInsert is \p true then \p val is inserted,
              the function returns <tt> std::pair<true, true> </tt>
            - If the hash value is not found and \p bInsert is \p false then the set is unchanged,
              the function returns <tt> std::pair<false, false> </tt>

            Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if the operation is successful
            (i.e. the item has been inserted or updated),
            \p second is \p true if a new item has been added or \p false if the set contains an item with that hash.

            The function locks RCU internally.
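
            A sketch of interpreting the returned pair (\p theSet and \p val are assumed to exist):
            \code
            std::pair<bool, bool> res = theSet.update( val, true );
            if ( !res.first ) {
                // failed; possible only if bInsert == false and the hash is absent
            }
            else if ( res.second ) {
                // val was inserted as a new item
            }
            else {
                // an existing item with the same hash was replaced with val;
                // the old item will be disposed asynchronously by Traits::disposer
            }
            \endcode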
        */
        std::pair<bool, bool> update( value_type& val, bool bInsert = true )
        {
            return do_update( val, [](value_type&, value_type *) {}, bInsert );
        }
        /// Unlinks the item \p val from the set
        /**
            The function searches the item \p val in the set and unlinks it
            if it is found and its address is equal to <tt>&val</tt>.

            The function returns \p true on success and \p false otherwise.

            RCU should not be locked. The function locks RCU internally.
        */
        bool unlink( value_type const& val )
        {
            check_deadlock_policy::check();

            auto pred = [&val](value_type const& item) -> bool { return &item == &val; };
            value_type * p;
            {
                rcu_lock rcuLock;
                p = do_erase( hash_accessor()( val ), std::ref( pred ));
            }
            if ( p ) {
                gc::template retire_ptr<disposer>( p );
                return true;
            }
            return false;
        }
        /// Deletes the item from the set
        /**
            The function searches \p hash in the set,
            unlinks the item found, and returns \p true.
            If that item is not found the function returns \p false.

            The \ref disposer specified in \p Traits is called by the garbage collector \p GC asynchronously.

            RCU should not be locked. The function locks RCU internally.
        */
        bool erase( hash_type const& hash )
        {
            return erase( hash, [](value_type const&) {} );
        }
        /// Deletes the item from the set
        /**
            The function searches \p hash in the set,
            calls the \p f functor with the item found, and unlinks it from the set.
            The \ref disposer specified in \p Traits is called
            by the garbage collector \p GC asynchronously.

            The \p Func interface is:
            \code
            struct functor {
                void operator()( value_type& item );
            };
            \endcode

            If \p hash is not found the function returns \p false.

            RCU should not be locked. The function locks RCU internally.
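
            A short sketch (reuses the \p foo type from the iterator example below; \p h is some
            hash value and the logging is illustrative):
            \code
            bool removed = theSet.erase( h, []( foo& item ) {
                std::cout << "erasing item, payload=" << item.payload << '\n';
            });
            // if removed == true, the item will be disposed asynchronously by Traits::disposer
            \endcode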
        */
        template <typename Func>
        bool erase( hash_type const& hash, Func f )
        {
            check_deadlock_policy::check();

            value_type * p;
            {
                rcu_lock rcuLock;
                p = do_erase( hash, []( value_type const&) -> bool { return true; } );
            }
            if ( p ) {
                // p has been unlinked by this thread; it is safe to access it
                // until it is passed to the disposer
                f( *p );
                gc::template retire_ptr<disposer>( p );
                return true;
            }
            return false;
        }
        /// Extracts the item with specified \p hash
        /**
            The function searches \p hash in the set,
            unlinks it from the set, and returns a \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
            If the item with that hash is not found the function returns an empty \p exempt_ptr.

            RCU \p synchronize method can be called. RCU should NOT be locked.
            The function does not call the disposer for the item found.
            The disposer will be implicitly invoked when the returned object is destroyed or when
            its \p release() member function is called.
            Example:
            \code
            typedef cds::intrusive::FeldmanHashSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > set_type;
            set_type theSet;
            // ...

            typename set_type::exempt_ptr ep( theSet.extract( 5 ));
            if ( ep ) {
                // Deal with ep
                //...

                // Dispose returned item.
                ep.release();
            }
            \endcode
        */
        exempt_ptr extract( hash_type const& hash )
        {
            check_deadlock_policy::check();

            value_type * p;
            {
                rcu_lock rcuLock;
                p = do_erase( hash, []( value_type const&) -> bool { return true; } );
            }
            return exempt_ptr( p );
        }
        /// Finds an item by its \p hash
        /**
            The function searches the item by \p hash and calls the functor \p f for the item found.
            The interface of \p Func functor is:
            \code
            struct functor {
                void operator()( value_type& item );
            };
            \endcode
            where \p item is the item found.

            The functor may change non-key fields of \p item. Note that the only guarantee
            is that \p item cannot be disposed while the functor is executing.
            The functor does not serialize simultaneous access to the set's \p item. If such access is
            possible you must provide your own synchronization schema on the item level to prevent unsafe item modifications.

            The function returns \p true if \p hash is found, \p false otherwise.

            The function applies RCU lock internally.
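
            A short sketch (reuses the \p foo type from the iterator example below; \p h is some hash value):
            \code
            // increment the payload of the item with hash h, if it is present
            bool found = theSet.find( h, []( foo& item ) { ++item.payload; } );
            \endcode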
        */
        template <typename Func>
        bool find( hash_type const& hash, Func f )
        {
            rcu_lock rcuLock;

            value_type * p = search( hash );
            if ( p ) {
                f( *p );
                return true;
            }
            return false;
        }
        /// Checks whether the set contains \p hash
        /**
            The function searches the item by its \p hash
            and returns \p true if it is found, or \p false otherwise.

            The function applies RCU lock internally.
        */
        bool contains( hash_type const& hash )
        {
            return find( hash, [](value_type&) {} );
        }
        /// Finds an item by its \p hash and returns the item found
        /**
            The function searches the item by its \p hash
            and returns the pointer to the item found.
            If \p hash is not found the function returns \p nullptr.

            RCU should be locked before the function invocation.
            The returned pointer is valid only while RCU is locked.

            Usage:
            \code
            typedef cds::intrusive::FeldmanHashSet< your_template_params > my_set;
            my_set theSet;
            // ...
            {
                // Lock RCU
                typename my_set::rcu_lock lock;

                foo * p = theSet.get( 5 );
                if ( p ) {
                    // Deal with p
                    //...
                }
            }
            \endcode
        */
        value_type * get( hash_type const& hash )
        {
            assert( gc::is_locked());
            return search( hash );
        }
        /// Clears the set (non-atomic)
        /**
            The function unlinks all data nodes from the set.
            The function is not atomic but is thread-safe.
            After \p %clear() the set may not be empty because other threads may insert items.

            For each item the \p disposer is called after unlinking.
        */
        void clear()
        {
            clear_array( m_Head, head_size() );
        }
        /// Checks if the set is empty
        /**
            Emptiness is checked by item counting: if the item count is zero then the set is empty.
            Thus, a correct item counting feature is an important part of the set implementation.
        */
        bool empty() const
        {
            return size() == 0;
        }

        /// Returns item count in the set
        size_t size() const
        {
            return m_ItemCounter;
        }
        /// Returns const reference to internal statistics
        stat const& statistics() const
        {
            return m_Stat;
        }

        /// Returns the size of the head node
        size_t head_size() const
        {
            return m_Metrics.head_node_size;
        }

        /// Returns the size of the array node
        size_t array_node_size() const
        {
            return m_Metrics.array_node_size;
        }
        /// Collects tree level statistics into \p stat
        /** @copydetails cds::intrusive::FeldmanHashSet::get_level_statistics
        */
        void get_level_statistics(std::vector<feldman_hashset::level_statistics>& stat) const
        {
            stat.clear();
            gather_level_statistics(stat, 0, m_Head, head_size());
        }
    protected:
        class iterator_base
        {
            friend class FeldmanHashSet;

        protected:
            array_node *            m_pNode;  ///< current array node
            size_t                  m_idx;    ///< current position in m_pNode
            value_type *            m_pValue; ///< current value
            FeldmanHashSet const*   m_set;    ///< Hash set

        public:
            iterator_base() CDS_NOEXCEPT
                : m_pNode(nullptr)
                , m_idx(0)
                , m_pValue(nullptr)
                , m_set(nullptr)
            {}

            iterator_base(iterator_base const& rhs) CDS_NOEXCEPT
                : m_pNode(rhs.m_pNode)
                , m_idx(rhs.m_idx)
                , m_pValue(rhs.m_pValue)
                , m_set(rhs.m_set)
            {}

            iterator_base& operator=(iterator_base const& rhs) CDS_NOEXCEPT
            {
                m_pNode = rhs.m_pNode;
                m_idx = rhs.m_idx;
                m_pValue = rhs.m_pValue;
                m_set = rhs.m_set;
                return *this;
            }

            iterator_base& operator++()
            {
                forward();
                return *this;
            }

            iterator_base& operator--()
            {
                backward();
                return *this;
            }

            bool operator ==(iterator_base const& rhs) const CDS_NOEXCEPT
            {
                return m_pNode == rhs.m_pNode && m_idx == rhs.m_idx && m_set == rhs.m_set;
            }

            bool operator !=(iterator_base const& rhs) const CDS_NOEXCEPT
            {
                return !(*this == rhs);
            }

        protected:
            iterator_base(FeldmanHashSet const& set, array_node * pNode, size_t idx, bool)
                : m_pNode(pNode)
                , m_idx(idx)
                , m_pValue(nullptr)
                , m_set(&set)
            {}

            iterator_base(FeldmanHashSet const& set, array_node * pNode, size_t idx)
                : m_pNode(pNode)
                , m_idx(idx)
                , m_pValue(nullptr)
                , m_set(&set)
            {
                forward();
            }

            value_type * pointer() const CDS_NOEXCEPT
            {
                assert(gc::is_locked());
                return m_pValue;
            }

            void forward()
            {
                assert( gc::is_locked());
                assert(m_set != nullptr);
                assert(m_pNode != nullptr);

                size_t const arrayNodeSize = m_set->array_node_size();
                size_t const headSize = m_set->head_size();
                array_node * pNode = m_pNode;
                size_t idx = m_idx + 1;
                size_t nodeSize = m_pNode->pParent ? arrayNodeSize : headSize;

                for (;;) {
                    if (idx < nodeSize) {
                        node_ptr slot = pNode->nodes[idx].load(memory_model::memory_order_acquire);
                        if (slot.bits() == flag_array_node) {
                            // array node, go down the tree
                            assert(slot.ptr() != nullptr);
                            pNode = to_array(slot.ptr());
                            idx = 0;
                            nodeSize = arrayNodeSize;
                        }
                        else if (slot.bits() == flag_array_converting) {
                            // the slot is converting to array node right now - skip the node
                            ++idx;
                        }
                        else if (slot.ptr()) {
                            // data node found
                            m_pNode = pNode;
                            m_idx = idx;
                            m_pValue = slot.ptr();
                            return;
                        }
                        else
                            ++idx;
                    }
                    else {
                        // up to parent node
                        if (pNode->pParent) {
                            idx = pNode->idxParent + 1;
                            pNode = pNode->pParent;
                            nodeSize = pNode->pParent ? arrayNodeSize : headSize;
                        }
                        else {
                            // end()
                            assert(pNode == m_set->m_Head);
                            assert(idx == headSize);
                            m_pNode = pNode;
                            m_idx = idx;
                            m_pValue = nullptr;
                            return;
                        }
                    }
                }
            }

            void backward()
            {
                assert(gc::is_locked());
                assert(m_set != nullptr);
                assert(m_pNode != nullptr);

                size_t const arrayNodeSize = m_set->array_node_size();
                size_t const headSize = m_set->head_size();
                size_t const endIdx = size_t(0) - 1;

                array_node * pNode = m_pNode;
                size_t idx = m_idx - 1;
                size_t nodeSize = m_pNode->pParent ? arrayNodeSize : headSize;

                for (;;) {
                    if (idx != endIdx) {
                        node_ptr slot = pNode->nodes[idx].load(memory_model::memory_order_acquire);
                        if (slot.bits() == flag_array_node) {
                            // array node, go down the tree
                            assert(slot.ptr() != nullptr);
                            pNode = to_array(slot.ptr());
                            nodeSize = arrayNodeSize;
                            idx = nodeSize - 1;
                        }
                        else if (slot.bits() == flag_array_converting) {
                            // the slot is converting to array node right now - skip the node
                            --idx;
                        }
                        else if (slot.ptr()) {
                            // data node found
                            m_pNode = pNode;
                            m_idx = idx;
                            m_pValue = slot.ptr();
                            return;
                        }
                        else
                            --idx;
                    }
                    else {
                        // up to parent node
                        if (pNode->pParent) {
                            idx = pNode->idxParent - 1;
                            pNode = pNode->pParent;
                            nodeSize = pNode->pParent ? arrayNodeSize : headSize;
                        }
                        else {
                            // rend()
                            assert(pNode == m_set->m_Head);
                            assert(idx == endIdx);
                            m_pNode = pNode;
                            m_idx = idx;
                            m_pValue = nullptr;
                            return;
                        }
                    }
                }
            }
        };
        template <class Iterator>
        Iterator init_begin() const
        {
            return Iterator(*this, m_Head, size_t(0) - 1);
        }

        template <class Iterator>
        Iterator init_end() const
        {
            return Iterator(*this, m_Head, head_size(), false);
        }

        template <class Iterator>
        Iterator init_rbegin() const
        {
            return Iterator(*this, m_Head, head_size());
        }

        template <class Iterator>
        Iterator init_rend() const
        {
            return Iterator(*this, m_Head, size_t(0) - 1, false);
        }
        /// Bidirectional iterator class
        template <bool IsConst>
        class bidirectional_iterator : protected iterator_base
        {
            friend class FeldmanHashSet;

        protected:
            static CDS_CONSTEXPR bool const c_bConstantIterator = IsConst;

        public:
            typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer
            typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference

        public:
            bidirectional_iterator() CDS_NOEXCEPT
            {}

            bidirectional_iterator(bidirectional_iterator const& rhs) CDS_NOEXCEPT
                : iterator_base(rhs)
            {}

            bidirectional_iterator& operator=(bidirectional_iterator const& rhs) CDS_NOEXCEPT
            {
                iterator_base::operator=(rhs);
                return *this;
            }

            bidirectional_iterator& operator++()
            {
                iterator_base::operator++();
                return *this;
            }

            bidirectional_iterator& operator--()
            {
                iterator_base::operator--();
                return *this;
            }

            value_ptr operator ->() const CDS_NOEXCEPT
            {
                return iterator_base::pointer();
            }

            value_ref operator *() const CDS_NOEXCEPT
            {
                value_ptr p = iterator_base::pointer();
                assert(p);
                return *p;
            }

            template <bool IsConst2>
            bool operator ==(bidirectional_iterator<IsConst2> const& rhs) const CDS_NOEXCEPT
            {
                return iterator_base::operator==(rhs);
            }

            template <bool IsConst2>
            bool operator !=(bidirectional_iterator<IsConst2> const& rhs) const CDS_NOEXCEPT
            {
                return !(*this == rhs);
            }

        protected:
            bidirectional_iterator(FeldmanHashSet const& set, array_node * pNode, size_t idx, bool)
                : iterator_base(set, pNode, idx, false)
            {}

            bidirectional_iterator(FeldmanHashSet const& set, array_node * pNode, size_t idx)
                : iterator_base(set, pNode, idx)
            {}
        };
        /// Reverse bidirectional iterator
        template <bool IsConst>
        class reverse_bidirectional_iterator : public iterator_base
        {
            friend class FeldmanHashSet;

        public:
            typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer
            typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference

        public:
            reverse_bidirectional_iterator() CDS_NOEXCEPT
                : iterator_base()
            {}

            reverse_bidirectional_iterator(reverse_bidirectional_iterator const& rhs) CDS_NOEXCEPT
                : iterator_base(rhs)
            {}

            reverse_bidirectional_iterator& operator=(reverse_bidirectional_iterator const& rhs) CDS_NOEXCEPT
            {
                iterator_base::operator=(rhs);
                return *this;
            }

            reverse_bidirectional_iterator& operator++()
            {
                iterator_base::operator--();
                return *this;
            }

            reverse_bidirectional_iterator& operator--()
            {
                iterator_base::operator++();
                return *this;
            }

            value_ptr operator ->() const CDS_NOEXCEPT
            {
                return iterator_base::pointer();
            }

            value_ref operator *() const CDS_NOEXCEPT
            {
                value_ptr p = iterator_base::pointer();
                assert(p);
                return *p;
            }

            template <bool IsConst2>
            bool operator ==(reverse_bidirectional_iterator<IsConst2> const& rhs) const CDS_NOEXCEPT
            {
                return iterator_base::operator==(rhs);
            }

            template <bool IsConst2>
            bool operator !=(reverse_bidirectional_iterator<IsConst2> const& rhs) const CDS_NOEXCEPT
            {
                return !(*this == rhs);
            }

        protected:
            reverse_bidirectional_iterator(FeldmanHashSet const& set, array_node * pNode, size_t idx, bool)
                : iterator_base(set, pNode, idx, false)
            {}

            reverse_bidirectional_iterator(FeldmanHashSet const& set, array_node * pNode, size_t idx)
                : iterator_base(set, pNode, idx, false)
            {
                iterator_base::backward();
            }
        };
    public:
#ifdef CDS_DOXYGEN_INVOKED
        typedef implementation_defined iterator;               ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional iterator" type
        typedef implementation_defined const_iterator;         ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional const iterator" type
        typedef implementation_defined reverse_iterator;       ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional reverse iterator" type
        typedef implementation_defined const_reverse_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional reverse const iterator" type
#else
        typedef bidirectional_iterator<false>         iterator;
        typedef bidirectional_iterator<true>          const_iterator;
        typedef reverse_bidirectional_iterator<false> reverse_iterator;
        typedef reverse_bidirectional_iterator<true>  const_reverse_iterator;
#endif
        ///@name Thread-safe iterators
        /** @anchor cds_intrusive_FeldmanHashSet_rcu_iterators
            The set supports thread-safe iterators: you may iterate over the set in a multi-threaded environment
            under explicit RCU lock.
            The RCU lock requirement means that inserting or searching is allowed but you must not erase items from the set
            because erasing under the RCU lock can lead to a deadlock. However, another thread can call \p erase() safely
            while your thread is iterating.

            A typical example is:
            \code
            struct foo {
                uint32_t    hash;
                uint32_t    payload; // only for example
            };
            struct set_traits: cds::intrusive::feldman_hashset::traits
            {
                struct hash_accessor {
                    uint32_t operator()( foo const& src ) const
                    {
                        return src.hash;
                    }
                };
            };

            typedef cds::urcu::gc< cds::urcu::general_buffered<>> rcu;
            typedef cds::intrusive::FeldmanHashSet< rcu, foo, set_traits > set_type;

            set_type s;

            // ...

            // iterate over the set
            {
                // lock the RCU
                typename set_type::rcu_lock l; // scoped RCU lock

                // traverse the set
                for ( auto i = s.begin(); i != s.end(); ++i ) {
                    // deal with i. Remember, erasing is prohibited here!
                }
            } // at this point RCU lock is released
            \endcode

            Each iterator object supports the common interface:
            - dereference operators:
                \code
                value_type [const] * operator ->() noexcept
                value_type [const] & operator *() noexcept
                \endcode
            - pre-increment and pre-decrement. Post-increment and post-decrement are not supported
            - equality operators <tt>==</tt> and <tt>!=</tt>.
              Iterators are equal iff they point to the same cell of the same array node.
              Note that for two iterators \p it1 and \p it2 the condition <tt> it1 == it2 </tt>
              does not entail <tt> &(*it1) == &(*it2) </tt>: welcome to concurrent containers

            @note It is possible that an item is iterated more than once, for example, if an iterator points to an item
            in an array node that is being split.
        */
        ///@{
        /// Returns an iterator to the beginning of the set
        iterator begin()
        {
            return iterator(*this, m_Head, size_t(0) - 1);
        }

        /// Returns a const iterator to the beginning of the set
        const_iterator begin() const
        {
            return const_iterator(*this, m_Head, size_t(0) - 1);
        }

        /// Returns a const iterator to the beginning of the set
        const_iterator cbegin()
        {
            return const_iterator(*this, m_Head, size_t(0) - 1);
        }

        /// Returns an iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
        iterator end()
        {
            return iterator(*this, m_Head, head_size(), false);
        }

        /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
        const_iterator end() const
        {
            return const_iterator(*this, m_Head, head_size(), false);
        }

        /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
        const_iterator cend()
        {
            return const_iterator(*this, m_Head, head_size(), false);
        }

        /// Returns a reverse iterator to the first element of the reversed set
        reverse_iterator rbegin()
        {
            return reverse_iterator(*this, m_Head, head_size());
        }

        /// Returns a const reverse iterator to the first element of the reversed set
        const_reverse_iterator rbegin() const
        {
            return const_reverse_iterator(*this, m_Head, head_size());
        }

        /// Returns a const reverse iterator to the first element of the reversed set
        const_reverse_iterator crbegin()
        {
            return const_reverse_iterator(*this, m_Head, head_size());
        }

        /// Returns a reverse iterator to the element following the last element of the reversed set
        /**
            It corresponds to the element preceding the first element of the non-reversed container.
            This element acts as a placeholder; attempting to access it results in undefined behavior.
        */
        reverse_iterator rend()
        {
            return reverse_iterator(*this, m_Head, size_t(0) - 1, false);
        }

        /// Returns a const reverse iterator to the element following the last element of the reversed set
        /**
            It corresponds to the element preceding the first element of the non-reversed container.
            This element acts as a placeholder; attempting to access it results in undefined behavior.
        */
        const_reverse_iterator rend() const
        {
            return const_reverse_iterator(*this, m_Head, size_t(0) - 1, false);
        }

        /// Returns a const reverse iterator to the element following the last element of the reversed set
        /**
            It corresponds to the element preceding the first element of the non-reversed container.
            This element acts as a placeholder; attempting to access it results in undefined behavior.
        */
        const_reverse_iterator crend()
        {
            return const_reverse_iterator(*this, m_Head, size_t(0) - 1, false);
        }
        ///@}
    protected:
        template <typename Func>
        std::pair<bool, bool> do_update(value_type& val, Func f, bool bInsert = true)
        {
            hash_type const& hash = hash_accessor()(val);
            hash_splitter splitter(hash);
            hash_comparator cmp;
            back_off bkoff;

            array_node * pArr = m_Head;
            size_t nSlot = splitter.cut(m_Metrics.head_node_size_log);
            assert(nSlot < m_Metrics.head_node_size);
            size_t nOffset = m_Metrics.head_node_size_log;
            size_t nHeight = 1;

            // the replaced node is disposed outside the RCU lock,
            // so the pointer is kept at function scope
            value_type * pOld = nullptr;

            {
                rcu_lock rcuLock;

                while (true) {
                    node_ptr slot = pArr->nodes[nSlot].load(memory_model::memory_order_acquire);
                    if (slot.bits() == flag_array_node) {
                        // array node, go down the tree
                        assert(slot.ptr() != nullptr);
                        nSlot = splitter.cut(m_Metrics.array_node_size_log);
                        assert(nSlot < m_Metrics.array_node_size);
                        pArr = to_array(slot.ptr());
                        nOffset += m_Metrics.array_node_size_log;
                        ++nHeight;
                    }
                    else if (slot.bits() == flag_array_converting) {
                        // the slot is converting to an array node right now
                        bkoff();
                        m_Stat.onSlotConverting();
                    }
                    else {
                        // data node
                        assert(slot.bits() == 0);

                        // re-read the slot to check that it has not been changed
                        if ( pArr->nodes[nSlot].load(memory_model::memory_order_acquire) == slot ) {
                            if (slot.ptr()) {
                                if (cmp(hash, hash_accessor()(*slot.ptr())) == 0) {
                                    // the item with that hash value already exists
                                    // Replace it with val
                                    if (slot.ptr() == &val) {
                                        m_Stat.onUpdateExisting();
                                        return std::make_pair(true, false);
                                    }

                                    if (pArr->nodes[nSlot].compare_exchange_strong(slot, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed)) {
                                        // the old node can be disposed
                                        f(val, slot.ptr());
                                        pOld = slot.ptr();
                                        m_Stat.onUpdateExisting();
                                        goto update_existing_done;
                                    }

                                    m_Stat.onUpdateRetry();
                                }
                                else {
                                    // the slot must be expanded
                                    expand_slot(pArr, nSlot, slot, nOffset);
                                }
                            }
                            else {
                                // the slot is empty, try to insert data node
                                if (bInsert) {
                                    node_ptr pNull;
                                    if (pArr->nodes[nSlot].compare_exchange_strong(pNull, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed))
                                    {
                                        // the new data node has been inserted
                                        f(val, nullptr);
                                        ++m_ItemCounter;
                                        m_Stat.onUpdateNew();
                                        m_Stat.height(nHeight);
                                        return std::make_pair(true, true);
                                    }
                                }
                                else {
                                    m_Stat.onUpdateFailed();
                                    return std::make_pair(false, false);
                                }

                                // insert failed - the slot has been changed by another thread
                                // retry updating
                                m_Stat.onUpdateRetry();
                            }
                        }
                        else
                            m_Stat.onSlotChanged();
                    }
                } // while
            }

        update_existing_done:
            // retire_ptr is called outside the RCU lock
            gc::template retire_ptr<disposer>( pOld );
            return std::make_pair(true, false);
        }
        template <typename Predicate>
        value_type * do_erase( hash_type const& hash, Predicate pred)
        {
            assert(gc::is_locked());

            hash_splitter splitter(hash);
            hash_comparator cmp;
            back_off bkoff;

            array_node * pArr = m_Head;
            size_t nSlot = splitter.cut(m_Metrics.head_node_size_log);
            assert(nSlot < m_Metrics.head_node_size);

            while (true) {
                node_ptr slot = pArr->nodes[nSlot].load(memory_model::memory_order_acquire);
                if (slot.bits() == flag_array_node) {
                    // array node, go down the tree
                    assert(slot.ptr() != nullptr);
                    nSlot = splitter.cut(m_Metrics.array_node_size_log);
                    assert(nSlot < m_Metrics.array_node_size);
                    pArr = to_array(slot.ptr());
                }
                else if (slot.bits() == flag_array_converting) {
                    // the slot is converting to an array node right now
                    bkoff();
                    m_Stat.onSlotConverting();
                }
                else {
                    // data node
                    assert(slot.bits() == 0);

                    // re-read the slot to check that it has not been changed
                    if ( pArr->nodes[nSlot].load(memory_model::memory_order_acquire) == slot ) {
                        if (slot.ptr()) {
                            if (cmp(hash, hash_accessor()(*slot.ptr())) == 0 && pred(*slot.ptr())) {
                                // item found - replace it with nullptr
                                if (pArr->nodes[nSlot].compare_exchange_strong(slot, node_ptr(nullptr), memory_model::memory_order_acquire, atomics::memory_order_relaxed)) {
                                    --m_ItemCounter;
                                    m_Stat.onEraseSuccess();
                                    return slot.ptr();
                                }
                                m_Stat.onEraseRetry();
                                continue;
                            }
                            m_Stat.onEraseFailed();
                            return nullptr;
                        }
                        else {
                            // the slot is empty
                            m_Stat.onEraseFailed();
                            return nullptr;
                        }
                    }
                    else
                        m_Stat.onSlotChanged();
                }
            } // while
        }
        value_type * search(hash_type const& hash )
        {
            assert( gc::is_locked() );

            hash_splitter splitter(hash);
            hash_comparator cmp;
            back_off bkoff;

            array_node * pArr = m_Head;
            size_t nSlot = splitter.cut(m_Metrics.head_node_size_log);
            assert(nSlot < m_Metrics.head_node_size);

            while (true) {
                node_ptr slot = pArr->nodes[nSlot].load(memory_model::memory_order_acquire);
                if (slot.bits() == flag_array_node) {
                    // array node, go down the tree
                    assert(slot.ptr() != nullptr);
                    nSlot = splitter.cut(m_Metrics.array_node_size_log);
                    assert(nSlot < m_Metrics.array_node_size);
                    pArr = to_array(slot.ptr());
                }
                else if (slot.bits() == flag_array_converting) {
                    // the slot is converting to an array node right now
                    bkoff();
                    m_Stat.onSlotConverting();
                }
                else {
                    // data node
                    assert(slot.bits() == 0);

                    // re-read the slot to check that it has not been changed
                    if ( pArr->nodes[nSlot].load(memory_model::memory_order_acquire) != slot) {
                        // slot value has been changed - retry
                        m_Stat.onSlotChanged();
                    }
                    else if (slot.ptr() && cmp(hash, hash_accessor()(*slot.ptr())) == 0) {
                        // item found
                        m_Stat.onFindSuccess();
                        return slot.ptr();
                    }
                    else {
                        m_Stat.onFindFailed();
                        return nullptr;
                    }
                }
            } // while
        }
        array_node * alloc_head_node() const
        {
            return alloc_array_node(head_size(), nullptr, 0);
        }

        array_node * alloc_array_node(array_node * pParent, size_t idxParent) const
        {
            return alloc_array_node(array_node_size(), pParent, idxParent);
        }

        static array_node * alloc_array_node(size_t nSize, array_node * pParent, size_t idxParent)
        {
            array_node * pNode = cxx_array_node_allocator().NewBlock(sizeof(array_node) + sizeof(atomic_node_ptr) * (nSize - 1), pParent, idxParent);
            new (pNode->nodes) atomic_node_ptr[nSize];
            return pNode;
        }

        static void free_array_node(array_node * parr)
        {
            cxx_array_node_allocator().Delete(parr);
        }
        // The function is not thread-safe. For use in dtor only
        void destroy_tree()
        {
            // Destroy all array nodes
            destroy_array_nodes(m_Head, head_size());
        }

        void destroy_array_nodes(array_node * pArr, size_t nSize)
        {
            for (atomic_node_ptr * p = pArr->nodes, *pLast = pArr->nodes + nSize; p != pLast; ++p) {
                node_ptr slot = p->load(memory_model::memory_order_relaxed);
                if (slot.bits() == flag_array_node) {
                    destroy_array_nodes(to_array(slot.ptr()), array_node_size());
                    free_array_node(to_array(slot.ptr()));
                    p->store(node_ptr(), memory_model::memory_order_relaxed);
                }
            }
        }
        void gather_level_statistics(std::vector<feldman_hashset::level_statistics>& stat, size_t nLevel, array_node * pArr, size_t nSize) const
        {
            if (stat.size() <= nLevel) {
                stat.resize(nLevel + 1);
                stat[nLevel].node_capacity = nSize;
            }

            ++stat[nLevel].array_node_count;
            for (atomic_node_ptr * p = pArr->nodes, *pLast = pArr->nodes + nSize; p != pLast; ++p) {
                node_ptr slot = p->load(memory_model::memory_order_relaxed);
                if (slot.bits() == flag_array_node) {
                    ++stat[nLevel].array_cell_count;
                    gather_level_statistics(stat, nLevel + 1, to_array(slot.ptr()), array_node_size());
                }
                else if (slot.bits() == flag_array_converting)
                    ++stat[nLevel].array_cell_count;
                else if (slot.ptr())
                    ++stat[nLevel].data_cell_count;
                else
                    ++stat[nLevel].empty_cell_count;
            }
        }
        void clear_array(array_node * pArrNode, size_t nSize)
        {
            back_off bkoff;

            for (atomic_node_ptr * pArr = pArrNode->nodes, *pLast = pArr + nSize; pArr != pLast; ++pArr) {
                while (true) {
                    node_ptr slot = pArr->load(memory_model::memory_order_acquire);
                    if (slot.bits() == flag_array_node) {
                        // array node, go down the tree
                        assert(slot.ptr() != nullptr);
                        clear_array(to_array(slot.ptr()), array_node_size());
                        break;
                    }
                    else if (slot.bits() == flag_array_converting) {
                        // the slot is converting to an array node right now
                        while ((slot = pArr->load(memory_model::memory_order_acquire)).bits() == flag_array_converting) {
                            bkoff();
                            m_Stat.onSlotConverting();
                        }
                        bkoff.reset();

                        assert(slot.ptr() != nullptr);
                        assert(slot.bits() == flag_array_node);
                        clear_array(to_array(slot.ptr()), array_node_size());
                        break;
                    }
                    else {
                        // data node
                        if (pArr->compare_exchange_strong(slot, node_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed)) {
                            if (slot.ptr()) {
                                gc::template retire_ptr<disposer>(slot.ptr());
                                --m_ItemCounter;
                                m_Stat.onEraseSuccess();
                            }
                            break;
                        }
                    }
                }
            }
        }
        bool expand_slot(array_node * pParent, size_t idxParent, node_ptr current, size_t nOffset)
        {
            assert(current.bits() == 0);
            assert(current.ptr());

            size_t idx = hash_splitter(hash_accessor()(*current.ptr()), nOffset).cut(m_Metrics.array_node_size_log);
            array_node * pArr = alloc_array_node(pParent, idxParent);

            node_ptr cur(current.ptr());
            atomic_node_ptr& slot = pParent->nodes[idxParent];
            if (!slot.compare_exchange_strong(cur, cur | flag_array_converting, memory_model::memory_order_release, atomics::memory_order_relaxed))
            {
                m_Stat.onExpandNodeFailed();
                free_array_node(pArr);
                return false;
            }

            // the old data node is copied into the new array node
            pArr->nodes[idx].store(current, memory_model::memory_order_release);

            cur = cur | flag_array_converting;
            CDS_VERIFY(
                slot.compare_exchange_strong(cur, node_ptr(to_node(pArr), flag_array_node), memory_model::memory_order_release, atomics::memory_order_relaxed)
            );
            m_Stat.onExpandNodeSuccess();
            m_Stat.onArrayNodeCreated();
            return true;
        }
        union converter {
            value_type * pData;
            array_node * pArr;

            converter(value_type * p)
                : pData(p)
            {}

            converter(array_node * p)
                : pArr(p)
            {}
        };

        static array_node * to_array(value_type * p)
        {
            return converter(p).pArr;
        }

        static value_type * to_node(array_node * p)
        {
            return converter(p).pData;
        }
    };
}} // namespace cds::intrusive

#endif // #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H