3 #ifndef CDSLIB_INTRUSIVE_IMPL_MULTILEVEL_HASHSET_H
4 #define CDSLIB_INTRUSIVE_IMPL_MULTILEVEL_HASHSET_H
6 #include <tuple> // std::tie
7 #include <functional> // std::ref
9 #include <cds/intrusive/details/multilevel_hashset_base.h>
10 #include <cds/details/allocator.h>
12 namespace cds { namespace intrusive {
13 /// Intrusive hash set based on multi-level array
14 /** @ingroup cds_intrusive_map
15 @anchor cds_intrusive_MultilevelHashSet_hp
18 - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays:
19 Wait-free Extensible Hash Maps"
21 [From the paper] The hardest problem encountered while developing a parallel hash map is how to perform
22 a global resize, the process of redistributing the elements in a hash map that occurs when adding new
23 buckets. The negative impact of blocking synchronization is multiplied during a global resize, because all
24 threads will be forced to wait on the thread that is performing the involved process of resizing the hash map
25 and redistributing the elements. \p %MultilevelHashSet implementation avoids global resizes through new array
26 allocation. By allowing concurrent expansion this structure is free from the overhead of an explicit resize,
27 which facilitates concurrent operations.
29 The presented design includes dynamic hashing, the use of sub-arrays within the hash map data structure;
30 which, in combination with <b>perfect hashing</b>, means that each element has a unique final, as well as current, position.
31 It is important to note that the perfect hash function required by our hash map is trivial to realize as
32 any hash function that permutes the bits of the key is suitable. This is possible because of our approach
33 to the hash function; we require that it produces hash values that are equal in size to that of the key.
34 We know that if we expand the hash map a fixed number of times there can be no collision as duplicate keys
35 are not provided for in the standard semantics of a hash map.
37 \p %MultiLevelHashSet is a multi-level array which has a structure similar to a tree:
38 @image html multilevel_hashset.png
39 The multi-level array differs from a tree in that each position on the tree could hold an array of nodes or a single node.
40 A position that holds a single node is a \p dataNode which holds the hash value of a key and the value that is associated
\r
41 with that key; it is a simple struct holding two variables. A \p dataNode in the multi-level array could be marked.
\r
42 A \p markedDataNode refers to a pointer to a \p dataNode that has been bitmarked at the least significant bit (LSB)
\r
43 of the pointer to the node. This signifies that this \p dataNode is contended. An expansion must occur at this node;
\r
44 any thread that sees this \p markedDataNode will try to replace it with an \p arrayNode; which is a position that holds
\r
45 an array of nodes. The pointer to an \p arrayNode is differentiated from that of a pointer to a \p dataNode by a bitmark
\r
46 on the second-least significant bit.
\r
48 \p %MultiLevelHashSet multi-level array is similar to a tree in that we keep a pointer to the root, which is a memory array
\r
49 called \p head. The length of the \p head memory array is unique, whereas every other \p arrayNode has a uniform length;
\r
50 a normal \p arrayNode has a fixed power-of-two length equal to the binary logarithm of a variable called \p arrayLength.
\r
51 The maximum depth of the tree, \p maxDepth, is the maximum number of pointers that must be followed to reach any node.
\r
52 We define \p currentDepth as the number of memory arrays that we need to traverse to reach the \p arrayNode on which
\r
53 we need to operate; this is initially one, because of \p head.
\r
55 That approach to the structure of the hash map uses an extensible hashing scheme; <b> the hash value is treated as a bit
\r
56 string</b> and is rehashed incrementally.
\r
58 @note Two important things you should keep in mind when you're using \p %MultiLevelHashSet:
\r
59 - all keys must be fixed-size. It means that you cannot use \p std::string as a key for \p %MultiLevelHashSet.
\r
60 Instead, for the strings you should use well-known hashing algorithms like <a href="https://en.wikipedia.org/wiki/Secure_Hash_Algorithm">SHA1, SHA2</a>,
\r
61 <a href="https://en.wikipedia.org/wiki/MurmurHash">MurmurHash</a>, <a href="https://en.wikipedia.org/wiki/CityHash">CityHash</a>
\r
62 or its successor <a href="https://code.google.com/p/farmhash/">FarmHash</a> and so on, which
\r
63 converts variable-length strings to fixed-length bit-strings, and use that hash as a key in \p %MultiLevelHashSet.
\r
64 - \p %MultiLevelHashSet uses a perfect hashing. It means that if two different keys, for example, of type \p std::string,
\r
65 have identical hash then you cannot insert both that keys in the set. \p %MultiLevelHashSet does not maintain the key,
\r
66 it maintains its fixed-size hash value.
\r
68 Template parameters:
\r
69 - \p GC - safe memory reclamation schema. Can be \p gc::HP, \p gc::DHP or one of \ref cds_urcu_type "RCU type"
\r
70 - \p T - a value type to be stored in the set
\r
71 - \p Traits - type traits, the structure based on \p multilevel_hashset::traits or result of \p multilevel_hashset::make_traits metafunction
\r
73 There are several specializations of \p %MultiLevelHashSet for each \p GC. You should include:
74 - <tt><cds/intrusive/multilevel_hashset_hp.h></tt> for \p gc::HP garbage collector
75 - <tt><cds/intrusive/multilevel_hashset_dhp.h></tt> for \p gc::DHP garbage collector
76 - <tt><cds/intrusive/multilevel_hashset_rcu.h></tt> for \ref cds_intrusive_MultiLevelHashSet_rcu "RCU type"
81 #ifdef CDS_DOXYGEN_INVOKED
82 ,typename Traits = multilevel_hashset::traits
87 class MultiLevelHashSet
90 typedef GC gc; ///< Garbage collector
91 typedef T value_type; ///< type of value stored in the set
92 typedef Traits traits; ///< Traits template parameter, see \p multilevel_hashset::traits
94 typedef typename traits::hash_accessor hash_accessor; ///< Hash accessor functor
95 static_assert(!std::is_same< hash_accessor, cds::opt::none >::value, "hash_accessor functor must be specified" );
97 /// Hash type defined as \p hash_accessor return type
98 typedef typename std::decay<
99 typename std::remove_reference<
100 decltype( hash_accessor()( std::declval<T>()) )
103 static_assert( !std::is_pointer<hash_type>::value, "hash_accessor should return a reference to hash value" );
105 typedef typename traits::disposer disposer; ///< data node disposer
107 # ifdef CDS_DOXYGEN_INVOKED
108 typedef implementation_defined hash_comparator ; ///< hash compare functor based on opt::compare and opt::less option setter
110 typedef typename cds::opt::details::make_comparator_from<
113 multilevel_hashset::bitwise_compare< hash_type >
114 >::type hash_comparator;
117 typedef typename traits::item_counter item_counter; ///< Item counter type
118 typedef typename traits::node_allocator node_allocator; ///< Array node allocator
119 typedef typename traits::memory_model memory_model; ///< Memory model
120 typedef typename traits::back_off back_off; ///< Backoff strategy
121 typedef typename traits::stat stat; ///< Internal statistics type
123 typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer
125 /// Count of hazard pointers required
126 static CDS_CONSTEXPR size_t const c_nHazardPtrCount = 1;
128 /// Node marked pointer
129 typedef cds::details::marked_ptr< value_type, 3 > node_ptr;
130 /// Array node element
131 typedef atomics::atomic< node_ptr > atomic_node_ptr;
136 flag_array_converting = 1, ///< the cell is converting from data node to an array node
137 flag_array_node = 2 ///< the cell is a pointer to an array node
141 array_node * const pParent; ///< parent array node
142 size_t const idxParent; ///< index in parent array node
143 atomic_node_ptr nodes[1]; ///< node array
145 array_node( array_node * parent, size_t idx )
150 array_node() = delete;
151 array_node( array_node const&) = delete;
152 array_node( array_node&& ) = delete;
155 typedef cds::details::Allocator< array_node, node_allocator > cxx_array_node_allocator;
157 typedef multilevel_hashset::details::hash_splitter< hash_type > hash_splitter;
/// Sizing metrics of the multi-level array; computed once by make_metrics()
struct metrics {
    size_t head_node_size;      // power-of-two size of the head array
    size_t head_node_size_log;  // log2( head_node_size )
    size_t array_node_size;     // power-of-two size of a non-head array node
    size_t array_node_size_log; // log2( array_node_size )
};
169 metrics const m_Metrics; ///< Metrics
170 array_node * m_Head; ///< Head array
171 item_counter m_ItemCounter; ///< Item counter
172 stat m_Stat; ///< Internal statistics
176 /// Creates empty set
178 @param head_bits: 2<sup>head_bits</sup> specifies the size of head array, minimum is 4.
179 @param array_bits: 2<sup>array_bits</sup> specifies the size of array node, minimum is 2.
181 Equation for \p head_bits and \p array_bits:
183 sizeof(hash_type) * 8 == head_bits + N * array_bits
185 where \p N is multi-level array depth.
187 MultiLevelHashSet( size_t head_bits = 8, size_t array_bits = 4 )
188 : m_Metrics(make_metrics( head_bits, array_bits ))
189 , m_Head( alloc_head_node())
192 /// Destructs the set and frees all data
196 free_array_node( m_Head );
201 The function inserts \p val in the set if it does not contain
202 an item with that hash.
204 Returns \p true if \p val is placed into the set, \p false otherwise.
206 bool insert( value_type& val )
208 return insert( val, [](value_type&) {} );
213 This function is intended for derived non-intrusive containers.
215 The function allows to split creating of new item into two part:
216 - create item with key only
217 - insert new item into the set
218 - if inserting is success, calls \p f functor to initialize \p val.
220 The functor signature is:
222 void func( value_type& val );
224 where \p val is the item inserted.
226 The user-defined functor is called only if the inserting is success.
228 @warning See \ref cds_intrusive_item_creating "insert item troubleshooting".
230 template <typename Func>
231 bool insert( value_type& val, Func f )
233 hash_type const& hash = hash_accessor()( val );
234 hash_splitter splitter( hash );
236 typename gc::Guard guard;
239 size_t nOffset = m_Metrics.head_node_size_log;
240 array_node * pArr = m_Head;
241 size_t nSlot = splitter.cut( m_Metrics.head_node_size_log );
242 assert( nSlot < m_Metrics.head_node_size );
245 node_ptr slot = pArr->nodes[nSlot].load( memory_model::memory_order_acquire );
246 if ( slot.bits() == flag_array_node ) {
247 // array node, go down the tree
248 assert( slot.ptr() != nullptr );
249 nSlot = splitter.cut( m_Metrics.array_node_size_log );
250 assert( nSlot < m_Metrics.array_node_size );
251 pArr = to_array( slot.ptr() );
252 nOffset += m_Metrics.array_node_size_log;
254 else if ( slot.bits() == flag_array_converting ) {
255 // the slot is converting to array node right now
257 m_Stat.onSlotConverting();
261 assert(slot.bits() == 0 );
263 // protect data node by hazard pointer
264 if ( guard.protect( pArr->nodes[nSlot], [](node_ptr p) -> value_type * { return p.ptr(); }) != slot ) {
265 // slot value has been changed - retry
266 m_Stat.onSlotChanged();
268 else if ( slot.ptr() ) {
269 if ( cmp( hash, hash_accessor()( *slot.ptr() )) == 0 ) {
270 // the item with that hash value already exists
271 m_Stat.onInsertFailed();
275 // the slot must be expanded
276 expand_slot( pArr, nSlot, slot, nOffset );
279 // the slot is empty, try to insert data node
281 if ( pArr->nodes[nSlot].compare_exchange_strong( pNull, node_ptr( &val ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
283 // the new data node has been inserted
286 m_Stat.onInsertSuccess();
290 // insert failed - slot has been changed by another thread
292 m_Stat.onInsertRetry();
300 Performs inserting or updating the item with hash value equal to \p val.
301 - If hash value is found then existing item is replaced with \p val, old item is disposed
302 with \p Traits::disposer. Note that the disposer is called by \p GC asynchronously.
303 The function returns <tt> std::pair<true, false> </tt>
304 - If hash value is not found and \p bInsert is \p true then \p val is inserted,
305 the function returns <tt> std::pair<true, true> </tt>
306 - If hash value is not found and \p bInsert is \p false then the set is unchanged,
307 the function returns <tt> std::pair<false, false> </tt>
309 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful
310 (i.e. the item has been inserted or updated),
311 \p second is \p true if new item has been added or \p false if the set contains that hash.
313 std::pair<bool, bool> update( value_type& val, bool bInsert = true )
315 hash_type const& hash = hash_accessor()( val );
316 hash_splitter splitter( hash );
318 typename gc::Guard guard;
321 array_node * pArr = m_Head;
322 size_t nSlot = splitter.cut( m_Metrics.head_node_size_log );
323 assert( nSlot < m_Metrics.head_node_size );
324 size_t nOffset = m_Metrics.head_node_size_log;
327 node_ptr slot = pArr->nodes[nSlot].load( memory_model::memory_order_acquire );
328 if ( slot.bits() == flag_array_node ) {
329 // array node, go down the tree
330 assert( slot.ptr() != nullptr );
331 nSlot = splitter.cut( m_Metrics.array_node_size_log );
332 assert( nSlot < m_Metrics.array_node_size );
333 pArr = to_array( slot.ptr() );
334 nOffset += m_Metrics.array_node_size_log;
336 else if ( slot.bits() == flag_array_converting ) {
337 // the slot is converting to array node right now
339 m_Stat.onSlotConverting();
343 assert(slot.bits() == 0 );
345 // protect data node by hazard pointer
346 if ( guard.protect( pArr->nodes[nSlot], [](node_ptr p) -> value_type * { return p.ptr(); }) != slot ) {
347 // slot value has been changed - retry
348 m_Stat.onSlotChanged();
350 else if ( slot.ptr() ) {
351 if ( cmp( hash, hash_accessor()( *slot.ptr() )) == 0 ) {
352 // the item with that hash value already exists
353 // Replace it with val
354 if ( slot.ptr() == &val ) {
355 m_Stat.onUpdateExisting();
356 return std::make_pair( true, false );
359 if ( pArr->nodes[nSlot].compare_exchange_strong( slot, node_ptr( &val ), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
360 // slot can be disposed
361 gc::template retire<disposer>( slot.ptr() );
362 m_Stat.onUpdateExisting();
363 return std::make_pair( true, false );
366 m_Stat.onUpdateRetry();
370 // the slot must be expanded
371 expand_slot( pArr, nSlot, slot, nOffset );
374 // the slot is empty, try to insert data node
377 if ( pArr->nodes[nSlot].compare_exchange_strong( pNull, node_ptr( &val ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
379 // the new data node has been inserted
381 m_Stat.onUpdateNew();
382 return std::make_pair( true, true );
386 m_Stat.onUpdateFailed();
387 return std::make_pair( false, false );
390 // insert failed - slot has been changed by another thread
392 m_Stat.onUpdateRetry();
398 /// Unlinks the item \p val from the set
400 The function searches the item \p val in the set and unlink it
401 if it is found and its address is equal to <tt>&val</tt>.
403 The function returns \p true if success and \p false otherwise.
405 bool unlink( value_type const& val )
407 typename gc::Guard guard;
408 auto pred = [&val](value_type const& item) -> bool { return &item == &val; };
409 value_type * p = do_erase( hash_accessor()( val ), guard, std::ref( pred ));
411 // p is guarded by HP
413 gc::template retire<disposer>( p );
415 m_Stat.onEraseSuccess();
421 /// Deletes the item from the set
423 The function searches \p hash in the set,
424 unlinks the item found, and returns \p true.
425 If that item is not found the function returns \p false.
427 The \ref disposer specified in \p Traits is called by garbage collector \p GC asynchronously.
430 bool erase( hash_type const& hash )
432 return erase(hash, [](value_type const&) {} );
435 /// Deletes the item from the set
437 The function searches \p hash in the set,
438 call \p f functor with item found, and unlinks it from the set.
439 The \ref disposer specified in \p Traits is called
440 by garbage collector \p GC asynchronously.
442 The \p Func interface is
445 void operator()( value_type& item );
449 If \p hash is not found the function returns \p false.
451 template <typename Func>
452 bool erase( hash_type const& hash, Func f )
454 typename gc::Guard guard;
455 value_type * p = do_erase( hash, guard, []( value_type const&) -> bool {return true; } );
457 // p is guarded by HP
459 gc::template retire<disposer>( p );
462 m_Stat.onEraseSuccess();
468 /// Extracts the item with specified \p hash
470 The function searches \p hash in the set,
471 unlinks it from the set, and returns an guarded pointer to the item extracted.
472 If \p hash is not found the function returns an empty guarded pointer.
474 The \p disposer specified in \p Traits class' template parameter is called automatically
475 by garbage collector \p GC when returned \ref guarded_ptr object to be destroyed or released.
476 @note Each \p guarded_ptr object uses the GC's guard that can be limited resource.
480 typedef cds::intrusive::MultiLevelHashSet< your_template_args > my_set;
484 my_set::guarded_ptr gp( theSet.extract( 5 ));
489 // Destructor of gp releases internal HP guard
493 guarded_ptr extract( hash_type const& hash )
497 typename gc::Guard guard;
498 value_type * p = do_erase( hash, guard, []( value_type const&) -> bool {return true; } );
500 // p is guarded by HP
502 gc::template retire<disposer>( p );
504 m_Stat.onEraseSuccess();
511 /// Finds an item by it's \p hash
513 The function searches the item by \p hash and calls the functor \p f for item found.
514 The interface of \p Func functor is:
517 void operator()( value_type& item );
520 where \p item is the item found.
522 The functor may change non-key fields of \p item. Note that the functor is only guarantee
523 that \p item cannot be disposed during the functor is executing.
524 The functor does not serialize simultaneous access to the set's \p item. If such access is
525 possible you must provide your own synchronization schema on item level to prevent unsafe item modifications.
527 The function returns \p true if \p hash is found, \p false otherwise.
529 template <typename Func>
530 bool find( hash_type const& hash, Func f )
532 typename gc::Guard guard;
533 value_type * p = search( hash, guard );
535 // p is guarded by HP
543 /// Checks whether the set contains \p hash
545 The function searches the item by its \p hash
546 and returns \p true if it is found, or \p false otherwise.
548 bool contains( hash_type const& hash )
550 return find( hash, [](value_type&) {} );
553 /// Finds an item by it's \p hash and returns the item found
555 The function searches the item by its \p hash
556 and returns the guarded pointer to the item found.
557 If \p hash is not found the function returns an empty \p guarded_ptr.
559 @note Each \p guarded_ptr object uses one GC's guard which can be limited resource.
563 typedef cds::intrusive::MultiLevelHashSet< your_template_params > my_set;
567 my_set::guarded_ptr gp( theSet.get( 5 ));
568 if ( theSet.get( 5 )) {
572 // Destructor of guarded_ptr releases internal HP guard
576 guarded_ptr get( hash_type const& hash )
580 typename gc::Guard guard;
581 gp.reset( search( hash, guard ));
586 /// Clears the set (non-atomic)
588 The function unlink all data node from the set.
589 The function is not atomic but is thread-safe.
590 After \p %clear() the set may not be empty because another threads may insert items.
592 For each item the \p disposer is called after unlinking.
596 clear_array( m_Head, head_size() );
599 /// Checks if the set is empty
601 Emptiness is checked by item counting: if item count is zero then the set is empty.
602 Thus, the correct item counting feature is an important part of the set implementation.
609 /// Returns item count in the set
612 return m_ItemCounter;
615 /// Returns const reference to internal statistics
616 stat const& statistics() const
621 /// Returns the size of head node
622 size_t head_size() const
624 return m_Metrics.head_node_size;
627 /// Returns the size of the array node
628 size_t array_node_size() const
630 return m_Metrics.array_node_size;
635 static metrics make_metrics( size_t head_bits, size_t array_bits )
637 size_t const hash_bits = sizeof( hash_type ) * 8;
639 if ( array_bits < 2 )
643 if ( head_bits > hash_bits )
644 head_bits = hash_bits;
645 if ( (hash_bits - head_bits) % array_bits != 0 )
646 head_bits += (hash_bits - head_bits) % array_bits;
648 assert( (hash_bits - head_bits) % array_bits == 0 );
651 m.head_node_size_log = head_bits;
652 m.head_node_size = size_t(1) << head_bits;
653 m.array_node_size_log = array_bits;
654 m.array_node_size = size_t(1) << array_bits;
658 array_node * alloc_head_node() const
660 return alloc_array_node( head_size(), nullptr, 0 );
663 array_node * alloc_array_node( array_node * pParent, size_t idxParent ) const
665 return alloc_array_node( array_node_size(), pParent, idxParent );
668 static array_node * alloc_array_node( size_t nSize, array_node * pParent, size_t idxParent )
670 array_node * pNode = cxx_array_node_allocator().NewBlock( sizeof(array_node) + sizeof(atomic_node_ptr) * (nSize - 1), pParent, idxParent );
671 for ( atomic_node_ptr * p = pNode->nodes, *pEnd = pNode->nodes + nSize; p < pEnd; ++p )
672 p->store( node_ptr(), memory_model::memory_order_release );
676 static void free_array_node( array_node * parr )
678 cxx_array_node_allocator().Delete( parr );
683 // The function is not thread-safe. For use in dtor only
687 // Destroy all array nodes
688 destroy_array_nodes( m_Head, head_size());
691 void destroy_array_nodes( array_node * pArr, size_t nSize )
693 for ( atomic_node_ptr * p = pArr->nodes, *pLast = pArr->nodes + nSize; p != pLast; ++p ) {
694 node_ptr slot = p->load( memory_model::memory_order_acquire );
695 if ( slot.bits() == flag_array_node ) {
696 destroy_array_nodes(to_array(slot.ptr()), array_node_size());
697 free_array_node( to_array(slot.ptr()));
698 p->store(node_ptr(), memory_model::memory_order_relaxed );
703 void clear_array( array_node * pArrNode, size_t nSize )
708 for ( atomic_node_ptr * pArr = pArrNode->nodes, *pLast = pArr + nSize; pArr != pLast; ++pArr ) {
710 node_ptr slot = pArr->load( memory_model::memory_order_acquire );
711 if ( slot.bits() == flag_array_node ) {
712 // array node, go down the tree
713 assert( slot.ptr() != nullptr );
714 clear_array( to_array( slot.ptr()), array_node_size() );
717 else if ( slot.bits() == flag_array_converting ) {
718 // the slot is converting to array node right now
719 while ( (slot = pArr->load(memory_model::memory_order_acquire)).bits() == flag_array_converting ) {
721 m_Stat.onSlotConverting();
725 assert( slot.ptr() != nullptr );
726 assert(slot.bits() == flag_array_node );
727 clear_array( to_array( slot.ptr()), array_node_size() );
732 if ( pArr->compare_exchange_strong( slot, node_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
734 gc::template retire<disposer>( slot.ptr() );
736 m_Stat.onEraseSuccess();
749 converter( value_type * p )
753 converter( array_node * p )
758 static array_node * to_array( value_type * p )
760 return converter( p ).pArr;
762 static value_type * to_node( array_node * p )
764 return converter( p ).pData;
767 bool expand_slot( array_node * pParent, size_t idxParent, node_ptr current, size_t nOffset )
769 assert( current.bits() == 0 );
770 assert( current.ptr() );
772 size_t idx = hash_splitter(hash_accessor()(*current.ptr()), nOffset).cut( m_Metrics.array_node_size_log );
773 array_node * pArr = alloc_array_node( pParent, idxParent );
775 node_ptr cur(current.ptr());
776 atomic_node_ptr& slot = pParent->nodes[idxParent];
777 if ( !slot.compare_exchange_strong( cur, cur | flag_array_converting, memory_model::memory_order_release, atomics::memory_order_relaxed ))
779 m_Stat.onExpandNodeFailed();
780 free_array_node( pArr );
784 pArr->nodes[idx].store( current, memory_model::memory_order_release );
786 cur = cur | flag_array_converting;
788 slot.compare_exchange_strong( cur, node_ptr( to_node( pArr ), flag_array_node ), memory_model::memory_order_release, atomics::memory_order_relaxed )
791 m_Stat.onArrayNodeCreated();
795 value_type * search( hash_type const& hash, typename gc::Guard& guard )
797 hash_splitter splitter( hash );
801 array_node * pArr = m_Head;
802 size_t nSlot = splitter.cut( m_Metrics.head_node_size_log );
803 assert( nSlot < m_Metrics.head_node_size );
806 node_ptr slot = pArr->nodes[nSlot].load( memory_model::memory_order_acquire );
807 if ( slot.bits() == flag_array_node ) {
808 // array node, go down the tree
809 assert( slot.ptr() != nullptr );
810 nSlot = splitter.cut( m_Metrics.array_node_size_log );
811 assert( nSlot < m_Metrics.array_node_size );
812 pArr = to_array( slot.ptr() );
814 else if ( slot.bits() == flag_array_converting ) {
815 // the slot is converting to array node right now
817 m_Stat.onSlotConverting();
821 assert(slot.bits() == 0 );
823 // protect data node by hazard pointer
824 if ( guard.protect( pArr->nodes[nSlot], [](node_ptr p) -> value_type * { return p.ptr(); }) != slot ) {
825 // slot value has been changed - retry
826 m_Stat.onSlotChanged();
828 else if ( slot.ptr() && cmp( hash, hash_accessor()( *slot.ptr() )) == 0 ) {
830 m_Stat.onFindSuccess();
833 m_Stat.onFindFailed();
839 template <typename Predicate>
840 value_type * do_erase( hash_type const& hash, typename gc::Guard& guard, Predicate pred )
842 hash_splitter splitter( hash );
846 array_node * pArr = m_Head;
847 size_t nSlot = splitter.cut( m_Metrics.head_node_size_log );
848 assert( nSlot < m_Metrics.head_node_size );
851 node_ptr slot = pArr->nodes[nSlot].load( memory_model::memory_order_acquire );
852 if ( slot.bits() == flag_array_node ) {
853 // array node, go down the tree
854 assert( slot.ptr() != nullptr );
855 nSlot = splitter.cut( m_Metrics.array_node_size_log );
856 assert( nSlot < m_Metrics.array_node_size );
857 pArr = to_array( slot.ptr() );
859 else if ( slot.bits() == flag_array_converting ) {
860 // the slot is converting to array node right now
862 m_Stat.onSlotConverting();
866 assert(slot.bits() == 0 );
868 // protect data node by hazard pointer
869 if ( guard.protect( pArr->nodes[nSlot], [](node_ptr p) -> value_type * { return p.ptr(); }) != slot ) {
870 // slot value has been changed - retry
871 m_Stat.onSlotChanged();
873 else if ( slot.ptr() ) {
874 if ( cmp( hash, hash_accessor()( *slot.ptr() )) == 0 && pred( *slot.ptr() )) {
875 // item found - replace it with nullptr
876 if ( pArr->nodes[nSlot].compare_exchange_strong(slot, node_ptr(nullptr), memory_model::memory_order_acquire, atomics::memory_order_relaxed))
878 m_Stat.onEraseRetry();
881 m_Stat.onEraseFailed();
886 m_Stat.onEraseFailed();
895 }} // namespace cds::intrusive
897 #endif // #ifndef CDSLIB_INTRUSIVE_IMPL_MULTILEVEL_HASHSET_H