2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H
32 #define CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H
34 #include <memory.h> // memcmp, memcpy
35 #include <type_traits>
37 #include <cds/intrusive/details/base.h>
38 #include <cds/opt/compare.h>
39 #include <cds/algo/atomic.h>
40 #include <cds/algo/split_bitstring.h>
41 #include <cds/details/marked_ptr.h>
42 #include <cds/urcu/options.h>
44 namespace cds { namespace intrusive {
46 /// FeldmanHashSet related definitions
47 /** @ingroup cds_intrusive_helper
49 namespace feldman_hashset {
/// Hash accessor option
/**
    @copydetails traits::hash_accessor
*/
template <typename Accessor>
struct hash_accessor {
    //@cond
    // Option packer: injects the accessor type into the traits being built
    template <typename Base> struct pack: public Base
    {
        typedef Accessor hash_accessor;
    };
    //@endcond
};
/// The size of hash value in bytes
/**
    @copydetails traits::hash_size
*/
template <size_t Size>
struct hash_size {
    //@cond
    // Option packer: injects the explicit hash size into the traits being built
    template <typename Base> struct pack: public Base
    {
        enum: size_t {
            hash_size = Size
        };
    };
    //@endcond
};
80 /// \p FeldmanHashSet internal statistics
81 template <typename EventCounter = cds::atomicity::event_counter>
83 typedef EventCounter event_counter ; ///< Event counter type
85 event_counter m_nInsertSuccess; ///< Number of success \p insert() operations
86 event_counter m_nInsertFailed; ///< Number of failed \p insert() operations
87 event_counter m_nInsertRetry; ///< Number of attempts to insert new item
88 event_counter m_nUpdateNew; ///< Number of new item inserted for \p update()
89 event_counter m_nUpdateExisting; ///< Number of existing item updates
90 event_counter m_nUpdateFailed; ///< Number of failed \p update() call
91 event_counter m_nUpdateRetry; ///< Number of attempts to update the item
92 event_counter m_nEraseSuccess; ///< Number of successful \p erase(), \p unlink(), \p extract() operations
93 event_counter m_nEraseFailed; ///< Number of failed \p erase(), \p unlink(), \p extract() operations
94 event_counter m_nEraseRetry; ///< Number of attempts to \p erase() an item
95 event_counter m_nFindSuccess; ///< Number of successful \p find() and \p get() operations
96 event_counter m_nFindFailed; ///< Number of failed \p find() and \p get() operations
98 event_counter m_nExpandNodeSuccess; ///< Number of succeeded attempts converting data node to array node
99 event_counter m_nExpandNodeFailed; ///< Number of failed attempts converting data node to array node
100 event_counter m_nSlotChanged; ///< Number of array node slot changing by other thread during an operation
101 event_counter m_nSlotConverting; ///< Number of events when we encounter a slot while it is converting to array node
103 event_counter m_nArrayNodeCount; ///< Number of array nodes
104 event_counter m_nHeight; ///< Current height of the tree
107 void onInsertSuccess() { ++m_nInsertSuccess; }
108 void onInsertFailed() { ++m_nInsertFailed; }
109 void onInsertRetry() { ++m_nInsertRetry; }
110 void onUpdateNew() { ++m_nUpdateNew; }
111 void onUpdateExisting() { ++m_nUpdateExisting; }
112 void onUpdateFailed() { ++m_nUpdateFailed; }
113 void onUpdateRetry() { ++m_nUpdateRetry; }
114 void onEraseSuccess() { ++m_nEraseSuccess; }
115 void onEraseFailed() { ++m_nEraseFailed; }
116 void onEraseRetry() { ++m_nEraseRetry; }
117 void onFindSuccess() { ++m_nFindSuccess; }
118 void onFindFailed() { ++m_nFindFailed; }
120 void onExpandNodeSuccess() { ++m_nExpandNodeSuccess; }
121 void onExpandNodeFailed() { ++m_nExpandNodeFailed; }
122 void onSlotChanged() { ++m_nSlotChanged; }
123 void onSlotConverting() { ++m_nSlotConverting; }
124 void onArrayNodeCreated() { ++m_nArrayNodeCount; }
125 void height( size_t h ) { if (m_nHeight < h ) m_nHeight = h; }
/// \p FeldmanHashSet empty internal statistics
/**
    All event handlers are no-ops; use this type to disable statistics gathering
    (this is the default for \p traits::stat).
*/
struct empty_stat {
    //@cond
    void onInsertSuccess()      const {}
    void onInsertFailed()       const {}
    void onInsertRetry()        const {}
    void onUpdateNew()          const {}
    void onUpdateExisting()     const {}
    void onUpdateFailed()       const {}
    void onUpdateRetry()        const {}
    void onEraseSuccess()       const {}
    void onEraseFailed()        const {}
    void onEraseRetry()         const {}
    void onFindSuccess()        const {}
    void onFindFailed()         const {}

    void onExpandNodeSuccess()  const {}
    void onExpandNodeFailed()   const {}
    void onSlotChanged()        const {}
    void onSlotConverting()     const {}
    void onArrayNodeCreated()   const {}
    void height(size_t)         const {}
    //@endcond
};
154 /// \p FeldmanHashSet traits
157 /// Mandatory functor to get hash value from data node
159 It is most-important feature of \p FeldmanHashSet.
160 That functor must return a reference to fixed-sized hash value of data node.
161 The return value of that functor specifies the type of hash value.
165 typedef uint8_t hash_type[32]; // 256-bit hash type
167 hash_type hash; // 256-bit hash value
172 struct foo_hash_accessor {
173 hash_type const& operator()( foo const& d ) const
180 typedef cds::opt::none hash_accessor;
182 /// The size of hash value in bytes
184 By default, the size of hash value is <tt>sizeof( hash_type )</tt>.
185 Sometimes it is not correct, for example, for that 6-byte struct \p static_assert will be thrown:
192 static_assert( sizeof( key_type ) == 6, "Key type size mismatch" );
194 For that case you can specify \p hash_size explicitly.
196 Value \p 0 means <tt>sizeof( hash_type )</tt>.
202 /// Disposer for removing data nodes
203 typedef cds::intrusive::opt::v::empty_disposer disposer;
205 /// Hash comparing functor
207 No default functor is provided.
208 If the option is not specified, the \p less option is used.
210 typedef cds::opt::none compare;
212 /// Specifies binary predicate used for hash compare.
214 If \p %less and \p %compare are not specified, \p memcmp() -like @ref bitwise_compare "bit-wise hash comparator" is used
215 because the hash value is treated as fixed-sized bit-string.
217 typedef cds::opt::none less;
221 The item counting is an important part of \p FeldmanHashSet algorithm:
222 the \p empty() member function depends on correct item counting.
223 Therefore, \p atomicity::empty_item_counter is not allowed as a type of the option.
225 Default is \p atomicity::item_counter.
227 typedef cds::atomicity::item_counter item_counter;
229 /// Array node allocator
231 Allocator for array nodes. The allocator is used for creating \p headNode and \p arrayNode when the set grows.
232 Default is \ref CDS_DEFAULT_ALLOCATOR
234 typedef CDS_DEFAULT_ALLOCATOR node_allocator;
236 /// C++ memory ordering model
238 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
239 or \p opt::v::sequential_consistent (sequentially consisnent memory model).
241 typedef cds::opt::v::relaxed_ordering memory_model;
243 /// Back-off strategy
244 typedef cds::backoff::Default back_off;
246 /// Internal statistics
248 By default, internal statistics is disabled (\p feldman_hashset::empty_stat).
249 Use \p feldman_hashset::stat to enable it.
251 typedef empty_stat stat;
253 /// RCU deadlock checking policy (only for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet")
255 List of available policy see \p opt::rcu_check_deadlock
257 typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock;
260 /// Metafunction converting option list to \p feldman_hashset::traits
262 Supported \p Options are:
263 - \p feldman_hashset::hash_accessor - mandatory option, hash accessor functor.
264 @copydetails traits::hash_accessor
265 - \p feldman_hashset::hash_size - the size of hash value in bytes.
266 @copydetails traits::hash_size
267 - \p opt::node_allocator - array node allocator.
268 @copydetails traits::node_allocator
269 - \p opt::compare - hash comparison functor. No default functor is provided.
270 If the option is not specified, the \p opt::less is used.
271 - \p opt::less - specifies binary predicate used for hash comparison.
272 If the option is not specified, \p memcmp() -like bit-wise hash comparator is used
273 because the hash value is treated as fixed-sized bit-string.
274 - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used.
275 - \p opt::disposer - the functor used for disposing removed data node. Default is \p opt::v::empty_disposer. Due the nature
276 of GC schema the disposer may be called asynchronously.
277 - \p opt::item_counter - the type of item counting feature.
278 The item counting is an important part of \p FeldmanHashSet algorithm:
279 the \p empty() member function depends on correct item counting.
280 Therefore, \p atomicity::empty_item_counter is not allowed as a type of the option.
281 Default is \p atomicity::item_counter.
282 - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
283 or \p opt::v::sequential_consistent (sequentially consisnent memory model).
284 - \p opt::stat - internal statistics. By default, it is disabled (\p feldman_hashset::empty_stat).
285 To enable it use \p feldman_hashset::stat
286 - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet"
287 Default is \p opt::v::rcu_throw_deadlock
289 template <typename... Options>
292 # ifdef CDS_DOXYGEN_INVOKED
293 typedef implementation_defined type ; ///< Metafunction result
295 typedef typename cds::opt::make_options<
296 typename cds::opt::find_type_traits< traits, Options... >::type
/// Bit-wise memcmp-based comparator for hash value \p T
template <typename T>
struct bitwise_compare
{
    /// Compares \p lhs and \p rhs
    /**
        Returns:
        - <tt> < 0</tt> if <tt>lhs < rhs</tt>
        - <tt>0</tt> if <tt>lhs == rhs</tt>
        - <tt> > 0</tt> if <tt>lhs > rhs</tt>
    */
    int operator()( T const& lhs, T const& rhs ) const
    {
        // The hash is treated as an opaque fixed-size bit-string,
        // so a byte-wise comparison over the whole object is sufficient.
        return memcmp( &lhs, &rhs, sizeof(T));
    }
};
/// One-level statistics, see \p FeldmanHashSet::get_level_statistics
struct level_statistics
{
    size_t array_node_count;    ///< Count of array node at the level
    size_t node_capacity;       ///< Array capacity

    size_t data_cell_count;     ///< The number of data cells in all array node at this level
    size_t array_cell_count;    ///< The number of array cells in all array node at this level
    size_t empty_cell_count;    ///< The number of empty cells in all array node at this level

    /// Default ctor: zero all counters.
    /**
        \p node_capacity is zero-initialized here as well; previously it was left
        uninitialized and set only later (by the statistics gatherer), so reading a
        freshly-constructed object yielded an indeterminate value.
    */
    level_statistics()
        : array_node_count(0)
        , node_capacity(0)
        , data_cell_count(0)
        , array_cell_count(0)
        , empty_cell_count(0)
    {}
};
// Splits the fixed-size hash (HashSize bytes) into bit chunks; used to index
// head/array nodes while descending the multi-level array.
template <typename HashType, size_t HashSize >
using hash_splitter = cds::algo::split_bitstring< HashType, HashSize >;
/// Geometry of the multi-level array: head and array node sizes
struct metrics {
    size_t  head_node_size;     ///< Size of head node (power-of-two)
    size_t  head_node_size_log; ///< log2( head_node_size )
    size_t  array_node_size;    ///< Size of array node (power-of-two)
    size_t  array_node_size_log;///< log2( array_node_size )

    /// Normalizes \p head_bits / \p array_bits so that the whole hash (hash_size bytes) is consumed exactly
    static metrics make(size_t head_bits, size_t array_bits, size_t hash_size )
    {
        size_t const hash_bits = hash_size * 8;

        // NOTE(review): lower-bound clamps reconstructed from upstream libcds;
        // only the upper clamp and remainder adjustment were visible here - confirm.
        if ( head_bits < 4 )
            head_bits = 4;
        if ( array_bits < 2 )
            array_bits = 2;
        if (head_bits > hash_bits)
            head_bits = hash_bits;
        // Grow head_bits so the remaining bits divide evenly into array-node chunks
        if ((hash_bits - head_bits) % array_bits != 0)
            head_bits += (hash_bits - head_bits) % array_bits;

        assert((hash_bits - head_bits) % array_bits == 0);

        metrics m;
        m.head_node_size_log = head_bits;
        m.head_node_size = size_t(1) << head_bits;
        m.array_node_size_log = array_bits;
        m.array_node_size = size_t(1) << array_bits;
        return m;
    }
};
374 } // namespace details
// Multi-level array of atomic cells - the tree-like backbone shared by the
// FeldmanHashSet implementations. Each cell holds a marked pointer whose low
// bits encode the cell state (see flag_array_* below).
template <typename T, typename Traits>
class multilevel_array
typedef T value_type;
typedef Traits traits;
typedef typename Traits::node_allocator node_allocator;
typedef typename traits::memory_model memory_model;
typedef typename traits::back_off back_off; ///< Backoff strategy
typedef typename traits::stat stat; ///< Internal statistics type
// Mandatory: extracts the fixed-size hash from a data node (see traits::hash_accessor)
typedef typename traits::hash_accessor hash_accessor;
static_assert(!std::is_same< hash_accessor, cds::opt::none >::value, "hash_accessor functor must be specified");
/// Hash type deduced from \p hash_accessor return type
typedef typename std::decay<
typename std::remove_reference<
decltype(hash_accessor()(std::declval<value_type>()))
static_assert(!std::is_pointer<hash_type>::value, "hash_accessor should return a reference to hash value");
// Comparator: user-provided compare/less if given, otherwise bit-wise memcmp
typedef typename cds::opt::details::make_comparator_from<
feldman_hashset::bitwise_compare< hash_type >
>::type hash_comparator;
/// The size of hash_type in bytes, see \p traits::hash_size for explanation
static CDS_CONSTEXPR size_t const c_hash_size = traits::hash_size == 0 ? sizeof( hash_type ) : traits::hash_size;
typedef feldman_hashset::details::hash_splitter< hash_type, c_hash_size > hash_splitter;
// Cell-state flags kept in the low bits of the marked pointer (mask 3 below):
flag_array_converting = 1, ///< the cell is converting from data node to an array node
flag_array_node = 2 ///< the cell is a pointer to an array node
typedef cds::details::marked_ptr< value_type, 3 > node_ptr;
typedef atomics::atomic< node_ptr > atomic_node_ptr;
// Array node: cells plus a back-link to its parent slot
array_node * const pParent; ///< parent array node
size_t const idxParent; ///< index in parent array node
atomic_node_ptr nodes[1]; ///< node array (real size fixed at allocation, see alloc_array_node())
array_node(array_node * parent, size_t idx)
// array nodes are allocated in place and never copied or moved
array_node() = delete;
array_node(array_node const&) = delete;
array_node(array_node&&) = delete;
typedef cds::details::Allocator< array_node, node_allocator > cxx_array_node_allocator;
// Traversal cursor: current array node, slot index, and the splitter's position in the hash
struct traverse_data {
hash_splitter splitter;
traverse_data( hash_type const& hash, multilevel_array& arr )
void reset( multilevel_array& arr )
// restart from the head node: the first chunk uses head_node_size_log bits
nSlot = splitter.cut( arr.metrics().head_node_size_log );
feldman_hashset::details::metrics const m_Metrics;
// head_bits/array_bits are normalized by metrics::make() before use
multilevel_array(size_t head_bits, size_t array_bits )
: m_Metrics(feldman_hashset::details::metrics::make( head_bits, array_bits, c_hash_size ))
, m_Head( alloc_head_node())
free_array_node(m_Head);
// Descends the tree guided by pos.splitter, cutting one chunk of hash bits per level,
// until it reaches a terminal slot (visible exit path asserts slot.bits() == 0).
node_ptr traverse(traverse_data& pos)
node_ptr slot = pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire);
if (slot.bits() == flag_array_node) {
// array node, go down the tree
assert(slot.ptr() != nullptr);
assert( !pos.splitter.eos());
pos.nSlot = pos.splitter.cut( metrics().array_node_size_log );
assert( pos.nSlot < metrics().array_node_size );
pos.pArr = to_array(slot.ptr());
else if (slot.bits() == flag_array_converting) {
// the slot is converting to array node right now
stats().onSlotConverting();
assert(slot.bits() == 0);
size_t head_size() const
return m_Metrics.head_node_size;
size_t array_node_size() const
return m_Metrics.array_node_size;
// Collects per-level statistics; uses relaxed loads, so the snapshot is not atomic
void get_level_statistics(std::vector< feldman_hashset::level_statistics>& stat) const
gather_level_statistics(stat, 0, m_Head, head_size());
array_node * head() const
feldman_hashset::details::metrics const& metrics() const
// The function is not thread-safe. For use in dtor only
// Destroy all array nodes
destroy_array_nodes(m_Head, head_size());
// Recursively frees child array nodes; data-node pointers are left in place
// (their disposal is not handled here)
void destroy_array_nodes(array_node * pArr, size_t nSize)
for (atomic_node_ptr * p = pArr->nodes, *pLast = p + nSize; p != pLast; ++p) {
node_ptr slot = p->load(memory_model::memory_order_relaxed);
if (slot.bits() == flag_array_node) {
destroy_array_nodes(to_array(slot.ptr()), array_node_size());
free_array_node(to_array(slot.ptr()));
p->store(node_ptr(), memory_model::memory_order_relaxed);
// Allocates one raw block sized for nSize cells; nodes[1] is the
// flexible-array-member trick, hence the (nSize - 1) below
static array_node * alloc_array_node(size_t nSize, array_node * pParent, size_t idxParent)
array_node * pNode = cxx_array_node_allocator().NewBlock(sizeof(array_node) + sizeof(atomic_node_ptr) * (nSize - 1), pParent, idxParent);
new (pNode->nodes) atomic_node_ptr[nSize];
array_node * alloc_head_node() const
return alloc_array_node(head_size(), nullptr, 0);
array_node * alloc_array_node(array_node * pParent, size_t idxParent) const
return alloc_array_node(array_node_size(), pParent, idxParent);
static void free_array_node(array_node * parr)
cxx_array_node_allocator().Delete(parr);
// Punning helper: a cell stores either a data-node or an array-node pointer;
// which one is meant is carried by the marked-pointer flag bits, not the type
converter(value_type * p)
converter(array_node * p)
static array_node * to_array(value_type * p)
return converter(p).pArr;
static value_type * to_node(array_node * p)
return converter(p).pData;
void gather_level_statistics(std::vector<feldman_hashset::level_statistics>& stat, size_t nLevel, array_node * pArr, size_t nSize) const
if (stat.size() <= nLevel) {
stat.resize(nLevel + 1);
// node_capacity is recorded once per level, on first visit
stat[nLevel].node_capacity = nSize;
++stat[nLevel].array_node_count;
for (atomic_node_ptr * p = pArr->nodes, *pLast = p + nSize; p != pLast; ++p) {
node_ptr slot = p->load(memory_model::memory_order_relaxed);
++stat[nLevel].array_cell_count;
if (slot.bits() == flag_array_node)
gather_level_statistics(stat, nLevel + 1, to_array(slot.ptr()), array_node_size());
++stat[nLevel].data_cell_count;
++stat[nLevel].empty_cell_count;
bool expand_slot( traverse_data& pos, node_ptr current)
return expand_slot( pos.pArr, pos.nSlot, current, pos.splitter.bit_offset());
// Converts a data-node slot into a child array node:
//  1) CAS the slot from the bare data pointer to "data | flag_array_converting";
//  2) place the data node inside the new child at the index given by its next hash chunk;
//  3) CAS the slot from "converting" to the child array node pointer.
// On the first CAS failure the freshly allocated child is released and the caller retries.
bool expand_slot(array_node * pParent, size_t idxParent, node_ptr current, size_t nOffset)
assert(current.bits() == 0);
assert(current.ptr());
array_node * pArr = alloc_array_node(pParent, idxParent);
node_ptr cur(current.ptr());
atomic_node_ptr& slot = pParent->nodes[idxParent];
if (!slot.compare_exchange_strong(cur, cur | flag_array_converting, memory_model::memory_order_release, atomics::memory_order_relaxed))
stats().onExpandNodeFailed();
free_array_node(pArr);
// re-split the data node's own hash starting at this level's bit offset
size_t idx = hash_splitter( hash_accessor()(*current.ptr()), nOffset ).cut( m_Metrics.array_node_size_log );
pArr->nodes[idx].store(current, memory_model::memory_order_release);
cur = cur | flag_array_converting;
slot.compare_exchange_strong(cur, node_ptr(to_node(pArr), flag_array_node), memory_model::memory_order_release, atomics::memory_order_relaxed)
stats().onExpandNodeSuccess();
stats().onArrayNodeCreated();
656 } // namespace feldman_hashset
// Forward declaration; Traits defaults to feldman_hashset::traits defined above.
// GC is presumably the memory-reclamation scheme selecting the concrete
// implementation - defined elsewhere, confirm against the GC-specific headers.
template < class GC, typename T, class Traits = feldman_hashset::traits >
class FeldmanHashSet;
664 }} // namespace cds::intrusive
666 #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H