/*
This file is a part of libcds - Concurrent Data Structures library
- (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
Source code repo: http://github.com/khizmax/libcds/
Download: http://sourceforge.net/projects/libcds/files/
//@endcond
};
+ /// Hash size option
+ /**
+ @copydetails traits::hash_size
+ */
+ template <size_t Size>
+ struct hash_size {
+ //@cond
+ template <typename Base> struct pack: public Base
+ {
+ enum: size_t {
+ hash_size = Size
+ };
+ };
+ //@endcond
+ };
+
+ /// Hash splitter option
+ /**
+ @copydetails traits::hash_splitter
+ */
+ template <typename Splitter>
+ struct hash_splitter {
+ //@cond
+ template <typename Base> struct pack: public Base
+ {
+ typedef Splitter hash_splitter;
+ };
+ //@endcond
+ };
+
+
/// \p FeldmanHashSet internal statistics
template <typename EventCounter = cds::atomicity::event_counter>
struct stat {
*/
typedef cds::opt::none hash_accessor;
+ /// The size of hash value in bytes
+ /**
+ By default, the size of hash value is <tt>sizeof( hash_type )</tt>.
+ Sometimes that is not correct; for example, for the following 6-byte struct a \p static_assert will fire:
+ \code
+ struct key_type {
+ uint32_t key1;
+ uint16_t subkey;
+ };
+
+ static_assert( sizeof( key_type ) == 6, "Key type size mismatch" );
+ \endcode
+ In that case you can specify \p hash_size explicitly.
+
+ Value \p 0 means <tt>sizeof( hash_type )</tt>.
+ */
+ static CDS_CONSTEXPR size_t const hash_size = 0;
+
+ /// Hash splitter
+ /**
+ This trait specifies the hash bit-string splitter algorithm.
+ By default, \p cds::algo::number_splitter is used if \p HashType is a number,
+ \p cds::algo::split_bitstring otherwise.
+ */
+ typedef cds::opt::none hash_splitter;
+
/// Disposer for removing data nodes
typedef cds::intrusive::opt::v::empty_disposer disposer;
/// Array node allocator
/**
- Allocator for array nodes. That allocator is used for creating \p headNode and \p arrayNode when the set grows.
+ Allocator for array nodes. The allocator is used for creating \p headNode and \p arrayNode when the set grows.
Default is \ref CDS_DEFAULT_ALLOCATOR
*/
typedef CDS_DEFAULT_ALLOCATOR node_allocator;
Supported \p Options are:
- \p feldman_hashset::hash_accessor - mandatory option, hash accessor functor.
@copydetails traits::hash_accessor
+ - \p feldman_hashset::hash_size - the size of hash value in bytes.
+ @copydetails traits::hash_size
+ - \p feldman_hashset::hash_splitter - a hash splitter algorithm
+ @copydetails traits::hash_splitter
- \p opt::node_allocator - array node allocator.
@copydetails traits::node_allocator
- \p opt::compare - hash comparison functor. No default functor is provided.
//@cond
namespace details {
- template <typename HashType >
- using hash_splitter = cds::algo::split_bitstring< HashType >;
+ template <typename HashType, size_t HashSize >
+ using hash_splitter = cds::algo::split_bitstring< HashType, HashSize >;
struct metrics {
size_t head_node_size; // power-of-two
feldman_hashset::bitwise_compare< hash_type >
>::type hash_comparator;
- typedef feldman_hashset::details::hash_splitter< hash_type > hash_splitter;
+ /// The size of hash_type in bytes, see \p traits::hash_size for explanation
+ static CDS_CONSTEXPR size_t const c_hash_size = traits::hash_size == 0 ? sizeof( hash_type ) : static_cast<size_t>( traits::hash_size );
+
+ typedef typename std::conditional<
+ std::is_same< typename traits::hash_splitter, cds::opt::none >::value,
+ typename cds::algo::select_splitter< hash_type, c_hash_size >::type,
+ typename traits::hash_splitter
+ >::type hash_splitter;
enum node_flags {
flag_array_converting = 1, ///< the cell is converting from data node to an array node
struct traverse_data {
hash_splitter splitter;
array_node * pArr;
- size_t nSlot;
+ typename hash_splitter::uint_type nSlot;
size_t nHeight;
traverse_data( hash_type const& hash, multilevel_array& arr )
{
splitter.reset();
pArr = arr.head();
- nSlot = splitter.cut( arr.metrics().head_node_size_log );
+ nSlot = splitter.cut( static_cast<unsigned>( arr.metrics().head_node_size_log ));
+ assert( static_cast<size_t>( nSlot ) < arr.metrics().head_node_size );
nHeight = 1;
}
};
public:
multilevel_array(size_t head_bits, size_t array_bits )
- : m_Metrics(feldman_hashset::details::metrics::make(head_bits, array_bits, sizeof(hash_type)))
+ : m_Metrics(feldman_hashset::details::metrics::make( head_bits, array_bits, c_hash_size ))
, m_Head( alloc_head_node())
- {}
+ {
+ assert( hash_splitter::is_correct( static_cast<unsigned>( metrics().head_node_size_log )));
+ assert( hash_splitter::is_correct( static_cast<unsigned>( metrics().array_node_size_log )));
+ }
~multilevel_array()
{
destroy_tree();
- free_array_node(m_Head);
+ free_array_node( m_Head, head_size());
}
node_ptr traverse(traverse_data& pos)
back_off bkoff;
while (true) {
node_ptr slot = pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire);
- if (slot.bits() == flag_array_node) {
+ if ( slot.bits() == flag_array_node ) {
// array node, go down the tree
assert(slot.ptr() != nullptr);
assert( !pos.splitter.eos());
- pos.nSlot = pos.splitter.cut( metrics().array_node_size_log );
+ pos.nSlot = pos.splitter.cut( static_cast<unsigned>( metrics().array_node_size_log ));
assert( pos.nSlot < metrics().array_node_size );
pos.pArr = to_array(slot.ptr());
++pos.nHeight;
for (atomic_node_ptr * p = pArr->nodes, *pLast = p + nSize; p != pLast; ++p) {
node_ptr slot = p->load(memory_model::memory_order_relaxed);
if (slot.bits() == flag_array_node) {
- destroy_array_nodes(to_array(slot.ptr()), array_node_size());
- free_array_node(to_array(slot.ptr()));
+ destroy_array_nodes( to_array(slot.ptr()), array_node_size());
+ free_array_node( to_array( slot.ptr()), array_node_size());
p->store(node_ptr(), memory_model::memory_order_relaxed);
}
}
return alloc_array_node(array_node_size(), pParent, idxParent);
}
- static void free_array_node(array_node * parr)
+ static void free_array_node( array_node * parr, size_t /*nSize*/ )
{
- cxx_array_node_allocator().Delete(parr);
+ cxx_array_node_allocator().Delete( parr, 1 );
}
union converter {
bool expand_slot( traverse_data& pos, node_ptr current)
{
+ assert( !pos.splitter.eos() );
return expand_slot( pos.pArr, pos.nSlot, current, pos.splitter.bit_offset());
}
if (!slot.compare_exchange_strong(cur, cur | flag_array_converting, memory_model::memory_order_release, atomics::memory_order_relaxed))
{
stats().onExpandNodeFailed();
- free_array_node(pArr);
+ free_array_node( pArr, array_node_size());
return false;
}
- size_t idx = hash_splitter( hash_accessor()(*current.ptr()), nOffset ).cut( m_Metrics.array_node_size_log );
+ typename hash_splitter::uint_type idx = hash_splitter( hash_accessor()(*current.ptr()), nOffset ).cut(
+ static_cast<unsigned>( m_Metrics.array_node_size_log ));
pArr->nodes[idx].store(current, memory_model::memory_order_release);
cur = cur | flag_array_converting;