The splitter stores a const reference to bit-string, not a copy.
The maximum count of bits that can be cut in a single call is <tt> sizeof(UInt) * 8 </tt> (the result must fit into the \p UInt return type).
+ The splitter keeps byte order.
+
Template parameters:
- \p BitString - a fixed-sized type that interprets as bit string
- \p BitStringSize - the size of \p BitString in bytes, default is <tt>sizeof( BitString )</tt>.
You can specify 0 for default.
- - \p UInt - an unsigned integer, return type for \p cut()
+ - \p UInt - an unsigned integer, return type for \p cut(), default is \p unsigned
+
+ There are specialized splitters:
+ - a simplified \p byte_splitter algorithm that is suitable when count is multiple of 8.
+ - \p number_splitter algorithm is suitable for a number
*/
- template <typename BitString, size_t BitStringSize = sizeof( BitString ), typename UInt = typename std::conditional< BitStringSize % sizeof(size_t) == 0, size_t, unsigned >::type >
+ template <typename BitString, size_t BitStringSize = sizeof( BitString ), typename UInt = unsigned >
class split_bitstring
{
public:
typedef BitString bitstring; ///< Bit-string type
- typedef UInt uint_type; ///< Bit-string portion type
+ typedef UInt uint_type; ///< Result type of \p cut() function
static CDS_CONSTEXPR size_t const c_bitstring_size = BitStringSize ? BitStringSize : sizeof( BitString ); ///< size of \p BitString in bytes
- /// Minimum count of bits to be cut
- static CDS_CONSTEXPR size_t const c_nMinBitCut = 1;
-
//@cond
- static CDS_CONSTEXPR size_t const c_nHashSize = (c_bitstring_size + sizeof(uint_type) - 1) / sizeof(uint_type);
- static CDS_CONSTEXPR size_t const c_nBitPerByte = 8;
- static CDS_CONSTEXPR size_t const c_nBitPerHash = c_bitstring_size * c_nBitPerByte;
- static CDS_CONSTEXPR size_t const c_nBitPerInt = sizeof( uint_type ) * c_nBitPerByte;
+ static CDS_CONSTEXPR unsigned const c_nBitPerByte = 8;
//@endcond
public:
/// Initializises the splitter with reference to \p h and zero start bit offset
explicit split_bitstring( bitstring const& h )
- : m_ptr(reinterpret_cast<uint_type const*>( &h ))
- , m_offset(0)
- , m_first( m_ptr )
-# ifdef _DEBUG
- , m_last( m_ptr + c_nHashSize )
-# endif
+ : cur_( reinterpret_cast<uint8_t const*>( &h ) )
+ , offset_( 0 )
+ , first_( cur_ )
+ , last_( cur_ + c_bitstring_size )
{}
/// Initializises the splitter with reference to \p h and start bit offset \p nBitOffset
split_bitstring( bitstring const& h, size_t nBitOffset )
- : m_ptr( reinterpret_cast<uint_type const*>( &h ) + nBitOffset / c_nBitPerInt )
- , m_offset( nBitOffset )
- , m_first( reinterpret_cast<uint_type const*>(&h))
-# ifdef _DEBUG
- , m_last( m_first + c_nHashSize )
-# endif
+ : cur_( reinterpret_cast<uint8_t const*>( &h ) + nBitOffset / c_nBitPerByte )
+ , offset_( nBitOffset % c_nBitPerByte )
+ , first_( reinterpret_cast<uint8_t const*>( &h ) )
+ , last_( first_ + c_bitstring_size )
{}
-
/// Returns \p true if end-of-string is not reached yet
explicit operator bool() const
{
/// Returns \p true if end-of-stream encountered
bool eos() const
{
- return m_offset >= c_nBitPerHash;
+ return cur_ >= last_;
}
- /// Cuts next \p nBits from bit-string
+ /// Cuts next \p count bits from bit-string
/**
- Precondition: <tt>nBits <= sizeof(uint_type) * 8</tt>
-
- This function does not manage out-of-bound condition.
+ For performance reason, the function does not manage out-of-bound condition.
To control that use \p safe_cut().
*/
- uint_type cut( size_t nBits )
+ uint_type cut( unsigned count )
{
- assert( !eos());
- assert( nBits <= c_nBitPerInt );
- assert( m_offset + nBits <= c_nBitPerHash );
-# ifdef _DEBUG
- assert( m_ptr < m_last );
-# endif
- uint_type result;
-
- size_t const nRest = c_nBitPerInt - m_offset % c_nBitPerInt;
- m_offset += nBits;
- if ( nBits < nRest ) {
- result = static_cast<uint_type>( *m_ptr << ( nRest - nBits ));
- result = static_cast<uint_type>( result >> ( c_nBitPerInt - nBits ));
+ assert( !eos() );
+
+ uint_type result = 0;
+# if defined( CDS_ARCH_LITTLE_ENDIAN )
+ for ( unsigned done = 0; done < count; ) {
+ assert( cur_ < last_ );
+ unsigned bits = count - done;
+ if ( bits > c_nBitPerByte - offset_ )
+ bits = c_nBitPerByte - offset_;
+
+ result |= static_cast<uint_type>(( *cur_ >> offset_ ) & (( 1 << bits ) - 1 )) << done;
+
+ offset_ += bits;
+ assert( offset_ <= c_nBitPerByte );
+ if ( offset_ == c_nBitPerByte ) {
+ offset_ = 0;
+ ++cur_;
+ }
+ done += bits;
}
- else if ( nBits == nRest ) {
- result = static_cast<uint_type>( *m_ptr >> ( c_nBitPerInt - nRest ));
- ++m_ptr;
- assert( m_offset % c_nBitPerInt == 0 );
- }
- else {
- uint_type const lsb = static_cast<uint_type>( *m_ptr >> ( c_nBitPerInt - nRest ));
- nBits -= nRest;
- ++m_ptr;
-
- result = static_cast<uint_type>( *m_ptr << ( c_nBitPerInt - nBits ));
- result = static_cast<uint_type>( result >> ( c_nBitPerInt - nBits ));
- result = static_cast<uint_type>( (result << nRest) + lsb );
+# else
+ while ( count ) {
+ assert( cur_ < last_ );
+
+ unsigned bits = count <= ( c_nBitPerByte - offset_ ) ? count : c_nBitPerByte - offset_;
+
+ result = ( result << bits ) | (( *cur_ >> offset_ ) & ( ( 1 << bits ) - 1 ));
+
+ offset_ += bits;
+ assert( offset_ <= c_nBitPerByte );
+ if ( offset_ == c_nBitPerByte ) {
+ offset_ = 0;
+ ++cur_;
+ }
+ count -= bits;
}
+# endif
- assert( m_offset <= c_nBitPerHash );
-# ifdef _DEBUG
- assert( m_ptr <= m_last );
-# endif
return result;
}
- /// Cuts up to \p nBits from the bit-string
+ /// Cuts up to \p count from the bit-string
/**
- Analog of \p cut() but if \p nBits is more than the rest of bit-string,
+ Safe analog of \p cut() but if \p count is more than the rest of bit-string,
only the rest is returned.
- If \p eos() is \p true the function returns 0.
+ When \p eos() condition is met the function returns 0.
*/
- uint_type safe_cut( size_t nBits )
+ uint_type safe_cut( unsigned count )
{
if ( eos())
return 0;
- assert( nBits <= sizeof(uint_type) * c_nBitPerByte );
-
- if ( m_offset + nBits > c_nBitPerHash )
- nBits = c_nBitPerHash - m_offset;
- return nBits ? cut( nBits ) : 0;
+ unsigned const rest = static_cast<unsigned>( last_ - cur_ - 1 ) * c_nBitPerByte + ( c_nBitPerByte - offset_ );
+ if ( rest < count )
+ count = rest;
+ return count ? cut( count ) : 0;
}
/// Resets the splitter
void reset() CDS_NOEXCEPT
{
- m_ptr = m_first;
- m_offset = 0;
+ cur_ = first_;
+ offset_ = 0;
}
/// Returns pointer to source bitstring
bitstring const * source() const
{
- return reinterpret_cast<bitstring const *>( m_first );
+ return reinterpret_cast<bitstring const *>( first_ );
+ }
+
+ /// Returns current bit offset from beginning of bit-string
+ size_t bit_offset() const
+ {
+ return offset_ + (cur_ - first_) * c_nBitPerByte;
+ }
+
+ /// Returns how many bits remain
+ size_t rest_count() const
+ {
+ return c_bitstring_size * c_nBitPerByte - bit_offset();
+ }
+
+ /// Returns \p true for any argument
+ static CDS_CONSTEXPR bool is_correct( unsigned /*count*/ )
+ {
+ return true;
+ }
+
+ private:
+ //@cond
+ uint8_t const* cur_;
+ unsigned offset_;
+ uint8_t const* const first_;
+ uint8_t const* const last_;
+ //@endcond
+ };
+
+ /// Simplified \p split_bitstring algorithm when \p count is multiple of 8
+ template <typename BitString, size_t BitStringSize = sizeof( BitString ), typename UInt = unsigned >
+ class byte_splitter
+ {
+ public:
+ typedef BitString bitstring; ///< Bit-string type
+ typedef UInt uint_type; ///< Result type of \p cut() function
+ static CDS_CONSTEXPR size_t const c_bitstring_size = BitStringSize ? BitStringSize : sizeof( BitString ); ///< size of \p BitString in bytes
+
+ //@cond
+ static CDS_CONSTEXPR unsigned const c_nBitPerByte = 8;
+ //@endcond
+
+ public:
+ /// Initializises the splitter with reference to \p h and zero start bit offset
+ explicit byte_splitter( bitstring const& h )
+ : cur_( reinterpret_cast<uint8_t const*>( &h ) )
+ , first_( cur_ )
+ , last_( cur_ + c_bitstring_size )
+ {}
+
+ /// Initializises the splitter with reference to \p h and start bit offset \p nBitOffset
+ byte_splitter( bitstring const& h, size_t nBitOffset )
+ : cur_( reinterpret_cast<uint8_t const*>( &h ) + nBitOffset / c_nBitPerByte )
+ , first_( reinterpret_cast<uint8_t const*>( &h ) )
+ , last_( first_ + c_bitstring_size )
+ {
+ assert( is_correct( static_cast<unsigned>( nBitOffset )));
+ assert( !eos());
+ }
+
+ /// Returns \p true if end-of-string is not reached yet
+ explicit operator bool() const
+ {
+ return !eos();
+ }
+
+ /// Returns \p true if end-of-stream encountered
+ bool eos() const
+ {
+ return cur_ >= last_;
+ }
+
+ /// Cuts next \p count bits (must be multiplier of 8) from bit-string
+ /**
+ For performance reason, the function does not manage out-of-bound condition.
+ To control that use \p safe_cut().
+ */
+ uint_type cut( unsigned count )
+ {
+ assert( !eos() );
+ assert( is_correct( count ) );
+
+ uint_type result = 0;
+
+# if defined( CDS_ARCH_LITTLE_ENDIAN )
+ for ( unsigned i = 0; i < count; i += c_nBitPerByte ) {
+ result |= static_cast<uint_type>( *cur_ ) << i;
+ ++cur_;
+ }
+# else
+ for ( ; count; count -= c_nBitPerByte ) {
+ result = ( result << c_nBitPerByte ) | *cur_;
+ ++cur_;
+ }
+# endif
+
+ return result;
+ }
+
+ /// Cuts up to \p count from the bit-string
+ /**
+ Safe analog of \p cut(): if \p count is more than the rest of bit-string,
+ only the rest is returned.
+ When \p eos() condition is met the function returns 0.
+ */
+ uint_type safe_cut( unsigned count )
+ {
+ if ( eos())
+ return 0;
+
+ unsigned const rest = static_cast<unsigned>( last_ - cur_ - 1 ) * c_nBitPerByte;
+ if ( rest < count )
+ count = rest;
+ return count ? cut( count ) : 0;
+ }
+
+ /// Resets the splitter
+ void reset() CDS_NOEXCEPT
+ {
+ cur_ = first_;
+ }
+
+ /// Returns pointer to source bitstring
+ bitstring const* source() const
+ {
+ return reinterpret_cast<bitstring const *>( first_ );
}
/// Returns current bit offset from beginning of bit-string
size_t bit_offset() const
{
- return m_offset;
+ return (cur_ - first_) * c_nBitPerByte;
+ }
+
+ /// Returns how many bits remain
+ size_t rest_count() const
+ {
+ return c_bitstring_size * c_nBitPerByte - bit_offset();
+ }
+
+ /// Checks if \p count is multiple of 8
+ static CDS_CONSTEXPR bool is_correct( unsigned count )
+ {
+ return count % 8 == 0;
}
private:
//@cond
- uint_type const* m_ptr; ///< current position in the hash
- size_t m_offset; ///< current bit offset in bit-string
- uint_type const* m_first; ///< first position
-# ifdef _DEBUG
- uint_type const* m_last; ///< last position
-# endif
+ uint8_t const* cur_;
+ uint8_t const* const first_;
+ uint8_t const* const last_;
//@endcond
};
+
+ /// Cuts a bit sequence from a number
+ /**
+ The splitter can be used as an iterator over bit representation of the number of type \p Int.
+ Each call of \p cut() or \p safe_cut() cuts the bit count specified
+ and keeps the position inside the number for the next call.
+ */
+ template <typename Int>
+ class number_splitter
+ {
+ public:
+ typedef Int int_type; ///< Number type
+ typedef Int uint_type; ///< Result type of \p cut() function
+
+ //@cond
+ static CDS_CONSTEXPR unsigned const c_nBitPerByte = 8;
+ //@endcond
+
+ public:
+ /// Initalizes the splitter with nymber \p n and initial bit offset 0
+ explicit number_splitter( int_type n )
+ : number_( n )
+ , shift_( 0 )
+ {}
+
+ /// Initalizes the splitter with nymber \p n and initial bit offset \p initial_offset
+ number_splitter( int_type n, size_t initial_offset )
+ : number_( n )
+ , shift_( static_cast<unsigned>( initial_offset ))
+ {
+ assert( initial_offset < sizeof( int_type ) * c_nBitPerByte );
+ }
+
+ /// Returns \p true if end-of-string is not reached yet
+ explicit operator bool() const
+ {
+ return !eos();
+ }
+
+ /// Returns \p true if end-of-stream encountered
+ bool eos() const
+ {
+ return shift_ >= sizeof( int_type ) * c_nBitPerByte;
+ }
+
+ /// Cuts next \p count bits (must be multiplier of 8) from the number
+ /**
+ For performance reason, the function does not manage out-of-bound condition.
+ To control that use \p safe_cut().
+ */
+ int_type cut( unsigned count )
+ {
+ assert( !eos() );
+ assert( is_correct( count ));
+
+ int_type result = ( number_ >> shift_ ) & (( 1 << count ) - 1 );
+ shift_ += count;
+
+ return result;
+ }
+
+ /// Cuts up to \p count from the bit-string
+ /**
+ Safe analog of \p cut(): if \p count is more than the rest of \p int_type,
+ only the rest is returned.
+ When \p eos() condition is met the function returns 0.
+ */
+ int_type safe_cut( unsigned count )
+ {
+ if ( eos() )
+ return 0;
+
+ unsigned rest = static_cast<unsigned>( rest_count() );
+ if ( rest < count )
+ count = rest;
+ return count ? cut( count ) : 0;
+ }
+
+ /// Resets the splitter
+ void reset() CDS_NOEXCEPT
+ {
+ shift_ = 0;
+ }
+
+ /// Returns initial number
+ int_type source() const
+ {
+ return number_;
+ }
+
+ /// Returns current bit offset from beginning of the number
+ size_t bit_offset() const
+ {
+ return shift_;
+ }
+
+ /// Returns how many bits remain
+ size_t rest_count() const
+ {
+ return sizeof( int_type ) * c_nBitPerByte - shift_;
+ }
+
+ /// Checks if \p count is multiple of 8
+ static CDS_CONSTEXPR bool is_correct( unsigned count )
+ {
+ return count < sizeof( int_type ) * c_nBitPerByte;
+ }
+
+ private:
+ //@cond
+ int_type const number_;
+ unsigned shift_;
+ //@endcond
+ };
+
    /// Metafunction to select the most suitable splitter for type \p BitString of size \p BitStringSize
    /**
        By default the general-purpose \p split_bitstring is selected.
        The specializations below select the faster \p number_splitter
        for the built-in integral types (character types excluded).
    */
    template <typename BitString, size_t BitStringSize >
    struct select_splitter
    {
        typedef split_bitstring< BitString, BitStringSize > type; ///< metafunction result
    };

    //@cond
    // Specializes select_splitter<T, sizeof(T)> to number_splitter<T>;
    // a user-supplied BitStringSize != sizeof(T) still gets split_bitstring
#   define CDS_SELECT_NUMBER_SPLITTER( num_type ) \
        template <> struct select_splitter<num_type, sizeof(num_type)> { typedef number_splitter<num_type> type; }

    CDS_SELECT_NUMBER_SPLITTER( int );
    CDS_SELECT_NUMBER_SPLITTER( unsigned );
    CDS_SELECT_NUMBER_SPLITTER( short );
    CDS_SELECT_NUMBER_SPLITTER( unsigned short );
    CDS_SELECT_NUMBER_SPLITTER( long );
    CDS_SELECT_NUMBER_SPLITTER( unsigned long );
    CDS_SELECT_NUMBER_SPLITTER( long long );
    CDS_SELECT_NUMBER_SPLITTER( unsigned long long );

#   undef CDS_SELECT_NUMBER_SPLITTER
    //@endcond
+
}} // namespace cds::algo
#endif // #ifndef CDSLIB_ALGO_SPLIT_BITSTRING_H
# error Unknown value of CDS_COMPILER macro
#endif
-#ifndef CDS_STDCALL
-# define CDS_STDCALL
-#endif
-
#ifndef CDS_EXPORT_API
# define CDS_EXPORT_API
#endif
# endif
#endif
-#if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86
-# define CDS_STDCALL __attribute__((stdcall))
-#else
-# define CDS_STDCALL
+// Byte order
+#if !defined(CDS_ARCH_LITTLE_ENDIAN) && !defined(CDS_ARCH_BIG_ENDIAN)
+# ifdef __BYTE_ORDER__
+# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+# define CDS_ARCH_LITTLE_ENDIAN
+# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+# define CDS_ARCH_BIG_ENDIAN
+# endif
+# else
+# warning "Undefined byte order for current architecture (no __BYTE_ORDER__ preprocessor definition)"
+# endif
#endif
#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS
# define __attribute__( _x )
-# define CDS_STDCALL __stdcall
-#else
-# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86
-# define CDS_STDCALL __attribute__((stdcall))
-# else
-# define CDS_STDCALL
-# endif
#endif
#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS
# endif
#endif
+// Byte order
+#if !defined(CDS_ARCH_LITTLE_ENDIAN) && !defined(CDS_ARCH_BIG_ENDIAN)
+# if CDS_OS_INTERFACE == CDS_OSI_WINDOWS
+# define CDS_ARCH_LITTLE_ENDIAN
+# else
+# ifdef __BYTE_ORDER__
+# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+# define CDS_ARCH_LITTLE_ENDIAN
+# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+# define CDS_ARCH_BIG_ENDIAN
+# endif
+# else
+# warning "Undefined byte order for current architecture (no __BYTE_ORDER__ preprocessor definition)"
+# endif
+# endif
+#endif
#include <cds/compiler/icl/compiler_barriers.h>
#define __attribute__( _x )
-#define CDS_STDCALL __stdcall
-
#ifdef CDS_BUILD_LIB
# define CDS_EXPORT_API __declspec(dllexport)
#else
// double-width CAS support
//#define CDS_DCAS_SUPPORT
+// Byte order
// All platforms targeted by MSVC (x86, x64, ARM on Windows) are little-endian.
+#if !defined(CDS_ARCH_LITTLE_ENDIAN) && !defined(CDS_ARCH_BIG_ENDIAN)
+# define CDS_ARCH_LITTLE_ENDIAN
+#endif
+
#include <cds/compiler/vc/compiler_barriers.h>
//@endcond
template <size_t Size>
using hash_size = cds::intrusive::feldman_hashset::hash_size< Size >;
    /// Hash splitter option: re-exports the intrusive-level option for map users
    /**
        @copydetails cds::container::feldman_hashmap::traits::hash_splitter
    */
    template <typename Splitter>
    using hash_splitter = cds::intrusive::feldman_hashset::hash_splitter< Splitter >;
+
/// \p FeldmanHashMap traits
struct traits
/// Hash splitter
/**
This trait specifies hash bit-string splitter algorithm.
- By default, \p cds::algo::split_bitstring< HashType, HashSize > is used
+ By default, \p cds::algo::number_splitter is used if \p HashType is a number,
+ \p cds::algo::split_bitstring otherwise.
*/
typedef cds::opt::none hash_splitter;
typedef typename std::conditional<
std::is_same< typename traits::hash_splitter, cds::opt::none >::value,
- feldman_hashset::details::hash_splitter< hash_type, c_hash_size >,
+ typename cds::algo::select_splitter< hash_type, c_hash_size >::type,
typename traits::hash_splitter
>::type hash_splitter;
struct traverse_data {
hash_splitter splitter;
array_node * pArr;
- size_t nSlot;
+ typename hash_splitter::uint_type nSlot;
size_t nHeight;
traverse_data( hash_type const& hash, multilevel_array& arr )
{
splitter.reset();
pArr = arr.head();
- nSlot = splitter.cut( arr.metrics().head_node_size_log );
+ nSlot = splitter.cut( static_cast<unsigned>( arr.metrics().head_node_size_log ));
+ assert( nSlot < arr.metrics().head_node_size );
nHeight = 1;
}
};
multilevel_array(size_t head_bits, size_t array_bits )
: m_Metrics(feldman_hashset::details::metrics::make( head_bits, array_bits, c_hash_size ))
, m_Head( alloc_head_node())
- {}
+ {
+ assert( hash_splitter::is_correct( static_cast<unsigned>( metrics().head_node_size_log )));
+ assert( hash_splitter::is_correct( static_cast<unsigned>( metrics().array_node_size_log )));
+ }
~multilevel_array()
{
back_off bkoff;
while (true) {
node_ptr slot = pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire);
- if (slot.bits() == flag_array_node) {
+ if ( slot.bits() == flag_array_node ) {
// array node, go down the tree
assert(slot.ptr() != nullptr);
assert( !pos.splitter.eos());
- pos.nSlot = pos.splitter.cut( metrics().array_node_size_log );
+ pos.nSlot = pos.splitter.cut( static_cast<unsigned>( metrics().array_node_size_log ));
assert( pos.nSlot < metrics().array_node_size );
pos.pArr = to_array(slot.ptr());
++pos.nHeight;
return alloc_array_node(array_node_size(), pParent, idxParent);
}
        // Frees an array node previously created by alloc_array_node().
        // The size parameter is kept only for interface compatibility: the node is
        // destroyed as a single object (Delete(parr, 1)), its element count is not needed.
        static void free_array_node( array_node * parr, size_t /*nSize*/ )
        {
            cxx_array_node_allocator().Delete( parr, 1 );
        }
union converter {
        // Convenience overload: expands the slot at the current traverse position.
        // Precondition: the hash splitter must still have bits left — otherwise the
        // tree cannot grow deeper at this position (checked by the assert).
        bool expand_slot( traverse_data& pos, node_ptr current)
        {
            assert( !pos.splitter.eos() );
            return expand_slot( pos.pArr, pos.nSlot, current, pos.splitter.bit_offset());
        }
return false;
}
- size_t idx = hash_splitter( hash_accessor()(*current.ptr()), nOffset ).cut( m_Metrics.array_node_size_log );
+ typename hash_splitter::uint_type idx = hash_splitter( hash_accessor()(*current.ptr()), nOffset ).cut(
+ static_cast<unsigned>( m_Metrics.array_node_size_log ));
pArr->nodes[idx].store(current, memory_model::memory_order_release);
cur = cur | flag_array_converting;
return false;
}
- // the slot must be expanded
- base_class::expand_slot( pos, slot );
+ if ( !pos.splitter.eos() ) {
+ // the slot must be expanded
+ base_class::expand_slot( pos, slot );
+ }
+ else
+ return false;
}
else {
// the slot is empty, try to insert data node
}
if ( bInsert ) {
- // the slot must be expanded
- base_class::expand_slot( pos, slot );
+ if ( !pos.splitter.eos() ) {
+ // the slot must be expanded
+ base_class::expand_slot( pos, slot );
+ }
+ else
+ return std::make_pair( false, false );
}
else {
stats().onUpdateFailed();
test( s );
}
    // Checks the set with byte_splitter<int>: head/array node size 2^8 matches
    // the splitter's one-byte cut granularity.
    TEST_F( IntrusiveFeldmanHashSet_DHP, byte_cut )
    {
        struct traits: public ci::feldman_hashset::traits
        {
            typedef base_class::hash_accessor hash_accessor;
            typedef cds::algo::byte_splitter< int > hash_splitter;
            typedef cmp compare;
            typedef std::less<int> less;
            typedef mock_disposer disposer;
            typedef simple_item_counter item_counter;
        };

        typedef ci::FeldmanHashSet< gc_type, int_item, traits > set_type;

        set_type s( 8, 8 );
        test( s );
    }
+
    // Same byte_splitter check but with an explicit hash_size smaller than the key type.
    TEST_F( IntrusiveFeldmanHashSet_DHP, byte_cut_explicit_hash_size )
    {
        struct traits: public ci::feldman_hashset::traits
        {
            typedef base_class::hash_accessor2 hash_accessor;
            typedef cds::algo::byte_splitter< key_val > hash_splitter;
            enum: size_t {
                hash_size = sizeof( std::declval<key_val>().nKey )
            };
            typedef base_class::cmp2 compare;
            typedef mock_disposer disposer;
            typedef ci::feldman_hashset::stat<> stat;
        };

        typedef ci::FeldmanHashSet< gc_type, int_item2, traits > set_type;

        set_type s( 8, 8 );
        test( s );
    }
+
} // namespace
test( s );
}
    // Checks the HP-based set with byte_splitter<int> (8-bit cuts, 2^8-slot nodes).
    TEST_F( IntrusiveFeldmanHashSet_HP, byte_cut )
    {
        struct traits: public ci::feldman_hashset::traits
        {
            typedef base_class::hash_accessor hash_accessor;
            typedef cds::algo::byte_splitter< int > hash_splitter;
            typedef cmp compare;
            typedef std::less<int> less;
            typedef mock_disposer disposer;
            typedef simple_item_counter item_counter;
        };

        typedef ci::FeldmanHashSet< gc_type, int_item, traits > set_type;

        set_type s( 8, 8 );
        test( s );
    }
+
    // Same byte_splitter check for HP GC with an explicit hash_size.
    TEST_F( IntrusiveFeldmanHashSet_HP, byte_cut_explicit_hash_size )
    {
        struct traits: public ci::feldman_hashset::traits
        {
            typedef base_class::hash_accessor2 hash_accessor;
            typedef cds::algo::byte_splitter< key_val > hash_splitter;
            enum: size_t {
                hash_size = sizeof( std::declval<key_val>().nKey )
            };
            typedef base_class::cmp2 compare;
            typedef mock_disposer disposer;
            typedef ci::feldman_hashset::stat<> stat;
        };

        typedef ci::FeldmanHashSet< gc_type, int_item2, traits > set_type;

        set_type s( 8, 8 );
        test( s );
    }
+
} // namespace
this->test( s );
}
// Typed (RCU) variant of the byte_splitter test.
TYPED_TEST_P( IntrusiveFeldmanHashSet, byte_cut )
{
    struct traits: public ci::feldman_hashset::traits
    {
        typedef typename TestFixture::hash_accessor hash_accessor;
        typedef cds::algo::byte_splitter< int > hash_splitter;
        typedef typename TestFixture::cmp compare;
        typedef typename TestFixture::mock_disposer disposer;
    };

    typedef ci::FeldmanHashSet< typename TestFixture::rcu_type, typename TestFixture::int_item, traits > set_type;

    set_type s( 8, 8 );
    test( s );
}
+
// Typed (RCU) variant of the byte_splitter test with an explicit hash_size.
TYPED_TEST_P( IntrusiveFeldmanHashSet, byte_cut_explicit_hash_size )
{
    struct traits: public ci::feldman_hashset::traits
    {
        typedef typename TestFixture::hash_accessor2 hash_accessor;
        typedef cds::algo::byte_splitter< typename TestFixture::key_val > hash_splitter;
        enum: size_t {
            hash_size = sizeof( std::declval<typename TestFixture::key_val>().nKey )
        };
        typedef typename TestFixture::cmp2 compare;
        typedef typename TestFixture::mock_disposer disposer;
        typedef ci::feldman_hashset::stat<> stat;
    };

    typedef ci::FeldmanHashSet< typename TestFixture::rcu_type, typename TestFixture::int_item2, traits > set_type;

    set_type s( 8, 8 );
    test( s );
}
+
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
// "No test named <test_name> can be found in this test case"
REGISTER_TYPED_TEST_CASE_P( IntrusiveFeldmanHashSet,
- compare, less, cmpmix, backoff, stat, explicit_hash_size
+ compare, less, cmpmix, backoff, stat, explicit_hash_size, byte_cut, byte_cut_explicit_hash_size
);
test( m );
}
    // Map-level byte_splitter test; verifies 2^8-slot node sizes before running the suite.
    TEST_F( FeldmanHashMap_DHP, byte_cut )
    {
        typedef cc::FeldmanHashMap< gc_type, key_type, value_type,
            typename cc::feldman_hashmap::make_traits<
                cds::opt::compare< cmp >
                , cc::feldman_hashmap::hash_splitter< cds::algo::byte_splitter< key_type >>
            >::type
        > map_type;

        map_type m( 8, 8 );
        EXPECT_EQ( m.head_size(), static_cast<size_t>( 1 << 8 ));
        EXPECT_EQ( m.array_node_size(), static_cast<size_t>( 1 << 8 ));
        test( m );
    }
+
    // Map-level byte_splitter test with an explicit (non-default) hash_size.
    TEST_F( FeldmanHashMap_DHP, byte_cut_explicit_key_size )
    {
        struct map_traits: public cc::feldman_hashmap::traits
        {
            enum: size_t {
                hash_size = sizeof(int) + sizeof( uint16_t)
            };
            typedef cds::algo::byte_splitter< key_type2, hash_size > hash_splitter;
            typedef hash2 hash;
            typedef less2 less;
            typedef cc::feldman_hashmap::stat<> stat;
        };
        typedef cc::FeldmanHashMap< gc_type, key_type2, value_type, map_traits > map_type;

        map_type m( 8, 8 );
        EXPECT_EQ( m.head_size(), static_cast<size_t>(1 << 8));
        EXPECT_EQ( m.array_node_size(), static_cast<size_t>(1 << 8));
        test( m );
    }
+
} // namespace
test( m );
}
    // HP GC variant of the map-level byte_splitter test.
    TEST_F( FeldmanHashMap_HP, byte_cut )
    {
        typedef cc::FeldmanHashMap< gc_type, key_type, value_type,
            typename cc::feldman_hashmap::make_traits<
                cds::opt::compare< cmp >
                , cc::feldman_hashmap::hash_splitter< cds::algo::byte_splitter< key_type >>
            >::type
        > map_type;

        map_type m( 8, 8 );
        EXPECT_EQ( m.head_size(), static_cast<size_t>( 1 << 8 ));
        EXPECT_EQ( m.array_node_size(), static_cast<size_t>( 1 << 8 ));
        test( m );
    }
+
    // HP GC variant of the map-level byte_splitter test with explicit hash_size.
    TEST_F( FeldmanHashMap_HP, byte_cut_explicit_key_size )
    {
        struct map_traits: public cc::feldman_hashmap::traits
        {
            enum: size_t {
                hash_size = sizeof(int) + sizeof( uint16_t)
            };
            typedef cds::algo::byte_splitter< key_type2, hash_size > hash_splitter;
            typedef hash2 hash;
            typedef less2 less;
            typedef cc::feldman_hashmap::stat<> stat;
        };
        typedef cc::FeldmanHashMap< gc_type, key_type2, value_type, map_traits > map_type;

        map_type m( 8, 8 );
        EXPECT_EQ( m.head_size(), static_cast<size_t>(1 << 8));
        EXPECT_EQ( m.array_node_size(), static_cast<size_t>(1 << 8));
        test( m );
    }
+
} // namespace
this->test( m );
}
    // Typed (RCU) variant of the map-level byte_splitter test.
    TYPED_TEST_P( FeldmanHashMap, byte_cut )
    {
        typedef typename TestFixture::rcu_type rcu_type;
        typedef typename TestFixture::key_type key_type;
        typedef typename TestFixture::value_type value_type;

        typedef cc::FeldmanHashMap< rcu_type, key_type, value_type,
            typename cc::feldman_hashmap::make_traits<
                cds::opt::compare< typename TestFixture::cmp >
                , cc::feldman_hashmap::hash_splitter< cds::algo::byte_splitter< key_type >>
            >::type
        > map_type;

        map_type m( 8, 8 );
        this->test( m );
    }
+
    // Typed (RCU) variant of the map-level byte_splitter test with explicit hash_size.
    TYPED_TEST_P( FeldmanHashMap, byte_cut_explicit_key_size )
    {
        typedef typename TestFixture::rcu_type rcu_type;
        typedef typename TestFixture::key_type2 key_type2;
        typedef typename TestFixture::value_type value_type;

        struct map_traits: public cc::feldman_hashmap::traits
        {
            enum: size_t {
                hash_size = sizeof( int ) + sizeof( uint16_t )
            };
            typedef cds::algo::byte_splitter< key_type2, hash_size > hash_splitter;
            typedef typename TestFixture::hash2 hash;
            typedef typename TestFixture::less2 less;
            typedef cc::feldman_hashmap::stat<> stat;
        };
        typedef cc::FeldmanHashMap< rcu_type, key_type2, value_type, map_traits > map_type;

        map_type m( 8, 8 );
        this->test( m );
    }
+
+
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
// "No test named <test_name> can be found in this test case"
REGISTER_TYPED_TEST_CASE_P( FeldmanHashMap,
- defaulted, compare, less, cmpmix, backoff, stat, explicit_key_size
+ defaulted, compare, less, cmpmix, backoff, stat, explicit_key_size, byte_cut, byte_cut_explicit_key_size
);
} // namespace
#include <gtest/gtest.h>
namespace {
- class Split_bitstrig : public ::testing::Test
+ bool is_big_endian()
{
- protected:
- bool is_big_endian()
- {
- union {
- uint32_t ui;
- uint8_t ch;
- } byte_order;
- byte_order.ui = 0xFF000001;
+ union {
+ uint32_t ui;
+ uint8_t ch;
+ } byte_order;
+ byte_order.ui = 0xFF000001;
- return byte_order.ch != 0x01;
- }
+ return byte_order.ch != 0x01;
+ }
+ class Split_bitstrig : public ::testing::Test
+ {
+ protected:
void cut_uint_le()
{
- typedef cds::algo::split_bitstring< size_t > split_bitstring;
+ typedef cds::algo::split_bitstring< size_t, 0, size_t > split_bitstring;
size_t src = sizeof(src) == 8 ? 0xFEDCBA9876543210 : 0x76543210;
- split_bitstring splitter(src);
+ split_bitstring splitter( src );
size_t res;
// Trivial case
- ASSERT_FALSE( splitter.eos());
+ ASSERT_FALSE( splitter.eos() );
ASSERT_FALSE( !splitter );
- res = splitter.cut(sizeof(src) * 8);
+ res = splitter.cut( sizeof( src ) * 8 );
EXPECT_EQ( res, src );
- ASSERT_TRUE( splitter.eos());
+ ASSERT_TRUE( splitter.eos() );
ASSERT_TRUE( !splitter );
- EXPECT_EQ(splitter.safe_cut(sizeof(src) * 8), 0u );
- ASSERT_TRUE( splitter.eos());
+ EXPECT_EQ( splitter.safe_cut( sizeof( src ) * 8 ), 0u );
+ ASSERT_TRUE( splitter.eos() );
ASSERT_TRUE( !splitter );
splitter.reset();
- ASSERT_FALSE( splitter.eos());
+ ASSERT_FALSE( splitter.eos() );
ASSERT_FALSE( !splitter );
- res = splitter.cut(sizeof(src) * 8);
+ res = splitter.cut( sizeof( src ) * 8 );
EXPECT_EQ( res, src );
- ASSERT_TRUE( splitter.eos());
+ ASSERT_TRUE( splitter.eos() );
ASSERT_TRUE( !splitter );
- EXPECT_EQ( splitter.safe_cut(sizeof(src) * 8), 0u );
- ASSERT_TRUE( splitter.eos());
+ EXPECT_EQ( splitter.safe_cut( sizeof( src ) * 8 ), 0u );
+ ASSERT_TRUE( splitter.eos() );
ASSERT_TRUE( !splitter );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), sizeof( src ) * 8 );
+
// Cut each hex digit
splitter.reset();
for ( size_t i = 0; i < sizeof(size_t) * 2; ++i ) {
}
ASSERT_TRUE( splitter.eos());
ASSERT_FALSE( splitter );
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), sizeof( src ) * 8 );
// by one bit
{
splitter.reset();
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), sizeof( src ) * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
res = 0;
for ( size_t i = 0; i < sizeof(size_t) * 8; ++i ) {
ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
- res = res + (splitter.cut( 1 ) << i);
+ res |= splitter.cut( 1 ) << i;
}
ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( res, src );
+
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), sizeof( src ) * 8 );
}
// random cut
{
for ( size_t k = 0; k < 100; ++k ) {
splitter.reset();
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), sizeof( src ) * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
res = 0;
size_t shift = 0;
while ( splitter ) {
ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
int bits = std::rand() % 16;
- res = res + ( splitter.safe_cut( bits ) << shift );
+ res |= splitter.safe_cut( bits ) << shift;
shift += bits;
}
ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( res, src );
+
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), sizeof( src ) * 8 );
}
}
}
// Verifies split_bitstring over a size_t source with big-endian bit order:
// whole-word cut, per-hex-digit cut, per-bit cut, and randomized cut widths.
// NOTE(review): some loop bodies appear elided in this diff view; the
// assertions inside them are unchanged by this patch — confirm against the full file.
void cut_uint_be()
{
- typedef cds::algo::split_bitstring< size_t > split_bitstring;
+ typedef cds::algo::split_bitstring< size_t, 0, size_t > split_bitstring;
size_t src = sizeof(src) == 8 ? 0xFEDCBA9876543210 : 0x76543210;
- split_bitstring splitter(src);
+ split_bitstring splitter( src );
size_t res;
// Trivial case
- ASSERT_FALSE( splitter.eos());
+ ASSERT_FALSE( splitter.eos() );
ASSERT_FALSE( !splitter );
- res = splitter.cut(sizeof(src) * 8);
+ res = splitter.cut( sizeof( src ) * 8 );
ASSERT_EQ( res, src );
- ASSERT_TRUE( splitter.eos());
+ ASSERT_TRUE( splitter.eos() );
ASSERT_TRUE( !splitter );
- EXPECT_EQ(splitter.safe_cut(sizeof(src) * 8), 0u );
- ASSERT_TRUE( splitter.eos());
+ EXPECT_EQ( splitter.safe_cut( sizeof( src ) * 8 ), 0u );
+ ASSERT_TRUE( splitter.eos() );
ASSERT_TRUE( !splitter );
splitter.reset();
- ASSERT_FALSE( splitter.eos());
+ ASSERT_FALSE( splitter.eos() );
ASSERT_FALSE( !splitter );
- res = splitter.cut(sizeof(src) * 8);
+ res = splitter.cut( sizeof( src ) * 8 );
EXPECT_EQ( res, src );
- ASSERT_TRUE( splitter.eos());
+ ASSERT_TRUE( splitter.eos() );
ASSERT_TRUE( !splitter );
- EXPECT_EQ(splitter.safe_cut(sizeof(src) * 8), 0u );
- ASSERT_TRUE( splitter.eos());
+ EXPECT_EQ( splitter.safe_cut( sizeof( src ) * 8 ), 0u );
+ ASSERT_TRUE( splitter.eos() );
ASSERT_TRUE( !splitter );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), sizeof( src ) * 8 );
+
// Cut each hex digit
splitter.reset();
for ( size_t i = 0; i < sizeof(size_t) * 2; ++i ) {
}
ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), sizeof( src ) * 8 );
// by one bit
{
splitter.reset();
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), sizeof( src ) * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
res = 0;
for ( size_t i = 0; i < sizeof(size_t) * 8; ++i ) {
ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
- res = (res << 1) + splitter.cut( 1 );
+ res = ( res << 1 ) | ( splitter.cut( 1 ) );
}
ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( res, src );
+
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), sizeof( src ) * 8 );
}
// random cut
{
for ( size_t k = 0; k < 100; ++k ) {
splitter.reset();
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), sizeof( src ) * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
res = 0;
while ( splitter ) {
ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
- int bits = std::rand() % 16;
- res = (res << bits) + splitter.safe_cut( bits );
+ unsigned bits = std::rand() % 16;
+ size_t shift = splitter.rest_count();
+ if ( shift > bits )
+ shift = bits;
+ res = (res << shift) | splitter.safe_cut( bits );
}
ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( res, src );
+
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), sizeof( src ) * 8 );
}
}
}
split_bitstring splitter(src);
uint64_t res;
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), sizeof( src ) * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
// Cut each hex digit
splitter.reset();
for ( size_t i = 0; i < sizeof(src) * 2; ++i ) {
}
ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), sizeof( src ) * 8 );
// by one bit
{
splitter.reset();
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), sizeof( src ) * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
res = 0;
for ( size_t i = 0; i < sizeof(src) * 8; ++i ) {
ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
- res = res + ( static_cast<uint64_t>(splitter.cut( 1 )) << i);
+ res += static_cast<uint64_t>(splitter.cut( 1 )) << i;
}
ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( res, src );
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), sizeof( src ) * 8 );
}
// random cut
{
for ( size_t k = 0; k < 100; ++k ) {
splitter.reset();
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), sizeof( src ) * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
res = 0;
size_t shift = 0;
while ( splitter ) {
ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
int bits = std::rand() % 16;
- res = res + ( static_cast<uint64_t>(splitter.safe_cut( bits )) << shift );
+ res += static_cast<uint64_t>(splitter.safe_cut( bits )) << shift;
shift += bits;
}
ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( res, src );
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), sizeof( src ) * 8 );
}
}
}
split_bitstring splitter(src);
uint64_t res;
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), sizeof( src ) * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
// Cut each hex digit
splitter.reset();
for ( size_t i = 0; i < sizeof(size_t) * 2; ++i ) {
// by one bit
{
splitter.reset();
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), sizeof( src ) * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
res = 0;
for ( size_t i = 0; i < sizeof(size_t) * 8; ++i ) {
ASSERT_FALSE( splitter.eos());
ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( res, src );
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), sizeof( src ) * 8 );
}
// random cut
{
for ( size_t k = 0; k < 100; ++k ) {
splitter.reset();
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), sizeof( src ) * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
res = 0;
while ( splitter ) {
ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
- int bits = std::rand() % 16;
- res = (res << bits) + splitter.safe_cut( bits );
+ unsigned bits = std::rand() % 16;
+ size_t shift = splitter.rest_count();
+ if ( shift > bits )
+ shift = bits;
+ res = ( res << shift ) | splitter.safe_cut( bits );
}
ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( res, src );
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), sizeof( src ) * 8 );
+ }
+ }
+ }
+
+ // Test helper: a 48-bit "bit string" stored as a 32-bit and a 16-bit half,
+ // used to exercise the splitter over a non-power-of-two sized type.
+ struct int48 {
+ uint32_t n32;
+ uint16_t n16;
+
+ friend bool operator ==( int48 lhs, int48 rhs )
+ {
+ return lhs.n32 == rhs.n32 && lhs.n16 == rhs.n16;
+ }
+
+ // Widens the 48-bit value to uint64_t according to the platform byte order,
+ // so it can be compared against the value reassembled from cut() portions.
+ uint64_t to64() const
+ {
+# ifdef CDS_ARCH_LITTLE_ENDIAN
+ return ( static_cast<uint64_t>( n16 ) << 32 ) + n32;
+# else
+ return ( static_cast<uint64_t>( n32 ) << 16 ) + n16;
+# endif
+ }
+ };
+ // sizeof( int48 ) is 8 due to padding; the logical bit-string size is 6 bytes
+ static constexpr size_t int48_size = 6;
+
+ // Verifies the splitter over the 6-byte int48 bit string on little-endian
+ // targets: whole cut, hex-digit cut, per-bit cut, and randomized cut widths.
+ void cut_int48_le()
+ {
+ int48 src;
+ src.n32 = 0x76543210;
+ src.n16 = 0xBA98;
+
+ uint64_t res;
+
+ {
+ typedef cds::algo::split_bitstring< int48, int48_size, size_t > split_bitstring;
+ split_bitstring splitter( src );
+
+ // Trivial case
+ ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( !splitter );
+ res = splitter.cut( int48_size * 8 );
+ EXPECT_EQ( res, src.to64() );
+ ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( !splitter );
+ EXPECT_EQ( splitter.safe_cut( int48_size * 8 ), 0u );
+ ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( !splitter );
+ splitter.reset();
+ ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( !splitter );
+ res = splitter.cut( int48_size * 8 );
+ EXPECT_EQ( res, src.to64() );
+ ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( !splitter );
+ EXPECT_EQ( splitter.safe_cut( int48_size * 8 ), 0u );
+ ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( !splitter );
+ }
+
+ typedef cds::algo::split_bitstring< int48, int48_size, size_t > split_bitstring;
+ split_bitstring splitter( src );
+
+ EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.rest_count(), int48_size * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
+ // Cut each hex digit
+ splitter.reset();
+ for ( size_t i = 0; i < int48_size * 2; ++i ) {
+ ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( !splitter );
+ // little-endian: digits come out least-significant first, i.e. 0,1,...,0x0B
+ ASSERT_EQ( splitter.cut( 4 ), i );
+ }
+ ASSERT_TRUE( splitter.eos() );
+ ASSERT_FALSE( splitter );
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), int48_size * 8 );
+
+ // by one bit
+ {
+ splitter.reset();
+ EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.rest_count(), int48_size * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
+ res = 0;
+ for ( size_t i = 0; i < int48_size * 8; ++i ) {
+ ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( !splitter );
+ res |= splitter.cut( 1 ) << i;
+ }
+ ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( !splitter );
+ EXPECT_EQ( res, src.to64() );
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), int48_size * 8 );
+ }
+
+ // random cut
+ {
+ for ( size_t k = 0; k < 100; ++k ) {
+ splitter.reset();
+ EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.rest_count(), int48_size * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
+ res = 0;
+ size_t shift = 0;
+ while ( splitter ) {
+ ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( !splitter );
+ int bits = std::rand() % 16;
+ res |= splitter.safe_cut( bits ) << shift;
+ shift += bits;
+ }
+ ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( !splitter );
+ EXPECT_EQ( res, src.to64() );
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), int48_size * 8 );
+ }
+ }
+ }
+
+ // Big-endian counterpart of cut_int48_le(): digits and bits come out
+ // most-significant first, so reassembly shifts left on each cut.
+ void cut_int48_be()
+ {
+ int48 src;
+ src.n32 = 0xBA987654;
+ src.n16 = 0x3210;
+
+ uint64_t res;
+
+ {
+ typedef cds::algo::split_bitstring< int48, int48_size, size_t > split_bitstring;
+ split_bitstring splitter( src );
+
+ // Trivial case
+ ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( !splitter );
+ res = splitter.cut( int48_size * 8 );
+ ASSERT_EQ( res, src.to64() );
+ ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( !splitter );
+ EXPECT_EQ( splitter.safe_cut( int48_size * 8 ), 0u );
+ ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( !splitter );
+ splitter.reset();
+ ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( !splitter );
+ res = splitter.cut( int48_size * 8 );
+ EXPECT_EQ( res, src.to64() );
+ ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( !splitter );
+ EXPECT_EQ( splitter.safe_cut( int48_size * 8 ), 0u );
+ ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( !splitter );
+ }
+
+ typedef cds::algo::split_bitstring< int48, int48_size, size_t > split_bitstring;
+ split_bitstring splitter( src );
+
+ EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.rest_count(), int48_size * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
+ // Cut each hex digit
+ splitter.reset();
+ for ( size_t i = 0; i < int48_size * 2; ++i ) {
+ ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( !splitter );
+ // big-endian: digits come out most-significant first, i.e. 0x0B,0x0A,...,0
+ EXPECT_EQ( splitter.cut( 4 ), 0x0B - i );
+ }
+ ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( !splitter );
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), int48_size * 8 );
+
+ // by one bit
+ {
+ splitter.reset();
+ EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.rest_count(), int48_size * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
+ res = 0;
+ for ( size_t i = 0; i < int48_size * 8; ++i ) {
+ ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( !splitter );
+ res = ( res << 1 ) | ( splitter.cut( 1 ) );
+ }
+ ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( !splitter );
+ EXPECT_EQ( res, src.to64() );
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), int48_size * 8 );
+ }
+
+ // random cut
+ {
+ for ( size_t k = 0; k < 100; ++k ) {
+ splitter.reset();
+ EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.rest_count(), int48_size * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+
+ res = 0;
+ while ( splitter ) {
+ ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( !splitter );
+ unsigned bits = std::rand() % 16;
+ // clamp the shift to the bits actually remaining: the final safe_cut()
+ // may return fewer than `bits` bits
+ size_t shift = splitter.rest_count();
+ if ( shift > bits )
+ shift = bits;
+ res = ( res << shift ) | splitter.safe_cut( bits );
+ }
+ ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( !splitter );
+ EXPECT_EQ( res, src.to64() );
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), int48_size * 8 );
}
}
}
+
+ // Verifies byte_splitter on little-endian targets: bytes come out
+ // least-significant first (0x10 is the low byte for both 32- and 64-bit size_t).
+ void cut_byte_le()
+ {
+ size_t src = sizeof( src ) == 8 ? 0xFEDCBA9876543210 : 0x76543210;
+
+ typedef cds::algo::byte_splitter< size_t > splitter_type;
+ splitter_type splitter( src );
+
+ ASSERT_TRUE( !splitter.eos() );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), sizeof( src ) * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+ // byte_splitter only supports multiples of 8 bits
+ EXPECT_TRUE( splitter.is_correct( 8 ) );
+ EXPECT_FALSE( splitter.is_correct( 4 ) );
+
+ unsigned expected = 0x10;
+ for ( unsigned i = 0; i < splitter_type::c_bitstring_size; ++i ) {
+ auto part = splitter.cut( 8 );
+ EXPECT_EQ( part, expected );
+ expected += 0x22;
+ }
+
+ ASSERT_TRUE( splitter.eos() );
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), sizeof( src ) * 8 );
+ }
+
+ // Verifies byte_splitter on big-endian targets: bytes come out
+ // most-significant first.
+ void cut_byte_be()
+ {
+ size_t src = sizeof( src ) == 8 ? 0xFEDCBA9876543210 : 0x76543210;
+
+ typedef cds::algo::byte_splitter< size_t > splitter_type;
+ splitter_type splitter( src );
+
+ ASSERT_TRUE( !splitter.eos() );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), sizeof( src ) * 8 );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+ // byte_splitter only supports multiples of 8 bits
+ EXPECT_TRUE( splitter.is_correct( 8 ) );
+ EXPECT_FALSE( splitter.is_correct( 4 ) );
+
+ // Fix: the first (most significant) byte depends on the width of size_t.
+ // For a 4-byte size_t the source is 0x76543210, so the sequence is
+ // 0x76, 0x54, 0x32, 0x10 — starting unconditionally at 0xFE would fail.
+ unsigned expected = sizeof( src ) == 8 ? 0xFE : 0x76;
+ for ( unsigned i = 0; i < splitter_type::c_bitstring_size; ++i ) {
+ auto part = splitter.cut( 8 );
+ EXPECT_EQ( part, expected );
+ expected -= 0x22;
+ }
+
+ ASSERT_TRUE( splitter.eos() );
+ EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
+ EXPECT_EQ( *splitter.source(), src );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ EXPECT_EQ( splitter.bit_offset(), sizeof( src ) * 8 );
+ }
};
+ // Fixture exercising cds::algo::number_splitter over an integral type:
+ // cuts every hex digit, then reassembles the number from random-width cuts.
+ class Split_number: public ::testing::Test
+ {
+ protected:
+ template <typename Int>
+ void split( Int const n )
+ {
+ cds::algo::number_splitter< Int > splitter( n );
+
+ // split by hex digit
+ for ( unsigned count = 4; count < sizeof( Int ) * 8; count += 4 ) {
+ // the test values are chosen so that digit k (from the LSB) equals k
+ EXPECT_EQ( splitter.cut( 4 ), count / 4 - 1 );
+ }
+
+ // random cut
+ for ( int i = 0; i < 100; ++i ) {
+ splitter.reset();
+ EXPECT_EQ( splitter.source(), n );
+ EXPECT_EQ( splitter.bit_offset(), 0u );
+ EXPECT_EQ( splitter.rest_count(), sizeof( Int ) * 8 );
+
+ unsigned total = 0;
+ Int result = 0;
+
+ while ( total < sizeof( Int ) * 8 ) {
+ unsigned count = std::rand() % 16;
+
+ // `shift` is the number of bits actually available for this cut;
+ // the final safe_cut() may return fewer than `count` bits
+ unsigned shift = count;
+ if ( total + count > sizeof( Int ) * 8 )
+ shift = sizeof( Int ) * 8 - total;
+
+ result += splitter.safe_cut( count ) << total;
+ total += shift;
+ }
+
+ EXPECT_EQ( result, n );
+
+ EXPECT_EQ( splitter.bit_offset(), sizeof( Int ) * 8 );
+ EXPECT_EQ( splitter.rest_count(), 0u );
+ }
+ }
+ };
+
+
// NOTE(review): this test body looks truncated in this diff view — presumably
// the else-branch (little-endian case) was elided; confirm against the full file.
TEST_F( Split_bitstrig, cut_uint )
{
if ( is_big_endian())
cut_small_le<uint16_t>();
}
+ // Dispatch the endian-specific variants based on the runtime byte order
+ TEST_F( Split_bitstrig, cut_int48 )
+ {
+ if ( is_big_endian() )
+ cut_int48_be();
+ else
+ cut_int48_le();
+ }
+
+ TEST_F( Split_bitstrig, cut_byte )
+ {
+ if ( is_big_endian() )
+ cut_byte_be();
+ else
+ cut_byte_le();
+ }
+
+ // Instantiate the number_splitter check for every standard integral width;
+ // `long` may be 4 or 8 bytes depending on the data model (LP64 vs LLP64).
+ TEST_F( Split_number, split_int )
+ {
+ split( (int)0x76543210 );
+ }
+
+ TEST_F( Split_number, split_uint )
+ {
+ split( (unsigned)0x76543210 );
+ }
+
+ TEST_F( Split_number, split_short )
+ {
+ split( (short int)0x3210 );
+ }
+
+ TEST_F( Split_number, split_ushort )
+ {
+ split( (unsigned short)0x3210 );
+ }
+
+ TEST_F( Split_number, split_long )
+ {
+ if ( sizeof( long ) == 8 )
+ split( (long)0xFEDCBA9876543210 );
+ else
+ split( (long)0x76543210 );
+ }
+
+ TEST_F( Split_number, split_ulong )
+ {
+ if ( sizeof( long ) == 8 )
+ split( (unsigned long)0xFEDCBA9876543210 );
+ else
+ split( (unsigned long)0x76543210 );
+ }
+
+ TEST_F( Split_number, split_int64 )
+ {
+ split( (int64_t)0xFEDCBA9876543210 );
+ }
+
+ TEST_F( Split_number, split_uint64 )
+ {
+ split( (uint64_t)0xFEDCBA9876543210 );
+ }
+
} // namespace
test( s );
}
+ // FeldmanHashSet (DHP GC) configured with byte_splitter as the hash splitter
+ TEST_F( FeldmanHashSet_DHP, byte_cut )
+ {
+ typedef cc::FeldmanHashSet< gc_type, int_item,
+ typename cc::feldman_hashset::make_traits<
+ cc::feldman_hashset::hash_accessor< get_hash >
+ , cc::feldman_hashset::hash_splitter< cds::algo::byte_splitter<int>>
+ , cds::opt::compare< cmp >
+ >::type
+ > set_type;
+
+ set_type s( 8, 8 );
+ test( s );
+ }
+
+ // Same, but with the hash size given explicitly via traits
+ TEST_F( FeldmanHashSet_DHP, byte_cut_explicit_hash_size )
+ {
+ struct set_traits: public cc::feldman_hashset::traits
+ {
+ typedef get_hash2 hash_accessor;
+ enum: size_t {
+ hash_size = sizeof( std::declval<key_val>().nKey )
+ };
+ typedef cds::algo::byte_splitter< key_val, hash_size > hash_splitter;
+ typedef cmp2 compare;
+ typedef cc::feldman_hashset::stat<> stat;
+ };
+ typedef cc::FeldmanHashSet< gc_type, int_item2, set_traits > set_type;
+
+ set_type s( 8, 8 );
+ test( s );
+ }
+
} // namespace
test( s );
}
+ // FeldmanHashSet (HP GC) configured with byte_splitter as the hash splitter
+ TEST_F( FeldmanHashSet_HP, byte_cut )
+ {
+ typedef cc::FeldmanHashSet< gc_type, int_item,
+ typename cc::feldman_hashset::make_traits<
+ cc::feldman_hashset::hash_accessor< get_hash >
+ , cc::feldman_hashset::hash_splitter< cds::algo::byte_splitter<int>>
+ , cds::opt::compare< cmp >
+ >::type
+ > set_type;
+
+ set_type s( 8, 8 );
+ test( s );
+ }
+
+ // Same, but with the hash size given explicitly via traits
+ TEST_F( FeldmanHashSet_HP, byte_cut_explicit_hash_size )
+ {
+ struct set_traits: public cc::feldman_hashset::traits
+ {
+ typedef get_hash2 hash_accessor;
+ enum: size_t {
+ hash_size = sizeof( std::declval<key_val>().nKey )
+ };
+ typedef cds::algo::byte_splitter< key_val, hash_size > hash_splitter;
+ typedef cmp2 compare;
+ typedef cc::feldman_hashset::stat<> stat;
+ };
+ typedef cc::FeldmanHashSet< gc_type, int_item2, set_traits > set_type;
+
+ set_type s( 8, 8 );
+ test( s );
+ }
+
} // namespace
this->test( s );
}
+ // RCU-parameterized FeldmanHashSet with byte_splitter as the hash splitter
+ TYPED_TEST_P( FeldmanHashSet, byte_cut )
+ {
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::int_item int_item;
+ typedef typename TestFixture::get_hash get_hash;
+
+ typedef cc::FeldmanHashSet< rcu_type, int_item,
+ typename cc::feldman_hashset::make_traits<
+ cc::feldman_hashset::hash_accessor< get_hash >
+ , cc::feldman_hashset::hash_splitter< cds::algo::byte_splitter< int >>
+ , cds::opt::less< std::less<int> >
+ >::type
+ > set_type;
+
+ set_type s( 8, 8 );
+ this->test( s );
+ }
+
+ // Same, but with the hash size given explicitly via traits
+ TYPED_TEST_P( FeldmanHashSet, byte_cut_explicit_hash_size )
+ {
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::int_item2 int_item;
+ typedef typename TestFixture::get_hash2 get_hash2;
+
+ struct set_traits: public cc::feldman_hashset::traits
+ {
+ enum: size_t {
+ hash_size = sizeof( std::declval<int_item>().nKey )
+ };
+ typedef cds::algo::byte_splitter< typename TestFixture::key_val, hash_size > hash_splitter;
+ typedef get_hash2 hash_accessor;
+ typedef typename TestFixture::cmp2 compare;
+ typedef cc::feldman_hashset::stat<> stat;
+ };
+ typedef cc::FeldmanHashSet< rcu_type, int_item, set_traits > set_type;
+
+ set_type s( 8, 8 );
+ this->test( s );
+ }
+
// GCC 5: all test names must be listed on a single line; otherwise a runtime
// error such as "No test named <test_name> can be found in this test case"
// is reported.
REGISTER_TYPED_TEST_CASE_P( FeldmanHashSet,
- defaulted, compare, less, cmpmix, item_counting, backoff, stat, explicit_hash_size
+ defaulted, compare, less, cmpmix, item_counting, backoff, stat, explicit_hash_size, byte_cut, byte_cut_explicit_hash_size
);
} // namespace