# endif
uint_type result;
- uint_type const nRest = c_nBitPerInt - m_offset % c_nBitPerInt;
+ size_t const nRest = c_nBitPerInt - m_offset % c_nBitPerInt;
m_offset += nBits;
if ( nBits < nRest ) {
- result = *m_ptr << ( nRest - nBits );
- result = result >> ( c_nBitPerInt - nBits );
+ result = static_cast<uint_type>( *m_ptr << ( nRest - nBits ));
+ result = static_cast<uint_type>( result >> ( c_nBitPerInt - nBits ));
}
else if ( nBits == nRest ) {
- result = *m_ptr >> ( c_nBitPerInt - nRest );
+ result = static_cast<uint_type>( *m_ptr >> ( c_nBitPerInt - nRest ));
++m_ptr;
assert( m_offset % c_nBitPerInt == 0 );
}
else {
- uint_type const lsb = *m_ptr >> ( c_nBitPerInt - nRest );
+ uint_type const lsb = static_cast<uint_type>( *m_ptr >> ( c_nBitPerInt - nRest ));
nBits -= nRest;
++m_ptr;
- result = *m_ptr << ( c_nBitPerInt - nBits );
- result = result >> ( c_nBitPerInt - nBits );
- result = (result << nRest) + lsb;
+ result = static_cast<uint_type>( *m_ptr << ( c_nBitPerInt - nBits ));
+ result = static_cast<uint_type>( result >> ( c_nBitPerInt - nBits ));
+ result = static_cast<uint_type>( (result << nRest) + lsb );
}
assert( m_offset <= c_nBitPerHash );
template <typename... S>
value_type * New( S const&... src )
{
- return Construct( allocator_type::allocate(1), src... );
+ return Construct( allocator_type::allocate( 1, nullptr ), src... );
}
/// Analogue of <tt>operator new T( std::forward<Args>(args)... )</tt> (move semantics)
template <typename... Args>
value_type * MoveNew( Args&&... args )
{
- return MoveConstruct( allocator_type::allocate(1), std::forward<Args>(args)... );
+ return MoveConstruct( allocator_type::allocate( 1, nullptr ), std::forward<Args>(args)... );
}
/// Analogue of operator new T[\p nCount ]
value_type * NewArray( size_t nCount )
{
- value_type * p = allocator_type::allocate( nCount );
+ value_type * p = allocator_type::allocate( nCount, nullptr );
for ( size_t i = 0; i < nCount; ++i )
Construct( p + i );
return p;
template <typename S>
value_type * NewArray( size_t nCount, S const& src )
{
- value_type * p = allocator_type::allocate( nCount );
+ value_type * p = allocator_type::allocate( nCount, nullptr );
for ( size_t i = 0; i < nCount; ++i )
Construct( p + i, src );
return p;
cur->clear();
// free all extended blocks
- hp_allocator& alloc = hp_allocator::instance();
+ hp_allocator& a = hp_allocator::instance();
for ( guard_block* p = extended_list_; p; ) {
guard_block* next = p->next_;
- alloc.free( p );
+ a.free( p );
p = next;
}
// Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
- size_t nBucketCount = (size_t)(((float)nItemCount) / m.nLoadFactor);
+ size_t nBucketCount = ( nItemCount + m.nLoadFactor - 1 ) / m.nLoadFactor;
if ( nBucketCount <= 2 ) {
m.nSegmentCount = 1;
m.nSegmentSize = 2;
for ( node_type* p = m_pNode->next.load( memory_model::memory_order_relaxed ); p != m_pNode; p = p->next.load( memory_model::memory_order_relaxed ))
{
m_pNode = p;
- if ( m_Guard.protect( p->data, []( marked_data_ptr p ) { return p.ptr(); }).ptr())
+ if ( m_Guard.protect( p->data, []( marked_data_ptr ptr ) { return ptr.ptr(); }).ptr())
return;
}
m_Guard.clear();
{
native_timer_type ts;
current( ts );
- double dblRet = ( ts.tv_sec - m_tmStart.tv_sec ) + ( ts.tv_nsec - m_tmStart.tv_nsec ) / 1.0E9;
+ double dblRet = double( ts.tv_sec - m_tmStart.tv_sec ) + double( ts.tv_nsec - m_tmStart.tv_nsec ) / 1.0E9;
m_tmStart = ts;
return dblRet;
}
{
long n = ::sysconf( _SC_NPROCESSORS_ONLN );
if ( n > 0 )
- s_nProcessorCount = n;
+ s_nProcessorCount = static_cast<unsigned>( n );
else {
try {
std::ifstream cpuinfo("/proc/cpuinfo");
uint32 d = Fetch32(s + (len >> 1));
uint32 e = Fetch32(s);
uint32 f = Fetch32(s + len - 4);
- uint32 h = len;
+ uint32 h = static_cast<uint32>( len );
return fmix(Mur(f, Mur(e, Mur(d, Mur(c, Mur(b, Mur(a, h)))))));
}
b = b * c1 + v;
c ^= b;
}
- return fmix(Mur(b, Mur(len, c)));
+ return fmix(Mur(b, Mur(static_cast<uint32>( len ), c)));
}
static uint32 Hash32Len5to12(const char *s, size_t len) {
- uint32 a = len, b = len * 5, c = 9, d = b;
+ uint32 a = static_cast<uint32>( len ), b = static_cast<uint32>( len ) * 5, c = 9, d = b;
a += Fetch32(s);
b += Fetch32(s + len - 4);
c += Fetch32(s + ((len >> 1) & 4));
}
// len > 24
- uint32 h = len, g = c1 * len, f = g;
+ uint32 h = static_cast<uint32>( len ), g = static_cast<uint32>( c1 * len ), f = g;
uint32 a0 = Rotate32(Fetch32(s + len - 4) * c1, 17) * c2;
uint32 a1 = Rotate32(Fetch32(s + len - 8) * c1, 17) * c2;
uint32 a2 = Rotate32(Fetch32(s + len - 16) * c1, 17) * c2;
uint8 b = s[len >> 1];
uint8 c = s[len - 1];
uint32 y = static_cast<uint32>(a) + (static_cast<uint32>(b) << 8);
- uint32 z = len + (static_cast<uint32>(c) << 2);
+ uint32 z = static_cast<uint32>( len + (static_cast<uint32>(c) << 2));
return ShiftMix(y * k2 ^ z * k0) * k2;
}
return k2;
break;
case 1:
EXPECT_EQ( i.s.nInsertCall, 0 );
- EXPECT_TRUE( l.insert( i, []( value_type& i ) { ++i.s.nInsertCall; } ));
+ EXPECT_TRUE( l.insert( i, []( value_type& v ) { ++v.s.nInsertCall; } ));
EXPECT_EQ( i.s.nInsertCall, 1 );
break;
case 2:
{
- std::pair<bool, bool> ret = l.update( i, []( value_type& i, value_type * old ) {
+ std::pair<bool, bool> ret = l.update( i, []( value_type& v, value_type * old ) {
EXPECT_TRUE( old == nullptr );
- EXPECT_EQ( i.s.nUpdateNewCall, 0 );
- ++i.s.nUpdateNewCall;
+ EXPECT_EQ( v.s.nUpdateNewCall, 0 );
+ ++v.s.nUpdateNewCall;
}, false );
EXPECT_EQ( i.s.nUpdateNewCall, 0 );
EXPECT_EQ( ret.first, false );
EXPECT_EQ( ret.second, false );
- ret = l.update( i, []( value_type& i, value_type * old ) {
+ ret = l.update( i, []( value_type& v, value_type * old ) {
EXPECT_TRUE( old == nullptr );
- EXPECT_EQ( i.s.nUpdateNewCall, 0 );
- ++i.s.nUpdateNewCall;
+ EXPECT_EQ( v.s.nUpdateNewCall, 0 );
+ ++v.s.nUpdateNewCall;
}, true );
EXPECT_EQ( i.s.nUpdateNewCall, 1 );
EXPECT_EQ( ret.first, true );
for ( auto& i : arr ) {
EXPECT_EQ( i.s.nUpdateExistsCall, 0 );
- std::pair<bool, bool> ret = l.update( i, []( value_type& i, value_type * old ) {
+ std::pair<bool, bool> ret = l.update( i, []( value_type& v, value_type * old ) {
EXPECT_FALSE( old == nullptr );
- EXPECT_EQ( i.s.nUpdateExistsCall, 0 );
- ++i.s.nUpdateExistsCall;
+ EXPECT_EQ( v.s.nUpdateExistsCall, 0 );
+ ++v.s.nUpdateExistsCall;
});
EXPECT_TRUE( ret.first );
EXPECT_FALSE( ret.second );
EXPECT_TRUE( l.insert( i ));
else {
EXPECT_EQ( i.s.nInsertCall, 0 );
- EXPECT_TRUE( l.insert( i, []( value_type& i ) { ++i.s.nInsertCall; } ));
+ EXPECT_TRUE( l.insert( i, []( value_type& v ) { ++v.s.nInsertCall; } ));
EXPECT_EQ( i.s.nInsertCall, 1 );
}
EXPECT_FALSE( ret.second );
EXPECT_EQ( i.s.nUpdateExistsCall, 1 );
- ret = l.update( i, []( bool bNew, value_type& i, value_type& arg ) {
+ ret = l.update( i, []( bool bNew, value_type& v, value_type& arg ) {
EXPECT_FALSE( bNew );
- EXPECT_EQ( i.s.nUpdateExistsCall, 1 );
- EXPECT_TRUE( &i == &arg );
- ++i.s.nUpdateExistsCall;
+ EXPECT_EQ( v.s.nUpdateExistsCall, 1 );
+ EXPECT_TRUE( &v == &arg );
+ ++v.s.nUpdateExistsCall;
});
EXPECT_TRUE( ret.first );
EXPECT_FALSE( ret.second );
explicit key_type2( int n )
: nKey( n )
- , subkey( n )
+ , subkey( static_cast<uint16_t>( n ))
{}
explicit key_type2( size_t n )
explicit key_type2( std::string const& str )
: nKey( std::stoi( str ))
- , subkey( nKey )
+ , subkey( static_cast<uint16_t>( nKey ))
{}
key_type2( key_type2 const& s )
EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
EXPECT_EQ( a.exchange( n ), static_cast<integral_type>( 0 ));
EXPECT_EQ( a.load(), n );
EXPECT_EQ( a.exchange( (integral_type) 0 ), n );
integral_type prev = a.load();
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
integral_type expected = prev;
EXPECT_TRUE( a.compare_exchange_weak( expected, n));
prev = a;
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
integral_type expected = prev;
EXPECT_TRUE( a.compare_exchange_strong( expected, n));
for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
{
integral_type prev = a.load();
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
EXPECT_EQ( a.fetch_add(n), prev);
}
for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
{
integral_type prev = a.load();
- integral_type n = integral_type(42) << ((nByte - 1) * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
EXPECT_EQ( a.fetch_sub(n), prev);
}
for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
{
integral_type prev = a.load() ;;
- integral_type mask = integral_type(1) << nBit;
+ integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
EXPECT_EQ( a.fetch_or( mask ), prev );
prev = a.load();
for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
{
integral_type prev = a;
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
EXPECT_EQ( (a += n), (prev + n));
}
for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
{
integral_type prev = a;
- integral_type n = integral_type(42) << ((nByte - 1) * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
EXPECT_EQ( (a -= n), prev - n );
}
for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
{
integral_type prev = a;
- integral_type mask = integral_type(1) << nBit;
+ integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
EXPECT_EQ( (a |= mask ), (prev | mask ));
prev = a;
EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
EXPECT_EQ( a.exchange( n, order ), integral_type( 0 ));
EXPECT_EQ( a.load( oLoad ), n );
EXPECT_EQ( a.exchange( (integral_type) 0, order ), n );
integral_type prev = a.load( oLoad );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
integral_type expected = prev;
EXPECT_TRUE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
prev = a.load( oLoad );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
integral_type expected = prev;
EXPECT_TRUE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
{
integral_type prev = a.load( oLoad );
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
EXPECT_EQ( a.fetch_add( n, order), prev);
}
for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
{
integral_type prev = a.load( oLoad );
- integral_type n = integral_type(42) << ((nByte - 1) * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
EXPECT_EQ( a.fetch_sub( n, order ), prev);
}
for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
{
integral_type prev = a.load( oLoad ) ;;
- integral_type mask = integral_type(1) << nBit;
+ integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
EXPECT_EQ( a.fetch_or( mask, order ), prev );
prev = a.load( oLoad );
char arr[8];
const char aSize = sizeof(arr)/sizeof(arr[0]);
for ( char i = 0; i < aSize; ++i ) {
- arr[unsigned(i)] = i + 1;
+ arr[static_cast<size_t>(i)] = i + 1;
}
atomic_pointer a;
integral_type arr[8];
const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
for ( integral_type i = 0; i < aSize; ++i ) {
- arr[size_t(i)] = i + 1;
+ arr[static_cast<size_t>(i)] = i + 1;
}
atomic_pointer a;
EXPECT_EQ( atomics::atomic_load( &a ), integral_type( 0 ));
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
EXPECT_EQ( atomics::atomic_exchange( &a, n ), (integral_type) 0 );
EXPECT_EQ( atomics::atomic_load( &a ), n );
EXPECT_EQ( atomics::atomic_exchange( &a, (integral_type) 0 ), n );
integral_type prev = atomics::atomic_load( &a );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
integral_type expected = prev;
EXPECT_TRUE( atomics::atomic_compare_exchange_weak( &a, &expected, n));
prev = atomics::atomic_load( &a );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
integral_type expected = prev;
EXPECT_TRUE( atomics::atomic_compare_exchange_strong( &a, &expected, n));
for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
{
integral_type prev = atomics::atomic_load( &a );
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
EXPECT_EQ( atomics::atomic_fetch_add( &a, n ), prev );
}
for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
{
integral_type prev = atomics::atomic_load( &a );
- integral_type n = integral_type(42) << ((nByte - 1) * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
EXPECT_EQ( atomics::atomic_fetch_sub( &a, n ), prev );
}
for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
{
integral_type prev = atomics::atomic_load( &a );
- integral_type mask = integral_type(1) << nBit;
+ integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
EXPECT_EQ( atomics::atomic_fetch_or( &a, mask ), prev );
prev = atomics::atomic_load( &a );
EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), (integral_type) 0 );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
EXPECT_EQ( atomics::atomic_exchange_explicit( &a, n, order ), (integral_type) 0 );
EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), n );
EXPECT_EQ( atomics::atomic_exchange_explicit( &a, (integral_type) 0, order ), n );
integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
integral_type expected = prev;
EXPECT_TRUE( atomics::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
prev = atomics::atomic_load_explicit( &a, oLoad );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
integral_type expected = prev;
EXPECT_TRUE( atomics::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
{
integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
EXPECT_EQ( atomics::atomic_fetch_add_explicit( &a, n, order), prev);
}
for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
{
integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
- integral_type n = integral_type(42) << ((nByte - 1) * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
EXPECT_EQ( atomics::atomic_fetch_sub_explicit( &a, n, order ), prev);
}
for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
{
integral_type prev = atomics::atomic_load_explicit( &a, oLoad ) ;;
- integral_type mask = integral_type(1) << nBit;
+ integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
EXPECT_EQ( atomics::atomic_fetch_or_explicit( &a, mask, order ), prev );
prev = atomics::atomic_load_explicit( &a, oLoad );
integral_type arr[8];
const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
for ( integral_type i = 0; i < aSize; ++i ) {
- arr[size_t(i)] = i + 1;
+ arr[static_cast<size_t>(i)] = i + 1;
}
atomic_pointer a;
char arr[8];
const char aSize = sizeof(arr)/sizeof(arr[0]);
for ( char i = 0; i < aSize; ++i ) {
- arr[unsigned(i)] = i + 1;
+ arr[static_cast<size_t>(i)] = i + 1;
}
atomic_pointer a;