X-Git-Url: http://plrg.eecs.uci.edu/git/?p=libcds.git;a=blobdiff_plain;f=test%2Funit%2Fmisc%2Fcxx11_atomic_class.cpp;h=4041d8f70a0ae595b9625610871119049dca0d3b;hp=fa113f7ee3bcacbf9db69b6239786e17b9e4e2c8;hb=HEAD;hpb=a8f8957d8fd4d314e30f3bcb08d32a60074dd72f

diff --git a/test/unit/misc/cxx11_atomic_class.cpp b/test/unit/misc/cxx11_atomic_class.cpp
index fa113f7e..4041d8f7 100644
--- a/test/unit/misc/cxx11_atomic_class.cpp
+++ b/test/unit/misc/cxx11_atomic_class.cpp
@@ -1,7 +1,7 @@
 /*
     This file is a part of libcds - Concurrent Data Structures library
 
-    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
 
     Source code repo: http://github.com/khizmax/libcds/
     Download: http://sourceforge.net/projects/libcds/files/
@@ -28,10 +28,12 @@
     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
-#include 
+#include 
 #include 
 #include "cxx11_convert_memory_order.h"
 
+#define EXPECT_ATOMIC_IS_LOCK_FREE( x ) EXPECT_TRUE( x.is_lock_free())
+
 namespace {
     class cxx11_atomic_class: public ::testing::Test
     {
@@ -71,13 +73,12 @@ namespace {
         {
             typedef Integral integral_type;
 
-            EXPECT_TRUE( a.is_lock_free());
+            EXPECT_ATOMIC_IS_LOCK_FREE( a );
             a.store( (integral_type) 0 );
-            //EXPECT_EQ( a, static_cast<integral_type>( 0 ));
             EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
 
             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
-                integral_type n = integral_type(42) << (nByte * 8);
+                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                 EXPECT_EQ( a.exchange( n ), static_cast<integral_type>( 0 ));
                 EXPECT_EQ( a.load(), n );
                 EXPECT_EQ( a.exchange( (integral_type) 0 ), n );
@@ -86,7 +87,7 @@ namespace {
 
             integral_type prev = a.load();
             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
-                integral_type n = integral_type(42) << (nByte * 8);
+                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                 integral_type expected = prev;
 
                 EXPECT_TRUE( a.compare_exchange_weak( expected, n));
@@ -102,7 +103,7 @@ namespace {
 
             prev = a;
             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
-                integral_type n = integral_type(42) << (nByte * 8);
+                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                 integral_type expected = prev;
 
                 EXPECT_TRUE( a.compare_exchange_strong( expected, n));
@@ -131,7 +132,7 @@ namespace {
 
             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte ) {
                 integral_type prev = a.load();
-                integral_type n = integral_type(42) << (nByte * 8);
+                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
 
                 EXPECT_EQ( a.fetch_add(n), prev);
             }
@@ -140,7 +141,7 @@ namespace {
 
             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte ) {
                 integral_type prev = a.load();
-                integral_type n = integral_type(42) << ((nByte - 1) * 8);
+                integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
 
                 EXPECT_EQ( a.fetch_sub(n), prev);
             }
@@ -150,7 +151,7 @@ namespace {
 
             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit ) {
                 integral_type prev = a.load() ;;
-                integral_type mask = integral_type(1) << nBit;
+                integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
 
                 EXPECT_EQ( a.fetch_or( mask ), prev );
                 prev = a.load();
@@ -174,7 +175,7 @@ namespace {
 
             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte ) {
                 integral_type prev = a;
-                integral_type n = integral_type(42) << (nByte * 8);
+                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
 
                 EXPECT_EQ( (a += n), (prev + n));
             }
@@ -183,7 +184,7 @@ namespace {
 
             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte ) {
                 integral_type prev = a;
-                integral_type n = integral_type(42) << ((nByte - 1) * 8);
+                integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
 
                 EXPECT_EQ( (a -= n), prev - n );
             }
@@ -193,7 +194,7 @@ namespace {
 
             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit ) {
                 integral_type prev = a;
-                integral_type mask = integral_type(1) << nBit;
+                integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
 
                 EXPECT_EQ( (a |= mask ), (prev | mask ));
                 prev = a;
@@ -218,13 +219,12 @@ namespace {
             const atomics::memory_order oLoad = convert_to_load_order( order );
             const atomics::memory_order oStore = convert_to_store_order( order );
 
-            EXPECT_TRUE( a.is_lock_free());
+            EXPECT_ATOMIC_IS_LOCK_FREE( a );
             a.store((integral_type) 0, oStore );
-            //EXPECT_EQ( a, integral_type( 0 ));
             EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
 
             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
-                integral_type n = integral_type(42) << (nByte * 8);
+                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                 EXPECT_EQ( a.exchange( n, order ), integral_type( 0 ));
                 EXPECT_EQ( a.load( oLoad ), n );
                 EXPECT_EQ( a.exchange( (integral_type) 0, order ), n );
@@ -233,7 +233,7 @@ namespace {
 
             integral_type prev = a.load( oLoad );
             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
-                integral_type n = integral_type(42) << (nByte * 8);
+                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                 integral_type expected = prev;
 
                 EXPECT_TRUE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
@@ -249,7 +249,7 @@ namespace {
 
             prev = a.load( oLoad );
             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
-                integral_type n = integral_type(42) << (nByte * 8);
+                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                 integral_type expected = prev;
 
                 EXPECT_TRUE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
@@ -281,7 +281,7 @@ namespace {
 
             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte ) {
                 integral_type prev = a.load( oLoad );
-                integral_type n = integral_type(42) << (nByte * 8);
+                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
 
                 EXPECT_EQ( a.fetch_add( n, order), prev);
             }
@@ -290,7 +290,7 @@ namespace {
 
             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte ) {
                 integral_type prev = a.load( oLoad );
-                integral_type n = integral_type(42) << ((nByte - 1) * 8);
+                integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
 
                 EXPECT_EQ( a.fetch_sub( n, order ), prev);
             }
@@ -300,7 +300,7 @@ namespace {
 
             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit ) {
                 integral_type prev = a.load( oLoad ) ;;
-                integral_type mask = integral_type(1) << nBit;
+                integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
 
                 EXPECT_EQ( a.fetch_or( mask, order ), prev );
                 prev = a.load( oLoad );
@@ -355,7 +355,7 @@ namespace {
         template <class AtomicBool>
        void do_test_atomic_bool( AtomicBool& a )
        {
-            EXPECT_TRUE( a.is_lock_free());
+            EXPECT_ATOMIC_IS_LOCK_FREE( a );
             a.store( false );
             EXPECT_FALSE( a );
             EXPECT_FALSE( a.load());
@@ -392,7 +392,7 @@ namespace {
             const atomics::memory_order oLoad = convert_to_load_order( order );
             const atomics::memory_order oStore = convert_to_store_order( order );
             const atomics::memory_order oExchange = convert_to_exchange_order( order );
-            EXPECT_TRUE( a.is_lock_free());
+            EXPECT_ATOMIC_IS_LOCK_FREE( a );
             a.store( false, oStore );
             EXPECT_FALSE( a );
             EXPECT_FALSE( a.load( oLoad ));
@@ -427,6 +427,8 @@ namespace {
         template <typename Atomic>
         void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
         {
+            CDS_UNUSED( aSize );
+
             atomics::memory_order oLoad = convert_to_load_order(order);
             atomics::memory_order oStore = convert_to_store_order(order);
             void * p;
@@ -452,18 +454,6 @@ namespace {
             EXPECT_EQ( reinterpret_cast<char *>(a.exchange( (void *) arr, order )), arr + 3 );
             EXPECT_EQ( reinterpret_cast<char *>(a.load( oLoad )), arr );
             EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );
-
-            for ( char i = 1; i < aSize; ++i ) {
-                EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i );
-                a.fetch_add( 1, order );
-                EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i + 1 );
-            }
-
-            for ( char i = aSize; i > 1; --i ) {
-                EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i );
-                a.fetch_sub( 1, order );
-                EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i - 1 );
-            }
         }
 
         template
@@ -474,7 +464,7 @@ namespace {
             char arr[8];
             const char aSize = sizeof(arr)/sizeof(arr[0]);
             for ( char i = 0; i < aSize; ++i ) {
-                arr[unsigned(i)] = i + 1;
+                arr[static_cast<unsigned>( i )] = i + 1;
             }
 
             atomic_pointer a;
@@ -498,18 +488,6 @@ namespace {
             EXPECT_EQ( reinterpret_cast<char *>( a.load()), arr );
             EXPECT_EQ( *reinterpret_cast<char *>( a.load()), 1 );
 
-            for ( char i = 1; i < aSize; ++i ) {
-                EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i );
-                a.fetch_add( 1 );
-                EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i + 1 );
-            }
-
-            for ( char i = aSize; i > 1; --i ) {
-                EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i );
-                a.fetch_sub( 1 );
-                EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i - 1 );
-            }
-
             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
@@ -548,14 +526,14 @@ namespace {
             EXPECT_EQ( *a.load( oLoad ), 1 );
             for ( integral_type i = 1; i < aSize; ++i ) {
-                integral_type * p = a.load();
+                p = a.load();
                 EXPECT_EQ( *p, i );
                 EXPECT_EQ( a.fetch_add( 1, order ), p );
                 EXPECT_EQ( *a.load( oLoad ), i + 1 );
             }
 
             for ( integral_type i = aSize; i > 1; --i ) {
-                integral_type * p = a.load();
+                p = a.load();
                 EXPECT_EQ( *p, i );
                 EXPECT_EQ( a.fetch_sub( 1, order ), p );
                 EXPECT_EQ( *a.load( oLoad ), i - 1 );
             }
@@ -571,7 +549,7 @@ namespace {
             integral_type arr[8];
             const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
             for ( integral_type i = 0; i < aSize; ++i ) {
-                arr[size_t(i)] = i + 1;
+                arr[static_cast<size_t>(i)] = i + 1;
             }
 
             atomic_pointer a;
@@ -600,7 +578,7 @@ namespace {
             EXPECT_EQ( *a.load(), 1 );
 
             for ( integral_type i = 1; i < aSize; ++i ) {
-                integral_type * p = a.load();
+                p = a.load();
                 EXPECT_EQ( *p, i );
                 integral_type * pa = a.fetch_add( 1 );
                 EXPECT_EQ( pa, p );
@@ -608,7 +586,7 @@ namespace {
             }
 
             for ( integral_type i = aSize; i > 1; --i ) {
-                integral_type * p = a.load();
+                p = a.load();
                 EXPECT_EQ( *p, i );
                 EXPECT_EQ( a.fetch_sub( 1 ), p );
                 EXPECT_EQ( *a.load(), i - 1 );
@@ -788,8 +766,6 @@ namespace {
         test_atomic_integral_volatile();
     }
-#if !( CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 40000 )
-    //clang error with atomic fetch_add/fetch_sub
     TEST_F( cxx11_atomic_class, atomic_pointer_void )
    {
         do_test_atomic_pointer_void();
     }
@@ -799,7 +775,6 @@ namespace {
     {
         do_test_atomic_pointer_void();
     }
-#endif
 
     TEST_F( cxx11_atomic_class, atomic_pointer_char )
     {