/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
31 #include <gtest/gtest.h>
32 #include <cds/algo/atomic.h>
33 #include "cxx11_convert_memory_order.h"
35 #if CDS_COMPILER == CDS_COMPILER_CLANG && !defined( _LIBCPP_VERSION )
36 // CLang (at least 3.6) without libc++ has no gcc-specific __atomic_is_lock_free function
37 # define EXPECT_ATOMIC_IS_LOCK_FREE( x )
39 # define EXPECT_ATOMIC_IS_LOCK_FREE( x ) EXPECT_TRUE( x.is_lock_free() )
44 class cxx11_atomic_class: public ::testing::Test
47 template <typename AtomicFlag>
48 void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order )
50 atomics::memory_order mo_clear = convert_to_store_order(order);
51 for ( int i = 0; i < 5; ++i ) {
52 EXPECT_TRUE( !f.test_and_set( order ));
53 EXPECT_TRUE( f.test_and_set( order ));
58 template <typename AtomicFlag>
59 void do_test_atomic_flag( AtomicFlag& f)
63 for ( int i = 0; i < 5; ++i ) {
64 EXPECT_TRUE( !f.test_and_set());
65 EXPECT_TRUE( f.test_and_set());
69 do_test_atomic_flag_mo( f, atomics::memory_order_relaxed );
70 //do_test_atomic_flag_mo( f, atomics::memory_order_consume );
71 do_test_atomic_flag_mo( f, atomics::memory_order_acquire );
72 do_test_atomic_flag_mo( f, atomics::memory_order_release );
73 do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel );
74 do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst );
77 template <class Atomic, typename Integral>
78 void do_test_atomic_type(Atomic& a)
80 typedef Integral integral_type;
82 EXPECT_ATOMIC_IS_LOCK_FREE( a );
83 a.store( (integral_type) 0 );
84 EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
86 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
87 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
88 EXPECT_EQ( a.exchange( n ), static_cast<integral_type>( 0 ));
89 EXPECT_EQ( a.load(), n );
90 EXPECT_EQ( a.exchange( (integral_type) 0 ), n );
91 EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
94 integral_type prev = a.load();
95 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
96 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
97 integral_type expected = prev;
99 EXPECT_TRUE( a.compare_exchange_weak( expected, n));
100 EXPECT_EQ( expected, prev );
101 EXPECT_FALSE( a.compare_exchange_weak( expected, n));
102 EXPECT_EQ( expected, n );
105 EXPECT_EQ( a.load(), n );
108 a = (integral_type) 0;
111 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
112 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
113 integral_type expected = prev;
115 EXPECT_TRUE( a.compare_exchange_strong( expected, n));
116 EXPECT_EQ( expected, prev );
117 EXPECT_FALSE( a.compare_exchange_strong( expected, n));
118 EXPECT_EQ( expected, n );
121 EXPECT_EQ( a.load(), n );
124 EXPECT_EQ( a.exchange( (integral_type) 0 ), prev );
127 template <class Atomic, typename Integral>
128 void do_test_atomic_integral(Atomic& a)
130 do_test_atomic_type< Atomic, Integral >(a);
132 typedef Integral integral_type;
135 a.store( (integral_type) 0 );
138 for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
140 integral_type prev = a.load();
141 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
143 EXPECT_EQ( a.fetch_add(n), prev);
147 for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
149 integral_type prev = a.load();
150 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
152 EXPECT_EQ( a.fetch_sub(n), prev);
154 EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
156 // fetch_or / fetc_xor / fetch_and
157 for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
159 integral_type prev = a.load() ;;
160 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
162 EXPECT_EQ( a.fetch_or( mask ), prev );
164 EXPECT_EQ( ( prev & mask), mask);
166 EXPECT_EQ( a.fetch_and( (integral_type) ~mask ), prev );
168 EXPECT_EQ( integral_type(prev & mask), integral_type(0));
170 EXPECT_EQ( a.fetch_xor( mask ), prev );
172 EXPECT_EQ( integral_type( prev & mask), mask);
174 EXPECT_EQ( a.load(), (integral_type) -1 );
178 a = (integral_type) 0;
181 for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
183 integral_type prev = a;
184 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
186 EXPECT_EQ( (a += n), (prev + n));
190 for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
192 integral_type prev = a;
193 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
195 EXPECT_EQ( (a -= n), prev - n );
197 EXPECT_EQ( a.load(), (integral_type) 0 );
200 for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
202 integral_type prev = a;
203 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
205 EXPECT_EQ( (a |= mask ), (prev | mask ));
207 EXPECT_EQ( ( prev & mask), mask);
209 EXPECT_EQ( (a &= (integral_type) ~mask ), ( prev & (integral_type) ~mask ));
211 EXPECT_EQ( ( prev & mask), integral_type( 0 ));
213 EXPECT_EQ( (a ^= mask ), (prev ^ mask ));
215 EXPECT_EQ( ( prev & mask), mask);
217 EXPECT_EQ( a.load(), (integral_type) -1 );
220 template <class Atomic, typename Integral>
221 void do_test_atomic_type( Atomic& a, atomics::memory_order order )
223 typedef Integral integral_type;
225 const atomics::memory_order oLoad = convert_to_load_order( order );
226 const atomics::memory_order oStore = convert_to_store_order( order );
228 EXPECT_ATOMIC_IS_LOCK_FREE( a );
229 a.store((integral_type) 0, oStore );
230 EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
232 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
233 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
234 EXPECT_EQ( a.exchange( n, order ), integral_type( 0 ));
235 EXPECT_EQ( a.load( oLoad ), n );
236 EXPECT_EQ( a.exchange( (integral_type) 0, order ), n );
237 EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
240 integral_type prev = a.load( oLoad );
241 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
242 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
243 integral_type expected = prev;
245 EXPECT_TRUE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
246 EXPECT_EQ( expected, prev );
247 EXPECT_FALSE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
248 EXPECT_EQ( expected, n );
251 EXPECT_EQ( a.load( oLoad ), n );
254 a.store( (integral_type) 0, oStore );
256 prev = a.load( oLoad );
257 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
258 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
259 integral_type expected = prev;
261 EXPECT_TRUE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
262 EXPECT_EQ( expected, prev );
263 EXPECT_FALSE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
264 EXPECT_EQ( expected, n );
267 EXPECT_EQ( a.load( oLoad ), n );
270 EXPECT_EQ( a.exchange( (integral_type) 0, order ), prev );
273 template <class Atomic, typename Integral>
274 void do_test_atomic_integral( Atomic& a, atomics::memory_order order )
276 do_test_atomic_type< Atomic, Integral >( a, order );
278 typedef Integral integral_type;
280 const atomics::memory_order oLoad = convert_to_load_order( order );
281 const atomics::memory_order oStore = convert_to_store_order( order );
284 a.store( (integral_type) 0, oStore );
287 for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
289 integral_type prev = a.load( oLoad );
290 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
292 EXPECT_EQ( a.fetch_add( n, order), prev);
296 for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
298 integral_type prev = a.load( oLoad );
299 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
301 EXPECT_EQ( a.fetch_sub( n, order ), prev);
303 EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
305 // fetch_or / fetc_xor / fetch_and
306 for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
308 integral_type prev = a.load( oLoad ) ;;
309 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
311 EXPECT_EQ( a.fetch_or( mask, order ), prev );
312 prev = a.load( oLoad );
313 EXPECT_EQ( ( prev & mask), mask);
315 EXPECT_EQ( a.fetch_and( (integral_type) ~mask, order ), prev );
316 prev = a.load( oLoad );
317 EXPECT_EQ( ( prev & mask), integral_type( 0 ));
319 EXPECT_EQ( a.fetch_xor( mask, order ), prev );
320 prev = a.load( oLoad );
321 EXPECT_EQ( ( prev & mask), mask);
323 EXPECT_EQ( a.load( oLoad ), (integral_type) -1 );
328 template <typename Atomic, typename Integral>
329 void test_atomic_integral_(Atomic& a)
331 do_test_atomic_integral<Atomic, Integral >(a);
333 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_relaxed );
334 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acquire );
335 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_release );
336 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acq_rel );
337 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_seq_cst );
340 template <typename Integral>
341 void test_atomic_integral()
343 typedef atomics::atomic<Integral> atomic_type;
346 for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
347 test_atomic_integral_<atomic_type, Integral>( a[i] );
350 template <typename Integral>
351 void test_atomic_integral_volatile()
353 typedef atomics::atomic<Integral> volatile atomic_type;
356 for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
357 test_atomic_integral_<atomic_type, Integral>( a[i] );
361 template <class AtomicBool>
362 void do_test_atomic_bool( AtomicBool& a )
364 EXPECT_ATOMIC_IS_LOCK_FREE( a );
367 EXPECT_FALSE( a.load());
369 EXPECT_FALSE( a.exchange( true ));
370 EXPECT_TRUE( a.load());
371 EXPECT_TRUE( a.exchange( false ));
372 EXPECT_FALSE( a.load());
374 bool expected = false;
375 EXPECT_TRUE( a.compare_exchange_weak( expected, true));
376 EXPECT_FALSE( expected );
377 EXPECT_FALSE( a.compare_exchange_weak( expected, false));
378 EXPECT_TRUE( expected );
379 EXPECT_TRUE( a.load());
384 EXPECT_TRUE( a.compare_exchange_strong( expected, true));
385 EXPECT_FALSE( expected );
386 EXPECT_FALSE( a.compare_exchange_strong( expected, false));
387 EXPECT_TRUE( expected );
389 EXPECT_TRUE( a.load());
391 EXPECT_TRUE( a.exchange( false ));
394 template <class AtomicBool>
395 void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order )
397 const atomics::memory_order oLoad = convert_to_load_order( order );
398 const atomics::memory_order oStore = convert_to_store_order( order );
399 const atomics::memory_order oExchange = convert_to_exchange_order( order );
401 EXPECT_ATOMIC_IS_LOCK_FREE( a );
402 a.store( false, oStore );
404 EXPECT_FALSE( a.load( oLoad ));
406 EXPECT_FALSE( a.exchange( true, oExchange ));
407 EXPECT_TRUE( a.load( oLoad ));
408 EXPECT_TRUE( a.exchange( false, oExchange ));
409 EXPECT_FALSE( a.load( oLoad ));
411 bool expected = false;
412 EXPECT_TRUE( a.compare_exchange_weak( expected, true, order, atomics::memory_order_relaxed));
413 EXPECT_FALSE( expected );
414 EXPECT_FALSE( a.compare_exchange_weak( expected, false, order, atomics::memory_order_relaxed));
415 EXPECT_TRUE( expected );
416 EXPECT_TRUE( a.load( oLoad ));
419 a.store( false, oStore );
422 EXPECT_TRUE( a.compare_exchange_strong( expected, true, order, atomics::memory_order_relaxed));
423 EXPECT_FALSE( expected );
424 EXPECT_FALSE( a.compare_exchange_strong( expected, false, order, atomics::memory_order_relaxed));
425 EXPECT_TRUE( expected );
427 EXPECT_TRUE( a.load( oLoad ));
429 EXPECT_TRUE( a.exchange( false, oExchange ));
433 template <typename Atomic>
434 void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
436 atomics::memory_order oLoad = convert_to_load_order(order);
437 atomics::memory_order oStore = convert_to_store_order(order);
440 a.store( (void *) arr, oStore );
441 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );
444 EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
445 EXPECT_EQ( p, arr + 0 );
446 EXPECT_EQ( *reinterpret_cast<char *>(p), 1 );
447 EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
448 EXPECT_EQ( p, arr + 5 );
449 EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );
451 EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
452 EXPECT_EQ( p, arr + 5 );
453 EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );
454 EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
455 EXPECT_EQ( p, arr + 3 );
456 EXPECT_EQ( *reinterpret_cast<char *>(p), 4 );
458 EXPECT_EQ( reinterpret_cast<char *>(a.exchange( (void *) arr, order )), arr + 3 );
459 EXPECT_EQ( reinterpret_cast<char *>(a.load( oLoad )), arr );
460 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );
462 for ( char i = 1; i < aSize; ++i ) {
463 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i );
464 a.fetch_add( 1, order );
465 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i + 1 );
468 for ( char i = aSize; i > 1; --i ) {
469 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i );
470 a.fetch_sub( 1, order );
471 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i - 1 );
475 template <bool Volatile>
476 void do_test_atomic_pointer_void()
478 typedef typename add_volatile<atomics::atomic< void *>, Volatile>::type atomic_pointer;
481 const char aSize = sizeof(arr)/sizeof(arr[0]);
482 for ( char i = 0; i < aSize; ++i ) {
483 arr[static_cast<unsigned>( i )] = i + 1;
489 a.store( (void *) arr );
490 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), 1 );
493 EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5)));
494 EXPECT_EQ( p, arr + 0 );
495 EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3)));
496 EXPECT_EQ( p, arr + 5 );
498 EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3)));
499 EXPECT_EQ( p, arr + 5 );
500 EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5)));
501 EXPECT_EQ( p, arr + 3 );
503 EXPECT_EQ( reinterpret_cast<char *>( a.exchange( (void *) arr )), arr + 3 );
504 EXPECT_EQ( reinterpret_cast<char *>( a.load()), arr );
505 EXPECT_EQ( *reinterpret_cast<char *>( a.load()), 1 );
507 for ( char i = 1; i < aSize; ++i ) {
508 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i );
510 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i + 1 );
513 for ( char i = aSize; i > 1; --i ) {
514 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i );
516 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i - 1 );
519 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
520 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
521 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
522 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel );
523 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst );
526 template <typename Atomic, typename Integral>
527 void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order )
529 typedef Integral integral_type;
530 atomics::memory_order oLoad = convert_to_load_order(order);
531 atomics::memory_order oStore = convert_to_store_order(order);
534 a.store( arr, oStore );
535 EXPECT_EQ( *a.load( oLoad ), 1 );
538 EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5, order, atomics::memory_order_relaxed ));
539 EXPECT_EQ( p, arr + 0 );
541 EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3, order, atomics::memory_order_relaxed ));
542 EXPECT_EQ( p, arr + 5 );
545 EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3, order, atomics::memory_order_relaxed ));
546 EXPECT_EQ( p, arr + 5 );
548 EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5, order, atomics::memory_order_relaxed ));
549 EXPECT_EQ( p, arr + 3 );
552 EXPECT_EQ( a.exchange( arr, order ), arr + 3 );
553 EXPECT_EQ( a.load( oLoad ), arr );
554 EXPECT_EQ( *a.load( oLoad ), 1 );
556 for ( integral_type i = 1; i < aSize; ++i ) {
557 integral_type * p = a.load();
559 EXPECT_EQ( a.fetch_add( 1, order ), p );
560 EXPECT_EQ( *a.load( oLoad ), i + 1 );
563 for ( integral_type i = aSize; i > 1; --i ) {
564 integral_type * p = a.load();
566 EXPECT_EQ( a.fetch_sub( 1, order ), p );
567 EXPECT_EQ( *a.load( oLoad ), i - 1 );
571 template <typename Integral, bool Volatile>
572 void test_atomic_pointer_for()
574 typedef Integral integral_type;
575 typedef typename add_volatile<atomics::atomic< integral_type *>, Volatile>::type atomic_pointer;
577 integral_type arr[8];
578 const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
579 for ( integral_type i = 0; i < aSize; ++i ) {
580 arr[static_cast<size_t>(i)] = i + 1;
587 EXPECT_EQ( *a.load(), 1 );
590 EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5 ));
591 EXPECT_EQ( p, arr + 0 );
593 EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3 ));
594 EXPECT_EQ( p, arr + 5 );
597 EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3 ));
598 EXPECT_EQ( p, arr + 5 );
600 EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5 ));
601 EXPECT_EQ( p, arr + 3 );
604 EXPECT_EQ( a.exchange( arr ), arr + 3 );
605 EXPECT_EQ( a.load(), arr );
606 EXPECT_EQ( *a.load(), 1 );
608 for ( integral_type i = 1; i < aSize; ++i ) {
609 integral_type * p = a.load();
611 integral_type * pa = a.fetch_add( 1 );
613 EXPECT_EQ( *a.load(), i + 1 );
616 for ( integral_type i = aSize; i > 1; --i ) {
617 integral_type * p = a.load();
619 EXPECT_EQ( a.fetch_sub( 1 ), p );
620 EXPECT_EQ( *a.load(), i - 1 );
623 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed );
624 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire );
625 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release );
626 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel );
627 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst );
631 void test_atomic_flag()
633 // Array to test different alignment
635 atomics::atomic_flag flags[8];
636 for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
637 do_test_atomic_flag( flags[i] );
640 void test_atomic_flag_volatile()
642 // Array to test different alignment
644 atomics::atomic_flag volatile flags[8];
645 for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
646 do_test_atomic_flag( flags[i] );
649 template <typename AtomicBool>
650 void test_atomic_bool_()
652 // Array to test different alignment
655 for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
656 do_test_atomic_bool( a[i] );
658 do_test_atomic_bool( a[i], atomics::memory_order_relaxed );
659 //do_test_atomic_bool( a[i], atomics::memory_order_consume );
660 do_test_atomic_bool( a[i], atomics::memory_order_acquire );
661 do_test_atomic_bool( a[i], atomics::memory_order_release );
662 do_test_atomic_bool( a[i], atomics::memory_order_acq_rel );
663 do_test_atomic_bool( a[i], atomics::memory_order_seq_cst );
667 void test_atomic_bool()
669 test_atomic_bool_< atomics::atomic<bool> >();
671 void test_atomic_bool_volatile()
673 test_atomic_bool_< atomics::atomic<bool> volatile >();
677 TEST_F( cxx11_atomic_class, atomic_char )
679 test_atomic_integral<char>();
682 TEST_F( cxx11_atomic_class, atomic_signed_char )
684 test_atomic_integral<signed char>();
687 TEST_F( cxx11_atomic_class, atomic_unsigned_char )
689 test_atomic_integral<unsigned char>();
692 TEST_F( cxx11_atomic_class, atomic_short_int )
694 test_atomic_integral<short int>();
697 TEST_F( cxx11_atomic_class, atomic_signed_short_int )
699 test_atomic_integral<signed short int>();
702 TEST_F( cxx11_atomic_class, atomic_unsigned_short_int )
704 test_atomic_integral<unsigned short int>();
707 TEST_F( cxx11_atomic_class, atomic_int )
709 test_atomic_integral<int>();
712 TEST_F( cxx11_atomic_class, atomic_unsigned_int )
714 test_atomic_integral<unsigned int>();
717 TEST_F( cxx11_atomic_class, atomic_long )
719 test_atomic_integral<long>();
722 TEST_F( cxx11_atomic_class, atomic_unsigned_long )
724 test_atomic_integral<unsigned long>();
727 TEST_F( cxx11_atomic_class, atomic_long_long )
729 test_atomic_integral<long long>();
732 TEST_F( cxx11_atomic_class, atomic_unsigned_long_long )
734 test_atomic_integral<unsigned long long>();
737 TEST_F( cxx11_atomic_class, atomic_char_volatile )
739 test_atomic_integral_volatile<char>();
742 TEST_F( cxx11_atomic_class, atomic_signed_char_volatile )
744 test_atomic_integral_volatile<signed char>();
747 TEST_F( cxx11_atomic_class, atomic_unsigned_char_volatile )
749 test_atomic_integral_volatile<unsigned char>();
752 TEST_F( cxx11_atomic_class, atomic_short_int_volatile )
754 test_atomic_integral_volatile<short int>();
757 TEST_F( cxx11_atomic_class, atomic_signed_short_int_volatile )
759 test_atomic_integral_volatile<signed short int>();
762 TEST_F( cxx11_atomic_class, atomic_unsigned_short_int_volatile )
764 test_atomic_integral_volatile<unsigned short int>();
767 TEST_F( cxx11_atomic_class, atomic_int_volatile )
769 test_atomic_integral_volatile<int>();
772 TEST_F( cxx11_atomic_class, atomic_unsigned_int_volatile )
774 test_atomic_integral_volatile<unsigned int>();
777 TEST_F( cxx11_atomic_class, atomic_long_volatile )
779 test_atomic_integral_volatile<long>();
782 TEST_F( cxx11_atomic_class, atomic_unsigned_long_volatile )
784 test_atomic_integral_volatile<unsigned long>();
787 TEST_F( cxx11_atomic_class, atomic_long_long_volatile )
789 test_atomic_integral_volatile<long long>();
792 TEST_F( cxx11_atomic_class, atomic_unsigned_long_long_volatile )
794 test_atomic_integral_volatile<unsigned long long>();
#if !( CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 40000 )
    // clang < 4.0 errors on atomic<void*> fetch_add/fetch_sub (gcc extension)
    TEST_F( cxx11_atomic_class, atomic_pointer_void )
    {
        do_test_atomic_pointer_void<false>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_void_volatile )
    {
        do_test_atomic_pointer_void<true>();
    }
#endif
810 TEST_F( cxx11_atomic_class, atomic_pointer_char )
812 test_atomic_pointer_for<char, false>();
815 TEST_F( cxx11_atomic_class, atomic_pointer_char_volatile )
817 test_atomic_pointer_for<char, true>();
820 TEST_F( cxx11_atomic_class, atomic_pointer_short )
822 test_atomic_pointer_for<short int, false>();
825 TEST_F( cxx11_atomic_class, atomic_pointer_short_volatile )
827 test_atomic_pointer_for<short int, true>();
830 TEST_F( cxx11_atomic_class, atomic_pointer_int )
832 test_atomic_pointer_for<int, false>();
835 TEST_F( cxx11_atomic_class, atomic_pointer_int_volatile )
837 test_atomic_pointer_for<int, true>();
840 TEST_F( cxx11_atomic_class, atomic_pointer_long )
842 test_atomic_pointer_for<long, false>();
845 TEST_F( cxx11_atomic_class, atomic_pointer_long_volatile )
847 test_atomic_pointer_for<long, true>();
850 TEST_F( cxx11_atomic_class, atomic_pointer_long_long )
852 test_atomic_pointer_for<long long, false>();
855 TEST_F( cxx11_atomic_class, atomic_pointer_long_long_volatile )
857 test_atomic_pointer_for<long long, true>();