Fix GCC 4.8 <atomic> incompatibility
[libcds.git] / tests / test-hdr / misc / cxx11_atomic_class.cpp
1 //$$CDS-header$$
2
3 #include "cppunit/cppunit_proxy.h"
4
5 #include <cds/algo/atomic.h>
6
7 #include "misc/cxx11_convert_memory_order.h"
8
9 namespace misc {
10     class cxx11_atomic_class: public CppUnitMini::TestCase
11     {
12         template <typename AtomicFlag>
13         void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order )
14         {
15             atomics::memory_order mo_clear = convert_to_store_order(order);
16             for ( int i = 0; i < 5; ++i ) {
17                 CPPUNIT_ASSERT( !f.test_and_set( order ));
18                 CPPUNIT_ASSERT( f.test_and_set( order ) );
19                 f.clear( mo_clear );
20             }
21         }
22
23         template <typename AtomicFlag>
24         void do_test_atomic_flag( AtomicFlag& f)
25         {
26             f.clear();
27
28             for ( int i = 0; i < 5; ++i ) {
29                 CPPUNIT_ASSERT( !f.test_and_set());
30                 CPPUNIT_ASSERT( f.test_and_set() );
31                 f.clear();
32             }
33
34             do_test_atomic_flag_mo( f, atomics::memory_order_relaxed );
35             do_test_atomic_flag_mo( f, atomics::memory_order_consume );
36             do_test_atomic_flag_mo( f, atomics::memory_order_acquire );
37             do_test_atomic_flag_mo( f, atomics::memory_order_release );
38             do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel );
39             do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst );
40         }
41
        template <class Atomic, typename Integral>
        void do_test_atomic_type(Atomic& a)
        {
            // Generic atomic test with default (seq_cst) ordering: store/load,
            // exchange, and weak/strong compare-exchange, placing the value 42
            // shifted into each byte of the integral type in turn.
            typedef Integral    integral_type;

            CPPUNIT_ASSERT( a.is_lock_free() );
            a.store( (integral_type) 0 );
            CPPUNIT_ASSERT( a == 0 );
            CPPUNIT_ASSERT( a.load() == 0 );

            // exchange returns the previous value.
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = integral_type(42) << (nByte * 8);
                CPPUNIT_ASSERT( a.exchange( n ) == 0 );
                CPPUNIT_ASSERT( a == n );
                CPPUNIT_ASSERT( a.exchange( (integral_type) 0 ) == n );
                CPPUNIT_ASSERT( a.load() == 0 );
            }

            // Weak CAS: on success "expected" is left untouched; on failure it
            // receives the actual current value.
            // NOTE(review): weak CAS is allowed to fail spuriously per the
            // standard; this test assumes it succeeds here — confirm on targets
            // with LL/SC-based atomics.
            integral_type prev = a.load();
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = integral_type(42) << (nByte * 8);
                integral_type expected = prev;

                CPPUNIT_ASSERT( a.compare_exchange_weak( expected, n));
                CPPUNIT_ASSERT( expected  == prev );
                CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, n));
                CPPUNIT_ASSERT( expected  == n );

                prev = n;
                CPPUNIT_ASSERT( a == n );
            }

            a = (integral_type) 0;

            // Strong CAS follows the same expected-value protocol, with no
            // spurious failures.
            prev = a;
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = integral_type(42) << (nByte * 8);
                integral_type expected = prev;

                CPPUNIT_ASSERT( a.compare_exchange_strong( expected, n));
                CPPUNIT_ASSERT( expected  == prev );
                CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, n));
                CPPUNIT_ASSERT( expected  == n );

                prev = n;
                CPPUNIT_ASSERT( a.load() == n );
            }

            CPPUNIT_ASSERT( a.exchange( (integral_type) 0 ) == prev );
        }
92
93         template <class Atomic, typename Integral>
94         void do_test_atomic_integral(Atomic& a)
95         {
96             do_test_atomic_type< Atomic, Integral >(a);
97
98             typedef Integral    integral_type;
99
100             // fetch_xxx testing
101             a.store( (integral_type) 0 );
102
103             // fetch_add
104             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
105             {
106                 integral_type prev = a.load();
107                 integral_type n = integral_type(42) << (nByte * 8);
108
109                 CPPUNIT_ASSERT( a.fetch_add(n) == prev);
110             }
111
112             // fetch_sub
113             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
114             {
115                 integral_type prev = a.load();
116                 integral_type n = integral_type(42) << ((nByte - 1) * 8);
117
118                 CPPUNIT_ASSERT( a.fetch_sub(n) == prev);
119             }
120             CPPUNIT_ASSERT( a.load() == 0 );
121
122             // fetch_or / fetc_xor / fetch_and
123             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
124             {
125                 integral_type prev = a.load()  ;;
126                 integral_type mask = 1 << nBit;
127
128                 CPPUNIT_ASSERT( a.fetch_or( mask ) == prev );
129                 prev = a.load();
130                 CPPUNIT_ASSERT( ( prev & mask)  == mask);
131
132                 CPPUNIT_ASSERT( a.fetch_and( (integral_type) ~mask ) == prev );
133                 prev = a.load();
134                 CPPUNIT_ASSERT( integral_type(prev & mask) == integral_type(0));
135
136                 CPPUNIT_ASSERT( a.fetch_xor( mask ) == prev );
137                 prev = a.load();
138                 CPPUNIT_ASSERT( integral_type( prev & mask)  == mask);
139             }
140             CPPUNIT_ASSERT( a.load() == (integral_type) -1 );
141
142
143             // op= testing
144             a = (integral_type) 0;
145
146             // +=
147             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
148             {
149                 integral_type prev = a;
150                 integral_type n = integral_type(42) << (nByte * 8);
151
152                 CPPUNIT_ASSERT( (a += n) == (prev + n));
153             }
154
155             // -=
156             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
157             {
158                 integral_type prev = a;
159                 integral_type n = integral_type(42) << ((nByte - 1) * 8);
160
161                 CPPUNIT_ASSERT( (a -= n) == prev - n );
162             }
163             CPPUNIT_ASSERT( a.load() == 0 );
164
165             // |= / ^= / &=
166             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
167             {
168                 integral_type prev = a;
169                 integral_type mask = integral_type(1) << nBit;
170
171                 CPPUNIT_ASSERT( (a |= mask ) == (prev | mask ));
172                 prev = a;
173                 CPPUNIT_ASSERT( ( prev & mask)  == mask);
174
175                 CPPUNIT_ASSERT( (a &= (integral_type) ~mask ) == ( prev & (integral_type) ~mask ));
176                 prev = a;
177                 CPPUNIT_ASSERT( ( prev & mask)  == 0);
178
179                 CPPUNIT_ASSERT( (a ^= mask ) == (prev ^ mask ));
180                 prev = a;
181                 CPPUNIT_ASSERT( ( prev & mask)  == mask);
182             }
183             CPPUNIT_ASSERT( a == (integral_type) -1 );
184         }
185
        template <class Atomic, typename Integral>
        void do_test_atomic_type( Atomic& a, atomics::memory_order order )
        {
            // Ordered variant of the generic test: RMW operations use "order",
            // while plain loads/stores use compatible orders derived from it.
            typedef Integral    integral_type;

            const atomics::memory_order oLoad = convert_to_load_order( order );
            const atomics::memory_order oStore = convert_to_store_order( order );

            CPPUNIT_ASSERT( a.is_lock_free() );
            a.store((integral_type) 0, oStore );
            CPPUNIT_ASSERT( a == 0 );
            CPPUNIT_ASSERT( a.load( oLoad ) == 0 );

            // exchange returns the previous value.
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = integral_type(42) << (nByte * 8);
                CPPUNIT_ASSERT( a.exchange( n, order ) == 0 );
                CPPUNIT_ASSERT( a.load( oLoad ) == n );
                CPPUNIT_ASSERT( a.exchange( (integral_type) 0, order ) == n );
                CPPUNIT_ASSERT( a.load( oLoad ) == 0 );
            }

            // Weak CAS: success keeps "expected"; failure rewrites it with the
            // current value.
            // NOTE(review): assumes no spurious weak-CAS failure on the test
            // platform (permitted by the standard) — confirm for new targets.
            integral_type prev = a.load( oLoad );
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = integral_type(42) << (nByte * 8);
                integral_type expected = prev;

                CPPUNIT_ASSERT( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
                CPPUNIT_ASSERT( expected  == prev );
                CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
                CPPUNIT_ASSERT( expected  == n );

                prev = n;
                CPPUNIT_ASSERT( a.load( oLoad ) == n );
            }

            a.store( (integral_type) 0, oStore );

            // Strong CAS: same protocol, no spurious failures.
            prev = a.load( oLoad );
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = integral_type(42) << (nByte * 8);
                integral_type expected = prev;

                CPPUNIT_ASSERT( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
                CPPUNIT_ASSERT( expected  == prev );
                CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
                CPPUNIT_ASSERT( expected  == n );

                prev = n;
                CPPUNIT_ASSERT( a.load( oLoad ) == n );
            }

            CPPUNIT_ASSERT( a.exchange( (integral_type) 0, order ) == prev );
        }
239
240         template <class Atomic, typename Integral>
241         void do_test_atomic_integral( Atomic& a, atomics::memory_order order )
242         {
243             do_test_atomic_type< Atomic, Integral >( a, order );
244
245             typedef Integral    integral_type;
246
247             const atomics::memory_order oLoad = convert_to_load_order( order );
248             const atomics::memory_order oStore = convert_to_store_order( order );
249
250             // fetch_xxx testing
251             a.store( (integral_type) 0, oStore );
252
253             // fetch_add
254             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
255             {
256                 integral_type prev = a.load( oLoad );
257                 integral_type n = integral_type(42) << (nByte * 8);
258
259                 CPPUNIT_ASSERT( a.fetch_add( n, order) == prev);
260             }
261
262             // fetch_sub
263             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
264             {
265                 integral_type prev = a.load( oLoad );
266                 integral_type n = integral_type(42) << ((nByte - 1) * 8);
267
268                 CPPUNIT_ASSERT( a.fetch_sub( n, order ) == prev);
269             }
270             CPPUNIT_ASSERT( a.load( oLoad ) == 0 );
271
272             // fetch_or / fetc_xor / fetch_and
273             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
274             {
275                 integral_type prev = a.load( oLoad )  ;;
276                 integral_type mask = 1 << nBit;
277
278                 CPPUNIT_ASSERT( a.fetch_or( mask, order ) == prev );
279                 prev = a.load( oLoad );
280                 CPPUNIT_ASSERT( ( prev & mask)  == mask);
281
282                 CPPUNIT_ASSERT( a.fetch_and( (integral_type) ~mask, order ) == prev );
283                 prev = a.load( oLoad );
284                 CPPUNIT_ASSERT( ( prev & mask)  == 0);
285
286                 CPPUNIT_ASSERT( a.fetch_xor( mask, order ) == prev );
287                 prev = a.load( oLoad );
288                 CPPUNIT_ASSERT( ( prev & mask)  == mask);
289             }
290             CPPUNIT_ASSERT( a.load( oLoad ) == (integral_type) -1 );
291         }
292
293
294
        template <typename Atomic, typename Integral>
        void test_atomic_integral_(Atomic& a)
        {
            // Drives the integral suite with the default ordering, then with
            // each explicit memory order.
            do_test_atomic_integral<Atomic, Integral >(a);

            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_relaxed );
#if !(CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION < 40900)
            // Skipped on GCC before 4.9 — presumably its <atomic> mishandles
            // memory_order_consume here (see "Fix GCC 4.8 <atomic>
            // incompatibility"); confirm against the GCC changelog.
            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_consume );
#endif
            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acquire );
            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_release );
            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acq_rel );
            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_seq_cst );
        }
309
310         template <typename Integral>
311         void test_atomic_integral()
312         {
313             typedef atomics::atomic<Integral> atomic_type;
314
315             atomic_type a[8];
316             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
317                 test_atomic_integral_<atomic_type, Integral>( a[i] );
318             }
319         }
320         template <typename Integral>
321         void test_atomic_integral_volatile()
322         {
323             typedef atomics::atomic<Integral> volatile atomic_type;
324
325             atomic_type a[8];
326             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
327                 test_atomic_integral_<atomic_type, Integral>( a[i] );
328             }
329         }
330
        template <class AtomicBool>
        void do_test_atomic_bool( AtomicBool& a )
        {
            // atomic<bool> with default (seq_cst) ordering: store/load,
            // exchange, and weak/strong CAS round-trips.
            CPPUNIT_ASSERT( a.is_lock_free() );
            a.store( false );
            CPPUNIT_ASSERT( a == false );
            CPPUNIT_ASSERT( a.load() == false );

            // exchange returns the previous value.
            CPPUNIT_ASSERT( a.exchange( true ) == false );
            CPPUNIT_ASSERT( a.load() == true );
            CPPUNIT_ASSERT( a.exchange( false ) == true );
            CPPUNIT_ASSERT( a.load() == false );

            // Weak CAS: success keeps "expected"; failure rewrites it with the
            // current value.
            bool expected = false;
            CPPUNIT_ASSERT( a.compare_exchange_weak( expected, true));
            CPPUNIT_ASSERT( expected  == false );
            CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, false));
            CPPUNIT_ASSERT( expected  == true );
            CPPUNIT_ASSERT( a.load() == true );

            a.store( false );

            // Strong CAS follows the same protocol.
            expected = false;
            CPPUNIT_ASSERT( a.compare_exchange_strong( expected, true));
            CPPUNIT_ASSERT( expected  == false );
            CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, false));
            CPPUNIT_ASSERT( expected  == true );

            CPPUNIT_ASSERT( a.load() == true );

            CPPUNIT_ASSERT( a.exchange( false ) == true );
        }
363
        template <class AtomicBool>
        void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order )
        {
            // atomic<bool> with explicit ordering; load/store/exchange orders
            // are derived from the RMW order under test.
            const atomics::memory_order oLoad = convert_to_load_order( order );
            const atomics::memory_order oStore = convert_to_store_order( order );
            const atomics::memory_order oExchange = convert_to_exchange_order( order );

            CPPUNIT_ASSERT( a.is_lock_free() );
            a.store( false, oStore );
            CPPUNIT_ASSERT( a == false );
            CPPUNIT_ASSERT( a.load( oLoad ) == false );

            // exchange returns the previous value.
            CPPUNIT_ASSERT( a.exchange( true, oExchange ) == false );
            CPPUNIT_ASSERT( a.load( oLoad ) == true );
            CPPUNIT_ASSERT( a.exchange( false, oExchange ) == true );
            CPPUNIT_ASSERT( a.load( oLoad ) == false );

            // Weak CAS: success keeps "expected"; failure rewrites it with the
            // current value.
            bool expected = false;
            CPPUNIT_ASSERT( a.compare_exchange_weak( expected, true, order, atomics::memory_order_relaxed));
            CPPUNIT_ASSERT( expected  == false );
            CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, false, order, atomics::memory_order_relaxed));
            CPPUNIT_ASSERT( expected  == true );
            CPPUNIT_ASSERT( a.load( oLoad ) == true );

            //a = bool(false);
            a.store( false, oStore );

            // Strong CAS follows the same protocol.
            expected = false;
            CPPUNIT_ASSERT( a.compare_exchange_strong( expected, true, order, atomics::memory_order_relaxed));
            CPPUNIT_ASSERT( expected  == false );
            CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, false, order, atomics::memory_order_relaxed));
            CPPUNIT_ASSERT( expected  == true );

            CPPUNIT_ASSERT( a.load( oLoad ) == true );

            CPPUNIT_ASSERT( a.exchange( false, oExchange ) == true );
        }
401
402
        template <typename Atomic>
        void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
        {
            // atomic<void*> with explicit ordering. The caller fills
            // arr[i] == i + 1, so the pointed-to byte identifies the position
            // the pointer currently refers to.
            // NOTE(review): fetch_add/fetch_sub on atomic<void*> is not ISO
            // C++; presumably cds' atomic layer advances it byte-wise — confirm
            // in cds/algo/atomic.h.
            atomics::memory_order oLoad = convert_to_load_order(order);
            atomics::memory_order oStore = convert_to_store_order(order);
            void *  p;

            a.store( (void *) arr, oStore );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load( oLoad )) == 1 );

            p = arr;
            // Successful weak CAS keeps "expected" (p); a failed one rewrites
            // it with the current value.
            CPPUNIT_ASSERT( a.compare_exchange_weak( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
            CPPUNIT_ASSERT( p == arr + 0 );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 1 );
            CPPUNIT_ASSERT( !a.compare_exchange_weak( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
            CPPUNIT_ASSERT( p == arr + 5 );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 6 );

            // Strong CAS follows the same protocol.
            CPPUNIT_ASSERT( a.compare_exchange_strong( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
            CPPUNIT_ASSERT( p == arr + 5 );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 6 );
            CPPUNIT_ASSERT( !a.compare_exchange_strong( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
            CPPUNIT_ASSERT( p == arr + 3 );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 4 );

            CPPUNIT_ASSERT( reinterpret_cast<char *>(a.exchange( (void *) arr, order )) == arr + 3 );
            CPPUNIT_ASSERT( reinterpret_cast<char *>(a.load( oLoad )) == arr );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load( oLoad )) == 1 );

            // Walk the pointer forward one byte at a time...
            for ( char i = 1; i < aSize; ++i ) {
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load( oLoad )) == i );
                CPPUNIT_ASSERT( a.fetch_add( 1, order ));
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load( oLoad )) == i + 1 );
            }

            // ...and back to the beginning.
            for ( char i = aSize; i > 1; --i ) {
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load( oLoad )) == i  );
                CPPUNIT_ASSERT( a.fetch_sub( 1, order ));
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load( oLoad )) == i - 1 );
            }
        }
444
        template <bool Volatile>
        void do_test_atomic_pointer_void()
        {
            // atomic<void*> driver, optionally through a volatile-qualified
            // atomic. Fills arr with 1..8, runs the default-ordering checks,
            // then repeats with each explicit memory order.
            typedef typename add_volatile<atomics::atomic< void *>, Volatile>::type    atomic_pointer;

            char   arr[8];
            const char aSize = sizeof(arr)/sizeof(arr[0]);
            for ( char i = 0; i < aSize; ++i ) {
                arr[unsigned(i)] = i + 1;
            }

            atomic_pointer  a;
            void *  p;

#if CDS_BUILD_BITS == 32 && !( CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION == 40700 )
            /* GCC 4.7.0 has an linktime error in 32bit x86 mode:

            ../tests/test-hdr/misc/cxx11_atomic_class.o: In function `std::__atomic_base<void*>::is_lock_free() const':
            /usr/local/lib/gcc/x86_64-unknown-linux-gnu/4.7.0/../../../../include/c++/4.7.0/bits/atomic_base.h:719: undefined reference to `__atomic_is_lock_free'

            ../tests/test-hdr/misc/cxx11_atomic_class.o: In function `std::__atomic_base<void*>::is_lock_free() const volatile':
            /usr/local/lib/gcc/x86_64-unknown-linux-gnu/4.7.0/../../../../include/c++/4.7.0/bits/atomic_base.h:723: undefined reference to `__atomic_is_lock_free'

            */
            CPPUNIT_ASSERT( a.is_lock_free() );
#endif

            a.store( (void *) arr );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load()) == 1 );

            // Weak CAS: success keeps "expected" (p); failure rewrites it with
            // the current value.
            p = arr;
            CPPUNIT_ASSERT( a.compare_exchange_weak( p, (void *)(arr + 5) ));
            CPPUNIT_ASSERT( p == arr + 0 );
            CPPUNIT_ASSERT( !a.compare_exchange_weak( p, (void *)(arr + 3) ));
            CPPUNIT_ASSERT( p == arr + 5 );

            // Strong CAS follows the same protocol.
            CPPUNIT_ASSERT( a.compare_exchange_strong( p, (void *)(arr + 3) ));
            CPPUNIT_ASSERT( p == arr + 5 );
            CPPUNIT_ASSERT( !a.compare_exchange_strong( p, (void *)(arr + 5) ));
            CPPUNIT_ASSERT( p == arr + 3 );

            CPPUNIT_ASSERT( reinterpret_cast<char *>( a.exchange( (void *) arr )) == arr + 3 );
            CPPUNIT_ASSERT( reinterpret_cast<char *>( a.load()) == arr );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>( a.load()) == 1 );

            // Walk the pointer forward one byte at a time via fetch_add...
            for ( char i = 1; i < aSize; ++i ) {
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load()) == i );
                CPPUNIT_ASSERT( a.fetch_add( 1 ));
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load()) == i + 1 );
            }

            // ...and back again via fetch_sub.
            for ( char i = aSize; i > 1; --i ) {
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load()) == i  );
                CPPUNIT_ASSERT( a.fetch_sub( 1 ));
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load()) == i - 1 );
            }

            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_consume );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst );
        }
509
510         template <typename Atomic, typename Integral>
511         void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order )
512         {
513             typedef Integral integral_type;
514             atomics::memory_order oLoad = convert_to_load_order(order);
515             atomics::memory_order oStore = convert_to_store_order(order);
516             integral_type *  p;
517
518             a.store( arr, oStore );
519             CPPUNIT_ASSERT( *a.load( oLoad ) == 1 );
520
521             p = arr;
522             CPPUNIT_ASSERT( a.compare_exchange_weak( p, arr + 5, order, atomics::memory_order_relaxed ));
523             CPPUNIT_ASSERT( p == arr + 0 );
524             CPPUNIT_ASSERT( *p == 1 );
525             CPPUNIT_ASSERT( !a.compare_exchange_weak( p, arr + 3, order, atomics::memory_order_relaxed ));
526             CPPUNIT_ASSERT( p == arr + 5 );
527             CPPUNIT_ASSERT( *p == 6 );
528
529             CPPUNIT_ASSERT( a.compare_exchange_strong( p, arr + 3, order, atomics::memory_order_relaxed ));
530             CPPUNIT_ASSERT( p == arr + 5 );
531             CPPUNIT_ASSERT( *p == 6 );
532             CPPUNIT_ASSERT( !a.compare_exchange_strong( p, arr + 5, order, atomics::memory_order_relaxed ));
533             CPPUNIT_ASSERT( p == arr + 3 );
534             CPPUNIT_ASSERT( *p == 4 );
535
536             CPPUNIT_ASSERT( a.exchange( arr, order ) == arr + 3 );
537             CPPUNIT_ASSERT( a.load( oLoad ) == arr );
538             CPPUNIT_ASSERT( *a.load( oLoad ) == 1 );
539
540             for ( integral_type i = 1; i < aSize; ++i ) {
541                 integral_type * p = a.load();
542                 CPPUNIT_ASSERT( *p == i );
543                 CPPUNIT_ASSERT( a.fetch_add( 1, order ) == p  );
544                 CPPUNIT_ASSERT( *a.load( oLoad ) == i + 1 );
545             }
546
547             for ( integral_type i = aSize; i > 1; --i ) {
548                 integral_type * p = a.load();
549                 CPPUNIT_ASSERT( *p == i  );
550                 CPPUNIT_ASSERT( a.fetch_sub( 1, order ) == p );
551                 CPPUNIT_ASSERT( *a.load( oLoad ) == i - 1 );
552             }
553         }
554
        template <typename Integral, bool Volatile>
        void test_atomic_pointer_for()
        {
            // Typed-pointer driver, optionally volatile-qualified. Fills
            // arr[i] == i + 1, runs default-ordering checks, then repeats with
            // each explicit memory order.
            typedef Integral integral_type;
            typedef typename add_volatile<atomics::atomic< integral_type *>, Volatile>::type    atomic_pointer;

            integral_type   arr[8];
            const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
            for ( integral_type i = 0; i < aSize; ++i ) {
                arr[size_t(i)] = i + 1;
            }

            atomic_pointer  a;
            integral_type *  p;

            a.store( arr );
            CPPUNIT_ASSERT( *a.load() == 1 );

            p = arr;
            // Successful CAS keeps "expected" (p); a failed one rewrites it
            // with the current value.
            CPPUNIT_ASSERT( a.compare_exchange_weak( p, arr + 5 ));
            CPPUNIT_ASSERT( p == arr + 0 );
            CPPUNIT_ASSERT( *p == 1 );
            CPPUNIT_ASSERT( !a.compare_exchange_weak( p, arr + 3 ));
            CPPUNIT_ASSERT( p == arr + 5 );
            CPPUNIT_ASSERT( *p == 6 );

            CPPUNIT_ASSERT( a.compare_exchange_strong( p, arr + 3 ));
            CPPUNIT_ASSERT( p == arr + 5 );
            CPPUNIT_ASSERT( *p == 6 );
            CPPUNIT_ASSERT( !a.compare_exchange_strong( p, arr + 5 ));
            CPPUNIT_ASSERT( p == arr + 3 );
            CPPUNIT_ASSERT( *p == 4 );

            CPPUNIT_ASSERT( a.exchange( arr ) == arr + 3 );
            CPPUNIT_ASSERT( a.load() == arr );
            CPPUNIT_ASSERT( *a.load() == 1 );

            // fetch_add(1) advances one element per step.
            // NOTE(review): the loop-local "p" below shadows the outer "p" —
            // harmless here, but worth renaming.
            for ( integral_type i = 1; i < aSize; ++i ) {
                integral_type * p = a.load();
                CPPUNIT_ASSERT( *p == i );
                integral_type * pa = a.fetch_add( 1 );
                CPPUNIT_ASSERT_EX( pa == p, "pa=" << ((uintptr_t) pa) << " p=" << ((uintptr_t) p) );
                CPPUNIT_ASSERT( *a.load() == i + 1 );
            }

            // fetch_sub(1) walks back to the start.
            for ( integral_type i = aSize; i > 1; --i ) {
                integral_type * p = a.load();
                CPPUNIT_ASSERT( *p == i  );
                CPPUNIT_ASSERT( a.fetch_sub( 1 ) == p );
                CPPUNIT_ASSERT( *a.load() == i - 1 );
            }

            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_consume );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst );
        }
614
615     public:
616         void test_atomic_flag()
617         {
618             // Array to test different alignment
619
620             atomics::atomic_flag flags[8];
621             for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
622                 do_test_atomic_flag( flags[i] );
623         }
624
625         void test_atomic_flag_volatile()
626         {
627             // Array to test different alignment
628
629             atomics::atomic_flag volatile flags[8];
630             for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
631                 do_test_atomic_flag( flags[i] );
632         }
633
634         template <typename AtomicBool>
635         void test_atomic_bool_()
636         {
637             // Array to test different alignment
638             AtomicBool  a[8];
639
640             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
641                 do_test_atomic_bool( a[i] );
642
643                 do_test_atomic_bool( a[i], atomics::memory_order_relaxed );
644                 do_test_atomic_bool( a[i], atomics::memory_order_consume );
645                 do_test_atomic_bool( a[i], atomics::memory_order_acquire );
646                 do_test_atomic_bool( a[i], atomics::memory_order_release );
647                 do_test_atomic_bool( a[i], atomics::memory_order_acq_rel );
648                 do_test_atomic_bool( a[i], atomics::memory_order_seq_cst );
649             }
650         }
651
        // Dispatchers instantiating the bool suite for plain and volatile atomics.
        void test_atomic_bool()
        {
            test_atomic_bool_< atomics::atomic<bool> >();
        }
        void test_atomic_bool_volatile()
        {
            test_atomic_bool_< atomics::atomic<bool> volatile >();
        }
660
        // Integral specializations (non-volatile atomics).
        void test_atomic_char()                 { test_atomic_integral<char>(); }
        void test_atomic_signed_char()          { test_atomic_integral<signed char>(); }
        void test_atomic_unsigned_char()        { test_atomic_integral<unsigned char>(); }
        void test_atomic_short_int()            { test_atomic_integral<short int>(); }
        void test_atomic_unsigned_short_int()   { test_atomic_integral<unsigned short int>(); }
        void test_atomic_int()                  { test_atomic_integral<int>(); }
        void test_atomic_unsigned_int()         { test_atomic_integral<unsigned int>(); }
        void test_atomic_long()                 { test_atomic_integral<long>(); }
        void test_atomic_unsigned_long()        { test_atomic_integral<unsigned long>(); }
        void test_atomic_long_long()            { test_atomic_integral<long long>(); }
        void test_atomic_unsigned_long_long()   { test_atomic_integral<unsigned long long>(); }
672
        // Integral specializations (volatile-qualified atomics).
        void test_atomic_char_volatile()                 { test_atomic_integral_volatile<char>(); }
        void test_atomic_signed_char_volatile()          { test_atomic_integral_volatile<signed char>(); }
        void test_atomic_unsigned_char_volatile()        { test_atomic_integral_volatile<unsigned char>(); }
        void test_atomic_short_int_volatile()            { test_atomic_integral_volatile<short int>(); }
        void test_atomic_unsigned_short_int_volatile()   { test_atomic_integral_volatile<unsigned short int>(); }
        void test_atomic_int_volatile()                  { test_atomic_integral_volatile<int>(); }
        void test_atomic_unsigned_int_volatile()         { test_atomic_integral_volatile<unsigned int>(); }
        void test_atomic_long_volatile()                 { test_atomic_integral_volatile<long>(); }
        void test_atomic_unsigned_long_volatile()        { test_atomic_integral_volatile<unsigned long>(); }
        void test_atomic_long_long_volatile()            { test_atomic_integral_volatile<long long>(); }
        void test_atomic_unsigned_long_long_volatile()   { test_atomic_integral_volatile<unsigned long long>(); }
684
        // Pointer specializations: void* plus typed pointers, non-volatile and
        // volatile.
        void test_atomic_pointer_void()         { do_test_atomic_pointer_void<false>() ;}
        void test_atomic_pointer_void_volatile(){ do_test_atomic_pointer_void<true>() ;}

        void test_atomic_pointer_char()         { test_atomic_pointer_for<char, false>() ;}
        void test_atomic_pointer_short()        { test_atomic_pointer_for<short int, false>() ;}
        void test_atomic_pointer_int()          { test_atomic_pointer_for<int, false>() ;}
        void test_atomic_pointer_long()         { test_atomic_pointer_for<long, false>() ;}
        void test_atomic_pointer_long_long()    { test_atomic_pointer_for<long long, false>() ;}

        void test_atomic_pointer_char_volatile()        { test_atomic_pointer_for<char, true>() ;}
        // NOTE(review): uses unsigned short where the non-volatile variant uses
        // short int — possibly intentional extra coverage; confirm.
        void test_atomic_pointer_short_volatile()       { test_atomic_pointer_for<unsigned short int, true>() ;}
        void test_atomic_pointer_int_volatile()          { test_atomic_pointer_for<int, true>() ;}
        void test_atomic_pointer_long_volatile()         { test_atomic_pointer_for<long, true>() ;}
        void test_atomic_pointer_long_long_volatile()    { test_atomic_pointer_for<long long, true>() ;}
699
    public:
        // CppUnitMini suite registration: every test above, in both
        // non-volatile and volatile forms.
        CPPUNIT_TEST_SUITE(cxx11_atomic_class)
            CPPUNIT_TEST( test_atomic_flag )
            CPPUNIT_TEST( test_atomic_flag_volatile )

            CPPUNIT_TEST( test_atomic_bool )
            CPPUNIT_TEST( test_atomic_char )
            CPPUNIT_TEST( test_atomic_signed_char)
            CPPUNIT_TEST( test_atomic_unsigned_char)
            CPPUNIT_TEST( test_atomic_short_int)
            CPPUNIT_TEST( test_atomic_unsigned_short_int)
            CPPUNIT_TEST( test_atomic_int)
            CPPUNIT_TEST( test_atomic_unsigned_int)
            CPPUNIT_TEST( test_atomic_long)
            CPPUNIT_TEST( test_atomic_unsigned_long)
            CPPUNIT_TEST( test_atomic_long_long)
            CPPUNIT_TEST( test_atomic_unsigned_long_long)

            CPPUNIT_TEST( test_atomic_bool_volatile )
            CPPUNIT_TEST( test_atomic_char_volatile )
            CPPUNIT_TEST( test_atomic_signed_char_volatile)
            CPPUNIT_TEST( test_atomic_unsigned_char_volatile)
            CPPUNIT_TEST( test_atomic_short_int_volatile)
            CPPUNIT_TEST( test_atomic_unsigned_short_int_volatile)
            CPPUNIT_TEST( test_atomic_int_volatile)
            CPPUNIT_TEST( test_atomic_unsigned_int_volatile)
            CPPUNIT_TEST( test_atomic_long_volatile)
            CPPUNIT_TEST( test_atomic_unsigned_long_volatile)
            CPPUNIT_TEST( test_atomic_long_long_volatile)
            CPPUNIT_TEST( test_atomic_unsigned_long_long_volatile)

            CPPUNIT_TEST( test_atomic_pointer_void)
            CPPUNIT_TEST( test_atomic_pointer_void_volatile)

            CPPUNIT_TEST( test_atomic_pointer_char)
            CPPUNIT_TEST( test_atomic_pointer_short)
            CPPUNIT_TEST( test_atomic_pointer_int)
            CPPUNIT_TEST( test_atomic_pointer_long)
            CPPUNIT_TEST( test_atomic_pointer_long_long)

            CPPUNIT_TEST( test_atomic_pointer_char_volatile)
            CPPUNIT_TEST( test_atomic_pointer_short_volatile)
            CPPUNIT_TEST( test_atomic_pointer_int_volatile)
            CPPUNIT_TEST( test_atomic_pointer_long_volatile)
            CPPUNIT_TEST( test_atomic_pointer_long_long_volatile)

        CPPUNIT_TEST_SUITE_END()
747     };
748 }   // namespace misc
749
750 CPPUNIT_TEST_SUITE_REGISTRATION(misc::cxx11_atomic_class);