Added copyright and license
[libcds.git] / tests / test-hdr / misc / cxx11_atomic_class.cpp
1 /*
2     This file is a part of libcds - Concurrent Data Structures library
3
4     (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
5
6     Source code repo: http://github.com/khizmax/libcds/
7     Download: http://sourceforge.net/projects/libcds/files/
8     
9     Redistribution and use in source and binary forms, with or without
10     modification, are permitted provided that the following conditions are met:
11
12     * Redistributions of source code must retain the above copyright notice, this
13       list of conditions and the following disclaimer.
14
15     * Redistributions in binary form must reproduce the above copyright notice,
16       this list of conditions and the following disclaimer in the documentation
17       and/or other materials provided with the distribution.
18
19     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20     AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21     IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23     FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24     DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25     SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27     OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.     
29 */
30
31 #include "cppunit/cppunit_proxy.h"
32
33 #include <cds/algo/atomic.h>
34
35 #include "misc/cxx11_convert_memory_order.h"
36
37 namespace misc {
38     class cxx11_atomic_class: public CppUnitMini::TestCase
39     {
40         template <typename AtomicFlag>
41         void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order )
42         {
43             atomics::memory_order mo_clear = convert_to_store_order(order);
44             for ( int i = 0; i < 5; ++i ) {
45                 CPPUNIT_ASSERT( !f.test_and_set( order ));
46                 CPPUNIT_ASSERT( f.test_and_set( order ) );
47                 f.clear( mo_clear );
48             }
49         }
50
51         template <typename AtomicFlag>
52         void do_test_atomic_flag( AtomicFlag& f)
53         {
54             f.clear();
55
56             for ( int i = 0; i < 5; ++i ) {
57                 CPPUNIT_ASSERT( !f.test_and_set());
58                 CPPUNIT_ASSERT( f.test_and_set() );
59                 f.clear();
60             }
61
62             do_test_atomic_flag_mo( f, atomics::memory_order_relaxed );
63             //do_test_atomic_flag_mo( f, atomics::memory_order_consume );
64             do_test_atomic_flag_mo( f, atomics::memory_order_acquire );
65             do_test_atomic_flag_mo( f, atomics::memory_order_release );
66             do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel );
67             do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst );
68         }
69
        // Generic checks for any atomic integral type with the default (seq_cst)
        // ordering: lock-freedom, store/load round-trip, exchange, and the
        // weak/strong compare-exchange protocol. The value 42 is walked through
        // each byte position so every byte lane of the type is exercised.
        template <class Atomic, typename Integral>
        void do_test_atomic_type(Atomic& a)
        {
            typedef Integral    integral_type;

            CPPUNIT_ASSERT( a.is_lock_free() );
            a.store( (integral_type) 0 );
            CPPUNIT_ASSERT( a == 0 );
            CPPUNIT_ASSERT( a.load() == 0 );

            // exchange returns the previous value.
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = integral_type(42) << (nByte * 8);
                CPPUNIT_ASSERT( a.exchange( n ) == 0 );
                CPPUNIT_ASSERT( a == n );
                CPPUNIT_ASSERT( a.exchange( (integral_type) 0 ) == n );
                CPPUNIT_ASSERT( a.load() == 0 );
            }

            // compare_exchange_weak: success leaves 'expected' untouched; failure
            // loads the current value into 'expected'.
            // NOTE(review): a spurious weak-CAS failure would trip the first
            // assert; the test relies on no spurious failures occurring here.
            integral_type prev = a.load();
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = integral_type(42) << (nByte * 8);
                integral_type expected = prev;

                CPPUNIT_ASSERT( a.compare_exchange_weak( expected, n));
                CPPUNIT_ASSERT( expected  == prev );
                CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, n));
                CPPUNIT_ASSERT( expected  == n );

                prev = n;
                CPPUNIT_ASSERT( a == n );
            }

            a = (integral_type) 0;

            // compare_exchange_strong: same protocol; failure can only mean the
            // stored value really differed from 'expected'.
            prev = a;
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = integral_type(42) << (nByte * 8);
                integral_type expected = prev;

                CPPUNIT_ASSERT( a.compare_exchange_strong( expected, n));
                CPPUNIT_ASSERT( expected  == prev );
                CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, n));
                CPPUNIT_ASSERT( expected  == n );

                prev = n;
                CPPUNIT_ASSERT( a.load() == n );
            }

            CPPUNIT_ASSERT( a.exchange( (integral_type) 0 ) == prev );
        }
120
121         template <class Atomic, typename Integral>
122         void do_test_atomic_integral(Atomic& a)
123         {
124             do_test_atomic_type< Atomic, Integral >(a);
125
126             typedef Integral    integral_type;
127
128             // fetch_xxx testing
129             a.store( (integral_type) 0 );
130
131             // fetch_add
132             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
133             {
134                 integral_type prev = a.load();
135                 integral_type n = integral_type(42) << (nByte * 8);
136
137                 CPPUNIT_ASSERT( a.fetch_add(n) == prev);
138             }
139
140             // fetch_sub
141             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
142             {
143                 integral_type prev = a.load();
144                 integral_type n = integral_type(42) << ((nByte - 1) * 8);
145
146                 CPPUNIT_ASSERT( a.fetch_sub(n) == prev);
147             }
148             CPPUNIT_ASSERT( a.load() == 0 );
149
150             // fetch_or / fetc_xor / fetch_and
151             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
152             {
153                 integral_type prev = a.load()  ;;
154                 integral_type mask = 1 << nBit;
155
156                 CPPUNIT_ASSERT( a.fetch_or( mask ) == prev );
157                 prev = a.load();
158                 CPPUNIT_ASSERT( ( prev & mask)  == mask);
159
160                 CPPUNIT_ASSERT( a.fetch_and( (integral_type) ~mask ) == prev );
161                 prev = a.load();
162                 CPPUNIT_ASSERT( integral_type(prev & mask) == integral_type(0));
163
164                 CPPUNIT_ASSERT( a.fetch_xor( mask ) == prev );
165                 prev = a.load();
166                 CPPUNIT_ASSERT( integral_type( prev & mask)  == mask);
167             }
168             CPPUNIT_ASSERT( a.load() == (integral_type) -1 );
169
170
171             // op= testing
172             a = (integral_type) 0;
173
174             // +=
175             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
176             {
177                 integral_type prev = a;
178                 integral_type n = integral_type(42) << (nByte * 8);
179
180                 CPPUNIT_ASSERT( (a += n) == (prev + n));
181             }
182
183             // -=
184             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
185             {
186                 integral_type prev = a;
187                 integral_type n = integral_type(42) << ((nByte - 1) * 8);
188
189                 CPPUNIT_ASSERT( (a -= n) == prev - n );
190             }
191             CPPUNIT_ASSERT( a.load() == 0 );
192
193             // |= / ^= / &=
194             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
195             {
196                 integral_type prev = a;
197                 integral_type mask = integral_type(1) << nBit;
198
199                 CPPUNIT_ASSERT( (a |= mask ) == (prev | mask ));
200                 prev = a;
201                 CPPUNIT_ASSERT( ( prev & mask)  == mask);
202
203                 CPPUNIT_ASSERT( (a &= (integral_type) ~mask ) == ( prev & (integral_type) ~mask ));
204                 prev = a;
205                 CPPUNIT_ASSERT( ( prev & mask)  == 0);
206
207                 CPPUNIT_ASSERT( (a ^= mask ) == (prev ^ mask ));
208                 prev = a;
209                 CPPUNIT_ASSERT( ( prev & mask)  == mask);
210             }
211             CPPUNIT_ASSERT( a == (integral_type) -1 );
212         }
213
        // Same generic checks as do_test_atomic_type(a), but parameterized by
        // memory order. Load and store orders are derived from 'order' via the
        // convert_to_* helpers; CAS failure order is always relaxed.
        template <class Atomic, typename Integral>
        void do_test_atomic_type( Atomic& a, atomics::memory_order order )
        {
            typedef Integral    integral_type;

            const atomics::memory_order oLoad = convert_to_load_order( order );
            const atomics::memory_order oStore = convert_to_store_order( order );

            CPPUNIT_ASSERT( a.is_lock_free() );
            a.store((integral_type) 0, oStore );
            CPPUNIT_ASSERT( a == 0 );
            CPPUNIT_ASSERT( a.load( oLoad ) == 0 );

            // exchange returns the previous value; 42 walks through each byte.
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = integral_type(42) << (nByte * 8);
                CPPUNIT_ASSERT( a.exchange( n, order ) == 0 );
                CPPUNIT_ASSERT( a.load( oLoad ) == n );
                CPPUNIT_ASSERT( a.exchange( (integral_type) 0, order ) == n );
                CPPUNIT_ASSERT( a.load( oLoad ) == 0 );
            }

            // compare_exchange_weak: success keeps 'expected'; failure rewrites it.
            // NOTE(review): assumes no spurious weak-CAS failure.
            integral_type prev = a.load( oLoad );
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = integral_type(42) << (nByte * 8);
                integral_type expected = prev;

                CPPUNIT_ASSERT( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
                CPPUNIT_ASSERT( expected  == prev );
                CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
                CPPUNIT_ASSERT( expected  == n );

                prev = n;
                CPPUNIT_ASSERT( a.load( oLoad ) == n );
            }

            a.store( (integral_type) 0, oStore );

            // compare_exchange_strong: same protocol, no spurious failures.
            prev = a.load( oLoad );
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = integral_type(42) << (nByte * 8);
                integral_type expected = prev;

                CPPUNIT_ASSERT( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
                CPPUNIT_ASSERT( expected  == prev );
                CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
                CPPUNIT_ASSERT( expected  == n );

                prev = n;
                CPPUNIT_ASSERT( a.load( oLoad ) == n );
            }

            CPPUNIT_ASSERT( a.exchange( (integral_type) 0, order ) == prev );
        }
267
268         template <class Atomic, typename Integral>
269         void do_test_atomic_integral( Atomic& a, atomics::memory_order order )
270         {
271             do_test_atomic_type< Atomic, Integral >( a, order );
272
273             typedef Integral    integral_type;
274
275             const atomics::memory_order oLoad = convert_to_load_order( order );
276             const atomics::memory_order oStore = convert_to_store_order( order );
277
278             // fetch_xxx testing
279             a.store( (integral_type) 0, oStore );
280
281             // fetch_add
282             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
283             {
284                 integral_type prev = a.load( oLoad );
285                 integral_type n = integral_type(42) << (nByte * 8);
286
287                 CPPUNIT_ASSERT( a.fetch_add( n, order) == prev);
288             }
289
290             // fetch_sub
291             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
292             {
293                 integral_type prev = a.load( oLoad );
294                 integral_type n = integral_type(42) << ((nByte - 1) * 8);
295
296                 CPPUNIT_ASSERT( a.fetch_sub( n, order ) == prev);
297             }
298             CPPUNIT_ASSERT( a.load( oLoad ) == 0 );
299
300             // fetch_or / fetc_xor / fetch_and
301             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
302             {
303                 integral_type prev = a.load( oLoad )  ;;
304                 integral_type mask = 1 << nBit;
305
306                 CPPUNIT_ASSERT( a.fetch_or( mask, order ) == prev );
307                 prev = a.load( oLoad );
308                 CPPUNIT_ASSERT( ( prev & mask)  == mask);
309
310                 CPPUNIT_ASSERT( a.fetch_and( (integral_type) ~mask, order ) == prev );
311                 prev = a.load( oLoad );
312                 CPPUNIT_ASSERT( ( prev & mask)  == 0);
313
314                 CPPUNIT_ASSERT( a.fetch_xor( mask, order ) == prev );
315                 prev = a.load( oLoad );
316                 CPPUNIT_ASSERT( ( prev & mask)  == mask);
317             }
318             CPPUNIT_ASSERT( a.load( oLoad ) == (integral_type) -1 );
319         }
320
321
322
323         template <typename Atomic, typename Integral>
324         void test_atomic_integral_(Atomic& a)
325         {
326             do_test_atomic_integral<Atomic, Integral >(a);
327
328             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_relaxed );
329 //#if !(CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION < 40900)
330 //            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_consume );
331 //#endif
332             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acquire );
333             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_release );
334             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acq_rel );
335             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_seq_cst );
336         }
337
338         template <typename Integral>
339         void test_atomic_integral()
340         {
341             typedef atomics::atomic<Integral> atomic_type;
342
343             atomic_type a[8];
344             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
345                 test_atomic_integral_<atomic_type, Integral>( a[i] );
346             }
347         }
348         template <typename Integral>
349         void test_atomic_integral_volatile()
350         {
351             typedef atomics::atomic<Integral> volatile atomic_type;
352
353             atomic_type a[8];
354             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
355                 test_atomic_integral_<atomic_type, Integral>( a[i] );
356             }
357         }
358
        // atomic<bool> checks with default (seq_cst) ordering: store/load,
        // exchange, and the weak/strong compare-exchange protocol.
        template <class AtomicBool>
        void do_test_atomic_bool( AtomicBool& a )
        {
            CPPUNIT_ASSERT( a.is_lock_free() );
            a.store( false );
            CPPUNIT_ASSERT( a == false );
            CPPUNIT_ASSERT( a.load() == false );

            // exchange returns the previous value.
            CPPUNIT_ASSERT( a.exchange( true ) == false );
            CPPUNIT_ASSERT( a.load() == true );
            CPPUNIT_ASSERT( a.exchange( false ) == true );
            CPPUNIT_ASSERT( a.load() == false );

            // Successful CAS keeps 'expected'; failed CAS rewrites it with the
            // current value. NOTE(review): assumes no spurious weak-CAS failure.
            bool expected = false;
            CPPUNIT_ASSERT( a.compare_exchange_weak( expected, true));
            CPPUNIT_ASSERT( expected  == false );
            CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, false));
            CPPUNIT_ASSERT( expected  == true );
            CPPUNIT_ASSERT( a.load() == true );

            a.store( false );

            expected = false;
            CPPUNIT_ASSERT( a.compare_exchange_strong( expected, true));
            CPPUNIT_ASSERT( expected  == false );
            CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, false));
            CPPUNIT_ASSERT( expected  == true );

            CPPUNIT_ASSERT( a.load() == true );

            CPPUNIT_ASSERT( a.exchange( false ) == true );
        }
391
        // atomic<bool> checks parameterized by memory order. Load/store/exchange
        // orders are derived from 'order'; CAS failure order is always relaxed.
        template <class AtomicBool>
        void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order )
        {
            const atomics::memory_order oLoad = convert_to_load_order( order );
            const atomics::memory_order oStore = convert_to_store_order( order );
            const atomics::memory_order oExchange = convert_to_exchange_order( order );

            CPPUNIT_ASSERT( a.is_lock_free() );
            a.store( false, oStore );
            CPPUNIT_ASSERT( a == false );
            CPPUNIT_ASSERT( a.load( oLoad ) == false );

            // exchange returns the previous value.
            CPPUNIT_ASSERT( a.exchange( true, oExchange ) == false );
            CPPUNIT_ASSERT( a.load( oLoad ) == true );
            CPPUNIT_ASSERT( a.exchange( false, oExchange ) == true );
            CPPUNIT_ASSERT( a.load( oLoad ) == false );

            // Successful CAS keeps 'expected'; failed CAS rewrites it.
            // NOTE(review): assumes no spurious weak-CAS failure.
            bool expected = false;
            CPPUNIT_ASSERT( a.compare_exchange_weak( expected, true, order, atomics::memory_order_relaxed));
            CPPUNIT_ASSERT( expected  == false );
            CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, false, order, atomics::memory_order_relaxed));
            CPPUNIT_ASSERT( expected  == true );
            CPPUNIT_ASSERT( a.load( oLoad ) == true );

            a.store( false, oStore );

            expected = false;
            CPPUNIT_ASSERT( a.compare_exchange_strong( expected, true, order, atomics::memory_order_relaxed));
            CPPUNIT_ASSERT( expected  == false );
            CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, false, order, atomics::memory_order_relaxed));
            CPPUNIT_ASSERT( expected  == true );

            CPPUNIT_ASSERT( a.load( oLoad ) == true );

            CPPUNIT_ASSERT( a.exchange( false, oExchange ) == true );
        }
429
430
        // Memory-order-parameterized test for atomic<void*>. The caller fills
        // 'arr' so that arr[i] == i + 1; dereferencing the stored pointer thus
        // reveals which element it currently addresses.
        template <typename Atomic>
        void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
        {
            atomics::memory_order oLoad = convert_to_load_order(order);
            atomics::memory_order oStore = convert_to_store_order(order);
            void *  p;

            a.store( (void *) arr, oStore );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load( oLoad )) == 1 );

            // Successful CAS keeps 'p'; failed CAS rewrites 'p' with the current
            // pointer. NOTE(review): assumes no spurious weak-CAS failure.
            p = arr;
            CPPUNIT_ASSERT( a.compare_exchange_weak( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
            CPPUNIT_ASSERT( p == arr + 0 );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 1 );
            CPPUNIT_ASSERT( !a.compare_exchange_weak( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
            CPPUNIT_ASSERT( p == arr + 5 );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 6 );

            CPPUNIT_ASSERT( a.compare_exchange_strong( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
            CPPUNIT_ASSERT( p == arr + 5 );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 6 );
            CPPUNIT_ASSERT( !a.compare_exchange_strong( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
            CPPUNIT_ASSERT( p == arr + 3 );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 4 );

            CPPUNIT_ASSERT( reinterpret_cast<char *>(a.exchange( (void *) arr, order )) == arr + 3 );
            CPPUNIT_ASSERT( reinterpret_cast<char *>(a.load( oLoad )) == arr );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load( oLoad )) == 1 );

            // fetch_add/fetch_sub on atomic<void*>: the asserts show a step of
            // one byte per unit -- presumably the libcds void* specialization;
            // TODO confirm against cds/algo/atomic.h.
            for ( char i = 1; i < aSize; ++i ) {
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load( oLoad )) == i );
                CPPUNIT_ASSERT( a.fetch_add( 1, order ));
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load( oLoad )) == i + 1 );
            }

            for ( char i = aSize; i > 1; --i ) {
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load( oLoad )) == i  );
                CPPUNIT_ASSERT( a.fetch_sub( 1, order ));
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load( oLoad )) == i - 1 );
            }
        }
472
        // Full test for atomic<void*> (volatile-qualified when Volatile is true):
        // default-order exchange/CAS/pointer-stepping first, then the
        // per-memory-order variant for each explicit order.
        template <bool Volatile>
        void do_test_atomic_pointer_void()
        {
            typedef typename add_volatile<atomics::atomic< void *>, Volatile>::type    atomic_pointer;

            // arr[i] == i + 1, so *ptr identifies the offset ptr points at.
            char   arr[8];
            const char aSize = sizeof(arr)/sizeof(arr[0]);
            for ( char i = 0; i < aSize; ++i ) {
                arr[unsigned(i)] = i + 1;
            }

            atomic_pointer  a;
            void *  p;

#if CDS_BUILD_BITS == 32 && !( CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION == 40700 )
            /* GCC 4.7.0 has an linktime error in 32bit x86 mode:

            ../tests/test-hdr/misc/cxx11_atomic_class.o: In function `std::__atomic_base<void*>::is_lock_free() const':
            /usr/local/lib/gcc/x86_64-unknown-linux-gnu/4.7.0/../../../../include/c++/4.7.0/bits/atomic_base.h:719: undefined reference to `__atomic_is_lock_free'

            ../tests/test-hdr/misc/cxx11_atomic_class.o: In function `std::__atomic_base<void*>::is_lock_free() const volatile':
            /usr/local/lib/gcc/x86_64-unknown-linux-gnu/4.7.0/../../../../include/c++/4.7.0/bits/atomic_base.h:723: undefined reference to `__atomic_is_lock_free'

            */
            CPPUNIT_ASSERT( a.is_lock_free() );
#endif

            a.store( (void *) arr );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load()) == 1 );

            // Successful CAS keeps 'p'; failed CAS rewrites 'p' with the current
            // pointer. NOTE(review): assumes no spurious weak-CAS failure.
            p = arr;
            CPPUNIT_ASSERT( a.compare_exchange_weak( p, (void *)(arr + 5) ));
            CPPUNIT_ASSERT( p == arr + 0 );
            CPPUNIT_ASSERT( !a.compare_exchange_weak( p, (void *)(arr + 3) ));
            CPPUNIT_ASSERT( p == arr + 5 );

            CPPUNIT_ASSERT( a.compare_exchange_strong( p, (void *)(arr + 3) ));
            CPPUNIT_ASSERT( p == arr + 5 );
            CPPUNIT_ASSERT( !a.compare_exchange_strong( p, (void *)(arr + 5) ));
            CPPUNIT_ASSERT( p == arr + 3 );

            CPPUNIT_ASSERT( reinterpret_cast<char *>( a.exchange( (void *) arr )) == arr + 3 );
            CPPUNIT_ASSERT( reinterpret_cast<char *>( a.load()) == arr );
            CPPUNIT_ASSERT( *reinterpret_cast<char *>( a.load()) == 1 );

            // fetch_add/fetch_sub step the void* by one byte per unit (matches
            // the asserts below) -- presumably libcds' void* specialization.
            for ( char i = 1; i < aSize; ++i ) {
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load()) == i );
                CPPUNIT_ASSERT( a.fetch_add( 1 ));
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load()) == i + 1 );
            }

            for ( char i = aSize; i > 1; --i ) {
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load()) == i  );
                CPPUNIT_ASSERT( a.fetch_sub( 1 ));
                CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load()) == i - 1 );
            }

            // Repeat under each explicit memory order (consume skipped).
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst );
        }
537
        // Memory-order-parameterized test for atomic<Integral*>. The caller
        // fills 'arr' so that arr[i] == i + 1; *ptr identifies ptr's offset.
        // Typed-pointer fetch_add/fetch_sub step by whole elements.
        template <typename Atomic, typename Integral>
        void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order )
        {
            typedef Integral integral_type;
            atomics::memory_order oLoad = convert_to_load_order(order);
            atomics::memory_order oStore = convert_to_store_order(order);
            integral_type *  p;

            a.store( arr, oStore );
            CPPUNIT_ASSERT( *a.load( oLoad ) == 1 );

            // Successful CAS keeps 'p'; failed CAS rewrites 'p' with the current
            // pointer. NOTE(review): assumes no spurious weak-CAS failure.
            p = arr;
            CPPUNIT_ASSERT( a.compare_exchange_weak( p, arr + 5, order, atomics::memory_order_relaxed ));
            CPPUNIT_ASSERT( p == arr + 0 );
            CPPUNIT_ASSERT( *p == 1 );
            CPPUNIT_ASSERT( !a.compare_exchange_weak( p, arr + 3, order, atomics::memory_order_relaxed ));
            CPPUNIT_ASSERT( p == arr + 5 );
            CPPUNIT_ASSERT( *p == 6 );

            CPPUNIT_ASSERT( a.compare_exchange_strong( p, arr + 3, order, atomics::memory_order_relaxed ));
            CPPUNIT_ASSERT( p == arr + 5 );
            CPPUNIT_ASSERT( *p == 6 );
            CPPUNIT_ASSERT( !a.compare_exchange_strong( p, arr + 5, order, atomics::memory_order_relaxed ));
            CPPUNIT_ASSERT( p == arr + 3 );
            CPPUNIT_ASSERT( *p == 4 );

            CPPUNIT_ASSERT( a.exchange( arr, order ) == arr + 3 );
            CPPUNIT_ASSERT( a.load( oLoad ) == arr );
            CPPUNIT_ASSERT( *a.load( oLoad ) == 1 );

            // NOTE(review): the loop-local 'p' below shadows the outer 'p'.
            for ( integral_type i = 1; i < aSize; ++i ) {
                integral_type * p = a.load();
                CPPUNIT_ASSERT( *p == i );
                CPPUNIT_ASSERT( a.fetch_add( 1, order ) == p  );
                CPPUNIT_ASSERT( *a.load( oLoad ) == i + 1 );
            }

            for ( integral_type i = aSize; i > 1; --i ) {
                integral_type * p = a.load();
                CPPUNIT_ASSERT( *p == i  );
                CPPUNIT_ASSERT( a.fetch_sub( 1, order ) == p );
                CPPUNIT_ASSERT( *a.load( oLoad ) == i - 1 );
            }
        }
582
        // Full test for atomic<Integral*> (volatile-qualified when Volatile is
        // true): default-order exchange/CAS/element-stepping, then the
        // per-memory-order variant for each explicit order.
        template <typename Integral, bool Volatile>
        void test_atomic_pointer_for()
        {
            typedef Integral integral_type;
            typedef typename add_volatile<atomics::atomic< integral_type *>, Volatile>::type    atomic_pointer;

            // arr[i] == i + 1, so *ptr identifies the element ptr points at.
            integral_type   arr[8];
            const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
            for ( integral_type i = 0; i < aSize; ++i ) {
                arr[size_t(i)] = i + 1;
            }

            atomic_pointer  a;
            integral_type *  p;

            a.store( arr );
            CPPUNIT_ASSERT( *a.load() == 1 );

            // Successful CAS keeps 'p'; failed CAS rewrites 'p' with the current
            // pointer. NOTE(review): assumes no spurious weak-CAS failure.
            p = arr;
            CPPUNIT_ASSERT( a.compare_exchange_weak( p, arr + 5 ));
            CPPUNIT_ASSERT( p == arr + 0 );
            CPPUNIT_ASSERT( *p == 1 );
            CPPUNIT_ASSERT( !a.compare_exchange_weak( p, arr + 3 ));
            CPPUNIT_ASSERT( p == arr + 5 );
            CPPUNIT_ASSERT( *p == 6 );

            CPPUNIT_ASSERT( a.compare_exchange_strong( p, arr + 3 ));
            CPPUNIT_ASSERT( p == arr + 5 );
            CPPUNIT_ASSERT( *p == 6 );
            CPPUNIT_ASSERT( !a.compare_exchange_strong( p, arr + 5 ));
            CPPUNIT_ASSERT( p == arr + 3 );
            CPPUNIT_ASSERT( *p == 4 );

            CPPUNIT_ASSERT( a.exchange( arr ) == arr + 3 );
            CPPUNIT_ASSERT( a.load() == arr );
            CPPUNIT_ASSERT( *a.load() == 1 );

            // Typed-pointer fetch_add/fetch_sub step by whole elements.
            // NOTE(review): the loop-local 'p' below shadows the outer 'p'.
            for ( integral_type i = 1; i < aSize; ++i ) {
                integral_type * p = a.load();
                CPPUNIT_ASSERT( *p == i );
                integral_type * pa = a.fetch_add( 1 );
                // CPPUNIT_ASSERT_EX logs both pointers on failure for diagnosis.
                CPPUNIT_ASSERT_EX( pa == p, "pa=" << ((uintptr_t) pa) << " p=" << ((uintptr_t) p) );
                CPPUNIT_ASSERT( *a.load() == i + 1 );
            }

            for ( integral_type i = aSize; i > 1; --i ) {
                integral_type * p = a.load();
                CPPUNIT_ASSERT( *p == i  );
                CPPUNIT_ASSERT( a.fetch_sub( 1 ) == p );
                CPPUNIT_ASSERT( *a.load() == i - 1 );
            }

            // Repeat under each explicit memory order (consume skipped).
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst );
        }
642
643     public:
644         void test_atomic_flag()
645         {
646             // Array to test different alignment
647
648             atomics::atomic_flag flags[8];
649             for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
650                 do_test_atomic_flag( flags[i] );
651         }
652
653         void test_atomic_flag_volatile()
654         {
655             // Array to test different alignment
656
657             atomics::atomic_flag volatile flags[8];
658             for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
659                 do_test_atomic_flag( flags[i] );
660         }
661
662         template <typename AtomicBool>
663         void test_atomic_bool_()
664         {
665             // Array to test different alignment
666             AtomicBool  a[8];
667
668             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
669                 do_test_atomic_bool( a[i] );
670
671                 do_test_atomic_bool( a[i], atomics::memory_order_relaxed );
672                 //do_test_atomic_bool( a[i], atomics::memory_order_consume );
673                 do_test_atomic_bool( a[i], atomics::memory_order_acquire );
674                 do_test_atomic_bool( a[i], atomics::memory_order_release );
675                 do_test_atomic_bool( a[i], atomics::memory_order_acq_rel );
676                 do_test_atomic_bool( a[i], atomics::memory_order_seq_cst );
677             }
678         }
679
        // Entry points: run the bool suite on plain and volatile atomic<bool>.
        void test_atomic_bool()
        {
            test_atomic_bool_< atomics::atomic<bool> >();
        }
        void test_atomic_bool_volatile()
        {
            test_atomic_bool_< atomics::atomic<bool> volatile >();
        }
688
        // Entry points: one delegating test per built-in integral type.
        void test_atomic_char()                 { test_atomic_integral<char>(); }
        void test_atomic_signed_char()          { test_atomic_integral<signed char>(); }
        void test_atomic_unsigned_char()        { test_atomic_integral<unsigned char>(); }
        void test_atomic_short_int()            { test_atomic_integral<short int>(); }
        void test_atomic_unsigned_short_int()   { test_atomic_integral<unsigned short int>(); }
        void test_atomic_int()                  { test_atomic_integral<int>(); }
        void test_atomic_unsigned_int()         { test_atomic_integral<unsigned int>(); }
        void test_atomic_long()                 { test_atomic_integral<long>(); }
        void test_atomic_unsigned_long()        { test_atomic_integral<unsigned long>(); }
        void test_atomic_long_long()            { test_atomic_integral<long long>(); }
        void test_atomic_unsigned_long_long()   { test_atomic_integral<unsigned long long>(); }
700
        // Volatile counterparts of the integral wrappers above; these
        // delegate to test_atomic_integral_volatile<T>() instead.
        void test_atomic_char_volatile()                 { test_atomic_integral_volatile<char>(); }
        void test_atomic_signed_char_volatile()          { test_atomic_integral_volatile<signed char>(); }
        void test_atomic_unsigned_char_volatile()        { test_atomic_integral_volatile<unsigned char>(); }
        void test_atomic_short_int_volatile()            { test_atomic_integral_volatile<short int>(); }
        void test_atomic_unsigned_short_int_volatile()   { test_atomic_integral_volatile<unsigned short int>(); }
        void test_atomic_int_volatile()                  { test_atomic_integral_volatile<int>(); }
        void test_atomic_unsigned_int_volatile()         { test_atomic_integral_volatile<unsigned int>(); }
        void test_atomic_long_volatile()                 { test_atomic_integral_volatile<long>(); }
        void test_atomic_unsigned_long_volatile()        { test_atomic_integral_volatile<unsigned long>(); }
        void test_atomic_long_long_volatile()            { test_atomic_integral_volatile<long long>(); }
        void test_atomic_unsigned_long_long_volatile()   { test_atomic_integral_volatile<unsigned long long>(); }
712
        // atomic<void*> tests; the bool template argument selects the
        // volatile-qualified variant inside do_test_atomic_pointer_void.
        void test_atomic_pointer_void()         { do_test_atomic_pointer_void<false>() ;}
        void test_atomic_pointer_void_volatile(){ do_test_atomic_pointer_void<true>() ;}

        // Typed-pointer atomics (atomic<T*>), non-volatile variants.
        void test_atomic_pointer_char()         { test_atomic_pointer_for<char, false>() ;}
        void test_atomic_pointer_short()        { test_atomic_pointer_for<short int, false>() ;}
        void test_atomic_pointer_int()          { test_atomic_pointer_for<int, false>() ;}
        void test_atomic_pointer_long()         { test_atomic_pointer_for<long, false>() ;}
        void test_atomic_pointer_long_long()    { test_atomic_pointer_for<long long, false>() ;}
721
722         void test_atomic_pointer_char_volatile()        { test_atomic_pointer_for<char, true>() ;}
723         void test_atomic_pointer_short_volatile()       { test_atomic_pointer_for<unsigned short int, true>() ;}
724         void test_atomic_pointer_int_volatile()          { test_atomic_pointer_for<int, true>() ;}
725         void test_atomic_pointer_long_volatile()         { test_atomic_pointer_for<long, true>() ;}
726         void test_atomic_pointer_long_long_volatile()    { test_atomic_pointer_for<long long, true>() ;}
727
    public:
        // CppUnit suite registration. Each CPPUNIT_TEST entry turns one of the
        // wrapper methods above into an individual runnable test case; the
        // listing order (flag, bool, integrals, volatile integrals, pointers,
        // volatile pointers) is the order in which the cases run.
        CPPUNIT_TEST_SUITE(cxx11_atomic_class)
            CPPUNIT_TEST( test_atomic_flag )
            CPPUNIT_TEST( test_atomic_flag_volatile )

            CPPUNIT_TEST( test_atomic_bool )
            CPPUNIT_TEST( test_atomic_char )
            CPPUNIT_TEST( test_atomic_signed_char)
            CPPUNIT_TEST( test_atomic_unsigned_char)
            CPPUNIT_TEST( test_atomic_short_int)
            CPPUNIT_TEST( test_atomic_unsigned_short_int)
            CPPUNIT_TEST( test_atomic_int)
            CPPUNIT_TEST( test_atomic_unsigned_int)
            CPPUNIT_TEST( test_atomic_long)
            CPPUNIT_TEST( test_atomic_unsigned_long)
            CPPUNIT_TEST( test_atomic_long_long)
            CPPUNIT_TEST( test_atomic_unsigned_long_long)

            // Volatile-qualified variants of the cases above.
            CPPUNIT_TEST( test_atomic_bool_volatile )
            CPPUNIT_TEST( test_atomic_char_volatile )
            CPPUNIT_TEST( test_atomic_signed_char_volatile)
            CPPUNIT_TEST( test_atomic_unsigned_char_volatile)
            CPPUNIT_TEST( test_atomic_short_int_volatile)
            CPPUNIT_TEST( test_atomic_unsigned_short_int_volatile)
            CPPUNIT_TEST( test_atomic_int_volatile)
            CPPUNIT_TEST( test_atomic_unsigned_int_volatile)
            CPPUNIT_TEST( test_atomic_long_volatile)
            CPPUNIT_TEST( test_atomic_unsigned_long_volatile)
            CPPUNIT_TEST( test_atomic_long_long_volatile)
            CPPUNIT_TEST( test_atomic_unsigned_long_long_volatile)

            // Pointer specializations: atomic<void*> and atomic<T*>.
            CPPUNIT_TEST( test_atomic_pointer_void)
            CPPUNIT_TEST( test_atomic_pointer_void_volatile)

            CPPUNIT_TEST( test_atomic_pointer_char)
            CPPUNIT_TEST( test_atomic_pointer_short)
            CPPUNIT_TEST( test_atomic_pointer_int)
            CPPUNIT_TEST( test_atomic_pointer_long)
            CPPUNIT_TEST( test_atomic_pointer_long_long)

            CPPUNIT_TEST( test_atomic_pointer_char_volatile)
            CPPUNIT_TEST( test_atomic_pointer_short_volatile)
            CPPUNIT_TEST( test_atomic_pointer_int_volatile)
            CPPUNIT_TEST( test_atomic_pointer_long_volatile)
            CPPUNIT_TEST( test_atomic_pointer_long_long_volatile)

        CPPUNIT_TEST_SUITE_END()
775     };
776 }   // namespace misc
777
778 CPPUNIT_TEST_SUITE_REGISTRATION(misc::cxx11_atomic_class);