/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <gtest/gtest.h>
#include <cds/algo/atomic.h>
#include "cxx11_convert_memory_order.h"

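// Single-threaded functional tests for the atomics facade in cds/algo/atomic.h:
// atomic_flag, atomic<bool>, atomic<Integral> and atomic<T*>, in plain and
// volatile flavors, with the default and with each explicit memory order.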
namespace {
    class cxx11_atomic_class: public ::testing::Test
    {
    protected:
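        // Exercises atomic_flag::test_and_set()/clear() with an explicit memory
        // order; clear() uses the store order derived from the given order.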
        template <typename AtomicFlag>
        void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order )
        {
            atomics::memory_order mo_clear = convert_to_store_order( order );
            for ( int i = 0; i < 5; ++i ) {
                EXPECT_FALSE( f.test_and_set( order ));
                EXPECT_TRUE( f.test_and_set( order ));
                f.clear( mo_clear );
            }
        }

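        // Runs the flag test with the default (seq_cst) calls, then once with
        // each supported explicit memory order.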
        template <typename AtomicFlag>
        void do_test_atomic_flag( AtomicFlag& f )
        {
            f.clear();

            for ( int i = 0; i < 5; ++i ) {
                EXPECT_FALSE( f.test_and_set());
                EXPECT_TRUE( f.test_and_set());
                f.clear();
            }

            do_test_atomic_flag_mo( f, atomics::memory_order_relaxed );
            //do_test_atomic_flag_mo( f, atomics::memory_order_consume );
            do_test_atomic_flag_mo( f, atomics::memory_order_acquire );
            do_test_atomic_flag_mo( f, atomics::memory_order_release );
            do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel );
            do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst );
        }

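        // Core single-threaded checks for an integral atomic with the default
        // (seq_cst) ordering: store/load, exchange, and both CAS forms. The
        // value 42 is shifted into each byte in turn so every byte lane of the
        // underlying word is exercised. Note: compare_exchange_weak may fail
        // spuriously in general; the test assumes it does not in this
        // uncontended, single-threaded case.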
        template <class Atomic, typename Integral>
        void do_test_atomic_type( Atomic& a )
        {
            typedef Integral    integral_type;

            EXPECT_TRUE( a.is_lock_free());
            a.store( (integral_type) 0 );
            //EXPECT_EQ( a, static_cast<integral_type>( 0 ));
            EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));

            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                EXPECT_EQ( a.exchange( n ), static_cast<integral_type>( 0 ));
                EXPECT_EQ( a.load(), n );
                EXPECT_EQ( a.exchange( (integral_type) 0 ), n );
                EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
            }

            integral_type prev = a.load();
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                integral_type expected = prev;

                EXPECT_TRUE( a.compare_exchange_weak( expected, n ));
                EXPECT_EQ( expected, prev );
                EXPECT_FALSE( a.compare_exchange_weak( expected, n ));
                EXPECT_EQ( expected, n );

                prev = n;
                EXPECT_EQ( a.load(), n );
            }

            a = (integral_type) 0;

            prev = a;
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                integral_type expected = prev;

                EXPECT_TRUE( a.compare_exchange_strong( expected, n ));
                EXPECT_EQ( expected, prev );
                EXPECT_FALSE( a.compare_exchange_strong( expected, n ));
                EXPECT_EQ( expected, n );

                prev = n;
                EXPECT_EQ( a.load(), n );
            }

            EXPECT_EQ( a.exchange( (integral_type) 0 ), prev );
        }

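        // Adds the read-modify-write operations on top of the core test:
        // fetch_add / fetch_sub, fetch_or / fetch_and / fetch_xor, and the
        // operator forms += -= |= &= ^=.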
        template <class Atomic, typename Integral>
        void do_test_atomic_integral( Atomic& a )
        {
            do_test_atomic_type< Atomic, Integral >( a );

            typedef Integral    integral_type;

            // fetch_xxx testing
            a.store( (integral_type) 0 );

            // fetch_add
            for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
            {
                integral_type prev = a.load();
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));

                EXPECT_EQ( a.fetch_add( n ), prev );
            }

            // fetch_sub
            for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
            {
                integral_type prev = a.load();
                integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));

                EXPECT_EQ( a.fetch_sub( n ), prev );
            }
            EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));

            // fetch_or / fetch_xor / fetch_and
            for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
            {
                integral_type prev = a.load();
                integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );

                EXPECT_EQ( a.fetch_or( mask ), prev );
                prev = a.load();
                EXPECT_EQ( ( prev & mask ), mask );

                EXPECT_EQ( a.fetch_and( (integral_type) ~mask ), prev );
                prev = a.load();
                EXPECT_EQ( integral_type( prev & mask ), integral_type( 0 ));

                EXPECT_EQ( a.fetch_xor( mask ), prev );
                prev = a.load();
                EXPECT_EQ( integral_type( prev & mask ), mask );
            }
            EXPECT_EQ( a.load(), (integral_type) -1 );

            // op= testing
            a = (integral_type) 0;

            // +=
            for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
            {
                integral_type prev = a;
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));

                EXPECT_EQ( (a += n), (prev + n));
            }

            // -=
            for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
            {
                integral_type prev = a;
                integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));

                EXPECT_EQ( (a -= n), prev - n );
            }
            EXPECT_EQ( a.load(), (integral_type) 0 );

            // |= / ^= / &=
            for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
            {
                integral_type prev = a;
                integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );

                EXPECT_EQ( (a |= mask ), (prev | mask ));
                prev = a;
                EXPECT_EQ( ( prev & mask ), mask );

                EXPECT_EQ( (a &= (integral_type) ~mask ), ( prev & (integral_type) ~mask ));
                prev = a;
                EXPECT_EQ( ( prev & mask ), integral_type( 0 ));

                EXPECT_EQ( (a ^= mask ), (prev ^ mask ));
                prev = a;
                EXPECT_EQ( ( prev & mask ), mask );
            }
            EXPECT_EQ( a.load(), (integral_type) -1 );
        }

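        // The same core checks with an explicit memory order on every call.
        // Loads and stores use convert_to_load_order()/convert_to_store_order()
        // so that each operation receives an order that is valid for it.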
        template <class Atomic, typename Integral>
        void do_test_atomic_type( Atomic& a, atomics::memory_order order )
        {
            typedef Integral    integral_type;

            const atomics::memory_order oLoad = convert_to_load_order( order );
            const atomics::memory_order oStore = convert_to_store_order( order );

            EXPECT_TRUE( a.is_lock_free());
            a.store( (integral_type) 0, oStore );
            //EXPECT_EQ( a, integral_type( 0 ));
            EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));

            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                EXPECT_EQ( a.exchange( n, order ), integral_type( 0 ));
                EXPECT_EQ( a.load( oLoad ), n );
                EXPECT_EQ( a.exchange( (integral_type) 0, order ), n );
                EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
            }

            integral_type prev = a.load( oLoad );
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                integral_type expected = prev;

                EXPECT_TRUE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed ));
                EXPECT_EQ( expected, prev );
                EXPECT_FALSE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed ));
                EXPECT_EQ( expected, n );

                prev = n;
                EXPECT_EQ( a.load( oLoad ), n );
            }

            a.store( (integral_type) 0, oStore );

            prev = a.load( oLoad );
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                integral_type expected = prev;

                EXPECT_TRUE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed ));
                EXPECT_EQ( expected, prev );
                EXPECT_FALSE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed ));
                EXPECT_EQ( expected, n );

                prev = n;
                EXPECT_EQ( a.load( oLoad ), n );
            }

            EXPECT_EQ( a.exchange( (integral_type) 0, order ), prev );
        }

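        // Ordered variant of the read-modify-write checks; the given order is
        // passed to every fetch_xxx call.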
        template <class Atomic, typename Integral>
        void do_test_atomic_integral( Atomic& a, atomics::memory_order order )
        {
            do_test_atomic_type< Atomic, Integral >( a, order );

            typedef Integral    integral_type;

            const atomics::memory_order oLoad = convert_to_load_order( order );
            const atomics::memory_order oStore = convert_to_store_order( order );

            // fetch_xxx testing
            a.store( (integral_type) 0, oStore );

            // fetch_add
            for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
            {
                integral_type prev = a.load( oLoad );
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));

                EXPECT_EQ( a.fetch_add( n, order ), prev );
            }

            // fetch_sub
            for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
            {
                integral_type prev = a.load( oLoad );
                integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));

                EXPECT_EQ( a.fetch_sub( n, order ), prev );
            }
            EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));

            // fetch_or / fetch_xor / fetch_and
            for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
            {
                integral_type prev = a.load( oLoad );
                integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );

                EXPECT_EQ( a.fetch_or( mask, order ), prev );
                prev = a.load( oLoad );
                EXPECT_EQ( ( prev & mask ), mask );

                EXPECT_EQ( a.fetch_and( (integral_type) ~mask, order ), prev );
                prev = a.load( oLoad );
                EXPECT_EQ( ( prev & mask ), integral_type( 0 ));

                EXPECT_EQ( a.fetch_xor( mask, order ), prev );
                prev = a.load( oLoad );
                EXPECT_EQ( ( prev & mask ), mask );
            }
            EXPECT_EQ( a.load( oLoad ), (integral_type) -1 );
        }

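        // Runs the integral test with the default ordering, then once with each
        // explicit memory order.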
        template <typename Atomic, typename Integral>
        void test_atomic_integral_( Atomic& a )
        {
            do_test_atomic_integral<Atomic, Integral >( a );

            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_relaxed );
            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acquire );
            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_release );
            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acq_rel );
            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_seq_cst );
        }

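        // Each test iterates over an array of eight atomics so that objects at
        // several different alignments are covered, for both plain and volatile
        // atomic types.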
        template <typename Integral>
        void test_atomic_integral()
        {
            typedef atomics::atomic<Integral> atomic_type;

            atomic_type a[8];
            for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
                test_atomic_integral_<atomic_type, Integral>( a[i] );
            }
        }

        template <typename Integral>
        void test_atomic_integral_volatile()
        {
            typedef atomics::atomic<Integral> volatile atomic_type;

            atomic_type a[8];
            for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
                test_atomic_integral_<atomic_type, Integral>( a[i] );
            }
        }

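        // atomic<bool>: store/load round-trip, exchange, and both CAS forms,
        // using the default memory order.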
        template <class AtomicBool>
        void do_test_atomic_bool( AtomicBool& a )
        {
            EXPECT_TRUE( a.is_lock_free());
            a.store( false );
            EXPECT_FALSE( a );
            EXPECT_FALSE( a.load());

            EXPECT_FALSE( a.exchange( true ));
            EXPECT_TRUE( a.load());
            EXPECT_TRUE( a.exchange( false ));
            EXPECT_FALSE( a.load());

            bool expected = false;
            EXPECT_TRUE( a.compare_exchange_weak( expected, true ));
            EXPECT_FALSE( expected );
            EXPECT_FALSE( a.compare_exchange_weak( expected, false ));
            EXPECT_TRUE( expected );
            EXPECT_TRUE( a.load());

            a.store( false );

            expected = false;
            EXPECT_TRUE( a.compare_exchange_strong( expected, true ));
            EXPECT_FALSE( expected );
            EXPECT_FALSE( a.compare_exchange_strong( expected, false ));
            EXPECT_TRUE( expected );

            EXPECT_TRUE( a.load());

            EXPECT_TRUE( a.exchange( false ));
        }

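        // The same atomic<bool> checks with explicit orders; CAS always uses
        // memory_order_relaxed as the failure order.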
        template <class AtomicBool>
        void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order )
        {
            const atomics::memory_order oLoad = convert_to_load_order( order );
            const atomics::memory_order oStore = convert_to_store_order( order );
            const atomics::memory_order oExchange = convert_to_exchange_order( order );

            EXPECT_TRUE( a.is_lock_free());
            a.store( false, oStore );
            EXPECT_FALSE( a );
            EXPECT_FALSE( a.load( oLoad ));

            EXPECT_FALSE( a.exchange( true, oExchange ));
            EXPECT_TRUE( a.load( oLoad ));
            EXPECT_TRUE( a.exchange( false, oExchange ));
            EXPECT_FALSE( a.load( oLoad ));

            bool expected = false;
            EXPECT_TRUE( a.compare_exchange_weak( expected, true, order, atomics::memory_order_relaxed ));
            EXPECT_FALSE( expected );
            EXPECT_FALSE( a.compare_exchange_weak( expected, false, order, atomics::memory_order_relaxed ));
            EXPECT_TRUE( expected );
            EXPECT_TRUE( a.load( oLoad ));

            //a = bool(false);
            a.store( false, oStore );

            expected = false;
            EXPECT_TRUE( a.compare_exchange_strong( expected, true, order, atomics::memory_order_relaxed ));
            EXPECT_FALSE( expected );
            EXPECT_FALSE( a.compare_exchange_strong( expected, false, order, atomics::memory_order_relaxed ));
            EXPECT_TRUE( expected );

            EXPECT_TRUE( a.load( oLoad ));

            EXPECT_TRUE( a.exchange( false, oExchange ));
        }

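        // atomic<void*> with explicit memory orders. arr holds the values
        // 1..aSize, so the byte pointed to always equals the pointer's offset
        // plus one. fetch_add/fetch_sub on atomic<void*> step one byte at a
        // time; standard C++ defines no void* arithmetic, so this relies on a
        // libcds/compiler extension (see the clang guard around the
        // pointer_void tests below).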
        template <typename Atomic>
        void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
        {
            atomics::memory_order oLoad = convert_to_load_order( order );
            atomics::memory_order oStore = convert_to_store_order( order );
            void *  p;

            a.store( (void *) arr, oStore );
            EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );

            p = arr;
            EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 0 );
            EXPECT_EQ( *reinterpret_cast<char *>(p), 1 );
            EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 5 );
            EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );

            EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 5 );
            EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );
            EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 3 );
            EXPECT_EQ( *reinterpret_cast<char *>(p), 4 );

            EXPECT_EQ( reinterpret_cast<char *>(a.exchange( (void *) arr, order )), arr + 3 );
            EXPECT_EQ( reinterpret_cast<char *>(a.load( oLoad )), arr );
            EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );

            for ( char i = 1; i < aSize; ++i ) {
                EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i );
                a.fetch_add( 1, order );
                EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i + 1 );
            }

            for ( char i = aSize; i > 1; --i ) {
                EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i );
                a.fetch_sub( 1, order );
                EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i - 1 );
            }
        }

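        // Runs the atomic<void*> test with the default ordering inline, then
        // with each explicit memory order; Volatile selects the volatile
        // qualification via the add_volatile helper.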
        template <bool Volatile>
        void do_test_atomic_pointer_void()
        {
            typedef typename add_volatile<atomics::atomic< void *>, Volatile>::type    atomic_pointer;

            char   arr[8];
            const char aSize = sizeof(arr)/sizeof(arr[0]);
            for ( char i = 0; i < aSize; ++i ) {
                arr[static_cast<unsigned>( i )] = i + 1;
            }

            atomic_pointer  a;
            void *  p;

            a.store( (void *) arr );
            EXPECT_EQ( *reinterpret_cast<char *>(a.load()), 1 );

            p = arr;
            EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5)));
            EXPECT_EQ( p, arr + 0 );
            EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3)));
            EXPECT_EQ( p, arr + 5 );

            EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3)));
            EXPECT_EQ( p, arr + 5 );
            EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5)));
            EXPECT_EQ( p, arr + 3 );

            EXPECT_EQ( reinterpret_cast<char *>( a.exchange( (void *) arr )), arr + 3 );
            EXPECT_EQ( reinterpret_cast<char *>( a.load()), arr );
            EXPECT_EQ( *reinterpret_cast<char *>( a.load()), 1 );

            for ( char i = 1; i < aSize; ++i ) {
                EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i );
                a.fetch_add( 1 );
                EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i + 1 );
            }

            for ( char i = aSize; i > 1; --i ) {
                EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i );
                a.fetch_sub( 1 );
                EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i - 1 );
            }

            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst );
        }

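        // atomic<Integral*> with explicit memory orders; unlike the void* case,
        // fetch_add/fetch_sub step by whole elements (sizeof(Integral) bytes).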
        template <typename Atomic, typename Integral>
        void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order )
        {
            typedef Integral integral_type;
            atomics::memory_order oLoad = convert_to_load_order( order );
            atomics::memory_order oStore = convert_to_store_order( order );
            integral_type *  p;

            a.store( arr, oStore );
            EXPECT_EQ( *a.load( oLoad ), 1 );

            p = arr;
            EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5, order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 0 );
            EXPECT_EQ( *p, 1 );
            EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3, order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 5 );
            EXPECT_EQ( *p, 6 );

            EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3, order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 5 );
            EXPECT_EQ( *p, 6 );
            EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5, order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 3 );
            EXPECT_EQ( *p, 4 );

            EXPECT_EQ( a.exchange( arr, order ), arr + 3 );
            EXPECT_EQ( a.load( oLoad ), arr );
            EXPECT_EQ( *a.load( oLoad ), 1 );

            for ( integral_type i = 1; i < aSize; ++i ) {
                integral_type * pCur = a.load( oLoad );
                EXPECT_EQ( *pCur, i );
                EXPECT_EQ( a.fetch_add( 1, order ), pCur );
                EXPECT_EQ( *a.load( oLoad ), i + 1 );
            }

            for ( integral_type i = aSize; i > 1; --i ) {
                integral_type * pCur = a.load( oLoad );
                EXPECT_EQ( *pCur, i );
                EXPECT_EQ( a.fetch_sub( 1, order ), pCur );
                EXPECT_EQ( *a.load( oLoad ), i - 1 );
            }
        }

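        // Fills an array with 1..8, runs the default-order pointer test inline,
        // then repeats with each explicit memory order.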
        template <typename Integral, bool Volatile>
        void test_atomic_pointer_for()
        {
            typedef Integral integral_type;
            typedef typename add_volatile<atomics::atomic< integral_type *>, Volatile>::type    atomic_pointer;

            integral_type   arr[8];
            const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
            for ( integral_type i = 0; i < aSize; ++i ) {
                arr[static_cast<size_t>(i)] = i + 1;
            }

            atomic_pointer  a;
            integral_type *  p;

            a.store( arr );
            EXPECT_EQ( *a.load(), 1 );

            p = arr;
            EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5 ));
            EXPECT_EQ( p, arr + 0 );
            EXPECT_EQ( *p, 1 );
            EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3 ));
            EXPECT_EQ( p, arr + 5 );
            EXPECT_EQ( *p, 6 );

            EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3 ));
            EXPECT_EQ( p, arr + 5 );
            EXPECT_EQ( *p, 6 );
            EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5 ));
            EXPECT_EQ( p, arr + 3 );
            EXPECT_EQ( *p, 4 );

            EXPECT_EQ( a.exchange( arr ), arr + 3 );
            EXPECT_EQ( a.load(), arr );
            EXPECT_EQ( *a.load(), 1 );

            for ( integral_type i = 1; i < aSize; ++i ) {
                integral_type * pCur = a.load();
                EXPECT_EQ( *pCur, i );
                integral_type * pPrev = a.fetch_add( 1 );
                EXPECT_EQ( pPrev, pCur );
                EXPECT_EQ( *a.load(), i + 1 );
            }

            for ( integral_type i = aSize; i > 1; --i ) {
                integral_type * pCur = a.load();
                EXPECT_EQ( *pCur, i );
                EXPECT_EQ( a.fetch_sub( 1 ), pCur );
                EXPECT_EQ( *a.load(), i - 1 );
            }

            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst );
        }

    public:
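        // Entry points invoked by the TEST_F cases below; each iterates over an
        // array so that atomics at different alignments are tested.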
        void test_atomic_flag()
        {
            // Array to test different alignment
            atomics::atomic_flag flags[8];
            for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
                do_test_atomic_flag( flags[i] );
        }

        void test_atomic_flag_volatile()
        {
            // Array to test different alignment
            atomics::atomic_flag volatile flags[8];
            for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
                do_test_atomic_flag( flags[i] );
        }

        template <typename AtomicBool>
        void test_atomic_bool_()
        {
            // Array to test different alignment
            AtomicBool  a[8];

            for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
                do_test_atomic_bool( a[i] );

                do_test_atomic_bool( a[i], atomics::memory_order_relaxed );
                //do_test_atomic_bool( a[i], atomics::memory_order_consume );
                do_test_atomic_bool( a[i], atomics::memory_order_acquire );
                do_test_atomic_bool( a[i], atomics::memory_order_release );
                do_test_atomic_bool( a[i], atomics::memory_order_acq_rel );
                do_test_atomic_bool( a[i], atomics::memory_order_seq_cst );
            }
        }

        void test_atomic_bool()
        {
            test_atomic_bool_< atomics::atomic<bool> >();
        }

        void test_atomic_bool_volatile()
        {
            test_atomic_bool_< atomics::atomic<bool> volatile >();
        }
    };

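    // One TEST_F case per value type; volatile variants are tested separately.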
    TEST_F( cxx11_atomic_class, atomic_char )
    {
        test_atomic_integral<char>();
    }

    TEST_F( cxx11_atomic_class, atomic_signed_char )
    {
        test_atomic_integral<signed char>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_char )
    {
        test_atomic_integral<unsigned char>();
    }

    TEST_F( cxx11_atomic_class, atomic_short_int )
    {
        test_atomic_integral<short int>();
    }

    TEST_F( cxx11_atomic_class, atomic_signed_short_int )
    {
        test_atomic_integral<signed short int>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_short_int )
    {
        test_atomic_integral<unsigned short int>();
    }

    TEST_F( cxx11_atomic_class, atomic_int )
    {
        test_atomic_integral<int>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_int )
    {
        test_atomic_integral<unsigned int>();
    }

    TEST_F( cxx11_atomic_class, atomic_long )
    {
        test_atomic_integral<long>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_long )
    {
        test_atomic_integral<unsigned long>();
    }

    TEST_F( cxx11_atomic_class, atomic_long_long )
    {
        test_atomic_integral<long long>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_long_long )
    {
        test_atomic_integral<unsigned long long>();
    }

    TEST_F( cxx11_atomic_class, atomic_char_volatile )
    {
        test_atomic_integral_volatile<char>();
    }

    TEST_F( cxx11_atomic_class, atomic_signed_char_volatile )
    {
        test_atomic_integral_volatile<signed char>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_char_volatile )
    {
        test_atomic_integral_volatile<unsigned char>();
    }

    TEST_F( cxx11_atomic_class, atomic_short_int_volatile )
    {
        test_atomic_integral_volatile<short int>();
    }

    TEST_F( cxx11_atomic_class, atomic_signed_short_int_volatile )
    {
        test_atomic_integral_volatile<signed short int>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_short_int_volatile )
    {
        test_atomic_integral_volatile<unsigned short int>();
    }

    TEST_F( cxx11_atomic_class, atomic_int_volatile )
    {
        test_atomic_integral_volatile<int>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_int_volatile )
    {
        test_atomic_integral_volatile<unsigned int>();
    }

    TEST_F( cxx11_atomic_class, atomic_long_volatile )
    {
        test_atomic_integral_volatile<long>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_long_volatile )
    {
        test_atomic_integral_volatile<unsigned long>();
    }

    TEST_F( cxx11_atomic_class, atomic_long_long_volatile )
    {
        test_atomic_integral_volatile<long long>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_long_long_volatile )
    {
        test_atomic_integral_volatile<unsigned long long>();
    }

#if !( CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 40000 )
    // clang before 4.0 fails to compile fetch_add/fetch_sub on atomic<void*>
    TEST_F( cxx11_atomic_class, atomic_pointer_void )
    {
        do_test_atomic_pointer_void<false>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_void_volatile )
    {
        do_test_atomic_pointer_void<true>();
    }
#endif

    TEST_F( cxx11_atomic_class, atomic_pointer_char )
    {
        test_atomic_pointer_for<char, false>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_char_volatile )
    {
        test_atomic_pointer_for<char, true>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_short )
    {
        test_atomic_pointer_for<short int, false>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_short_volatile )
    {
        test_atomic_pointer_for<short int, true>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_int )
    {
        test_atomic_pointer_for<int, false>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_int_volatile )
    {
        test_atomic_pointer_for<int, true>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_long )
    {
        test_atomic_pointer_for<long, false>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_long_volatile )
    {
        test_atomic_pointer_for<long, true>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_long_long )
    {
        test_atomic_pointer_for<long long, false>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_long_long_volatile )
    {
        test_atomic_pointer_for<long long, true>();
    }
}   // namespace