Fixed CLang incompatibility
[libcds.git] / test / unit / misc / cxx11_atomic_class.cpp
1 /*
2     This file is a part of libcds - Concurrent Data Structures library
3
4     (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
5
6     Source code repo: http://github.com/khizmax/libcds/
7     Download: http://sourceforge.net/projects/libcds/files/
8
9     Redistribution and use in source and binary forms, with or without
10     modification, are permitted provided that the following conditions are met:
11
12     * Redistributions of source code must retain the above copyright notice, this
13       list of conditions and the following disclaimer.
14
15     * Redistributions in binary form must reproduce the above copyright notice,
16       this list of conditions and the following disclaimer in the documentation
17       and/or other materials provided with the distribution.
18
19     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20     AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21     IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23     FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24     DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25     SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27     OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <gtest/gtest.h>
32 #include <cds/algo/atomic.h>
33 #include "cxx11_convert_memory_order.h"
34
#if CDS_COMPILER == CDS_COMPILER_CLANG && !defined( _LIBCPP_VERSION )
    // Clang (at least 3.6) without libc++ has no gcc-specific __atomic_is_lock_free
    // function, so the lock-free check is compiled out on that toolchain.
#   define EXPECT_ATOMIC_IS_LOCK_FREE( x )
#else
#   define EXPECT_ATOMIC_IS_LOCK_FREE( x ) EXPECT_TRUE( x.is_lock_free() )
#endif
41
42
43 namespace {
44     class cxx11_atomic_class: public ::testing::Test
45     {
46     protected:
47         template <typename AtomicFlag>
48         void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order )
49         {
50             atomics::memory_order mo_clear = convert_to_store_order(order);
51             for ( int i = 0; i < 5; ++i ) {
52                 EXPECT_TRUE( !f.test_and_set( order ));
53                 EXPECT_TRUE( f.test_and_set( order ));
54                 f.clear( mo_clear );
55             }
56         }
57
58         template <typename AtomicFlag>
59         void do_test_atomic_flag( AtomicFlag& f)
60         {
61             f.clear();
62
63             for ( int i = 0; i < 5; ++i ) {
64                 EXPECT_TRUE( !f.test_and_set());
65                 EXPECT_TRUE( f.test_and_set());
66                 f.clear();
67             }
68
69             do_test_atomic_flag_mo( f, atomics::memory_order_relaxed );
70             //do_test_atomic_flag_mo( f, atomics::memory_order_consume );
71             do_test_atomic_flag_mo( f, atomics::memory_order_acquire );
72             do_test_atomic_flag_mo( f, atomics::memory_order_release );
73             do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel );
74             do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst );
75         }
76
77         template <class Atomic, typename Integral>
78         void do_test_atomic_type(Atomic& a)
79         {
80             typedef Integral    integral_type;
81
82             EXPECT_ATOMIC_IS_LOCK_FREE( a );
83             a.store( (integral_type) 0 );
84             EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
85
86             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
87                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
88                 EXPECT_EQ( a.exchange( n ), static_cast<integral_type>( 0 ));
89                 EXPECT_EQ( a.load(), n );
90                 EXPECT_EQ( a.exchange( (integral_type) 0 ), n );
91                 EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
92             }
93
94             integral_type prev = a.load();
95             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
96                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
97                 integral_type expected = prev;
98
99                 EXPECT_TRUE( a.compare_exchange_weak( expected, n));
100                 EXPECT_EQ( expected, prev );
101                 EXPECT_FALSE( a.compare_exchange_weak( expected, n));
102                 EXPECT_EQ( expected, n );
103
104                 prev = n;
105                 EXPECT_EQ( a.load(), n );
106             }
107
108             a = (integral_type) 0;
109
110             prev = a;
111             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
112                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
113                 integral_type expected = prev;
114
115                 EXPECT_TRUE( a.compare_exchange_strong( expected, n));
116                 EXPECT_EQ( expected, prev );
117                 EXPECT_FALSE( a.compare_exchange_strong( expected, n));
118                 EXPECT_EQ( expected, n );
119
120                 prev = n;
121                 EXPECT_EQ( a.load(), n );
122             }
123
124             EXPECT_EQ( a.exchange( (integral_type) 0 ), prev );
125         }
126
127         template <class Atomic, typename Integral>
128         void do_test_atomic_integral(Atomic& a)
129         {
130             do_test_atomic_type< Atomic, Integral >(a);
131
132             typedef Integral    integral_type;
133
134             // fetch_xxx testing
135             a.store( (integral_type) 0 );
136
137             // fetch_add
138             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
139             {
140                 integral_type prev = a.load();
141                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
142
143                 EXPECT_EQ( a.fetch_add(n), prev);
144             }
145
146             // fetch_sub
147             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
148             {
149                 integral_type prev = a.load();
150                 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
151
152                 EXPECT_EQ( a.fetch_sub(n), prev);
153             }
154             EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
155
156             // fetch_or / fetc_xor / fetch_and
157             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
158             {
159                 integral_type prev = a.load()  ;;
160                 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
161
162                 EXPECT_EQ( a.fetch_or( mask ), prev );
163                 prev = a.load();
164                 EXPECT_EQ( ( prev & mask), mask);
165
166                 EXPECT_EQ( a.fetch_and( (integral_type) ~mask ), prev );
167                 prev = a.load();
168                 EXPECT_EQ( integral_type(prev & mask), integral_type(0));
169
170                 EXPECT_EQ( a.fetch_xor( mask ), prev );
171                 prev = a.load();
172                 EXPECT_EQ( integral_type( prev & mask), mask);
173             }
174             EXPECT_EQ( a.load(), (integral_type) -1 );
175
176
177             // op= testing
178             a = (integral_type) 0;
179
180             // +=
181             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
182             {
183                 integral_type prev = a;
184                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
185
186                 EXPECT_EQ( (a += n), (prev + n));
187             }
188
189             // -=
190             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
191             {
192                 integral_type prev = a;
193                 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
194
195                 EXPECT_EQ( (a -= n),  prev - n );
196             }
197             EXPECT_EQ( a.load(), (integral_type) 0 );
198
199             // |= / ^= / &=
200             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
201             {
202                 integral_type prev = a;
203                 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
204
205                 EXPECT_EQ( (a |= mask ), (prev | mask ));
206                 prev = a;
207                 EXPECT_EQ( ( prev & mask), mask);
208
209                 EXPECT_EQ( (a &= (integral_type) ~mask ), ( prev & (integral_type) ~mask ));
210                 prev = a;
211                 EXPECT_EQ( ( prev & mask), integral_type( 0 ));
212
213                 EXPECT_EQ( (a ^= mask ), (prev ^ mask ));
214                 prev = a;
215                 EXPECT_EQ( ( prev & mask), mask);
216             }
217             EXPECT_EQ( a.load(), (integral_type) -1 );
218         }
219
220         template <class Atomic, typename Integral>
221         void do_test_atomic_type( Atomic& a, atomics::memory_order order )
222         {
223             typedef Integral    integral_type;
224
225             const atomics::memory_order oLoad = convert_to_load_order( order );
226             const atomics::memory_order oStore = convert_to_store_order( order );
227
228             EXPECT_ATOMIC_IS_LOCK_FREE( a );
229             a.store((integral_type) 0, oStore );
230             EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
231
232             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
233                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
234                 EXPECT_EQ( a.exchange( n, order ), integral_type( 0 ));
235                 EXPECT_EQ( a.load( oLoad ), n );
236                 EXPECT_EQ( a.exchange( (integral_type) 0, order ), n );
237                 EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
238             }
239
240             integral_type prev = a.load( oLoad );
241             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
242                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
243                 integral_type expected = prev;
244
245                 EXPECT_TRUE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
246                 EXPECT_EQ( expected, prev );
247                 EXPECT_FALSE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
248                 EXPECT_EQ( expected, n );
249
250                 prev = n;
251                 EXPECT_EQ( a.load( oLoad ), n );
252             }
253
254             a.store( (integral_type) 0, oStore );
255
256             prev = a.load( oLoad );
257             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
258                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
259                 integral_type expected = prev;
260
261                 EXPECT_TRUE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
262                 EXPECT_EQ( expected, prev );
263                 EXPECT_FALSE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
264                 EXPECT_EQ( expected, n );
265
266                 prev = n;
267                 EXPECT_EQ( a.load( oLoad ), n );
268             }
269
270             EXPECT_EQ( a.exchange( (integral_type) 0, order ), prev );
271         }
272
273         template <class Atomic, typename Integral>
274         void do_test_atomic_integral( Atomic& a, atomics::memory_order order )
275         {
276             do_test_atomic_type< Atomic, Integral >( a, order );
277
278             typedef Integral    integral_type;
279
280             const atomics::memory_order oLoad = convert_to_load_order( order );
281             const atomics::memory_order oStore = convert_to_store_order( order );
282
283             // fetch_xxx testing
284             a.store( (integral_type) 0, oStore );
285
286             // fetch_add
287             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
288             {
289                 integral_type prev = a.load( oLoad );
290                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
291
292                 EXPECT_EQ( a.fetch_add( n, order), prev);
293             }
294
295             // fetch_sub
296             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
297             {
298                 integral_type prev = a.load( oLoad );
299                 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
300
301                 EXPECT_EQ( a.fetch_sub( n, order ), prev);
302             }
303             EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
304
305             // fetch_or / fetc_xor / fetch_and
306             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
307             {
308                 integral_type prev = a.load( oLoad )  ;;
309                 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
310
311                 EXPECT_EQ( a.fetch_or( mask, order ), prev );
312                 prev = a.load( oLoad );
313                 EXPECT_EQ( ( prev & mask), mask);
314
315                 EXPECT_EQ( a.fetch_and( (integral_type) ~mask, order ), prev );
316                 prev = a.load( oLoad );
317                 EXPECT_EQ( ( prev & mask), integral_type( 0 ));
318
319                 EXPECT_EQ( a.fetch_xor( mask, order ), prev );
320                 prev = a.load( oLoad );
321                 EXPECT_EQ( ( prev & mask), mask);
322             }
323             EXPECT_EQ( a.load( oLoad ), (integral_type) -1 );
324         }
325
326
327
328         template <typename Atomic, typename Integral>
329         void test_atomic_integral_(Atomic& a)
330         {
331             do_test_atomic_integral<Atomic, Integral >(a);
332
333             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_relaxed );
334             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acquire );
335             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_release );
336             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acq_rel );
337             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_seq_cst );
338         }
339
340         template <typename Integral>
341         void test_atomic_integral()
342         {
343             typedef atomics::atomic<Integral> atomic_type;
344
345             atomic_type a[8];
346             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
347                 test_atomic_integral_<atomic_type, Integral>( a[i] );
348             }
349         }
350         template <typename Integral>
351         void test_atomic_integral_volatile()
352         {
353             typedef atomics::atomic<Integral> volatile atomic_type;
354
355             atomic_type a[8];
356             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
357                 test_atomic_integral_<atomic_type, Integral>( a[i] );
358             }
359         }
360
361         template <class AtomicBool>
362         void do_test_atomic_bool( AtomicBool& a )
363         {
364             EXPECT_ATOMIC_IS_LOCK_FREE( a );
365             a.store( false );
366             EXPECT_FALSE( a );
367             EXPECT_FALSE( a.load());
368
369             EXPECT_FALSE( a.exchange( true ));
370             EXPECT_TRUE( a.load());
371             EXPECT_TRUE( a.exchange( false ));
372             EXPECT_FALSE( a.load());
373
374             bool expected = false;
375             EXPECT_TRUE( a.compare_exchange_weak( expected, true));
376             EXPECT_FALSE( expected );
377             EXPECT_FALSE( a.compare_exchange_weak( expected, false));
378             EXPECT_TRUE( expected );
379             EXPECT_TRUE( a.load());
380
381             a.store( false );
382
383             expected = false;
384             EXPECT_TRUE( a.compare_exchange_strong( expected, true));
385             EXPECT_FALSE( expected );
386             EXPECT_FALSE( a.compare_exchange_strong( expected, false));
387             EXPECT_TRUE( expected );
388
389             EXPECT_TRUE( a.load());
390
391             EXPECT_TRUE( a.exchange( false ));
392         }
393
394         template <class AtomicBool>
395         void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order )
396         {
397             const atomics::memory_order oLoad = convert_to_load_order( order );
398             const atomics::memory_order oStore = convert_to_store_order( order );
399             const atomics::memory_order oExchange = convert_to_exchange_order( order );
400
401             EXPECT_ATOMIC_IS_LOCK_FREE( a );
402             a.store( false, oStore );
403             EXPECT_FALSE( a );
404             EXPECT_FALSE( a.load( oLoad ));
405
406             EXPECT_FALSE( a.exchange( true, oExchange ));
407             EXPECT_TRUE( a.load( oLoad ));
408             EXPECT_TRUE( a.exchange( false, oExchange ));
409             EXPECT_FALSE( a.load( oLoad ));
410
411             bool expected = false;
412             EXPECT_TRUE( a.compare_exchange_weak( expected, true, order, atomics::memory_order_relaxed));
413             EXPECT_FALSE( expected );
414             EXPECT_FALSE( a.compare_exchange_weak( expected, false, order, atomics::memory_order_relaxed));
415             EXPECT_TRUE( expected );
416             EXPECT_TRUE( a.load( oLoad ));
417
418             //a = bool(false);
419             a.store( false, oStore );
420
421             expected = false;
422             EXPECT_TRUE( a.compare_exchange_strong( expected, true, order, atomics::memory_order_relaxed));
423             EXPECT_FALSE( expected );
424             EXPECT_FALSE( a.compare_exchange_strong( expected, false, order, atomics::memory_order_relaxed));
425             EXPECT_TRUE( expected );
426
427             EXPECT_TRUE( a.load( oLoad ));
428
429             EXPECT_TRUE( a.exchange( false, oExchange ));
430         }
431
432
433         template <typename Atomic>
434         void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
435         {
436             atomics::memory_order oLoad = convert_to_load_order(order);
437             atomics::memory_order oStore = convert_to_store_order(order);
438             void *  p;
439
440             a.store( (void *) arr, oStore );
441             EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );
442
443             p = arr;
444             EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
445             EXPECT_EQ( p, arr + 0 );
446             EXPECT_EQ( *reinterpret_cast<char *>(p), 1 );
447             EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
448             EXPECT_EQ( p, arr + 5 );
449             EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );
450
451             EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
452             EXPECT_EQ( p, arr + 5 );
453             EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );
454             EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
455             EXPECT_EQ( p, arr + 3 );
456             EXPECT_EQ( *reinterpret_cast<char *>(p), 4 );
457
458             EXPECT_EQ( reinterpret_cast<char *>(a.exchange( (void *) arr, order )), arr + 3 );
459             EXPECT_EQ( reinterpret_cast<char *>(a.load( oLoad )), arr );
460             EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );
461
462             for ( char i = 1; i < aSize; ++i ) {
463                 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i );
464                 a.fetch_add( 1, order );
465                 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i + 1 );
466             }
467
468             for ( char i = aSize; i > 1; --i ) {
469                 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i );
470                 a.fetch_sub( 1, order );
471                 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i - 1 );
472             }
473         }
474
475         template <bool Volatile>
476         void do_test_atomic_pointer_void()
477         {
478             typedef typename add_volatile<atomics::atomic< void *>, Volatile>::type    atomic_pointer;
479
480             char   arr[8];
481             const char aSize = sizeof(arr)/sizeof(arr[0]);
482             for ( char i = 0; i < aSize; ++i ) {
483                 arr[static_cast<unsigned>( i )] = i + 1;
484             }
485
486             atomic_pointer  a;
487             void *  p;
488
489             a.store( (void *) arr );
490             EXPECT_EQ( *reinterpret_cast<char *>(a.load()), 1 );
491
492             p = arr;
493             EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5)));
494             EXPECT_EQ( p, arr + 0 );
495             EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3)));
496             EXPECT_EQ( p, arr + 5 );
497
498             EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3)));
499             EXPECT_EQ( p, arr + 5 );
500             EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5)));
501             EXPECT_EQ( p, arr + 3 );
502
503             EXPECT_EQ( reinterpret_cast<char *>( a.exchange( (void *) arr )), arr + 3 );
504             EXPECT_EQ( reinterpret_cast<char *>( a.load()), arr );
505             EXPECT_EQ( *reinterpret_cast<char *>( a.load()), 1 );
506
507             for ( char i = 1; i < aSize; ++i ) {
508                 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i );
509                 a.fetch_add( 1 );
510                 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i + 1 );
511             }
512
513             for ( char i = aSize; i > 1; --i ) {
514                 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i );
515                 a.fetch_sub( 1 );
516                 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i - 1 );
517             }
518
519             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
520             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
521             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
522             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel );
523             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst );
524         }
525
526         template <typename Atomic, typename Integral>
527         void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order )
528         {
529             typedef Integral integral_type;
530             atomics::memory_order oLoad = convert_to_load_order(order);
531             atomics::memory_order oStore = convert_to_store_order(order);
532             integral_type *  p;
533
534             a.store( arr, oStore );
535             EXPECT_EQ( *a.load( oLoad ), 1 );
536
537             p = arr;
538             EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5, order, atomics::memory_order_relaxed ));
539             EXPECT_EQ( p, arr + 0 );
540             EXPECT_EQ( *p, 1 );
541             EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3, order, atomics::memory_order_relaxed ));
542             EXPECT_EQ( p, arr + 5 );
543             EXPECT_EQ( *p, 6 );
544
545             EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3, order, atomics::memory_order_relaxed ));
546             EXPECT_EQ( p, arr + 5 );
547             EXPECT_EQ( *p, 6 );
548             EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5, order, atomics::memory_order_relaxed ));
549             EXPECT_EQ( p, arr + 3 );
550             EXPECT_EQ( *p, 4 );
551
552             EXPECT_EQ( a.exchange( arr, order ), arr + 3 );
553             EXPECT_EQ( a.load( oLoad ), arr );
554             EXPECT_EQ( *a.load( oLoad ), 1 );
555
556             for ( integral_type i = 1; i < aSize; ++i ) {
557                 integral_type * p = a.load();
558                 EXPECT_EQ( *p, i );
559                 EXPECT_EQ( a.fetch_add( 1, order ), p );
560                 EXPECT_EQ( *a.load( oLoad ), i + 1 );
561             }
562
563             for ( integral_type i = aSize; i > 1; --i ) {
564                 integral_type * p = a.load();
565                 EXPECT_EQ( *p, i  );
566                 EXPECT_EQ( a.fetch_sub( 1, order ), p );
567                 EXPECT_EQ( *a.load( oLoad ), i - 1 );
568             }
569         }
570
571         template <typename Integral, bool Volatile>
572         void test_atomic_pointer_for()
573         {
574             typedef Integral integral_type;
575             typedef typename add_volatile<atomics::atomic< integral_type *>, Volatile>::type    atomic_pointer;
576
577             integral_type   arr[8];
578             const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
579             for ( integral_type i = 0; i < aSize; ++i ) {
580                 arr[static_cast<size_t>(i)] = i + 1;
581             }
582
583             atomic_pointer  a;
584             integral_type *  p;
585
586             a.store( arr );
587             EXPECT_EQ( *a.load(), 1 );
588
589             p = arr;
590             EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5 ));
591             EXPECT_EQ( p, arr + 0 );
592             EXPECT_EQ( *p, 1 );
593             EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3 ));
594             EXPECT_EQ( p, arr + 5 );
595             EXPECT_EQ( *p, 6 );
596
597             EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3 ));
598             EXPECT_EQ( p, arr + 5 );
599             EXPECT_EQ( *p, 6 );
600             EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5 ));
601             EXPECT_EQ( p, arr + 3 );
602             EXPECT_EQ( *p, 4 );
603
604             EXPECT_EQ( a.exchange( arr ), arr + 3 );
605             EXPECT_EQ( a.load(), arr );
606             EXPECT_EQ( *a.load(), 1 );
607
608             for ( integral_type i = 1; i < aSize; ++i ) {
609                 integral_type * p = a.load();
610                 EXPECT_EQ( *p, i );
611                 integral_type * pa = a.fetch_add( 1 );
612                 EXPECT_EQ( pa, p );
613                 EXPECT_EQ( *a.load(), i + 1 );
614             }
615
616             for ( integral_type i = aSize; i > 1; --i ) {
617                 integral_type * p = a.load();
618                 EXPECT_EQ( *p, i  );
619                 EXPECT_EQ( a.fetch_sub( 1 ), p );
620                 EXPECT_EQ( *a.load(), i - 1 );
621             }
622
623             test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed );
624             test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire );
625             test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release );
626             test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel );
627             test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst );
628         }
629
630     public:
631         void test_atomic_flag()
632         {
633             // Array to test different alignment
634
635             atomics::atomic_flag flags[8];
636             for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
637                 do_test_atomic_flag( flags[i] );
638         }
639
640         void test_atomic_flag_volatile()
641         {
642             // Array to test different alignment
643
644             atomics::atomic_flag volatile flags[8];
645             for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
646                 do_test_atomic_flag( flags[i] );
647         }
648
649         template <typename AtomicBool>
650         void test_atomic_bool_()
651         {
652             // Array to test different alignment
653             AtomicBool  a[8];
654
655             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
656                 do_test_atomic_bool( a[i] );
657
658                 do_test_atomic_bool( a[i], atomics::memory_order_relaxed );
659                 //do_test_atomic_bool( a[i], atomics::memory_order_consume );
660                 do_test_atomic_bool( a[i], atomics::memory_order_acquire );
661                 do_test_atomic_bool( a[i], atomics::memory_order_release );
662                 do_test_atomic_bool( a[i], atomics::memory_order_acq_rel );
663                 do_test_atomic_bool( a[i], atomics::memory_order_seq_cst );
664             }
665         }
666
667         void test_atomic_bool()
668         {
669             test_atomic_bool_< atomics::atomic<bool> >();
670         }
671         void test_atomic_bool_volatile()
672         {
673             test_atomic_bool_< atomics::atomic<bool> volatile >();
674         }
675     };
676
677     TEST_F( cxx11_atomic_class, atomic_char )
678     {
679         test_atomic_integral<char>();
680     }
681
682     TEST_F( cxx11_atomic_class, atomic_signed_char )
683     {
684         test_atomic_integral<signed char>();
685     }
686
687     TEST_F( cxx11_atomic_class, atomic_unsigned_char )
688     {
689         test_atomic_integral<unsigned char>();
690     }
691
692     TEST_F( cxx11_atomic_class, atomic_short_int )
693     {
694         test_atomic_integral<short int>();
695     }
696
697     TEST_F( cxx11_atomic_class, atomic_signed_short_int )
698     {
699         test_atomic_integral<signed short int>();
700     }
701
702     TEST_F( cxx11_atomic_class, atomic_unsigned_short_int )
703     {
704         test_atomic_integral<unsigned short int>();
705     }
706
707     TEST_F( cxx11_atomic_class, atomic_int )
708     {
709         test_atomic_integral<int>();
710     }
711
712     TEST_F( cxx11_atomic_class, atomic_unsigned_int )
713     {
714         test_atomic_integral<unsigned int>();
715     }
716
717     TEST_F( cxx11_atomic_class, atomic_long )
718     {
719         test_atomic_integral<long>();
720     }
721
722     TEST_F( cxx11_atomic_class, atomic_unsigned_long )
723     {
724         test_atomic_integral<unsigned long>();
725     }
726
727     TEST_F( cxx11_atomic_class, atomic_long_long )
728     {
729         test_atomic_integral<long long>();
730     }
731
732     TEST_F( cxx11_atomic_class, atomic_unsigned_long_long )
733     {
734         test_atomic_integral<unsigned long long>();
735     }
736
737     TEST_F( cxx11_atomic_class, atomic_char_volatile )
738     {
739         test_atomic_integral_volatile<char>();
740     }
741
742     TEST_F( cxx11_atomic_class, atomic_signed_char_volatile )
743     {
744         test_atomic_integral_volatile<signed char>();
745     }
746
747     TEST_F( cxx11_atomic_class, atomic_unsigned_char_volatile )
748     {
749         test_atomic_integral_volatile<unsigned char>();
750     }
751
752     TEST_F( cxx11_atomic_class, atomic_short_int_volatile )
753     {
754         test_atomic_integral_volatile<short int>();
755     }
756
757     TEST_F( cxx11_atomic_class, atomic_signed_short_int_volatile )
758     {
759         test_atomic_integral_volatile<signed short int>();
760     }
761
762     TEST_F( cxx11_atomic_class, atomic_unsigned_short_int_volatile )
763     {
764         test_atomic_integral_volatile<unsigned short int>();
765     }
766
767     TEST_F( cxx11_atomic_class, atomic_int_volatile )
768     {
769         test_atomic_integral_volatile<int>();
770     }
771
772     TEST_F( cxx11_atomic_class, atomic_unsigned_int_volatile )
773     {
774         test_atomic_integral_volatile<unsigned int>();
775     }
776
777     TEST_F( cxx11_atomic_class, atomic_long_volatile )
778     {
779         test_atomic_integral_volatile<long>();
780     }
781
782     TEST_F( cxx11_atomic_class, atomic_unsigned_long_volatile )
783     {
784         test_atomic_integral_volatile<unsigned long>();
785     }
786
787     TEST_F( cxx11_atomic_class, atomic_long_long_volatile )
788     {
789         test_atomic_integral_volatile<long long>();
790     }
791
792     TEST_F( cxx11_atomic_class, atomic_unsigned_long_long_volatile )
793     {
794         test_atomic_integral_volatile<unsigned long long>();
795     }
796
#if !( CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 40000 )
    // clang < 4.0 has a compile error with atomic<void*> fetch_add/fetch_sub,
    // so these two tests are skipped on that compiler.
    TEST_F( cxx11_atomic_class, atomic_pointer_void )
    {
        do_test_atomic_pointer_void<false>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_void_volatile )
    {
        do_test_atomic_pointer_void<true>();
    }
#endif
809
810     TEST_F( cxx11_atomic_class, atomic_pointer_char )
811     {
812         test_atomic_pointer_for<char, false>();
813     }
814
815     TEST_F( cxx11_atomic_class, atomic_pointer_char_volatile )
816     {
817         test_atomic_pointer_for<char, true>();
818     }
819
820     TEST_F( cxx11_atomic_class, atomic_pointer_short )
821     {
822         test_atomic_pointer_for<short int, false>();
823     }
824
825     TEST_F( cxx11_atomic_class, atomic_pointer_short_volatile )
826     {
827         test_atomic_pointer_for<short int, true>();
828     }
829
830     TEST_F( cxx11_atomic_class, atomic_pointer_int )
831     {
832         test_atomic_pointer_for<int, false>();
833     }
834
835     TEST_F( cxx11_atomic_class, atomic_pointer_int_volatile )
836     {
837         test_atomic_pointer_for<int, true>();
838     }
839
840     TEST_F( cxx11_atomic_class, atomic_pointer_long )
841     {
842         test_atomic_pointer_for<long, false>();
843     }
844
845     TEST_F( cxx11_atomic_class, atomic_pointer_long_volatile )
846     {
847         test_atomic_pointer_for<long, true>();
848     }
849
850     TEST_F( cxx11_atomic_class, atomic_pointer_long_long )
851     {
852         test_atomic_pointer_for<long long, false>();
853     }
854
855     TEST_F( cxx11_atomic_class, atomic_pointer_long_long_volatile )
856     {
857         test_atomic_pointer_for<long long, true>();
858     }
859 }   // namespace