Fixed -Wshadow warnings
[libcds.git] / test / unit / misc / cxx11_atomic_class.cpp
1 /*
2     This file is a part of libcds - Concurrent Data Structures library
3
4     (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
5
6     Source code repo: http://github.com/khizmax/libcds/
7     Download: http://sourceforge.net/projects/libcds/files/
8
9     Redistribution and use in source and binary forms, with or without
10     modification, are permitted provided that the following conditions are met:
11
12     * Redistributions of source code must retain the above copyright notice, this
13       list of conditions and the following disclaimer.
14
15     * Redistributions in binary form must reproduce the above copyright notice,
16       this list of conditions and the following disclaimer in the documentation
17       and/or other materials provided with the distribution.
18
19     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20     AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21     IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23     FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24     DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25     SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27     OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <cds_test/ext_gtest.h>
32 #include <cds/algo/atomic.h>
33 #include "cxx11_convert_memory_order.h"
34
35 #define EXPECT_ATOMIC_IS_LOCK_FREE( x ) EXPECT_TRUE( x.is_lock_free())
36
37 namespace {
38     class cxx11_atomic_class: public ::testing::Test
39     {
40     protected:
41         template <typename AtomicFlag>
42         void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order )
43         {
44             atomics::memory_order mo_clear = convert_to_store_order(order);
45             for ( int i = 0; i < 5; ++i ) {
46                 EXPECT_TRUE( !f.test_and_set( order ));
47                 EXPECT_TRUE( f.test_and_set( order ));
48                 f.clear( mo_clear );
49             }
50         }
51
52         template <typename AtomicFlag>
53         void do_test_atomic_flag( AtomicFlag& f)
54         {
55             f.clear();
56
57             for ( int i = 0; i < 5; ++i ) {
58                 EXPECT_TRUE( !f.test_and_set());
59                 EXPECT_TRUE( f.test_and_set());
60                 f.clear();
61             }
62
63             do_test_atomic_flag_mo( f, atomics::memory_order_relaxed );
64             //do_test_atomic_flag_mo( f, atomics::memory_order_consume );
65             do_test_atomic_flag_mo( f, atomics::memory_order_acquire );
66             do_test_atomic_flag_mo( f, atomics::memory_order_release );
67             do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel );
68             do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst );
69         }
70
71         template <class Atomic, typename Integral>
72         void do_test_atomic_type(Atomic& a)
73         {
74             typedef Integral    integral_type;
75
76             EXPECT_ATOMIC_IS_LOCK_FREE( a );
77             a.store( (integral_type) 0 );
78             EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
79
80             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
81                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
82                 EXPECT_EQ( a.exchange( n ), static_cast<integral_type>( 0 ));
83                 EXPECT_EQ( a.load(), n );
84                 EXPECT_EQ( a.exchange( (integral_type) 0 ), n );
85                 EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
86             }
87
88             integral_type prev = a.load();
89             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
90                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
91                 integral_type expected = prev;
92
93                 EXPECT_TRUE( a.compare_exchange_weak( expected, n));
94                 EXPECT_EQ( expected, prev );
95                 EXPECT_FALSE( a.compare_exchange_weak( expected, n));
96                 EXPECT_EQ( expected, n );
97
98                 prev = n;
99                 EXPECT_EQ( a.load(), n );
100             }
101
102             a = (integral_type) 0;
103
104             prev = a;
105             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
106                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
107                 integral_type expected = prev;
108
109                 EXPECT_TRUE( a.compare_exchange_strong( expected, n));
110                 EXPECT_EQ( expected, prev );
111                 EXPECT_FALSE( a.compare_exchange_strong( expected, n));
112                 EXPECT_EQ( expected, n );
113
114                 prev = n;
115                 EXPECT_EQ( a.load(), n );
116             }
117
118             EXPECT_EQ( a.exchange( (integral_type) 0 ), prev );
119         }
120
121         template <class Atomic, typename Integral>
122         void do_test_atomic_integral(Atomic& a)
123         {
124             do_test_atomic_type< Atomic, Integral >(a);
125
126             typedef Integral    integral_type;
127
128             // fetch_xxx testing
129             a.store( (integral_type) 0 );
130
131             // fetch_add
132             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
133             {
134                 integral_type prev = a.load();
135                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
136
137                 EXPECT_EQ( a.fetch_add(n), prev);
138             }
139
140             // fetch_sub
141             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
142             {
143                 integral_type prev = a.load();
144                 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
145
146                 EXPECT_EQ( a.fetch_sub(n), prev);
147             }
148             EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
149
150             // fetch_or / fetc_xor / fetch_and
151             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
152             {
153                 integral_type prev = a.load()  ;;
154                 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
155
156                 EXPECT_EQ( a.fetch_or( mask ), prev );
157                 prev = a.load();
158                 EXPECT_EQ( ( prev & mask), mask);
159
160                 EXPECT_EQ( a.fetch_and( (integral_type) ~mask ), prev );
161                 prev = a.load();
162                 EXPECT_EQ( integral_type(prev & mask), integral_type(0));
163
164                 EXPECT_EQ( a.fetch_xor( mask ), prev );
165                 prev = a.load();
166                 EXPECT_EQ( integral_type( prev & mask), mask);
167             }
168             EXPECT_EQ( a.load(), (integral_type) -1 );
169
170
171             // op= testing
172             a = (integral_type) 0;
173
174             // +=
175             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
176             {
177                 integral_type prev = a;
178                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
179
180                 EXPECT_EQ( (a += n), (prev + n));
181             }
182
183             // -=
184             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
185             {
186                 integral_type prev = a;
187                 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
188
189                 EXPECT_EQ( (a -= n),  prev - n );
190             }
191             EXPECT_EQ( a.load(), (integral_type) 0 );
192
193             // |= / ^= / &=
194             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
195             {
196                 integral_type prev = a;
197                 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
198
199                 EXPECT_EQ( (a |= mask ), (prev | mask ));
200                 prev = a;
201                 EXPECT_EQ( ( prev & mask), mask);
202
203                 EXPECT_EQ( (a &= (integral_type) ~mask ), ( prev & (integral_type) ~mask ));
204                 prev = a;
205                 EXPECT_EQ( ( prev & mask), integral_type( 0 ));
206
207                 EXPECT_EQ( (a ^= mask ), (prev ^ mask ));
208                 prev = a;
209                 EXPECT_EQ( ( prev & mask), mask);
210             }
211             EXPECT_EQ( a.load(), (integral_type) -1 );
212         }
213
214         template <class Atomic, typename Integral>
215         void do_test_atomic_type( Atomic& a, atomics::memory_order order )
216         {
217             typedef Integral    integral_type;
218
219             const atomics::memory_order oLoad = convert_to_load_order( order );
220             const atomics::memory_order oStore = convert_to_store_order( order );
221
222             EXPECT_ATOMIC_IS_LOCK_FREE( a );
223             a.store((integral_type) 0, oStore );
224             EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
225
226             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
227                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
228                 EXPECT_EQ( a.exchange( n, order ), integral_type( 0 ));
229                 EXPECT_EQ( a.load( oLoad ), n );
230                 EXPECT_EQ( a.exchange( (integral_type) 0, order ), n );
231                 EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
232             }
233
234             integral_type prev = a.load( oLoad );
235             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
236                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
237                 integral_type expected = prev;
238
239                 EXPECT_TRUE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
240                 EXPECT_EQ( expected, prev );
241                 EXPECT_FALSE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
242                 EXPECT_EQ( expected, n );
243
244                 prev = n;
245                 EXPECT_EQ( a.load( oLoad ), n );
246             }
247
248             a.store( (integral_type) 0, oStore );
249
250             prev = a.load( oLoad );
251             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
252                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
253                 integral_type expected = prev;
254
255                 EXPECT_TRUE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
256                 EXPECT_EQ( expected, prev );
257                 EXPECT_FALSE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
258                 EXPECT_EQ( expected, n );
259
260                 prev = n;
261                 EXPECT_EQ( a.load( oLoad ), n );
262             }
263
264             EXPECT_EQ( a.exchange( (integral_type) 0, order ), prev );
265         }
266
267         template <class Atomic, typename Integral>
268         void do_test_atomic_integral( Atomic& a, atomics::memory_order order )
269         {
270             do_test_atomic_type< Atomic, Integral >( a, order );
271
272             typedef Integral    integral_type;
273
274             const atomics::memory_order oLoad = convert_to_load_order( order );
275             const atomics::memory_order oStore = convert_to_store_order( order );
276
277             // fetch_xxx testing
278             a.store( (integral_type) 0, oStore );
279
280             // fetch_add
281             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
282             {
283                 integral_type prev = a.load( oLoad );
284                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
285
286                 EXPECT_EQ( a.fetch_add( n, order), prev);
287             }
288
289             // fetch_sub
290             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
291             {
292                 integral_type prev = a.load( oLoad );
293                 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
294
295                 EXPECT_EQ( a.fetch_sub( n, order ), prev);
296             }
297             EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
298
299             // fetch_or / fetc_xor / fetch_and
300             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
301             {
302                 integral_type prev = a.load( oLoad )  ;;
303                 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
304
305                 EXPECT_EQ( a.fetch_or( mask, order ), prev );
306                 prev = a.load( oLoad );
307                 EXPECT_EQ( ( prev & mask), mask);
308
309                 EXPECT_EQ( a.fetch_and( (integral_type) ~mask, order ), prev );
310                 prev = a.load( oLoad );
311                 EXPECT_EQ( ( prev & mask), integral_type( 0 ));
312
313                 EXPECT_EQ( a.fetch_xor( mask, order ), prev );
314                 prev = a.load( oLoad );
315                 EXPECT_EQ( ( prev & mask), mask);
316             }
317             EXPECT_EQ( a.load( oLoad ), (integral_type) -1 );
318         }
319
320
321
322         template <typename Atomic, typename Integral>
323         void test_atomic_integral_(Atomic& a)
324         {
325             do_test_atomic_integral<Atomic, Integral >(a);
326
327             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_relaxed );
328             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acquire );
329             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_release );
330             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acq_rel );
331             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_seq_cst );
332         }
333
334         template <typename Integral>
335         void test_atomic_integral()
336         {
337             typedef atomics::atomic<Integral> atomic_type;
338
339             atomic_type a[8];
340             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
341                 test_atomic_integral_<atomic_type, Integral>( a[i] );
342             }
343         }
344         template <typename Integral>
345         void test_atomic_integral_volatile()
346         {
347             typedef atomics::atomic<Integral> volatile atomic_type;
348
349             atomic_type a[8];
350             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
351                 test_atomic_integral_<atomic_type, Integral>( a[i] );
352             }
353         }
354
355         template <class AtomicBool>
356         void do_test_atomic_bool( AtomicBool& a )
357         {
358             EXPECT_ATOMIC_IS_LOCK_FREE( a );
359             a.store( false );
360             EXPECT_FALSE( a );
361             EXPECT_FALSE( a.load());
362
363             EXPECT_FALSE( a.exchange( true ));
364             EXPECT_TRUE( a.load());
365             EXPECT_TRUE( a.exchange( false ));
366             EXPECT_FALSE( a.load());
367
368             bool expected = false;
369             EXPECT_TRUE( a.compare_exchange_weak( expected, true));
370             EXPECT_FALSE( expected );
371             EXPECT_FALSE( a.compare_exchange_weak( expected, false));
372             EXPECT_TRUE( expected );
373             EXPECT_TRUE( a.load());
374
375             a.store( false );
376
377             expected = false;
378             EXPECT_TRUE( a.compare_exchange_strong( expected, true));
379             EXPECT_FALSE( expected );
380             EXPECT_FALSE( a.compare_exchange_strong( expected, false));
381             EXPECT_TRUE( expected );
382
383             EXPECT_TRUE( a.load());
384
385             EXPECT_TRUE( a.exchange( false ));
386         }
387
388         template <class AtomicBool>
389         void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order )
390         {
391             const atomics::memory_order oLoad = convert_to_load_order( order );
392             const atomics::memory_order oStore = convert_to_store_order( order );
393             const atomics::memory_order oExchange = convert_to_exchange_order( order );
394
395             EXPECT_ATOMIC_IS_LOCK_FREE( a );
396             a.store( false, oStore );
397             EXPECT_FALSE( a );
398             EXPECT_FALSE( a.load( oLoad ));
399
400             EXPECT_FALSE( a.exchange( true, oExchange ));
401             EXPECT_TRUE( a.load( oLoad ));
402             EXPECT_TRUE( a.exchange( false, oExchange ));
403             EXPECT_FALSE( a.load( oLoad ));
404
405             bool expected = false;
406             EXPECT_TRUE( a.compare_exchange_weak( expected, true, order, atomics::memory_order_relaxed));
407             EXPECT_FALSE( expected );
408             EXPECT_FALSE( a.compare_exchange_weak( expected, false, order, atomics::memory_order_relaxed));
409             EXPECT_TRUE( expected );
410             EXPECT_TRUE( a.load( oLoad ));
411
412             //a = bool(false);
413             a.store( false, oStore );
414
415             expected = false;
416             EXPECT_TRUE( a.compare_exchange_strong( expected, true, order, atomics::memory_order_relaxed));
417             EXPECT_FALSE( expected );
418             EXPECT_FALSE( a.compare_exchange_strong( expected, false, order, atomics::memory_order_relaxed));
419             EXPECT_TRUE( expected );
420
421             EXPECT_TRUE( a.load( oLoad ));
422
423             EXPECT_TRUE( a.exchange( false, oExchange ));
424         }
425
426
427         template <typename Atomic>
428         void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
429         {
430             CDS_UNUSED( aSize );
431
432             atomics::memory_order oLoad = convert_to_load_order(order);
433             atomics::memory_order oStore = convert_to_store_order(order);
434             void *  p;
435
436             a.store( (void *) arr, oStore );
437             EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );
438
439             p = arr;
440             EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
441             EXPECT_EQ( p, arr + 0 );
442             EXPECT_EQ( *reinterpret_cast<char *>(p), 1 );
443             EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
444             EXPECT_EQ( p, arr + 5 );
445             EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );
446
447             EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
448             EXPECT_EQ( p, arr + 5 );
449             EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );
450             EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
451             EXPECT_EQ( p, arr + 3 );
452             EXPECT_EQ( *reinterpret_cast<char *>(p), 4 );
453
454             EXPECT_EQ( reinterpret_cast<char *>(a.exchange( (void *) arr, order )), arr + 3 );
455             EXPECT_EQ( reinterpret_cast<char *>(a.load( oLoad )), arr );
456             EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );
457         }
458
459         template <bool Volatile>
460         void do_test_atomic_pointer_void()
461         {
462             typedef typename add_volatile<atomics::atomic< void *>, Volatile>::type    atomic_pointer;
463
464             char   arr[8];
465             const char aSize = sizeof(arr)/sizeof(arr[0]);
466             for ( char i = 0; i < aSize; ++i ) {
467                 arr[static_cast<unsigned>( i )] = i + 1;
468             }
469
470             atomic_pointer  a;
471             void *  p;
472
473             a.store( (void *) arr );
474             EXPECT_EQ( *reinterpret_cast<char *>(a.load()), 1 );
475
476             p = arr;
477             EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5)));
478             EXPECT_EQ( p, arr + 0 );
479             EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3)));
480             EXPECT_EQ( p, arr + 5 );
481
482             EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3)));
483             EXPECT_EQ( p, arr + 5 );
484             EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5)));
485             EXPECT_EQ( p, arr + 3 );
486
487             EXPECT_EQ( reinterpret_cast<char *>( a.exchange( (void *) arr )), arr + 3 );
488             EXPECT_EQ( reinterpret_cast<char *>( a.load()), arr );
489             EXPECT_EQ( *reinterpret_cast<char *>( a.load()), 1 );
490
491             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
492             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
493             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
494             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel );
495             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst );
496         }
497
498         template <typename Atomic, typename Integral>
499         void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order )
500         {
501             typedef Integral integral_type;
502             atomics::memory_order oLoad = convert_to_load_order(order);
503             atomics::memory_order oStore = convert_to_store_order(order);
504             integral_type *  p;
505
506             a.store( arr, oStore );
507             EXPECT_EQ( *a.load( oLoad ), 1 );
508
509             p = arr;
510             EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5, order, atomics::memory_order_relaxed ));
511             EXPECT_EQ( p, arr + 0 );
512             EXPECT_EQ( *p, 1 );
513             EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3, order, atomics::memory_order_relaxed ));
514             EXPECT_EQ( p, arr + 5 );
515             EXPECT_EQ( *p, 6 );
516
517             EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3, order, atomics::memory_order_relaxed ));
518             EXPECT_EQ( p, arr + 5 );
519             EXPECT_EQ( *p, 6 );
520             EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5, order, atomics::memory_order_relaxed ));
521             EXPECT_EQ( p, arr + 3 );
522             EXPECT_EQ( *p, 4 );
523
524             EXPECT_EQ( a.exchange( arr, order ), arr + 3 );
525             EXPECT_EQ( a.load( oLoad ), arr );
526             EXPECT_EQ( *a.load( oLoad ), 1 );
527
528             for ( integral_type i = 1; i < aSize; ++i ) {
529                 p = a.load();
530                 EXPECT_EQ( *p, i );
531                 EXPECT_EQ( a.fetch_add( 1, order ), p );
532                 EXPECT_EQ( *a.load( oLoad ), i + 1 );
533             }
534
535             for ( integral_type i = aSize; i > 1; --i ) {
536                 p = a.load();
537                 EXPECT_EQ( *p, i  );
538                 EXPECT_EQ( a.fetch_sub( 1, order ), p );
539                 EXPECT_EQ( *a.load( oLoad ), i - 1 );
540             }
541         }
542
543         template <typename Integral, bool Volatile>
544         void test_atomic_pointer_for()
545         {
546             typedef Integral integral_type;
547             typedef typename add_volatile<atomics::atomic< integral_type *>, Volatile>::type    atomic_pointer;
548
549             integral_type   arr[8];
550             const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
551             for ( integral_type i = 0; i < aSize; ++i ) {
552                 arr[static_cast<size_t>(i)] = i + 1;
553             }
554
555             atomic_pointer  a;
556             integral_type *  p;
557
558             a.store( arr );
559             EXPECT_EQ( *a.load(), 1 );
560
561             p = arr;
562             EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5 ));
563             EXPECT_EQ( p, arr + 0 );
564             EXPECT_EQ( *p, 1 );
565             EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3 ));
566             EXPECT_EQ( p, arr + 5 );
567             EXPECT_EQ( *p, 6 );
568
569             EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3 ));
570             EXPECT_EQ( p, arr + 5 );
571             EXPECT_EQ( *p, 6 );
572             EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5 ));
573             EXPECT_EQ( p, arr + 3 );
574             EXPECT_EQ( *p, 4 );
575
576             EXPECT_EQ( a.exchange( arr ), arr + 3 );
577             EXPECT_EQ( a.load(), arr );
578             EXPECT_EQ( *a.load(), 1 );
579
580             for ( integral_type i = 1; i < aSize; ++i ) {
581                 p = a.load();
582                 EXPECT_EQ( *p, i );
583                 integral_type * pa = a.fetch_add( 1 );
584                 EXPECT_EQ( pa, p );
585                 EXPECT_EQ( *a.load(), i + 1 );
586             }
587
588             for ( integral_type i = aSize; i > 1; --i ) {
589                 p = a.load();
590                 EXPECT_EQ( *p, i  );
591                 EXPECT_EQ( a.fetch_sub( 1 ), p );
592                 EXPECT_EQ( *a.load(), i - 1 );
593             }
594
595             test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed );
596             test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire );
597             test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release );
598             test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel );
599             test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst );
600         }
601
602     public:
603         void test_atomic_flag()
604         {
605             // Array to test different alignment
606
607             atomics::atomic_flag flags[8];
608             for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
609                 do_test_atomic_flag( flags[i] );
610         }
611
612         void test_atomic_flag_volatile()
613         {
614             // Array to test different alignment
615
616             atomics::atomic_flag volatile flags[8];
617             for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
618                 do_test_atomic_flag( flags[i] );
619         }
620
621         template <typename AtomicBool>
622         void test_atomic_bool_()
623         {
624             // Array to test different alignment
625             AtomicBool  a[8];
626
627             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
628                 do_test_atomic_bool( a[i] );
629
630                 do_test_atomic_bool( a[i], atomics::memory_order_relaxed );
631                 //do_test_atomic_bool( a[i], atomics::memory_order_consume );
632                 do_test_atomic_bool( a[i], atomics::memory_order_acquire );
633                 do_test_atomic_bool( a[i], atomics::memory_order_release );
634                 do_test_atomic_bool( a[i], atomics::memory_order_acq_rel );
635                 do_test_atomic_bool( a[i], atomics::memory_order_seq_cst );
636             }
637         }
638
639         void test_atomic_bool()
640         {
641             test_atomic_bool_< atomics::atomic<bool> >();
642         }
643         void test_atomic_bool_volatile()
644         {
645             test_atomic_bool_< atomics::atomic<bool> volatile >();
646         }
647     };
648
    // atomics::atomic<T> for every built-in integral type: each case runs the
    // full store/load/exchange/CAS/fetch_xxx/op= suite, with default ordering
    // and with every explicit memory order.
    TEST_F( cxx11_atomic_class, atomic_char )
    {
        test_atomic_integral<char>();
    }

    TEST_F( cxx11_atomic_class, atomic_signed_char )
    {
        test_atomic_integral<signed char>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_char )
    {
        test_atomic_integral<unsigned char>();
    }

    TEST_F( cxx11_atomic_class, atomic_short_int )
    {
        test_atomic_integral<short int>();
    }

    TEST_F( cxx11_atomic_class, atomic_signed_short_int )
    {
        test_atomic_integral<signed short int>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_short_int )
    {
        test_atomic_integral<unsigned short int>();
    }

    TEST_F( cxx11_atomic_class, atomic_int )
    {
        test_atomic_integral<int>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_int )
    {
        test_atomic_integral<unsigned int>();
    }

    TEST_F( cxx11_atomic_class, atomic_long )
    {
        test_atomic_integral<long>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_long )
    {
        test_atomic_integral<unsigned long>();
    }

    TEST_F( cxx11_atomic_class, atomic_long_long )
    {
        test_atomic_integral<long long>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_long_long )
    {
        test_atomic_integral<unsigned long long>();
    }
708
    // Same integral-type coverage through volatile-qualified
    // atomics::atomic<T> objects.
    TEST_F( cxx11_atomic_class, atomic_char_volatile )
    {
        test_atomic_integral_volatile<char>();
    }

    TEST_F( cxx11_atomic_class, atomic_signed_char_volatile )
    {
        test_atomic_integral_volatile<signed char>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_char_volatile )
    {
        test_atomic_integral_volatile<unsigned char>();
    }

    TEST_F( cxx11_atomic_class, atomic_short_int_volatile )
    {
        test_atomic_integral_volatile<short int>();
    }

    TEST_F( cxx11_atomic_class, atomic_signed_short_int_volatile )
    {
        test_atomic_integral_volatile<signed short int>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_short_int_volatile )
    {
        test_atomic_integral_volatile<unsigned short int>();
    }

    TEST_F( cxx11_atomic_class, atomic_int_volatile )
    {
        test_atomic_integral_volatile<int>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_int_volatile )
    {
        test_atomic_integral_volatile<unsigned int>();
    }

    TEST_F( cxx11_atomic_class, atomic_long_volatile )
    {
        test_atomic_integral_volatile<long>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_long_volatile )
    {
        test_atomic_integral_volatile<unsigned long>();
    }

    TEST_F( cxx11_atomic_class, atomic_long_long_volatile )
    {
        test_atomic_integral_volatile<long long>();
    }

    TEST_F( cxx11_atomic_class, atomic_unsigned_long_long_volatile )
    {
        test_atomic_integral_volatile<unsigned long long>();
    }
768
    // atomics::atomic<void *>: plain and volatile-qualified (template flag
    // selects volatility).
    TEST_F( cxx11_atomic_class, atomic_pointer_void )
    {
        do_test_atomic_pointer_void<false>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_void_volatile )
    {
        do_test_atomic_pointer_void<true>();
    }
778
    // atomics::atomic<T *> for typed pointers, including pointer arithmetic
    // via fetch_add/fetch_sub; the bool template flag selects the
    // volatile-qualified atomic.
    TEST_F( cxx11_atomic_class, atomic_pointer_char )
    {
        test_atomic_pointer_for<char, false>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_char_volatile )
    {
        test_atomic_pointer_for<char, true>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_short )
    {
        test_atomic_pointer_for<short int, false>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_short_volatile )
    {
        test_atomic_pointer_for<short int, true>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_int )
    {
        test_atomic_pointer_for<int, false>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_int_volatile )
    {
        test_atomic_pointer_for<int, true>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_long )
    {
        test_atomic_pointer_for<long, false>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_long_volatile )
    {
        test_atomic_pointer_for<long, true>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_long_long )
    {
        test_atomic_pointer_for<long long, false>();
    }

    TEST_F( cxx11_atomic_class, atomic_pointer_long_long_volatile )
    {
        test_atomic_pointer_for<long long, true>();
    }
828 }   // namespace