Replace CDS_ATOMIC with namespace atomics
author    khizmax <libcds.dev@gmail.com>
Sat, 20 Sep 2014 16:40:56 +0000 (20:40 +0400)
committer khizmax <libcds.dev@gmail.com>
Sat, 20 Sep 2014 16:40:56 +0000 (20:40 +0400)
Rename namespace cds::cxx11_atomics to cds::cxx11_atomic and replace the CDS_ATOMIC macro with the namespace alias atomics.

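The change is mechanical at each call site: the CDS_ATOMIC macro gives way to the atomics namespace alias. A minimal before/after sketch, following the documentation example updated in the defs.h diff below:

    // before this commit
    CDS_ATOMIC::atomic<int> atomInt;
    CDS_ATOMIC::atomic_store_explicit( &atomInt, 0, CDS_ATOMIC::memory_order_release );

    // after this commit
    atomics::atomic<int> atomInt;
    atomics::atomic_store_explicit( &atomInt, 0, atomics::memory_order_release );
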
86 files changed:
cds/algo/flat_combining.h
cds/compiler/cxx11_atomic.h
cds/compiler/gcc/amd64/cxx11_atomic.h
cds/compiler/gcc/ia64/cxx11_atomic.h
cds/compiler/gcc/sparc/cxx11_atomic.h
cds/compiler/gcc/x86/cxx11_atomic.h
cds/compiler/vc/amd64/cxx11_atomic.h
cds/compiler/vc/x86/cxx11_atomic.h
cds/container/vyukov_mpmc_cycle_queue.h
cds/cxx11_atomic.h
cds/details/defs.h
cds/details/marked_ptr.h
cds/gc/hp_decl.h
cds/gc/hrc/details/hrc_retired.h
cds/gc/hrc/hrc.h
cds/gc/hrc_decl.h
cds/gc/hzp/details/hp_alloc.h
cds/gc/hzp/hzp.h
cds/gc/ptb/ptb.h
cds/gc/ptb_decl.h
cds/intrusive/basket_queue.h
cds/intrusive/cuckoo_set.h
cds/intrusive/details/dummy_node_holder.h
cds/intrusive/details/ellen_bintree_base.h
cds/intrusive/ellen_bintree_impl.h
cds/intrusive/ellen_bintree_rcu.h
cds/intrusive/lazy_list_base.h
cds/intrusive/lazy_list_hrc.h
cds/intrusive/lazy_list_nogc.h
cds/intrusive/lazy_list_rcu.h
cds/intrusive/michael_deque.h
cds/intrusive/michael_list_base.h
cds/intrusive/michael_list_hrc.h
cds/intrusive/michael_list_impl.h
cds/intrusive/michael_list_nogc.h
cds/intrusive/michael_list_rcu.h
cds/intrusive/moir_queue.h
cds/intrusive/msqueue.h
cds/intrusive/optimistic_queue.h
cds/intrusive/segmented_queue.h
cds/intrusive/single_link_struct.h
cds/intrusive/skip_list_base.h
cds/intrusive/skip_list_hrc.h
cds/intrusive/skip_list_impl.h
cds/intrusive/skip_list_nogc.h
cds/intrusive/skip_list_rcu.h
cds/intrusive/split_list.h
cds/intrusive/split_list_base.h
cds/intrusive/split_list_nogc.h
cds/intrusive/split_list_rcu.h
cds/intrusive/striped_set/striping_policy.h
cds/intrusive/treiber_stack.h
cds/intrusive/tsigas_cycle_queue.h
cds/lock/spinlock.h
cds/memory/michael/allocator.h
cds/memory/michael/osalloc_stat.h
cds/memory/michael/procheap_stat.h
cds/opt/options.h
cds/refcounter.h
cds/threading/details/_common.h
cds/urcu/details/base.h
cds/urcu/details/gp.h
cds/urcu/details/gp_decl.h
cds/urcu/details/gpb.h
cds/urcu/details/gpi.h
cds/urcu/details/gpt.h
cds/urcu/details/sh.h
cds/urcu/details/sh_decl.h
cds/urcu/details/sig_buffered.h
cds/urcu/details/sig_threaded.h
src/hrc_gc.cpp
src/hzp_gc.cpp
src/init.cpp
src/ptb_gc.cpp
tests/cppunit/thread.h
tests/test-hdr/misc/cxx11_atomic_class.cpp
tests/test-hdr/misc/cxx11_atomic_func.cpp
tests/test-hdr/misc/cxx11_convert_memory_order.h
tests/unit/map2/map_delodd.cpp
tests/unit/map2/map_insdel_func.cpp
tests/unit/queue/intrusive_queue_reader_writer.cpp
tests/unit/queue/queue_reader_writer.cpp
tests/unit/set2/set_delodd.cpp
tests/unit/set2/set_insdel_func.h
tests/unit/stack/stack_intrusive_pushpop.cpp
tests/unit/stack/stack_pushpop.cpp

index 9663d39d427d7fd8021b7a3d30a3b5ac9f264cc6..ccd7f5854bbcd1bb571e305b72d58ae5a075fb5e 100644 (file)
@@ -93,10 +93,10 @@ namespace cds { namespace algo {
             Each data structure based on flat combining contains a class derived from \p %publication_record
         */
         struct publication_record {
-            CDS_ATOMIC::atomic<unsigned int>    nRequest;   ///< Request field (depends on data structure)
-            CDS_ATOMIC::atomic<unsigned int>    nState;     ///< Record state: inactive, active, removed
+            atomics::atomic<unsigned int>    nRequest;   ///< Request field (depends on data structure)
+            atomics::atomic<unsigned int>    nState;     ///< Record state: inactive, active, removed
             unsigned int                        nAge;       ///< Age of the record
-            CDS_ATOMIC::atomic<publication_record *> pNext; ///< Next record in publication list
+            atomics::atomic<publication_record *> pNext; ///< Next record in publication list
             void *                              pOwner;    ///< [internal data] Pointer to \ref kernel object that manages the publication list
 
             /// Initializes publication record
@@ -111,13 +111,13 @@ namespace cds { namespace algo {
             /// Returns the value of \p nRequest field
             unsigned int op() const
             {
-                return nRequest.load( CDS_ATOMIC::memory_order_relaxed );
+                return nRequest.load( atomics::memory_order_relaxed );
             }
 
             /// Checks if the operation is done
             bool is_done() const
             {
-                return nRequest.load( CDS_ATOMIC::memory_order_relaxed ) == req_Response;
+                return nRequest.load( atomics::memory_order_relaxed ) == req_Response;
             }
         };
 
@@ -543,7 +543,7 @@ namespace cds { namespace algo {
                     if ( pRec->nState.load(memory_model::memory_order_relaxed) == active && pRec->pOwner ) {
                         // record is active and kernel is alive
                         unsigned int nState = active;
-                        pRec->nState.compare_exchange_strong( nState, removed, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                        pRec->nState.compare_exchange_strong( nState, removed, memory_model::memory_order_release, atomics::memory_order_relaxed );
                     }
                     else {
                         // record is not in publication list or kernel already deleted
@@ -577,7 +577,7 @@ namespace cds { namespace algo {
                             pRec->pNext = p;
                             // Failed CAS changes p
                         } while ( !m_pHead->pNext.compare_exchange_weak( p, static_cast<publication_record *>(pRec),
-                            memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+                            memory_model::memory_order_release, atomics::memory_order_relaxed ));
                         m_Stat.onActivatPubRecord();
                     }
                 }
@@ -746,7 +746,7 @@ namespace cds { namespace algo {
                         if ( pPrev ) {
                             publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire );
                             if ( pPrev->pNext.compare_exchange_strong( p, pNext,
-                                memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                                memory_model::memory_order_release, atomics::memory_order_relaxed ))
                             {
                                 p->nState.store( inactive, memory_model::memory_order_release );
                                 p = pNext;
@@ -767,7 +767,7 @@ namespace cds { namespace algo {
                 if ( pPrev ) {
                     publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire );
                     if ( pPrev->pNext.compare_exchange_strong( p, pNext,
-                        memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                        memory_model::memory_order_release, atomics::memory_order_relaxed ))
                     {
                         cxx11_allocator().Delete( static_cast<publication_record_type *>( p ));
                         m_Stat.onDeletePubRecord();
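
The activation path in the hunks above uses the classic push-to-head retry loop: set the node's next pointer, then CAS the list head, reloading the expected value on failure. A free-standing sketch of the same pattern with std atomics (simplified types, not the library's publication_record):

    #include <atomic>

    struct rec { std::atomic<rec*> pNext{ nullptr }; };

    void link_to_head( std::atomic<rec*>& head, rec* r )
    {
        rec* p = head.load( std::memory_order_relaxed );
        do {
            r->pNext.store( p, std::memory_order_relaxed );
            // a failed compare_exchange_weak reloads p, so the loop
            // retries against the current head
        } while ( !head.compare_exchange_weak( p, r,
                      std::memory_order_release, std::memory_order_relaxed ));
    }
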
index 96ab439917f70db610c26b3c8d5cb7d9c30eb5ee..9ad958a6eaa13255b7e4f700f81c006da4f674e9 100644 (file)
@@ -7,7 +7,7 @@
 #include <cds/details/defs.h>
 #include <cds/details/aligned_type.h>
 
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
     typedef enum memory_order {
         memory_order_relaxed,
         memory_order_consume,
@@ -17,7 +17,7 @@ namespace cds { namespace cxx11_atomics {
         memory_order_seq_cst
     } memory_order;
 
-}}  // namespace cds::cxx11_atomics
+}}  // namespace cds::cxx11_atomic
 
 
 #if CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS)
@@ -51,7 +51,7 @@ namespace cds { namespace cxx11_atomics {
 // In C++11, make_unsigned is declared in <type_traits>
 #include <boost/type_traits/make_unsigned.hpp>  // for make_unsigned
 
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
 
     // forward declarations
     template <class T>
@@ -2271,7 +2271,7 @@ namespace cds { namespace cxx11_atomics {
         platform::signal_fence( order );
     }
 
-}}  // namespace cds::cxx11_atomics
+}}  // namespace cds::cxx11_atomic
 
 //@endcond
 #endif // #ifndef __CDS_COMPILER_CXX11_ATOMIC_H
index b79befcc11e367803eed24c823b00d46404abf6b..fc701e8f6e2316a1bbcf127e60a4ddf183e5770f 100644 (file)
@@ -7,11 +7,11 @@
 #include <cds/compiler/gcc/x86/cxx11_atomic32.h>
 
 //@cond
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
     namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace amd64 {
 #   ifndef CDS_CXX11_INLINE_NAMESPACE_SUPPORT
         // primitives up to 32bit + fences
-        using namespace cds::cxx11_atomics::platform::gcc::x86;
+        using namespace cds::cxx11_atomic::platform::gcc::x86;
 #   endif
 
         //-----------------------------------------------------------------------------
@@ -201,7 +201,7 @@ namespace cds { namespace cxx11_atomics {
 #endif
     }   // namespace platform
 
-}}  // namespace cds::cxx11_atomics
+}}  // namespace cds::cxx11_atomic
 //@endcond
 
 #endif // #ifndef __CDS_COMPILER_GCC_AMD64_CXX11_ATOMIC_H
index 12582c2d987dc57fa3bc228818e855655fdadaae..64867a33f50039452dd163b4d3a56b91e134701c 100644 (file)
@@ -12,7 +12,7 @@
 #include <cstdint>
 
 //@cond
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
     namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace ia64 {
 
         static inline void itanium_full_fence() CDS_NOEXCEPT
@@ -647,7 +647,7 @@ namespace cds { namespace cxx11_atomics {
     using namespace gcc::ia64;
 #endif
     }   // namespace platform
-}}  // namespace cds::cxx11_atomics
+}}  // namespace cds::cxx11_atomic
 //@endcond
 
 #endif // #ifndef __CDS_COMPILER_GCC_IA64_CXX11_ATOMIC_H
index f160e9097679006af17d6ef88ef27570ea7b41b2..771540d2fdf6340403fa540e694abf8e4f5188fe 100644 (file)
@@ -40,7 +40,7 @@
 #define CDS_SPARC_MB_SEQ_CST    CDS_SPARC_MB_FULL
 
 //@cond
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
     namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace Sparc {
 
         static inline void fence_before( memory_order order ) CDS_NOEXCEPT
@@ -596,7 +596,7 @@ namespace cds { namespace cxx11_atomics {
     using namespace gcc::Sparc;
 #endif
     }   // namespace platform
-}}  // namespace cds::cxx11_atomics
+}}  // namespace cds::cxx11_atomic
 //@endcond
 
 #undef CDS_SPARC_MB_ACQ
index 52fc73983d4267e6e6583b1d179ece94e602e1d1..6323291a1acc152a476ae619ab7820a9cdb38be7 100644 (file)
@@ -7,7 +7,7 @@
 #include <cds/compiler/gcc/x86/cxx11_atomic32.h>
 
 //@cond
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
     namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace x86 {
 
         //-----------------------------------------------------------------------------
@@ -178,7 +178,7 @@ namespace cds { namespace cxx11_atomics {
         using namespace gcc::x86;
 #endif
     }   // namespace platform
-}}  // namespace cds::cxx11_atomics
+}}  // namespace cds::cxx11_atomic
 //@endcond
 
 #endif // #ifndef __CDS_COMPILER_GCC_X86_CXX11_ATOMIC_H
index 29d8b8593444eabd6741d916ddbd7969da784dfd..9115c2c94127fdf745650c287407918a5cf418ca 100644 (file)
@@ -32,7 +32,7 @@
 #endif
 
 //@cond
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
     namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace vc { CDS_CXX11_INLINE_NAMESPACE namespace amd64 {
 
         static inline void fence_before( memory_order order ) CDS_NOEXCEPT
@@ -578,7 +578,7 @@ namespace cds { namespace cxx11_atomics {
     using namespace vc::amd64;
 #endif
     } // namespace platform
-}}  // namespace cds::cxx11_atomics
+}}  // namespace cds::cxx11_atomic
 //@endcond
 
 #endif // #ifndef __CDS_COMPILER_VC_AMD64_CXX11_ATOMIC_H
index b11d50f75fadfa2a568dc0aec6a8a7d0a20c57c5..fbaea36685104ce952fa2c36cb06feae35062642 100644 (file)
@@ -27,7 +27,7 @@
 #endif
 
 //@cond
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
     namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace vc { CDS_CXX11_INLINE_NAMESPACE namespace x86 {
 
         static inline void fence_before( memory_order order ) CDS_NOEXCEPT
@@ -550,7 +550,7 @@ namespace cds { namespace cxx11_atomics {
     using namespace vc::x86;
 #endif
     } // namespace platform
-}}  // namespace cds::cxx11_atomics
+}}  // namespace cds::cxx11_atomic
 //@endcond
 
 #endif // #ifndef __CDS_COMPILER_VC_X86_CXX11_ATOMIC_H
index 0b871d13abf276107124872db8970272b0a657e3..8f5f8e57c2877becac787bc8a8f8d20c5e84c7a6 100644 (file)
@@ -98,7 +98,7 @@ namespace cds { namespace container {
 
     protected:
         //@cond
-        typedef CDS_ATOMIC::atomic<size_t> sequence_type;
+        typedef atomics::atomic<size_t> sequence_type;
         struct cell_type
         {
             sequence_type   sequence;
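
The sequence_type above stores each cell's sequence number, which is what makes Vyukov's bounded MPMC queue lock-free: a producer may write a cell only when its sequence equals the producer's ticket. A condensed sketch of the published algorithm's enqueue side (power-of-two capacity assumed; a sketch of the technique, not the libcds implementation):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    template <typename T>
    class bounded_mpmc {
        struct cell {
            std::atomic<size_t> sequence;
            T                   value;
        };
        std::vector<cell>   m_buf;
        size_t const        m_mask;
        std::atomic<size_t> m_enqPos{ 0 };
    public:
        explicit bounded_mpmc( size_t capacity )    // capacity: power of two
            : m_buf( capacity ), m_mask( capacity - 1 )
        {
            for ( size_t i = 0; i < capacity; ++i )
                m_buf[i].sequence.store( i, std::memory_order_relaxed );
        }
        bool enqueue( T v )
        {
            size_t pos = m_enqPos.load( std::memory_order_relaxed );
            for (;;) {
                cell& c = m_buf[pos & m_mask];
                size_t seq = c.sequence.load( std::memory_order_acquire );
                intptr_t dif = intptr_t(seq) - intptr_t(pos);
                if ( dif == 0 ) {
                    // the cell is free for this ticket; try to claim it
                    if ( m_enqPos.compare_exchange_weak( pos, pos + 1,
                             std::memory_order_relaxed )) {
                        c.value = std::move( v );
                        c.sequence.store( pos + 1, std::memory_order_release );
                        return true;
                    }
                }
                else if ( dif < 0 )
                    return false;       // the queue is full
                else
                    pos = m_enqPos.load( std::memory_order_relaxed );
            }
        }
        // dequeue is symmetric: a consumer waits for sequence == pos + 1
        // and releases the cell with sequence = pos + m_mask + 1
    };
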
index 8508fd8d8fcbbf77ea1b0c6b0687e3a886b60f68..5eb5e48218542bcdc0dcb3ebf54c59ae99d5555a 100644 (file)
@@ -38,8 +38,8 @@ namespace cds {
     Using \p CDS_ATOMIC macro you may call <tt>\<atomic\></tt> library functions and classes,
     for example:
     \code
-    CDS_ATOMIC::atomic<int> atomInt;
-    CDS_ATOMIC::atomic_store_explicit( &atomInt, 0, CDS_ATOMIC::memory_order_release );
+    atomics::atomic<int> atomInt;
+    atomics::atomic_store_explicit( &atomInt, 0, atomics::memory_order_release );
     \endcode
 
     \par Microsoft Visual C++
@@ -86,8 +86,8 @@ namespace cds {
     You can compile \p libcds and your projects with <tt>boost.atomic</tt> specifying \p -DCDS_USE_BOOST_ATOMIC
     in compiler's command line.
 */
-namespace cxx11_atomics {
-}} // namespace cds::cxx11_atomics
+namespace cxx11_atomic {
+}} // namespace cds::cxx11_atomic
 
 //@cond
 #if defined(CDS_USE_BOOST_ATOMIC)
@@ -95,7 +95,7 @@ namespace cxx11_atomics {
 #   include <boost/version.hpp>
 #   if BOOST_VERSION >= 105400
 #       include <boost/atomic.hpp>
-#       define CDS_ATOMIC boost
+        namespace atomics = boost;
 #       define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace boost {
 #       define CDS_CXX11_ATOMIC_END_NAMESPACE }
 #   else
@@ -104,13 +104,13 @@ namespace cxx11_atomics {
 #elif defined(CDS_USE_LIBCDS_ATOMIC)
     // libcds atomic
 #   include <cds/compiler/cxx11_atomic.h>
-#   define CDS_ATOMIC cds::cxx11_atomics
-#   define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace cds { namespace cxx11_atomics {
+    namespace atomics = cds::cxx11_atomic;
+#   define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace cds { namespace cxx11_atomic {
 #   define CDS_CXX11_ATOMIC_END_NAMESPACE }}
 #else
     // Compiler provided C++11 atomic
 #   include <atomic>
-#   define CDS_ATOMIC std
+    namespace atomics = std;
 #   define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace std {
 #   define CDS_CXX11_ATOMIC_END_NAMESPACE }
 #endif
@@ -132,7 +132,7 @@ namespace cds {
         class event_counter
         {
             //@cond
-            CDS_ATOMIC::atomic_size_t   m_counter;
+            atomics::atomic_size_t   m_counter;
             //@endcond
 
         public:
@@ -152,7 +152,7 @@ namespace cds {
                 value_type n    //< new value of the counter
             ) CDS_NOEXCEPT
             {
-                m_counter.exchange( n, CDS_ATOMIC::memory_order_relaxed );
+                m_counter.exchange( n, atomics::memory_order_relaxed );
                 return n;
             }
 
@@ -164,7 +164,7 @@ namespace cds {
                 size_t n    ///< addendum
             ) CDS_NOEXCEPT
             {
-                return m_counter.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ) + n;
+                return m_counter.fetch_add( n, atomics::memory_order_relaxed ) + n;
             }
 
             /// Substraction
@@ -175,47 +175,47 @@ namespace cds {
                 size_t n    ///< subtrahend
             ) CDS_NOEXCEPT
             {
-                return m_counter.fetch_sub( n, CDS_ATOMIC::memory_order_relaxed ) - n;
+                return m_counter.fetch_sub( n, atomics::memory_order_relaxed ) - n;
             }
 
             /// Get current value of the counter
             operator size_t () const CDS_NOEXCEPT
             {
-                return m_counter.load( CDS_ATOMIC::memory_order_relaxed );
+                return m_counter.load( atomics::memory_order_relaxed );
             }
 
             /// Preincrement
             size_t operator ++() CDS_NOEXCEPT
             {
-                return m_counter.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ) + 1;
+                return m_counter.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
             }
             /// Postincrement
             size_t operator ++(int) CDS_NOEXCEPT
             {
-                return m_counter.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+                return m_counter.fetch_add( 1, atomics::memory_order_relaxed );
             }
 
             /// Predecrement
             size_t operator --() CDS_NOEXCEPT
             {
-                return m_counter.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed ) - 1;
+                return m_counter.fetch_sub( 1, atomics::memory_order_relaxed ) - 1;
             }
             /// Postdecrement
             size_t operator --(int) CDS_NOEXCEPT
             {
-                return m_counter.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed );
+                return m_counter.fetch_sub( 1, atomics::memory_order_relaxed );
             }
 
             /// Get current value of the counter
             size_t get() const CDS_NOEXCEPT
             {
-                return m_counter.load( CDS_ATOMIC::memory_order_relaxed );
+                return m_counter.load( atomics::memory_order_relaxed );
             }
 
             /// Resets the counter to 0
             void reset() CDS_NOEXCEPT
             {
-                m_counter.store( 0, CDS_ATOMIC::memory_order_release );
+                m_counter.store( 0, atomics::memory_order_release );
             }
 
         };
@@ -228,7 +228,7 @@ namespace cds {
         class item_counter
         {
         public:
-            typedef CDS_ATOMIC::atomic_size_t   atomic_type ;   ///< atomic type used
+            typedef atomics::atomic_size_t   atomic_type ;   ///< atomic type used
             typedef size_t counter_type    ;   ///< Integral item counter type (size_t)
 
         private:
@@ -243,7 +243,7 @@ namespace cds {
             {}
 
             /// Returns current value of the counter
-            counter_type    value(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed) const
+            counter_type    value(atomics::memory_order order = atomics::memory_order_relaxed) const
             {
                 return m_Counter.load( order );
             }
@@ -267,13 +267,13 @@ namespace cds {
             }
 
             /// Increments the counter. Semantics: postincrement
-            counter_type inc(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed )
+            counter_type inc(atomics::memory_order order = atomics::memory_order_relaxed )
             {
                 return m_Counter.fetch_add( 1, order );
             }
 
             /// Decrements the counter. Semantics: postdecrement
-            counter_type dec(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed)
+            counter_type dec(atomics::memory_order order = atomics::memory_order_relaxed)
             {
                 return m_Counter.fetch_sub( 1, order );
             }
@@ -301,7 +301,7 @@ namespace cds {
             }
 
             /// Resets count to 0
-            void reset(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed)
+            void reset(atomics::memory_order order = atomics::memory_order_relaxed)
             {
                 m_Counter.store( 0, order );
             }
@@ -320,7 +320,7 @@ namespace cds {
             typedef size_t counter_type    ;  ///< Counter type
         public:
             /// Returns 0
-            counter_type    value(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed) const
+            counter_type    value(atomics::memory_order /*order*/ = atomics::memory_order_relaxed) const
             {
                 return 0;
             }
@@ -332,13 +332,13 @@ namespace cds {
             }
 
             /// Dummy increment. Always returns 0
-            size_t inc(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed)
+            size_t inc(atomics::memory_order /*order*/ = atomics::memory_order_relaxed)
             {
                 return 0;
             }
 
             /// Dummy increment. Always returns 0
-            size_t dec(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed)
+            size_t dec(atomics::memory_order /*order*/ = atomics::memory_order_relaxed)
             {
                 return 0;
             }
@@ -366,7 +366,7 @@ namespace cds {
             }
 
             /// Dummy function
-            void reset(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed)
+            void reset(atomics::memory_order /*order*/ = atomics::memory_order_relaxed)
             {}
         };
 
index 7260b893080f2f8d157989a3f815a0c6c0fd22fe..b74820f9e7fb39ecb306acd008ad22faac29deac 100644 (file)
@@ -34,7 +34,7 @@
    schema used. However, any implementation supports common interface for the type of data structure.
 
    To implement any lock-free data structure, two things are needed:
-   - atomic operation library conforming with C++11 memory model. The <b>libcds</b> has such feature, see cds::cxx11_atomics namespace for
+   - atomic operation library conforming with C++11 memory model. The <b>libcds</b> has such feature, see cds::cxx11_atomic namespace for
      details and compiler-specific information.
    - safe memory reclamation (SMR) or garbage collecting (GC) algorithm. The <b>libcds</b> has an implementation of several
      well-known SMR algos, see below.
index 24eed5d3b3d002404f29b47d4dfa8a4057561316..b656f0204dde1216773aa91e5507701f22b6718e 100644 (file)
@@ -257,7 +257,7 @@ CDS_CXX11_ATOMIC_BEGIN_NAMESPACE
     {
     private:
         typedef cds::details::marked_ptr<T, Bitmask> marked_ptr;
-        typedef CDS_ATOMIC::atomic<T *>  atomic_impl;
+        typedef atomics::atomic<T *>  atomic_impl;
 
         atomic_impl m_atomic;
     public:
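
The atomic_impl typedef above holds the raw pointer for cds::details::marked_ptr, which packs flag bits into the low, alignment-guaranteed bits of a pointer. A minimal sketch of that encoding (simplified; not the libcds class):

    #include <cstdint>

    template <typename T, uintptr_t Bitmask = 3>
    struct marked_ptr_sketch {
        uintptr_t m_val;

        explicit marked_ptr_sketch( T* p = nullptr, uintptr_t bits = 0 )
            : m_val( reinterpret_cast<uintptr_t>( p ) | ( bits & Bitmask ))
        {}

        T*        ptr()  const { return reinterpret_cast<T*>( m_val & ~Bitmask ); }  // pointer without flags
        uintptr_t bits() const { return m_val & Bitmask; }                           // the stolen flag bits
    };
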
index 6c00f4c542ed7e626de6e05443b03633da0f8672..d2c98b7401cfeec87b3bd74691b47e8cb16319df 100644 (file)
@@ -33,24 +33,24 @@ namespace cds { namespace gc {
         /**
             @headerfile cds/gc/hp.h
         */
-        template <typename T> using atomic_ref = CDS_ATOMIC::atomic<T *>;
+        template <typename T> using atomic_ref = atomics::atomic<T *>;
 
         /// Atomic marked pointer
         /**
             @headerfile cds/gc/hp.h
         */
-        template <typename MarkedPtr> using atomic_marked_ptr = CDS_ATOMIC::atomic<MarkedPtr>;
+        template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
 
         /// Atomic type
         /**
             @headerfile cds/gc/hp.h
         */
-        template <typename T> using atomic_type = CDS_ATOMIC::atomic<T>;
+        template <typename T> using atomic_type = atomics::atomic<T>;
 #else
         template <typename T>
-        class atomic_ref: public CDS_ATOMIC::atomic<T *>
+        class atomic_ref: public atomics::atomic<T *>
         {
-            typedef CDS_ATOMIC::atomic<T *> base_class;
+            typedef atomics::atomic<T *> base_class;
         public:
 #   ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
             atomic_ref() = default;
@@ -65,9 +65,9 @@ namespace cds { namespace gc {
         };
 
         template <typename T>
-        class atomic_type: public CDS_ATOMIC::atomic<T>
+        class atomic_type: public atomics::atomic<T>
         {
-            typedef CDS_ATOMIC::atomic<T> base_class;
+            typedef atomics::atomic<T> base_class;
         public:
 #   ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
             atomic_type() = default;
@@ -82,9 +82,9 @@ namespace cds { namespace gc {
         };
 
         template <typename MarkedPtr>
-        class atomic_marked_ptr: public CDS_ATOMIC::atomic<MarkedPtr>
+        class atomic_marked_ptr: public atomics::atomic<MarkedPtr>
         {
-            typedef CDS_ATOMIC::atomic<MarkedPtr> base_class;
+            typedef atomics::atomic<MarkedPtr> base_class;
         public:
 #   ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
             atomic_marked_ptr() CDS_NOEXCEPT_DEFAULTED_( noexcept(base_class()) ) = default;
@@ -170,13 +170,13 @@ namespace cds { namespace gc {
                 to the HP slot repeatedly until the guard's value equals \p toGuard
             */
             template <typename T>
-            T protect( CDS_ATOMIC::atomic<T> const& toGuard )
+            T protect( atomics::atomic<T> const& toGuard )
             {
-                T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed);
+                T pCur = toGuard.load(atomics::memory_order_relaxed);
                 T pRet;
                 do {
                     pRet = assign( pCur );
-                    pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire);
+                    pCur = toGuard.load(atomics::memory_order_acquire);
                 } while ( pRet != pCur );
                 return pCur;
             }
@@ -199,14 +199,14 @@ namespace cds { namespace gc {
                 Really, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
             */
             template <typename T, class Func>
-            T protect( CDS_ATOMIC::atomic<T> const& toGuard, Func f )
+            T protect( atomics::atomic<T> const& toGuard, Func f )
             {
-                T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed);
+                T pCur = toGuard.load(atomics::memory_order_relaxed);
                 T pRet;
                 do {
                     pRet = pCur;
                     assign( f( pCur ) );
-                    pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire);
+                    pCur = toGuard.load(atomics::memory_order_acquire);
                 } while ( pRet != pCur );
                 return pCur;
             }
@@ -297,12 +297,12 @@ namespace cds { namespace gc {
                 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
             */
             template <typename T>
-            T protect(size_t nIndex, CDS_ATOMIC::atomic<T> const& toGuard )
+            T protect(size_t nIndex, atomics::atomic<T> const& toGuard )
             {
                 T pRet;
                 do {
-                    pRet = assign( nIndex, toGuard.load(CDS_ATOMIC::memory_order_acquire) );
-                } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_relaxed));
+                    pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire) );
+                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
 
                 return pRet;
             }
@@ -325,12 +325,12 @@ namespace cds { namespace gc {
                 Really, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
             */
             template <typename T, class Func>
-            T protect(size_t nIndex, CDS_ATOMIC::atomic<T> const& toGuard, Func f )
+            T protect(size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
             {
                 T pRet;
                 do {
-                    assign( nIndex, f( pRet = toGuard.load(CDS_ATOMIC::memory_order_acquire) ));
-                } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_relaxed));
+                    assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire) ));
+                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
 
                 return pRet;
             }
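
Both protect() overloads above follow the standard hazard-pointer publication protocol: read the source, publish the value to the HP slot, then re-read and retry until the two agree, so a reclaimer scanning the slots cannot miss the pointer. A free-standing sketch (std atomics; seq_cst is used for the publish and the re-read, the conservative choice for the store-load ordering this protocol needs):

    #include <atomic>

    template <typename T>
    T* hp_protect( std::atomic<T*>& hp_slot, std::atomic<T*> const& src )
    {
        T* p = src.load( std::memory_order_relaxed );
        T* prev;
        do {
            prev = p;
            hp_slot.store( p, std::memory_order_seq_cst );  // publish the hazard
            p = src.load( std::memory_order_seq_cst );      // re-read the source
        } while ( p != prev );  // retry until the published value is current
        return p;               // p is now protected from reclamation
    }
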
index cee87104ae5d720853473e396b1c4ee700e9a232..aa2adcd412531d46c60054cceadb5771c9ecfe33 100644 (file)
@@ -16,11 +16,11 @@ namespace cds { namespace gc { namespace hrc {
 
         /// Retired node descriptor
         struct retired_node {
-            CDS_ATOMIC::atomic<ContainerNode *> m_pNode        ;    ///< node to destroy
+            atomics::atomic<ContainerNode *> m_pNode        ;    ///< node to destroy
             free_retired_ptr_func               m_funcFree     ;    ///< pointer to the destructor function
             size_t                              m_nNextFree    ;    ///< Next free item in retired array
-            CDS_ATOMIC::atomic<unsigned int>    m_nClaim       ;    ///< Access to reclaimed node
-            CDS_ATOMIC::atomic<bool>            m_bDone        ;    ///< the record is in work (concurrent access flag)
+            atomics::atomic<unsigned int>    m_nClaim       ;    ///< Access to reclaimed node
+            atomics::atomic<bool>            m_bDone        ;    ///< the record is in work (concurrent access flag)
 
             /// Default ctor
             retired_node()
@@ -45,16 +45,16 @@ namespace cds { namespace gc { namespace hrc {
             /// Compares two \ref retired_node
             static bool Less( const retired_node& p1, const retired_node& p2 )
             {
-                return p1.m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) < p2.m_pNode.load( CDS_ATOMIC::memory_order_relaxed );
+                return p1.m_pNode.load( atomics::memory_order_relaxed ) < p2.m_pNode.load( atomics::memory_order_relaxed );
             }
 
             /// Assignment operator
             retired_node& set( ContainerNode * pNode, free_retired_ptr_func func )
             {
-                m_bDone.store( false, CDS_ATOMIC::memory_order_relaxed );
-                m_nClaim.store( 0, CDS_ATOMIC::memory_order_relaxed );
+                m_bDone.store( false, atomics::memory_order_relaxed );
+                m_nClaim.store( 0, atomics::memory_order_relaxed );
                 m_funcFree = func;
-                m_pNode.store( pNode, CDS_ATOMIC::memory_order_release );
+                m_pNode.store( pNode, atomics::memory_order_release );
                 CDS_COMPILER_RW_BARRIER;
                 return *this;
             }
@@ -63,7 +63,7 @@ namespace cds { namespace gc { namespace hrc {
             void free()
             {
                 assert( m_funcFree != nullptr );
-                m_funcFree( m_pNode.load( CDS_ATOMIC::memory_order_relaxed ));
+                m_funcFree( m_pNode.load( atomics::memory_order_relaxed ));
             }
         };
 
@@ -116,7 +116,7 @@ namespace cds { namespace gc { namespace hrc {
                 size_t nCount = 0;
                 const size_t nCapacity = capacity();
                 for ( size_t i = 0; i < nCapacity; ++i ) {
-                    if ( m_arr[i].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) != nullptr )
+                    if ( m_arr[i].m_pNode.load( atomics::memory_order_relaxed ) != nullptr )
                         ++nCount;
                 }
                 return nCount;
@@ -128,7 +128,7 @@ namespace cds { namespace gc { namespace hrc {
                 assert( !isFull());
 
                 size_t n = m_nFreeList;
-                assert( m_arr[n].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
+                assert( m_arr[n].m_pNode.load( atomics::memory_order_relaxed ) == nullptr );
                 m_nFreeList = m_arr[n].m_nNextFree;
                 CDS_DEBUG_DO( m_arr[n].m_nNextFree = m_nEndFreeList ; )
                 m_arr[n].set( p, pFunc );
@@ -138,7 +138,7 @@ namespace cds { namespace gc { namespace hrc {
             void pop( size_t n )
             {
                 assert( n < capacity() );
-                m_arr[n].m_pNode.store( nullptr, CDS_ATOMIC::memory_order_release );
+                m_arr[n].m_pNode.store( nullptr, atomics::memory_order_release );
                 m_arr[n].m_nNextFree = m_nFreeList;
                 m_nFreeList = n;
             }
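
push() and pop() above maintain an intrusive free list inside the retired array itself: every free slot stores the index of the next free slot. A minimal sketch of that index-linked free list (simplified; not the library's retired_vector):

    #include <cstddef>
    #include <vector>

    struct slot { std::size_t next_free; /* payload lives here too */ };

    class slot_array {
        std::vector<slot> m_arr;
        std::size_t       m_free;    // index of the first free slot
    public:
        explicit slot_array( std::size_t n ) : m_arr( n ), m_free( 0 )
        {
            for ( std::size_t i = 0; i < n; ++i )
                m_arr[i].next_free = i + 1;   // n acts as the "end" marker
        }
        std::size_t acquire()        // caller must check for fullness first
        {
            std::size_t n = m_free;
            m_free = m_arr[n].next_free;
            return n;
        }
        void release( std::size_t n )  // push the slot back on the free list
        {
            m_arr[n].next_free = m_free;
            m_free = n;
        }
    };
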
index 068adecd239e9b627d4686f65b4b3698888b8a89..a5e45e51e244c90b2f8cd572b8165ff1ddb3e905 100644 (file)
@@ -21,7 +21,7 @@
 
 #if CDS_COMPILER == CDS_COMPILER_MSVC
 #   pragma warning(push)
-// warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomics::atomic<T>'
+// warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomic::atomic<T>'
 // needs to have dll-interface to be used by clients of class 'cds::gc::hzp::GarbageCollector'
 #   pragma warning(disable: 4251)
 #endif
@@ -75,8 +75,8 @@ namespace cds { namespace gc {
             friend class gc::HRC;
 
             unsigned_ref_counter        m_RC        ;    ///< reference counter
-            CDS_ATOMIC::atomic<bool>    m_bTrace    ;    ///< \p true - node is tracing by Scan
-            CDS_ATOMIC::atomic<bool>    m_bDeleted  ;    ///< \p true - node is deleted
+            atomics::atomic<bool>    m_bTrace    ;    ///< \p true - node is tracing by Scan
+            atomics::atomic<bool>    m_bDeleted  ;    ///< \p true - node is deleted
 
         protected:
             //@cond
@@ -106,12 +106,12 @@ namespace cds { namespace gc {
             /// Returns the mark whether the node is deleted
             bool            isDeleted() const CDS_NOEXCEPT
             {
-                return m_bDeleted.load( CDS_ATOMIC::memory_order_acquire );
+                return m_bDeleted.load( atomics::memory_order_acquire );
             }
 
         protected:
             //@cond
-            void clean( CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT
+            void clean( atomics::memory_order order ) CDS_NOEXCEPT
             {
                 m_bDeleted.store( false, order );
                 m_bTrace.store( false, order );
@@ -294,7 +294,7 @@ namespace cds { namespace gc {
             {
                 thread_list_node *  m_pNext     ; ///< next list record
                 ThreadGC *          m_pOwner    ; ///< Owner of record
-                CDS_ATOMIC::atomic<cds::OS::ThreadId>   m_idOwner   ; ///< Id of thread owned; 0 - record is free
+                atomics::atomic<cds::OS::ThreadId>   m_idOwner   ; ///< Id of thread owned; 0 - record is free
                 bool                m_bFree        ; ///< Node is help-scanned
 
                 //@cond
@@ -309,13 +309,13 @@ namespace cds { namespace gc {
                 ~thread_list_node()
                 {
                     assert( m_pOwner == nullptr );
-                    assert( m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == cds::OS::c_NullThreadId );
+                    assert( m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::c_NullThreadId );
                 }
                 //@endcond
             };
 
         private:
-            CDS_ATOMIC::atomic<thread_list_node *> m_pListHead  ;  ///< Head of thread list
+            atomics::atomic<thread_list_node *> m_pListHead  ;  ///< Head of thread list
 
             static GarbageCollector *    m_pGC    ;    ///< HRC garbage collector instance
 
@@ -545,7 +545,7 @@ namespace cds { namespace gc {
             /// Retire (deferred delete) node \p pNode guarded by \p hp hazard pointer
             void retireNode( ContainerNode * pNode, details::HPGuard& hp, details::free_retired_ptr_func pFunc )
             {
-                assert( !pNode->m_bDeleted.load( CDS_ATOMIC::memory_order_relaxed ) );
+                assert( !pNode->m_bDeleted.load( atomics::memory_order_relaxed ) );
                 assert( pNode == hp );
 
                 retireNode( pNode, pFunc );
@@ -555,10 +555,10 @@ namespace cds { namespace gc {
             /// Retire (deferred delete) node \p pNode. Do not use this function directly!
             void retireNode( ContainerNode * pNode, details::free_retired_ptr_func pFunc )
             {
-                assert( !pNode->m_bDeleted.load( CDS_ATOMIC::memory_order_relaxed ) );
+                assert( !pNode->m_bDeleted.load( atomics::memory_order_relaxed ) );
 
-                pNode->m_bDeleted.store( true, CDS_ATOMIC::memory_order_release );
-                pNode->m_bTrace.store( false, CDS_ATOMIC::memory_order_release );
+                pNode->m_bDeleted.store( true, atomics::memory_order_release );
+                pNode->m_bTrace.store( false, atomics::memory_order_release );
 
                 m_pDesc->m_arrRetired.push( pNode, pFunc );
 
@@ -580,8 +580,8 @@ namespace cds { namespace gc {
                 details::retired_vector::iterator itEnd = m_pDesc->m_arrRetired.end();
                 for ( details::retired_vector::iterator it = m_pDesc->m_arrRetired.begin(); it != itEnd; ++it ) {
                     details::retired_node& node = *it;
-                    ContainerNode * pNode = node.m_pNode.load(CDS_ATOMIC::memory_order_acquire);
-                    if ( pNode && !node.m_bDone.load(CDS_ATOMIC::memory_order_acquire) )
+                    ContainerNode * pNode = node.m_pNode.load(atomics::memory_order_acquire);
+                    if ( pNode && !node.m_bDone.load(atomics::memory_order_acquire) )
                         pNode->cleanUp( this );
                 }
             }
index 8c1c62606c23f60b40340f8482bae6ca9901e588..bd33362bcf39c9b14b8ef8098fc5ebfc0317f144 100644 (file)
@@ -138,10 +138,10 @@ namespace cds { namespace gc {
             @headerfile cds/gc/hrc.h
         */
         template <typename T>
-        class atomic_ref: protected CDS_ATOMIC::atomic<T *>
+        class atomic_ref: protected atomics::atomic<T *>
         {
             //@cond
-            typedef CDS_ATOMIC::atomic<T *> base_class;
+            typedef atomics::atomic<T *> base_class;
             //@endcond
         public:
             //@cond
@@ -158,26 +158,26 @@ namespace cds { namespace gc {
             //@endcond
 
             /// Read reference value
-            T * load( CDS_ATOMIC::memory_order order ) const CDS_NOEXCEPT
+            T * load( atomics::memory_order order ) const CDS_NOEXCEPT
             {
                 return base_class::load( order );
             }
             //@cond
-            T * load( CDS_ATOMIC::memory_order order ) const volatile CDS_NOEXCEPT
+            T * load( atomics::memory_order order ) const volatile CDS_NOEXCEPT
             {
                 return base_class::load( order );
             }
             //@endcond
 
             /// Store new value to reference
-            void store( T * pNew, CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT
+            void store( T * pNew, atomics::memory_order order ) CDS_NOEXCEPT
             {
                 before_store( pNew );
                 T * pOld = base_class::exchange( pNew, order );
                 after_store( pOld, pNew );
             }
             //@cond
-            void store( T * pNew, CDS_ATOMIC::memory_order order ) volatile CDS_NOEXCEPT
+            void store( T * pNew, atomics::memory_order order ) volatile CDS_NOEXCEPT
             {
                 before_store( pNew );
                 T * pOld = base_class::exchange( pNew, order );
@@ -191,7 +191,7 @@ namespace cds { namespace gc {
 
                 \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
             */
-            bool compare_exchange_strong( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT
+            bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
             {
                 before_cas( pNew );
                 bool bSuccess = base_class::compare_exchange_strong( pOld, pNew, mo_success, mo_fail );
@@ -199,20 +199,20 @@ namespace cds { namespace gc {
                 return bSuccess;
             }
             //@cond
-            bool compare_exchange_strong( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) volatile CDS_NOEXCEPT
+            bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) volatile CDS_NOEXCEPT
             {
                 before_cas( pNew );
                 bool bSuccess = base_class::compare_exchange_strong( pOld, pNew, mo_success, mo_fail );
                 after_cas( bSuccess, pOld, pNew );
                 return bSuccess;
             }
-            bool compare_exchange_strong( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT
+            bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
             {
-                return compare_exchange_strong( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed );
+                return compare_exchange_strong( pOld, pNew, mo_success, atomics::memory_order_relaxed );
             }
-            bool compare_exchange_strong( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) volatile CDS_NOEXCEPT
+            bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success ) volatile CDS_NOEXCEPT
             {
-                return compare_exchange_strong( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed );
+                return compare_exchange_strong( pOld, pNew, mo_success, atomics::memory_order_relaxed );
             }
             //@endcond
 
@@ -222,7 +222,7 @@ namespace cds { namespace gc {
 
                 \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
             */
-            bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT
+            bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
             {
                 before_cas( pNew );
                 bool bSuccess = base_class::compare_exchange_weak( pOld, pNew, mo_success, mo_fail );
@@ -230,20 +230,20 @@ namespace cds { namespace gc {
                 return bSuccess;
             }
             //@cond
-            bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) volatile CDS_NOEXCEPT
+            bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) volatile CDS_NOEXCEPT
             {
                 before_cas( pNew );
                 bool bSuccess = base_class::compare_exchange_weak( pOld, pNew, mo_success, mo_fail );
                 after_cas( bSuccess, pOld, pNew );
                 return bSuccess;
             }
-            bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT
+            bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
             {
-                return compare_exchange_weak( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed );
+                return compare_exchange_weak( pOld, pNew, mo_success, atomics::memory_order_relaxed );
             }
-            bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) volatile CDS_NOEXCEPT
+            bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success ) volatile CDS_NOEXCEPT
             {
-                return compare_exchange_weak( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed );
+                return compare_exchange_weak( pOld, pNew, mo_success, atomics::memory_order_relaxed );
             }
             //@endcond
 
@@ -257,7 +257,7 @@ namespace cds { namespace gc {
             static void after_store( T * pOld, T * pNew ) CDS_NOEXCEPT
             {
                 if ( pNew )
-                    pNew->m_bTrace.store( false, CDS_ATOMIC::memory_order_release );
+                    pNew->m_bTrace.store( false, atomics::memory_order_release );
                 if ( pOld )
                     --pOld->m_RC;
             }
@@ -265,7 +265,7 @@ namespace cds { namespace gc {
             {
                 if ( p ) {
                     ++p->m_RC;
-                    p->m_bTrace.store( false, CDS_ATOMIC::memory_order_release );
+                    p->m_bTrace.store( false, atomics::memory_order_release );
                 }
             }
             static void after_cas( bool bSuccess, T * pOld, T * pNew ) CDS_NOEXCEPT
@@ -290,7 +290,7 @@ namespace cds { namespace gc {
         class atomic_marked_ptr
         {
             //@cond
-            CDS_ATOMIC::atomic< MarkedPtr >     m_a;
+            atomics::atomic< MarkedPtr >     m_a;
             //@endcond
         public:
             /// Marked pointer type
@@ -316,13 +316,13 @@ namespace cds { namespace gc {
 
 
             /// Read reference value
-            marked_ptr load(CDS_ATOMIC::memory_order order) const CDS_NOEXCEPT
+            marked_ptr load(atomics::memory_order order) const CDS_NOEXCEPT
             {
                 return m_a.load(order);
             }
 
             /// Store new value to reference
-            void store( marked_ptr pNew, CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT
+            void store( marked_ptr pNew, atomics::memory_order order ) CDS_NOEXCEPT
             {
                 before_store( pNew.ptr() );
                 marked_ptr pOld = m_a.exchange( pNew, order );
@@ -330,7 +330,7 @@ namespace cds { namespace gc {
             }
 
             /// Store new value to reference
-            void store( typename marked_ptr::pointer_type pNew, CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT
+            void store( typename marked_ptr::pointer_type pNew, atomics::memory_order order ) CDS_NOEXCEPT
             {
                 before_store( pNew );
                 marked_ptr pOld = m_a.exchange( marked_ptr(pNew), order );
@@ -343,7 +343,7 @@ namespace cds { namespace gc {
 
                 \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
             */
-            bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT
+            bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
             {
                 before_cas( pNew.ptr() );
                 bool bSuccess = m_a.compare_exchange_weak( pOld, pNew, mo_success, mo_fail );
@@ -351,7 +351,7 @@ namespace cds { namespace gc {
                 return bSuccess;
             }
             //@cond
-            bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT
+            bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
             {
                 before_cas( pNew.ptr() );
                 bool bSuccess = m_a.compare_exchange_weak( pOld, pNew, mo_success );
@@ -366,7 +366,7 @@ namespace cds { namespace gc {
 
                 \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
             */
-            bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT
+            bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
             {
                 // protect pNew
                 before_cas( pNew.ptr() );
@@ -375,7 +375,7 @@ namespace cds { namespace gc {
                 return bSuccess;
             }
             //@cond
-            bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT
+            bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
             {
                 before_cas( pNew.ptr() );
                 bool bSuccess = m_a.compare_exchange_strong( pOld, pNew, mo_success );
@@ -394,7 +394,7 @@ namespace cds { namespace gc {
             static void after_store( typename marked_ptr::pointer_type pOld, typename marked_ptr::pointer_type pNew ) CDS_NOEXCEPT
             {
                 if ( pNew )
-                    pNew->m_bTrace.store( false, CDS_ATOMIC::memory_order_release );
+                    pNew->m_bTrace.store( false, atomics::memory_order_release );
                 if ( pOld )
                     --pOld->m_RC;
             }
@@ -402,7 +402,7 @@ namespace cds { namespace gc {
             {
                 if ( p ) {
                     ++p->m_RC;
-                    p->m_bTrace.store( false, CDS_ATOMIC::memory_order_release );
+                    p->m_bTrace.store( false, atomics::memory_order_release );
                 }
             }
             static void after_cas( bool bSuccess, typename marked_ptr::pointer_type pOld, typename marked_ptr::pointer_type pNew ) CDS_NOEXCEPT
@@ -444,11 +444,11 @@ namespace cds { namespace gc {
             template <typename T>
             T * protect( atomic_ref<T> const& toGuard )
             {
-                T * pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed);
+                T * pCur = toGuard.load(atomics::memory_order_relaxed);
                 T * pRet;
                 do {
                     pRet = assign( pCur );
-                    pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire);
+                    pCur = toGuard.load(atomics::memory_order_acquire);
                 } while ( pRet != pCur );
                 return pCur;
             }
@@ -473,12 +473,12 @@ namespace cds { namespace gc {
             template <typename T, class Func>
             T * protect( atomic_ref<T> const& toGuard, Func f )
             {
-                T * pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed);
+                T * pCur = toGuard.load(atomics::memory_order_relaxed);
                 T * pRet;
                 do {
                     pRet = pCur;
                     assign( f( pCur ) );
-                    pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire);
+                    pCur = toGuard.load(atomics::memory_order_acquire);
                 } while ( pRet != pCur );
                 return pCur;
             }
@@ -495,8 +495,8 @@ namespace cds { namespace gc {
             {
                 typename atomic_marked_ptr<T>::marked_ptr p;
                 do {
-                    assign( ( p = link.load(CDS_ATOMIC::memory_order_relaxed)).ptr() );
-                } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) );
+                    assign( ( p = link.load(atomics::memory_order_relaxed)).ptr() );
+                } while ( p != link.load(atomics::memory_order_acquire) );
                 return p;
             }
 
@@ -522,9 +522,9 @@ namespace cds { namespace gc {
             {
                 typename atomic_marked_ptr<T>::marked_ptr pCur;
                 do {
-                    pCur = link.load(CDS_ATOMIC::memory_order_relaxed);
+                    pCur = link.load(atomics::memory_order_relaxed);
                     assign( f( pCur ));
-                } while ( pCur != link.load(CDS_ATOMIC::memory_order_acquire) );
+                } while ( pCur != link.load(atomics::memory_order_acquire) );
                 return pCur;
             }
 
@@ -615,8 +615,8 @@ namespace cds { namespace gc {
             {
                 T * p;
                 do {
-                    p = assign( nIndex, link.load(CDS_ATOMIC::memory_order_relaxed) );
-                } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) );
+                    p = assign( nIndex, link.load(atomics::memory_order_relaxed) );
+                } while ( p != link.load(atomics::memory_order_acquire) );
                 return p;
             }
 
@@ -632,8 +632,8 @@ namespace cds { namespace gc {
             {
                 typename atomic_marked_ptr<T>::marked_ptr p;
                 do {
-                    assign( nIndex, ( p = link.load(CDS_ATOMIC::memory_order_relaxed)).ptr() );
-                } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) );
+                    assign( nIndex, ( p = link.load(atomics::memory_order_relaxed)).ptr() );
+                } while ( p != link.load(atomics::memory_order_acquire) );
                 return p;
             }
 
@@ -659,8 +659,8 @@ namespace cds { namespace gc {
             {
                 T * pRet;
                 do {
-                    assign( nIndex, f( pRet = toGuard.load(CDS_ATOMIC::memory_order_relaxed) ));
-                } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_acquire));
+                    assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_relaxed) ));
+                } while ( pRet != toGuard.load(atomics::memory_order_acquire));
 
                 return pRet;
             }
@@ -687,9 +687,9 @@ namespace cds { namespace gc {
             {
                 typename atomic_marked_ptr<T>::marked_ptr p;
                 do {
-                    p = link.load(CDS_ATOMIC::memory_order_relaxed);
+                    p = link.load(atomics::memory_order_relaxed);
                     assign( nIndex, f( p ) );
-                } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) );
+                } while ( p != link.load(atomics::memory_order_acquire) );
                 return p;
             }
 
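The atomic_ref/atomic_marked_ptr wrappers above thread reference-count maintenance through every store and CAS: the prospective target is pinned before the operation, and the pin is dropped from whichever pointer lost. Schematically, assuming the usual bookkeeping (hypothetical node type, not the library API):

    #include <atomic>

    struct node { std::atomic<unsigned> m_RC{ 0 }; };

    bool cas_with_rc( std::atomic<node*>& a, node*& expected, node* desired )
    {
        if ( desired )
            ++desired->m_RC;          // pin the new target before the CAS
        bool ok = a.compare_exchange_strong( expected, desired );
        if ( ok ) {
            if ( expected )
                --expected->m_RC;     // the replaced pointer loses a reference
        }
        else if ( desired )
            --desired->m_RC;          // CAS failed: undo the pin
        return ok;
    }
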
index 40561fba0f491fd8d7fb9d7b27d9ab0a2e379365..ad69e3cf17e90527b4e5f37b52c63ac30c7521c2 100644 (file)
@@ -23,13 +23,13 @@ namespace cds {
                 \li HazardPointer - type of hazard pointer. It is \ref hazard_pointer for Michael's Hazard Pointer reclamation schema
         */
         template <typename HazardPointer>
-        class HPGuardT: protected CDS_ATOMIC::atomic<HazardPointer>
+        class HPGuardT: protected atomics::atomic<HazardPointer>
         {
         public:
             typedef HazardPointer   hazard_ptr ;    ///< Hazard pointer type
         private:
             //@cond
-            typedef CDS_ATOMIC::atomic<hazard_ptr>  base_class;
+            typedef atomics::atomic<hazard_ptr>  base_class;
             //@endcond
 
         protected:
@@ -52,7 +52,7 @@ namespace cds {
             T * operator =( T * p ) CDS_NOEXCEPT
             {
                 // We use atomic store with explicit memory order because other threads may read this hazard pointer concurrently
-                base_class::store( reinterpret_cast<hazard_ptr>(p), CDS_ATOMIC::memory_order_release );
+                base_class::store( reinterpret_cast<hazard_ptr>(p), atomics::memory_order_release );
                 return p;
             }
 
@@ -79,7 +79,7 @@ namespace cds {
             */
             hazard_ptr get() const CDS_NOEXCEPT
             {
-                return base_class::load( CDS_ATOMIC::memory_order_acquire );
+                return base_class::load( atomics::memory_order_acquire );
             }
 
             /// Clears HP
@@ -89,7 +89,7 @@ namespace cds {
             void clear() CDS_NOEXCEPT
             {
                 // memory order is not necessary here
-                base_class::store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+                base_class::store( nullptr, atomics::memory_order_relaxed );
                 //CDS_COMPILER_RW_BARRIER;
             }
         };
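
The memory orders chosen above are the usual ones for a guard slot; read as a pattern (a sketch, not library documentation):

    #include <atomic>

    std::atomic<void*> slot{ nullptr };

    void publish( void* p )
    {
        slot.store( p, std::memory_order_release );        // other threads scan this slot concurrently
    }

    void* scan()
    {
        return slot.load( std::memory_order_acquire );     // a reclaimer reads the published hazard
    }

    void clear()
    {
        slot.store( nullptr, std::memory_order_relaxed );  // dropping protection needs no ordering
    }
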
index 27d1cec76e01e080f34543a8075212132fcb93bb..92eb84d3d471ff0cf515dbb9e29c9fccc331954d 100644 (file)
@@ -16,7 +16,7 @@
 
 #if CDS_COMPILER == CDS_COMPILER_MSVC
 #   pragma warning(push)
-    // warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomics::atomic<T>'
+    // warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomic::atomic<T>'
     // needs to have dll-interface to be used by clients of class 'cds::gc::hzp::GarbageCollector'
 #   pragma warning(disable: 4251)
 #endif
@@ -208,8 +208,8 @@ namespace cds {
             struct hplist_node: public details::HPRec
             {
                 hplist_node *                       m_pNextNode ; ///< next hazard ptr record in list
-                CDS_ATOMIC::atomic<OS::ThreadId>    m_idOwner   ; ///< Owner thread id; 0 - the record is free (not owned)
-                CDS_ATOMIC::atomic<bool>            m_bFree     ; ///< true if record if free (not owned)
+                atomics::atomic<OS::ThreadId>    m_idOwner   ; ///< Owner thread id; 0 - the record is free (not owned)
+                atomics::atomic<bool>            m_bFree     ; ///< true if record is free (not owned)
 
                 //@cond
                 hplist_node( const GarbageCollector& HzpMgr )
@@ -221,13 +221,13 @@ namespace cds {
 
                 ~hplist_node()
                 {
-                    assert( m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == OS::c_NullThreadId );
-                    assert( m_bFree.load(CDS_ATOMIC::memory_order_relaxed) );
+                    assert( m_idOwner.load( atomics::memory_order_relaxed ) == OS::c_NullThreadId );
+                    assert( m_bFree.load(atomics::memory_order_relaxed) );
                 }
                 //@endcond
             };
 
-            CDS_ATOMIC::atomic<hplist_node *>   m_pListHead  ;  ///< Head of GC list
+            atomics::atomic<hplist_node *>   m_pListHead  ;  ///< Head of GC list
 
             static GarbageCollector *    m_pHZPManager  ;   ///< GC instance pointer
 
index b1afc4ff916a29ce3ceca291cacde9d8b1a859ad..b4d716b22bb8ed26364b654bb1e264f7893a7285 100644 (file)
@@ -75,7 +75,7 @@ namespace cds { namespace gc {
                 typedef retired_ptr_node *      handoff_ptr ;   ///< trapped value type
                 typedef void *  guarded_ptr  ;   ///< type of value guarded
 
-                CDS_ATOMIC::atomic<guarded_ptr>         pPost   ;   ///< pointer guarded
+                atomics::atomic<guarded_ptr>         pPost   ;   ///< pointer guarded
 
 #if 0
                 typedef cds::SpinLock   handoff_spin ;  ///< type of spin-lock for accessing to \p pHandOff field
@@ -83,8 +83,8 @@ namespace cds { namespace gc {
                 handoff_ptr     pHandOff        ;   ///< trapped pointer
 #endif
 
-                CDS_ATOMIC::atomic<guard_data *>     pGlobalNext ;   ///< next item of global list of allocated guards
-                CDS_ATOMIC::atomic<guard_data *>     pNextFree   ;   ///< pointer to the next item in global or thread-local free-list
+                atomics::atomic<guard_data *>     pGlobalNext ;   ///< next item of global list of allocated guards
+                atomics::atomic<guard_data *>     pNextFree   ;   ///< pointer to the next item in global or thread-local free-list
 
                 guard_data *             pThreadNext ;   ///< next item of thread's local list of guards
 
@@ -101,14 +101,14 @@ namespace cds { namespace gc {
 
                 void init()
                 {
-                    pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+                    pPost.store( nullptr, atomics::memory_order_relaxed );
                 }
                 //@endcond
 
                 /// Checks if the guard is free, that is, it does not guard any pointer
                 bool isFree() const
                 {
-                    return pPost.load( CDS_ATOMIC::memory_order_acquire ) == nullptr;
+                    return pPost.load( atomics::memory_order_acquire ) == nullptr;
                 }
             };
 
@@ -118,8 +118,8 @@ namespace cds { namespace gc {
             {
                 cds::details::Allocator<details::guard_data>  m_GuardAllocator    ;   ///< guard allocator
 
-                CDS_ATOMIC::atomic<guard_data *>    m_GuardList ;       ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
-                CDS_ATOMIC::atomic<guard_data *>    m_FreeGuardList ;   ///< Head of free guard list (linked by guard_data::pNextFree field)
+                atomics::atomic<guard_data *>    m_GuardList ;       ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
+                atomics::atomic<guard_data *>    m_FreeGuardList ;   ///< Head of free guard list (linked by guard_data::pNextFree field)
                 SpinLock                m_freeListLock  ;   ///< Access to m_FreeGuardList
 
                 /*
@@ -139,11 +139,11 @@ namespace cds { namespace gc {
                     // Link guard to the list
                     // m_GuardList is an append-only list that does not support concurrent deletion,
                     // so the ABA problem is impossible for it
-                    details::guard_data * pHead = m_GuardList.load( CDS_ATOMIC::memory_order_acquire );
+                    details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
                     do {
-                        pGuard->pGlobalNext.store( pHead, CDS_ATOMIC::memory_order_relaxed );
+                        pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
                         // pHead is changed by compare_exchange_weak
-                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
 
                     pGuard->init();
                     return pGuard;
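
The push loop above is the standard lock-free prepend: on failure compare_exchange_weak reloads pHead, so only one explicit load is needed. A minimal sketch of the same pattern in isolation (the node type is hypothetical; ABA is harmless only because, as the comment notes, this list never unlinks nodes):

    struct node { atomics::atomic<node *> pNext; };

    void push_front( atomics::atomic<node *>& head, node * pNew )
    {
        node * pHead = head.load( atomics::memory_order_acquire );
        do {
            pNew->pNext.store( pHead, atomics::memory_order_relaxed );
            // on failure, compare_exchange_weak refreshes pHead
        } while ( !head.compare_exchange_weak( pHead, pNew,
                      atomics::memory_order_release, atomics::memory_order_relaxed ));
    }
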
@@ -160,8 +160,8 @@ namespace cds { namespace gc {
                 ~guard_allocator()
                 {
                     guard_data * pNext;
-                    for ( guard_data * pData = m_GuardList.load( CDS_ATOMIC::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
-                        pNext = pData->pGlobalNext.load( CDS_ATOMIC::memory_order_relaxed );
+                    for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
+                        pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
                         m_GuardAllocator.Delete( pData );
                     }
                 }
@@ -174,9 +174,9 @@ namespace cds { namespace gc {
 
                     {
                         cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
-                        pGuard = m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed);
+                        pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
                         if ( pGuard )
-                            m_FreeGuardList.store( pGuard->pNextFree.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
+                            m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                     }
                     if ( !pGuard )
                         return allocNew();
@@ -191,11 +191,11 @@ namespace cds { namespace gc {
                 */
                 void free( guard_data * pGuard )
                 {
-                    pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
 
                     cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
-                    pGuard->pNextFree.store( m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
-                    m_FreeGuardList.store( pGuard, CDS_ATOMIC::memory_order_relaxed );
+                    pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
+                    m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
                 }
 
                 /// Allocates a list of guards
@@ -218,11 +218,11 @@ namespace cds { namespace gc {
                     // so, we can use relaxed memory order
                     while ( --nCount ) {
                         guard_data * p = alloc();
-                        pLast->pNextFree.store( pLast->pThreadNext = p, CDS_ATOMIC::memory_order_relaxed );
+                        pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
                         pLast = p;
                     }
 
-                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, CDS_ATOMIC::memory_order_relaxed );
+                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
 
                     return pHead;
                 }
@@ -239,21 +239,21 @@ namespace cds { namespace gc {
 
                     guard_data * pLast = pList;
                     while ( pLast->pThreadNext ) {
-                        pLast->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+                        pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
                         guard_data * p;
-                        pLast->pNextFree.store( p = pLast->pThreadNext, CDS_ATOMIC::memory_order_relaxed );
+                        pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
                         pLast = p;
                     }
 
                     cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
-                    pLast->pNextFree.store( m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
-                    m_FreeGuardList.store( pList, CDS_ATOMIC::memory_order_relaxed );
+                    pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
+                    m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
                 }
 
                 /// Returns the head of the allocated guard list
                 guard_data * begin()
                 {
-                    return m_GuardList.load(CDS_ATOMIC::memory_order_acquire);
+                    return m_GuardList.load(atomics::memory_order_acquire);
                 }
             };
 
@@ -265,8 +265,8 @@ namespace cds { namespace gc {
             */
             class retired_ptr_buffer
             {
-                CDS_ATOMIC::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
-                CDS_ATOMIC::atomic<size_t>              m_nItemCount;   ///< buffer's item count
+                atomics::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
+                atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count
 
             public:
                 //@cond
@@ -277,20 +277,20 @@ namespace cds { namespace gc {
 
                 ~retired_ptr_buffer()
                 {
-                    assert( m_pHead.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
+                    assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
                 }
                 //@endcond
 
                 /// Pushes a new node into the buffer and returns the current buffer size
                 size_t push( retired_ptr_node& node )
                 {
-                    retired_ptr_node * pHead = m_pHead.load(CDS_ATOMIC::memory_order_acquire);
+                    retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
                     do {
                         node.m_pNext = pHead;
                         // pHead is changed by compare_exchange_weak
-                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
 
-                    return m_nItemCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ) + 1;
+                    return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
                 }
 
                 /// Result of the \ref ptb_gc_privatve "privatize" function.
@@ -305,18 +305,18 @@ namespace cds { namespace gc {
                 privatize_result privatize()
                 {
                     privatize_result res;
-                    res.first = m_pHead.exchange( nullptr, CDS_ATOMIC::memory_order_acq_rel );
+                    res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
 
                     // Item counter is needed only as a threshold for liberate function
                     // So, we may clear the item counter without synchronization with m_pHead
-                    res.second = m_nItemCount.exchange( 0, CDS_ATOMIC::memory_order_relaxed );
+                    res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
                     return res;
                 }
 
                 /// Returns current size of buffer (approximate)
                 size_t size() const
                 {
-                    return m_nItemCount.load(CDS_ATOMIC::memory_order_relaxed);
+                    return m_nItemCount.load(atomics::memory_order_relaxed);
                 }
             };
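
privatize() detaches the whole accumulated stack with a single exchange; afterwards no other thread can reach the detached nodes, so the caller may walk them without synchronization. A drain sketch, assuming the m_pNext link that push() sets and the (head, count) pair returned as privatize_result:

    // Sketch: liberate-style consumer of a privatized buffer.
    privatize_result res = buf.privatize();
    for ( retired_ptr_node * p = res.first; p; ) {
        retired_ptr_node * pNext = p->m_pNext;
        // ... check hazard pointers here, then reclaim or re-push p ...
        p = pNext;
    }
    // res.second is approximate: the counter is cleared without
    // synchronizing with m_pHead, as noted in privatize() itself.
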
 
@@ -339,13 +339,13 @@ namespace cds { namespace gc {
                     item        items[m_nItemPerBlock]  ;   ///< item array
                 };
 
-                CDS_ATOMIC::atomic<block *> m_pBlockListHead    ;   ///< head of of allocated block list
+                atomics::atomic<block *> m_pBlockListHead    ;   ///< head of allocated block list
 
                 // To solve the ABA problem we use an epoch-based approach
                 static const unsigned int c_nEpochCount = 4     ;   ///< Max epoch count
-                CDS_ATOMIC::atomic<unsigned int>    m_nCurEpoch ;   ///< Current epoch
-                CDS_ATOMIC::atomic<item *>  m_pEpochFree[c_nEpochCount]  ;   ///< List of free item per epoch
-                CDS_ATOMIC::atomic<item *>  m_pGlobalFreeHead   ;   ///< Head of unallocated item list
+                atomics::atomic<unsigned int>    m_nCurEpoch ;   ///< Current epoch
+                atomics::atomic<item *>  m_pEpochFree[c_nEpochCount]  ;   ///< List of free item per epoch
+                atomics::atomic<item *>  m_pGlobalFreeHead   ;   ///< Head of unallocated item list
 
                 cds::details::Allocator< block, Alloc > m_BlockAllocator    ;   ///< block allocator
 
@@ -365,30 +365,30 @@ namespace cds { namespace gc {
 
                     // link new block to block list
                     {
-                        block * pHead = m_pBlockListHead.load(CDS_ATOMIC::memory_order_acquire);
+                        block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
                         do {
                             pNew->pNext = pHead;
                             // pHead is changed by compare_exchange_weak
-                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
                     }
 
                     // link block's items to free list
                     {
-                        item * pHead = m_pGlobalFreeHead.load(CDS_ATOMIC::memory_order_acquire);
+                        item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
                         do {
                             pLastItem->m_pNextFree = pHead;
                             // pHead is changed by compare_exchange_weak
-                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
                     }
                 }
 
                 unsigned int current_epoch() const
                 {
-                    return m_nCurEpoch.load(CDS_ATOMIC::memory_order_acquire) & (c_nEpochCount - 1);
+                    return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
                 }
                 unsigned int next_epoch() const
                 {
-                    return (m_nCurEpoch.load(CDS_ATOMIC::memory_order_acquire) - 1) & (c_nEpochCount - 1);
+                    return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
                 }
                 //@endcond
 
@@ -400,7 +400,7 @@ namespace cds { namespace gc {
                     , m_pGlobalFreeHead( nullptr )
                 {
                     for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
-                        m_pEpochFree[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+                        m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );
 
                     allocNewBlock();
                 }
@@ -408,7 +408,7 @@ namespace cds { namespace gc {
                 ~retired_ptr_pool()
                 {
                     block * p;
-                    for ( block * pBlock = m_pBlockListHead.load(CDS_ATOMIC::memory_order_relaxed); pBlock; pBlock = p ) {
+                    for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
                         p = pBlock->pNext;
                         m_BlockAllocator.Delete( pBlock );
                     }
@@ -417,7 +417,7 @@ namespace cds { namespace gc {
                 /// Increments current epoch
                 void inc_epoch()
                 {
-                    m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_acq_rel );
+                    m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
                 }
 
                 //@endcond
@@ -428,17 +428,17 @@ namespace cds { namespace gc {
                     unsigned int nEpoch;
                     item * pItem;
                     for (;;) {
-                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(CDS_ATOMIC::memory_order_acquire);
+                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
                         if ( !pItem )
                             goto retry;
-                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
                             goto success;
                     }
 
                     /*
-                    item * pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(CDS_ATOMIC::memory_order_acquire);
+                    item * pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
                     while ( pItem ) {
-                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
                             goto success;
                     }
                     */
@@ -446,14 +446,14 @@ namespace cds { namespace gc {
                     // Epoch free list is empty
                     // Alloc from global free list
                 retry:
-                    pItem = m_pGlobalFreeHead.load( CDS_ATOMIC::memory_order_acquire );
+                    pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
                     do {
                         if ( !pItem ) {
                             allocNewBlock();
                             goto retry;
                         }
                         // pItem is changed by compare_exchange_weak
-                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));
 
                 success:
                     CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
@@ -480,9 +480,9 @@ namespace cds { namespace gc {
                     unsigned int nEpoch;
                     item * pCurHead;
                     do {
-                        pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(CDS_ATOMIC::memory_order_acquire);
+                        pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
                         pTail->m_pNextFree = pCurHead;
-                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
                 }
             };
 
@@ -506,7 +506,7 @@ namespace cds { namespace gc {
                 void set( void * p )
                 {
                     assert( m_pGuard != nullptr );
-                    m_pGuard->pPost.store( p, CDS_ATOMIC::memory_order_release );
+                    m_pGuard->pPost.store( p, atomics::memory_order_release );
                     //CDS_COMPILER_RW_BARRIER;
                 }
 
@@ -514,7 +514,7 @@ namespace cds { namespace gc {
                 void clear()
                 {
                     assert( m_pGuard != nullptr );
-                    m_pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+                    m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                     CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
                 }
 
@@ -689,8 +689,8 @@ namespace cds { namespace gc {
             /// Internal GC statistics
             struct internal_stat
             {
-                CDS_ATOMIC::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
-                CDS_ATOMIC::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guard
+                atomics::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
+                atomics::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guards
 
                 internal_stat()
                     : m_nGuardCount(0)
@@ -717,8 +717,8 @@ namespace cds { namespace gc {
 
                 InternalState& operator =( internal_stat const& s )
                 {
-                    m_nGuardCount = s.m_nGuardCount.load(CDS_ATOMIC::memory_order_relaxed);
-                    m_nFreeGuardCount = s.m_nFreeGuardCount.load(CDS_ATOMIC::memory_order_relaxed);
+                    m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
+                    m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);
 
                     return *this;
                 }
@@ -731,9 +731,9 @@ namespace cds { namespace gc {
             details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
             details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
             details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating
-            //CDS_ATOMIC::atomic<size_t>      m_nInLiberate       ;   ///< number of parallel \p liberate fnction call
+            //atomics::atomic<size_t>      m_nInLiberate       ;   ///< number of parallel \p liberate function calls
 
-            CDS_ATOMIC::atomic<size_t>      m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call liberate
+            atomics::atomic<size_t>      m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call liberate
             const size_t    m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
 
             internal_stat   m_stat  ;   ///< Internal statistics
@@ -827,7 +827,7 @@ namespace cds { namespace gc {
             /// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
             void retirePtr( retired_ptr const& p )
             {
-                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(CDS_ATOMIC::memory_order_relaxed) )
+                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
                     liberate();
             }
 
@@ -933,7 +933,7 @@ namespace cds { namespace gc {
                 assert( m_pList != nullptr );
                 if ( m_pFree ) {
                     g.m_pGuard = m_pFree;
-                    m_pFree = m_pFree->pNextFree.load(CDS_ATOMIC::memory_order_relaxed);
+                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                 }
                 else {
                     g.m_pGuard = m_gc.allocGuard();
@@ -946,8 +946,8 @@ namespace cds { namespace gc {
             void freeGuard( Guard& g )
             {
                 assert( m_pList != nullptr );
-                g.m_pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
-                g.m_pGuard->pNextFree.store( m_pFree, CDS_ATOMIC::memory_order_relaxed );
+                g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
+                g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                 m_pFree = g.m_pGuard;
             }
 
@@ -960,7 +960,7 @@ namespace cds { namespace gc {
 
                 while ( m_pFree && nCount < Count ) {
                     arr[nCount].set_guard( m_pFree );
-                    m_pFree = m_pFree->pNextFree.load(CDS_ATOMIC::memory_order_relaxed);
+                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                     ++nCount;
                 }
 
@@ -981,12 +981,12 @@ namespace cds { namespace gc {
                 details::guard_data * pGuard;
                 for ( size_t i = 0; i < Count - 1; ++i ) {
                     pGuard = arr[i].get_guard();
-                    pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
-                    pGuard->pNextFree.store( arr[i+1].get_guard(), CDS_ATOMIC::memory_order_relaxed );
+                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
+                    pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
                 }
                 pGuard = arr[Count-1].get_guard();
-                pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
-                pGuard->pNextFree.store( m_pFree, CDS_ATOMIC::memory_order_relaxed );
+                pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
+                pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                 m_pFree = arr[0].get_guard();
             }
 
index 175e9064f1f30152cdbcbd7f7ae7e95f59589c45..eae4dcf86f6c13595e7815acf7a2f8b3215e69c9 100644 (file)
@@ -35,24 +35,24 @@ namespace cds { namespace gc {
         /**
             @headerfile cds/gc/ptb.h
         */
-        template <typename T> using atomic_ref = CDS_ATOMIC::atomic<T *>;
+        template <typename T> using atomic_ref = atomics::atomic<T *>;
 
         /// Atomic type
         /**
             @headerfile cds/gc/ptb.h
         */
-        template <typename T> using atomic_type = CDS_ATOMIC::atomic<T>;
+        template <typename T> using atomic_type = atomics::atomic<T>;
 
         /// Atomic marked pointer
         /**
             @headerfile cds/gc/ptb.h
         */
-        template <typename MarkedPtr> using atomic_marked_ptr = CDS_ATOMIC::atomic<MarkedPtr>;
+        template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
 #else
         template <typename T>
-        class atomic_ref: public CDS_ATOMIC::atomic<T *>
+        class atomic_ref: public atomics::atomic<T *>
         {
-            typedef CDS_ATOMIC::atomic<T *> base_class;
+            typedef atomics::atomic<T *> base_class;
         public:
 #   ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
             atomic_ref() = default;
@@ -67,9 +67,9 @@ namespace cds { namespace gc {
         };
 
         template <typename T>
-        class atomic_type: public CDS_ATOMIC::atomic<T>
+        class atomic_type: public atomics::atomic<T>
         {
-            typedef CDS_ATOMIC::atomic<T> base_class;
+            typedef atomics::atomic<T> base_class;
         public:
 #   ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
             atomic_type() = default;
@@ -84,9 +84,9 @@ namespace cds { namespace gc {
         };
 
         template <typename MarkedPtr>
-        class atomic_marked_ptr: public CDS_ATOMIC::atomic<MarkedPtr>
+        class atomic_marked_ptr: public atomics::atomic<MarkedPtr>
         {
-            typedef CDS_ATOMIC::atomic<MarkedPtr> base_class;
+            typedef atomics::atomic<MarkedPtr> base_class;
         public:
 #   ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
             atomic_marked_ptr() = default;
@@ -172,13 +172,13 @@ namespace cds { namespace gc {
                 to the HP slot repeatedly until the guard's value equals \p toGuard
             */
             template <typename T>
-            T protect( CDS_ATOMIC::atomic<T> const& toGuard )
+            T protect( atomics::atomic<T> const& toGuard )
             {
-                T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed);
+                T pCur = toGuard.load(atomics::memory_order_relaxed);
                 T pRet;
                 do {
                     pRet = assign( pCur );
-                    pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire);
+                    pCur = toGuard.load(atomics::memory_order_acquire);
                 } while ( pRet != pCur );
                 return pCur;
             }
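
The loop publishes the hazard pointer before re-validating the source with an acquire load, so any value that survives the re-read is already covered. A typical call site (sketch; the node type and pHead source are hypothetical):

    Guard g;
    node * p = g.protect( pHead );   // pHead is atomics::atomic<node *>
    if ( p ) {
        // safe: p cannot be reclaimed while g protects it
    }
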
@@ -201,14 +201,14 @@ namespace cds { namespace gc {
                 In effect, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
             */
             template <typename T, class Func>
-            T protect( CDS_ATOMIC::atomic<T> const& toGuard, Func f )
+            T protect( atomics::atomic<T> const& toGuard, Func f )
             {
-                T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed);
+                T pCur = toGuard.load(atomics::memory_order_relaxed);
                 T pRet;
                 do {
                     pRet = pCur;
                     assign( f( pCur ) );
-                    pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire);
+                    pCur = toGuard.load(atomics::memory_order_acquire);
                 } while ( pRet != pCur );
                 return pCur;
             }
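
The functor overload is useful when the source holds a marked pointer: the stability check runs on the marked value while the guard stores the address with the mark bits stripped. A sketch, assuming the marked_ptr type with the ptr() accessor used elsewhere in this patch:

    Guard g;
    marked_ptr p = g.protect( pMarkedNext,
        []( marked_ptr mp ) { return mp.ptr(); } );   // guard the raw address
    // p still carries the mark bits; p.ptr() is the protected node.
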
@@ -264,7 +264,7 @@ namespace cds { namespace gc {
             /// Get native guarded pointer stored
             guarded_pointer get_native() const
             {
-                return base_class::get_guard()->pPost.load(CDS_ATOMIC::memory_order_relaxed);
+                return base_class::get_guard()->pPost.load(atomics::memory_order_relaxed);
             }
 
         };
@@ -301,12 +301,12 @@ namespace cds { namespace gc {
                 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
             */
             template <typename T>
-            T protect(size_t nIndex, CDS_ATOMIC::atomic<T> const& toGuard )
+            T protect(size_t nIndex, atomics::atomic<T> const& toGuard )
             {
                 T pRet;
                 do {
-                    pRet = assign( nIndex, toGuard.load(CDS_ATOMIC::memory_order_relaxed) );
-                } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_acquire));
+                    pRet = assign( nIndex, toGuard.load(atomics::memory_order_relaxed) );
+                } while ( pRet != toGuard.load(atomics::memory_order_acquire));
 
                 return pRet;
             }
@@ -329,12 +329,12 @@ namespace cds { namespace gc {
                 In effect, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
             */
             template <typename T, class Func>
-            T protect(size_t nIndex, CDS_ATOMIC::atomic<T> const& toGuard, Func f )
+            T protect(size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
             {
                 T pRet;
                 do {
-                    assign( nIndex, f( pRet = toGuard.load(CDS_ATOMIC::memory_order_relaxed) ));
-                } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_acquire));
+                    assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_relaxed) ));
+                } while ( pRet != toGuard.load(atomics::memory_order_acquire));
 
                 return pRet;
             }
@@ -389,7 +389,7 @@ namespace cds { namespace gc {
             /// Get native guarded pointer stored
             guarded_pointer get_native( size_t nIndex ) const
             {
-                return base_class::operator[](nIndex).get_guard()->pPost.load(CDS_ATOMIC::memory_order_relaxed);
+                return base_class::operator[](nIndex).get_guard()->pPost.load(atomics::memory_order_relaxed);
             }
 
             /// Capacity of the guard array
index ccf896459e4652132f170c3bb5260a1b5715fd36..4e52726c8f4a3fb4de2819a5755c9f725dd5ce22 100644 (file)
@@ -70,9 +70,9 @@ namespace cds { namespace intrusive {
 
                 while ( true ) {
                     marked_ptr pNext = aGuards.protect( 0, m_pNext );
-                    if ( pNext.ptr() && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) {
+                    if ( pNext.ptr() && pNext->m_bDeleted.load(atomics::memory_order_acquire) ) {
                         marked_ptr p = aGuards.protect( 1, pNext->m_pNext );
-                        m_pNext.compare_exchange_strong( pNext, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+                        m_pNext.compare_exchange_strong( pNext, p, atomics::memory_order_acquire, atomics::memory_order_relaxed );
                         continue;
                     }
                     else {
@@ -84,11 +84,11 @@ namespace cds { namespace intrusive {
             virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent )
             {
                 if ( bConcurrent ) {
-                    marked_ptr pNext = m_pNext.load(CDS_ATOMIC::memory_order_relaxed);
-                    do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+                    marked_ptr pNext = m_pNext.load(atomics::memory_order_relaxed);
+                    do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), atomics::memory_order_release, atomics::memory_order_relaxed ) );
                 }
                 else {
-                    m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+                    m_pNext.store( marked_ptr(), atomics::memory_order_relaxed );
                 }
             }
         };
index 340c4ede4532f05de9d941ebbbb6cd7807e5d8f2..89a9fd167bd3718143bfe679197d05001d427f94 100644 (file)
@@ -658,8 +658,8 @@ namespace cds { namespace intrusive {
             //@cond
             static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1;
 
-            CDS_ATOMIC::atomic< owner_t >   m_Owner     ;   ///< owner mark (thread id + boolean flag)
-            CDS_ATOMIC::atomic<size_t>      m_nCapacity ;   ///< lock array capacity
+            atomics::atomic< owner_t >   m_Owner     ;   ///< owner mark (thread id + boolean flag)
+            atomics::atomic<size_t>      m_nCapacity ;   ///< lock array capacity
             lock_array_ptr                  m_arrLocks[ c_nArity ]  ; ///< Lock array. The capacity of the array is specified in the constructor.
             spinlock_type                   m_access    ;   ///< access to m_arrLocks
             statistics_type                 m_Stat      ;   ///< internal statistics
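
m_Owner packs a thread id and a busy flag into one atomic word: bit 0 marks "resize in progress" and the id occupies the bits above it, truncated by c_nOwnerMask. That is why the code below stores (me << 1) | 1 and readers test who & 1. A sketch of the encoding (sample id only):

    owner_t me  = 42;                       // sample thread id
    owner_t who = (me << 1) | 1;            // id in bits 1.., busy flag in bit 0
    bool    bBusy   = (who & 1) != 0;       // true: a thread holds the global lock
    owner_t nHolder = who >> 1;             // equals me & c_nOwnerMask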
@@ -695,7 +695,7 @@ namespace cds { namespace intrusive {
 
                     // wait while resizing
                     while ( true ) {
-                        who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
+                        who = m_Owner.load( atomics::memory_order_acquire );
                         if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) )
                             break;
                         bkoff();
@@ -715,7 +715,7 @@ namespace cds { namespace intrusive {
                             parrLock[i]->lock();
                         }
 
-                        who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
+                        who = m_Owner.load( atomics::memory_order_acquire );
                         if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks[0] == pLockArr[0] ) {
                             m_Stat.onCellLock();
                             return;
@@ -742,7 +742,7 @@ namespace cds { namespace intrusive {
                 // It is assumed that the current thread already has a lock
                 // and requires a second lock for another hash
 
-                size_t const nMask = m_nCapacity.load(CDS_ATOMIC::memory_order_acquire) - 1;
+                size_t const nMask = m_nCapacity.load(atomics::memory_order_acquire) - 1;
                 size_t nCell = m_arrLocks[0]->try_lock( arrHash[0] & nMask);
                 if ( nCell == lock_array_type::c_nUnspecifiedCell ) {
                     m_Stat.onSecondCellLockFailed();
@@ -765,7 +765,7 @@ namespace cds { namespace intrusive {
                 back_off bkoff;
                 while ( true ) {
                     owner_t ownNull = 0;
-                    if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )) {
+                    if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acq_rel, atomics::memory_order_relaxed )) {
                         m_arrLocks[0]->lock_all();
 
                         m_Stat.onFullLock();
@@ -779,7 +779,7 @@ namespace cds { namespace intrusive {
             void release_all()
             {
                 m_arrLocks[0]->unlock_all();
-                m_Owner.store( 0, CDS_ATOMIC::memory_order_release );
+                m_Owner.store( 0, atomics::memory_order_release );
             }
 
             void acquire_resize( lock_array_ptr * pOldLocks )
@@ -795,9 +795,9 @@ namespace cds { namespace intrusive {
 
                     // global lock
                     owner_t ownNull = 0;
-                    if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )) {
+                    if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acq_rel, atomics::memory_order_relaxed )) {
                         if ( pOldLocks[0] != m_arrLocks[0] ) {
-                            m_Owner.store( 0, CDS_ATOMIC::memory_order_release );
+                            m_Owner.store( 0, atomics::memory_order_release );
                             m_Stat.onResizeLockArrayChanged();
                         }
                         else {
@@ -820,7 +820,7 @@ namespace cds { namespace intrusive {
 
             void release_resize( lock_array_ptr * pOldLocks )
             {
-                m_Owner.store( 0, CDS_ATOMIC::memory_order_release );
+                m_Owner.store( 0, atomics::memory_order_release );
                 pOldLocks[0]->unlock_all();
             }
             //@endcond
@@ -935,7 +935,7 @@ namespace cds { namespace intrusive {
                     for ( unsigned int i = 0; i < c_nArity; ++i )
                         m_arrLocks[i] = pNew[i];
                 }
-                m_nCapacity.store( nCapacity, CDS_ATOMIC::memory_order_release );
+                m_nCapacity.store( nCapacity, atomics::memory_order_release );
 
                 m_Stat.onResize();
             }
@@ -947,7 +947,7 @@ namespace cds { namespace intrusive {
             */
             size_t lock_count() const
             {
-                return m_nCapacity.load(CDS_ATOMIC::memory_order_relaxed);
+                return m_nCapacity.load(atomics::memory_order_relaxed);
             }
 
             /// Returns the arity of \p refinable mutex policy
index af21db42efcd2c7685c1df1ad24230c1fc91cf38..56a2a925146151ba2c3be385d414595dd09e1108 100644 (file)
@@ -39,7 +39,7 @@ namespace cds { namespace intrusive { namespace details {
             {
                 assert( p != nullptr );
 
-                p->m_pNext.store( nullptr, CDS_ATOMIC::memory_order_release );
+                p->m_pNext.store( nullptr, atomics::memory_order_release );
                 allocator_type().Delete( p );
             }
         };
index 25b81e8f3e426dee5d469af7350fc129a51081b5..a52390a4b3b2598c0ee07629a1ecdd9d29e6f9ed 100644 (file)
@@ -200,9 +200,9 @@ namespace cds { namespace intrusive {
             typedef typename update_desc_type::update_ptr  update_ptr ; ///< Marked pointer to update descriptor
 
             key_type                         m_Key       ;   ///< Regular key
-            CDS_ATOMIC::atomic<base_class *> m_pLeft     ;   ///< Left subtree
-            CDS_ATOMIC::atomic<base_class *> m_pRight    ;   ///< Right subtree
-            CDS_ATOMIC::atomic<update_ptr>   m_pUpdate   ;   ///< Update descriptor
+            atomics::atomic<base_class *> m_pLeft     ;   ///< Left subtree
+            atomics::atomic<base_class *> m_pRight    ;   ///< Right subtree
+            atomics::atomic<update_ptr>   m_pUpdate   ;   ///< Update descriptor
             //@cond
             uintptr_t                           m_nEmptyUpdate; ///< ABA prevention for m_pUpdate, from 0..2^16 step 4
             //@endcond
index f4890a8dc2c9056586eadab069fe083a88f6cf88..d8e6c0dd17b8d0da68200ab047655d43fda81f4a 100644 (file)
@@ -970,8 +970,8 @@ namespace cds { namespace intrusive {
 
         bool check_consistency( internal_node const * pRoot ) const
         {
-            tree_node * pLeft  = pRoot->m_pLeft.load( CDS_ATOMIC::memory_order_relaxed );
-            tree_node * pRight = pRoot->m_pRight.load( CDS_ATOMIC::memory_order_relaxed );
+            tree_node * pLeft  = pRoot->m_pLeft.load( atomics::memory_order_relaxed );
+            tree_node * pRight = pRoot->m_pRight.load( atomics::memory_order_relaxed );
             assert( pLeft );
             assert( pRight );
 
@@ -1017,7 +1017,7 @@ namespace cds { namespace intrusive {
             return p;
         }
 
-        update_ptr search_protect_update( search_result& res, CDS_ATOMIC::atomic<update_ptr> const& src ) const
+        update_ptr search_protect_update( search_result& res, atomics::atomic<update_ptr> const& src ) const
         {
             update_ptr ret;
             update_ptr upd( src.load( memory_model::memory_order_relaxed ) );
@@ -1221,17 +1221,17 @@ namespace cds { namespace intrusive {
             tree_node * pLeaf = static_cast<tree_node *>( pOp->iInfo.pLeaf );
             if ( pOp->iInfo.bRightLeaf ) {
                 CDS_VERIFY( pOp->iInfo.pParent->m_pRight.compare_exchange_strong( pLeaf, static_cast<tree_node *>( pOp->iInfo.pNew ),
-                    memory_model::memory_order_relaxed, CDS_ATOMIC::memory_order_relaxed ));
+                    memory_model::memory_order_relaxed, atomics::memory_order_relaxed ));
             }
             else {
                 CDS_VERIFY( pOp->iInfo.pParent->m_pLeft.compare_exchange_strong( pLeaf, static_cast<tree_node *>( pOp->iInfo.pNew ),
-                    memory_model::memory_order_relaxed, CDS_ATOMIC::memory_order_relaxed ));
+                    memory_model::memory_order_relaxed, atomics::memory_order_relaxed ));
             }
 
             // Unflag parent
             update_ptr cur( pOp, update_desc::IFlag );
             CDS_VERIFY( pOp->iInfo.pParent->m_pUpdate.compare_exchange_strong( cur, pOp->iInfo.pParent->null_update_desc(),
-                memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+                memory_model::memory_order_release, atomics::memory_order_relaxed ));
         }
 
         bool check_delete_precondition( search_result& res ) const
@@ -1261,7 +1261,7 @@ namespace cds { namespace intrusive {
             update_ptr pUpdate( pOp->dInfo.pUpdateParent );
             update_ptr pMark( pOp, update_desc::Mark );
             if ( pOp->dInfo.pParent->m_pUpdate.compare_exchange_strong( pUpdate, pMark, // *
-                memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
             {
                 help_marked( pOp );
 
@@ -1280,7 +1280,7 @@ namespace cds { namespace intrusive {
                 // Undo grandparent dInfo
                 update_ptr pDel( pOp, update_desc::DFlag );
                 if ( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( pDel, pOp->dInfo.pGrandParent->null_update_desc(),
-                    memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                    memory_model::memory_order_release, atomics::memory_order_relaxed ))
                 {
                     retire_update_desc( pOp );
                 }
@@ -1288,7 +1288,7 @@ namespace cds { namespace intrusive {
             }
         }
 
-        tree_node * protect_sibling( typename gc::Guard& guard, CDS_ATOMIC::atomic<tree_node *>& sibling )
+        tree_node * protect_sibling( typename gc::Guard& guard, atomics::atomic<tree_node *>& sibling )
         {
             typename gc::Guard guardLeaf;
 
@@ -1317,16 +1317,16 @@ namespace cds { namespace intrusive {
 
             if ( pOp->dInfo.bRightParent ) {
                 CDS_VERIFY( pOp->dInfo.pGrandParent->m_pRight.compare_exchange_strong( pParent, pOpposite,
-                    memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+                    memory_model::memory_order_release, atomics::memory_order_relaxed ));
             }
             else {
                 CDS_VERIFY( pOp->dInfo.pGrandParent->m_pLeft.compare_exchange_strong( pParent, pOpposite,
-                    memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+                    memory_model::memory_order_release, atomics::memory_order_relaxed ));
             }
 
             update_ptr upd( pOp, update_desc::DFlag );
             CDS_VERIFY( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( upd, pOp->dInfo.pGrandParent->null_update_desc(),
-                memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+                memory_model::memory_order_release, atomics::memory_order_relaxed ));
         }
 
         bool try_insert( value_type& val, internal_node * pNewInternal, search_result& res )
@@ -1376,7 +1376,7 @@ namespace cds { namespace intrusive {
 
                 update_ptr updCur( res.updParent.ptr() );
                 if ( res.pParent->m_pUpdate.compare_exchange_strong( updCur, update_ptr( pOp, update_desc::IFlag ),
-                    memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                    memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                 {
                     // do insert
                     help_insert( pOp );
@@ -1421,7 +1421,7 @@ namespace cds { namespace intrusive {
 
                         update_ptr updGP( res.updGrandParent.ptr() );
                         if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
-                            memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                            memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                         {
                             if ( help_delete( pOp )) {
                                 // res.pLeaf is not deleted yet since it is guarded
@@ -1504,7 +1504,7 @@ namespace cds { namespace intrusive {
 
                         update_ptr updGP( res.updGrandParent.ptr() );
                         if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
-                            memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                            memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                         {
                             if ( help_delete( pOp ))
                                 break;
@@ -1552,7 +1552,7 @@ namespace cds { namespace intrusive {
 
                         update_ptr updGP( res.updGrandParent.ptr() );
                         if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
-                            memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                            memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                         {
                             if ( help_delete( pOp ))
                                 break;
index 3a8170912e7b05cd421488fecefb6e3c110b0b67..9e608ca412709a678f8130fdfbeaf915162d3134 100644 (file)
@@ -1390,8 +1390,8 @@ namespace cds { namespace intrusive {
 
         bool check_consistency( internal_node const * pRoot ) const
         {
-            tree_node * pLeft  = pRoot->m_pLeft.load( CDS_ATOMIC::memory_order_relaxed );
-            tree_node * pRight = pRoot->m_pRight.load( CDS_ATOMIC::memory_order_relaxed );
+            tree_node * pLeft  = pRoot->m_pLeft.load( atomics::memory_order_relaxed );
+            tree_node * pRight = pRoot->m_pRight.load( atomics::memory_order_relaxed );
             assert( pLeft );
             assert( pRight );
 
@@ -1440,16 +1440,16 @@ namespace cds { namespace intrusive {
             tree_node * pLeaf = static_cast<tree_node *>( pOp->iInfo.pLeaf );
             if ( pOp->iInfo.bRightLeaf ) {
                 pOp->iInfo.pParent->m_pRight.compare_exchange_strong( pLeaf, static_cast<tree_node *>( pOp->iInfo.pNew ),
-                    memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                    memory_model::memory_order_release, atomics::memory_order_relaxed );
             }
             else {
                 pOp->iInfo.pParent->m_pLeft.compare_exchange_strong( pLeaf, static_cast<tree_node *>( pOp->iInfo.pNew ),
-                    memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                    memory_model::memory_order_release, atomics::memory_order_relaxed );
             }
 
             update_ptr cur( pOp, update_desc::IFlag );
             pOp->iInfo.pParent->m_pUpdate.compare_exchange_strong( cur, pOp->iInfo.pParent->null_update_desc(),
-                      memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                      memory_model::memory_order_release, atomics::memory_order_relaxed );
         }
 
         bool check_delete_precondition( search_result& res )
@@ -1475,7 +1475,7 @@ namespace cds { namespace intrusive {
             update_ptr pUpdate( pOp->dInfo.pUpdateParent );
             update_ptr pMark( pOp, update_desc::Mark );
             if ( pOp->dInfo.pParent->m_pUpdate.compare_exchange_strong( pUpdate, pMark,
-                    memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                    memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
             {
                 help_marked( pOp );
                 retire_node( pOp->dInfo.pParent, rl );
@@ -1499,7 +1499,7 @@ namespace cds { namespace intrusive {
                 // Undo grandparent dInfo
                 update_ptr pDel( pOp, update_desc::DFlag );
                 if ( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( pDel, pOp->dInfo.pGrandParent->null_update_desc(),
-                    memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                    memory_model::memory_order_release, atomics::memory_order_relaxed ))
                 {
                     retire_update_desc( pOp, rl, false );
                 }
@@ -1517,19 +1517,19 @@ namespace cds { namespace intrusive {
                     pOp->dInfo.bRightLeaf
                         ? pOp->dInfo.pParent->m_pLeft.load( memory_model::memory_order_acquire )
                         : pOp->dInfo.pParent->m_pRight.load( memory_model::memory_order_acquire ),
-                    memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                    memory_model::memory_order_release, atomics::memory_order_relaxed );
             }
             else {
                 pOp->dInfo.pGrandParent->m_pLeft.compare_exchange_strong( p,
                     pOp->dInfo.bRightLeaf
                         ? pOp->dInfo.pParent->m_pLeft.load( memory_model::memory_order_acquire )
                         : pOp->dInfo.pParent->m_pRight.load( memory_model::memory_order_acquire ),
-                    memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                    memory_model::memory_order_release, atomics::memory_order_relaxed );
             }
 
             update_ptr upd( pOp, update_desc::DFlag );
             pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( upd, pOp->dInfo.pGrandParent->null_update_desc(),
-                memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                memory_model::memory_order_release, atomics::memory_order_relaxed );
         }
 
         template <typename KeyValue, typename Compare>
@@ -1722,7 +1722,7 @@ namespace cds { namespace intrusive {
 
                             update_ptr updGP( res.updGrandParent.ptr() );
                             if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
-                                memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                                memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                             {
                                 if ( help_delete( pOp, updRetire )) {
                                     // res.pLeaf is not deleted yet since RCU is blocked
@@ -1797,7 +1797,7 @@ namespace cds { namespace intrusive {
 
                             update_ptr updGP( res.updGrandParent.ptr() );
                             if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
-                                memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                                memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                             {
                                 if ( help_delete( pOp, updRetire )) {
                                     ptr = node_traits::to_value_ptr( res.pLeaf );
@@ -1860,7 +1860,7 @@ namespace cds { namespace intrusive {
 
                             update_ptr updGP( res.updGrandParent.ptr() );
                             if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
-                                memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                                memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                             {
                                 if ( help_delete( pOp, updRetire )) {
                                     result = node_traits::to_value_ptr( res.pLeaf );
@@ -1921,7 +1921,7 @@ namespace cds { namespace intrusive {
 
                             update_ptr updGP( res.updGrandParent.ptr() );
                             if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
-                                memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                                memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                             {
                                 if ( help_delete( pOp, updRetire )) {
                                     result = node_traits::to_value_ptr( res.pLeaf );
@@ -2047,7 +2047,7 @@ namespace cds { namespace intrusive {
 
                 update_ptr updCur( res.updParent.ptr() );
                 if ( res.pParent->m_pUpdate.compare_exchange_strong( updCur, update_ptr( pOp, update_desc::IFlag ),
-                    memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                    memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                 {
                     // do insert
                     help_insert( pOp );
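
Every hunk in this commit rewrites the failure ordering of a compare_exchange call. A minimal sketch of the idiom, using std::atomic (whose interface the cds atomics layer mirrors) and a hypothetical try_publish helper: a failed CAS publishes nothing, so it only needs relaxed ordering to refresh the expected value.

    #include <atomic>

    std::atomic<int*> g_ptr{ nullptr };

    // Hypothetical helper: publish pNew only if nothing is published yet.
    bool try_publish( int* pNew )
    {
        int* expected = nullptr;
        return g_ptr.compare_exchange_strong( expected, pNew,
            std::memory_order_release,    // success: pNew becomes visible
            std::memory_order_relaxed );  // failure: just reload 'expected'
    }
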
index f2ee2bb764f1e2bde2246c85e6fc4d942fea26dd..8c1cd61bb48ead6aca1b09067dc78c4ab6c6171e 100644 (file)
@@ -45,7 +45,7 @@ namespace cds { namespace intrusive {
             /// Checks if node is marked
             bool is_marked() const
             {
-                return m_pNext.load(CDS_ATOMIC::memory_order_relaxed).bits() != 0;
+                return m_pNext.load(atomics::memory_order_relaxed).bits() != 0;
             }
 
             /// Default ctor
@@ -177,7 +177,7 @@ namespace cds { namespace intrusive {
             */
             static void is_empty( node_type const * pNode )
             {
-                assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
+                assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr );
             }
         };
 
index 9d397ed025ab42d4725ba00bb563e7d2c26178ba..445c473610efafafd929b5a262a26c354e436d74 100644 (file)
@@ -26,7 +26,7 @@ namespace cds { namespace intrusive { namespace lazy_list {
         /// Checks if node is marked
         bool is_marked() const
         {
-            return m_pNext.load(CDS_ATOMIC::memory_order_relaxed).bits() != 0;
+            return m_pNext.load(atomics::memory_order_relaxed).bits() != 0;
         }
 
         node()
@@ -42,9 +42,9 @@ namespace cds { namespace intrusive { namespace lazy_list {
             while ( true ) {
                 marked_ptr pNextMarked( aGuards.protect( 0, m_pNext ));
                 node * pNext = pNextMarked.ptr();
-                if ( pNext != nullptr && pNext->m_bDeleted.load( CDS_ATOMIC::memory_order_acquire ) ) {
+                if ( pNext != nullptr && pNext->m_bDeleted.load( atomics::memory_order_acquire ) ) {
                     marked_ptr p = aGuards.protect( 1, pNext->m_pNext );
-                    m_pNext.compare_exchange_weak( pNextMarked, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+                    m_pNext.compare_exchange_weak( pNextMarked, p, atomics::memory_order_acquire, atomics::memory_order_relaxed );
                     continue;
                 }
                 else {
@@ -56,11 +56,11 @@ namespace cds { namespace intrusive { namespace lazy_list {
         virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent )
         {
             if ( bConcurrent ) {
-                marked_ptr pNext( m_pNext.load(CDS_ATOMIC::memory_order_relaxed));
-                do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+                marked_ptr pNext( m_pNext.load(atomics::memory_order_relaxed));
+                do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), atomics::memory_order_release, atomics::memory_order_relaxed ) );
             }
             else {
-                m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+                m_pNext.store( marked_ptr(), atomics::memory_order_relaxed );
             }
         }
     };
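
The terminate() pattern above recurs in the HRC-based nodes below: under concurrent teardown the link is cleared with a release CAS loop so a racing update is not lost, while single-threaded teardown needs only a relaxed store. A sketch under those assumptions (clear_link is a hypothetical name):

    #include <atomic>

    // bConcurrent means other threads may still update the link.
    void clear_link( std::atomic<void*>& link, bool bConcurrent )
    {
        if ( bConcurrent ) {
            void* cur = link.load( std::memory_order_relaxed );
            // CAS loop: 'cur' is refreshed on each failure until we win.
            while ( !link.compare_exchange_weak( cur, nullptr,
                        std::memory_order_release, std::memory_order_relaxed ))
            {}
        }
        else
            link.store( nullptr, std::memory_order_relaxed );
    }
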
index ca43491ff9b8afd4347426795be00ff6ed084050..70be1c4c2348dfdbbd6a9df9041f39e5d4b8d0d7 100644 (file)
@@ -20,7 +20,7 @@ namespace cds { namespace intrusive {
             typedef Lock        lock_type   ;   ///< Lock type
             typedef Tag         tag         ;   ///< tag
 
-            CDS_ATOMIC::atomic<node *> m_pNext ; ///< pointer to the next node in the list
+            atomics::atomic<node *> m_pNext ; ///< pointer to the next node in the list
             mutable lock_type   m_Lock  ; ///< Node lock
 
             node()
index 7e15553dba7d1aaf9b8d9d7d14f3d44595ac90b2..027ff733370ad1864c397eb9840d2becb5861410 100644 (file)
@@ -23,7 +23,7 @@ namespace cds { namespace intrusive {
             typedef Tag         tag         ;   ///< tag
 
             typedef cds::details::marked_ptr<node, 1>   marked_ptr          ;   ///< marked pointer
-            typedef CDS_ATOMIC::atomic<marked_ptr>      atomic_marked_ptr   ;   ///< atomic marked pointer specific for GC
+            typedef atomics::atomic<marked_ptr>      atomic_marked_ptr   ;   ///< atomic marked pointer specific for GC
 
             atomic_marked_ptr   m_pNext ; ///< pointer to the next node in the list
             mutable lock_type   m_Lock  ; ///< Node lock
@@ -31,7 +31,7 @@ namespace cds { namespace intrusive {
             /// Checks if node is marked
             bool is_marked() const
             {
-                return m_pNext.load(CDS_ATOMIC::memory_order_relaxed).bits() != 0;
+                return m_pNext.load(atomics::memory_order_relaxed).bits() != 0;
             }
 
             /// Default ctor
@@ -42,7 +42,7 @@ namespace cds { namespace intrusive {
             /// Clears internal fields
             void clear()
             {
-                m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release );
+                m_pNext.store( marked_ptr(), atomics::memory_order_release );
             }
         };
     }   // namespace lazy_list
index d9d2af4af2dead47e94341f6d60b948488120e39..8de82e1ec7f780c99bea88e43d9ebaa2c3f2ddf7 100644 (file)
@@ -113,14 +113,14 @@ namespace cds { namespace intrusive {
             //@cond
             node()
             {
-                m_Links.store( anchor(0,0), CDS_ATOMIC::memory_order_release );
+                m_Links.store( anchor(0,0), atomics::memory_order_release );
             }
 
             explicit node( anchor const& a )
                 : m_Links()
                 , m_nIndex(0)
             {
-                m_Links.store( a, CDS_ATOMIC::memory_order_release );
+                m_Links.store( a, atomics::memory_order_release );
             }
             //@endcond
         };
@@ -240,7 +240,7 @@ namespace cds { namespace intrusive {
             static void is_empty( const node_type * pNode )
             {
 #           ifdef _DEBUG
-                anchor a = pNode->m_Links.load(CDS_ATOMIC::memory_order_relaxed);
+                anchor a = pNode->m_Links.load(atomics::memory_order_relaxed);
                 assert( a.idxLeft == 0 && a.idxRight == 0 );
 #           endif
             }
@@ -490,7 +490,7 @@ namespace cds { namespace intrusive {
 #       endif
 
             mapper_type     m_set;
-            CDS_ATOMIC::atomic<unsigned int>    m_nLastIndex;
+            atomics::atomic<unsigned int>    m_nLastIndex;
 
         public:
 
@@ -795,7 +795,7 @@ namespace cds { namespace intrusive {
             :m_Anchor()
             ,m_Mapper( 4096, 4 )
         {
-            m_Anchor.store( anchor_type( c_nEmptyIndex, c_nEmptyIndex ), CDS_ATOMIC::memory_order_release );
+            m_Anchor.store( anchor_type( c_nEmptyIndex, c_nEmptyIndex ), atomics::memory_order_release );
 
             // GC and node_type::gc must be the same
             static_assert(( std::is_same<gc, typename node_type::gc>::value ), "GC and node_type::gc must be the same");
@@ -814,7 +814,7 @@ namespace cds { namespace intrusive {
             :m_Anchor()
             ,m_Mapper( nMaxItemCount, nLoadFactor )
         {
-            m_Anchor.store( anchor_type( c_nEmptyIndex, c_nEmptyIndex ), CDS_ATOMIC::memory_order_release );
+            m_Anchor.store( anchor_type( c_nEmptyIndex, c_nEmptyIndex ), atomics::memory_order_release );
 
             // GC and node_type::gc must be the same
             static_assert(( std::is_same<gc, typename node_type::gc>::value ), "GC and node_type::gc must be the same");
index 39ccaa55b94fc62f00dcb2213abd583ec928e86e..bb0bf8ea73a22f53638b0018812948ca5ae49d49 100644 (file)
@@ -129,7 +129,7 @@ namespace cds { namespace intrusive {
             */
             static void is_empty( const node_type * pNode )
             {
-                assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
+                assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr );
             }
         };
 
index f3a783fb2e8680362f3194de007b327067680c4f..fdcf37d5f90f9d4ac3687c8e4318273d178eecf2 100644 (file)
@@ -32,9 +32,9 @@ namespace cds { namespace intrusive { namespace michael_list {
             while ( true ) {
                 marked_ptr pNextMarked( aGuards.protect( 0, m_pNext ));
                 node * pNext = pNextMarked.ptr();
-                if ( pNext && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) {
+                if ( pNext && pNext->m_bDeleted.load(atomics::memory_order_acquire) ) {
                     marked_ptr p = aGuards.protect( 1, pNext->m_pNext );
-                    m_pNext.compare_exchange_strong( pNextMarked, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+                    m_pNext.compare_exchange_strong( pNextMarked, p, atomics::memory_order_acquire, atomics::memory_order_relaxed );
                     continue;
                 }
                 else {
@@ -46,11 +46,11 @@ namespace cds { namespace intrusive { namespace michael_list {
         virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent )
         {
             if ( bConcurrent ) {
-                marked_ptr pNext = m_pNext.load(CDS_ATOMIC::memory_order_acquire);
-                do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+                marked_ptr pNext = m_pNext.load(atomics::memory_order_acquire);
+                do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), atomics::memory_order_release, atomics::memory_order_relaxed ) );
             }
             else {
-                m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+                m_pNext.store( marked_ptr(), atomics::memory_order_relaxed );
             }
         }
     };
index 4fd358bc559bc9a7dc55f650013f62d1ac83a0d1..3fbd853ff676d1d459fead52ec1a189a61bc6e4f 100644 (file)
@@ -271,7 +271,7 @@ namespace cds { namespace intrusive {
 
             marked_node_ptr cur(pos.pCur);
             pNode->m_pNext.store( cur, memory_model::memory_order_relaxed );
-            return pos.pPrev->compare_exchange_strong( cur, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+            return pos.pPrev->compare_exchange_strong( cur, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed );
         }
 
         bool unlink_node( position& pos )
@@ -281,11 +281,11 @@ namespace cds { namespace intrusive {
 
             // Mark the node (logical deleting)
             marked_node_ptr next(pos.pNext, 0);
-            if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+            if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
                // physical deletion may be performed by a search function if it detects that the node is logically deleted (marked);
                // the CAS may succeed either here or in another thread that is traversing the list
                 marked_node_ptr cur(pos.pCur);
-                if ( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                if ( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
                     retire_node( pos.pCur );
                 return true;
             }
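
unlink_node() above is the classic Michael-list two-phase delete: set the mark bit in the victim's next pointer (logical deletion), then swing the predecessor past it (physical unlink); a traversal that observes the mark may finish the unlink instead. A simplified sketch with a hypothetical Node type, packing the mark into bit 0 of a uintptr_t:

    #include <atomic>
    #include <cstdint>

    struct Node {
        std::atomic<std::uintptr_t> next{ 0 };  // bit 0 = logically deleted
    };

    static std::uintptr_t marked( Node* p, std::uintptr_t bit )
    {
        return reinterpret_cast<std::uintptr_t>( p ) | bit;
    }

    bool unlink( Node* pPrev, Node* pCur, Node* pNext )
    {
        // Phase 1: mark pCur->next (logical deletion)
        std::uintptr_t clean = marked( pNext, 0 );
        if ( !pCur->next.compare_exchange_strong( clean, marked( pNext, 1 ),
                std::memory_order_release, std::memory_order_relaxed ))
            return false;
        // Phase 2: physical unlink; on failure a concurrent search saw
        // the mark and will (or already did) unlink pCur itself.
        std::uintptr_t cur = marked( pCur, 0 );
        pPrev->next.compare_exchange_strong( cur, marked( pNext, 0 ),
            std::memory_order_release, std::memory_order_relaxed );
        return true;
    }
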
@@ -1143,7 +1143,7 @@ try_again:
                 if ( pNext.bits() == 1 ) {
                     // pCur marked i.e. logically deleted. Help the erase/unlink function to unlink pCur node
                     marked_node_ptr cur( pCur.ptr());
-                    if ( pPrev->compare_exchange_strong( cur, marked_node_ptr( pNext.ptr() ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+                    if ( pPrev->compare_exchange_strong( cur, marked_node_ptr( pNext.ptr() ), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
                         retire_node( pCur.ptr() );
                     }
                     else {
index 67952fba5ff74c9fa42a1fa7c9ce16d1bf6072ea..e1054a6ed10bd7a718b9929f005f3b1b98d15d04 100644 (file)
@@ -20,7 +20,7 @@ namespace cds { namespace intrusive {
             typedef gc::nogc        gc  ;   ///< Garbage collector
             typedef Tag             tag ;   ///< tag
 
-            typedef CDS_ATOMIC::atomic< node * >   atomic_ptr  ;    ///< atomic marked pointer
+            typedef atomics::atomic< node * >   atomic_ptr  ;    ///< atomic pointer
 
             atomic_ptr m_pNext ; ///< pointer to the next node in the container
 
@@ -121,7 +121,7 @@ namespace cds { namespace intrusive {
             link_checker::is_empty( pNode );
 
             pNode->m_pNext.store( pos.pCur, memory_model::memory_order_relaxed );
-            return pos.pPrev->compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+            return pos.pPrev->compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, atomics::memory_order_relaxed );
         }
         //@endcond
 
index c19953e4d26aaf31df975d5472fac277229e46ec..0239c191def339c7c96f35122bcbba1d1b403cca 100644 (file)
@@ -165,19 +165,19 @@ namespace cds { namespace intrusive {
 
             marked_node_ptr p( pos.pCur );
             pNode->m_pNext.store( p, memory_model::memory_order_relaxed );
-            return pos.pPrev->compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+            return pos.pPrev->compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed );
         }
 
         bool unlink_node( position& pos )
         {
             // Mark the node (logical deleting)
             marked_node_ptr next(pos.pNext, 0);
-            if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) {
+            if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
                 marked_node_ptr cur(pos.pCur);
-                if ( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                if ( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
                     return true;
                 next |= 1;
-                CDS_VERIFY( pos.pCur->m_pNext.compare_exchange_strong( next, next ^ 1, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+                CDS_VERIFY( pos.pCur->m_pNext.compare_exchange_strong( next, next ^ 1, memory_model::memory_order_release, atomics::memory_order_relaxed ));
             }
             return false;
         }
index 0ccbbdc3f0c351e5066d52befa2f69829b07b195..fe9ad8c548888eccd4ac63c8fda148dd935402d2 100644 (file)
@@ -126,10 +126,10 @@ namespace cds { namespace intrusive {
                 if ( pNext == nullptr )
                     return false    ;    // queue is empty
 
-                if ( base_class::m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+                if ( base_class::m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
                     node_type * t = base_class::m_pTail.load(memory_model::memory_order_acquire);
                     if ( h == t )
-                        base_class::m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                        base_class::m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed );
                     break;
                 }
 
index 67f51d4dcff339be5d092db25c0739d045cb5331..7a8196130cb08af414c0b2a821ce336622971e44 100644 (file)
@@ -204,12 +204,12 @@ namespace cds { namespace intrusive {
                 node_type * t = m_pTail.load(memory_model::memory_order_acquire);
                 if ( h == t ) {
                    // Tail is lagging: help the in-flight enqueue to advance it
-                    m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                    m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed );
                     m_Stat.onBadTail();
                     continue;
                 }
 
-                if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed ))
                     break;
 
                 m_Stat.onDequeueRace();
@@ -334,13 +334,13 @@ namespace cds { namespace intrusive {
                 node_type * pNext = t->m_pNext.load(memory_model::memory_order_acquire);
                 if ( pNext != nullptr ) {
                     // Tail is misplaced, advance it
-                    m_pTail.compare_exchange_weak( t, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                    m_pTail.compare_exchange_weak( t, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed );
                     m_Stat.onBadTail();
                     continue;
                 }
 
                 node_type * tmp = nullptr;
-                if ( t->m_pNext.compare_exchange_strong( tmp, pNew, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                if ( t->m_pNext.compare_exchange_strong( tmp, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed ))
                     break;
 
                 m_Stat.onEnqueueRace();
@@ -349,7 +349,7 @@ namespace cds { namespace intrusive {
             ++m_ItemCounter;
             m_Stat.onEnqueue();
 
-            if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ))
+            if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ))
                 m_Stat.onAdvanceTailFailed();
             return true;
         }
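
Both loops above rely on Michael–Scott helping: when the tail pointer lags behind the real last node, any thread may advance it, and a failed CAS simply means another thread already helped, which is why the failure ordering can be relaxed. A sketch of the helping step (QNode and help_advance_tail are hypothetical names):

    #include <atomic>

    struct QNode { std::atomic<QNode*> next{ nullptr }; };

    // Advance 'tail' from the lagging node 't' to its successor 'tNext'.
    void help_advance_tail( std::atomic<QNode*>& tail, QNode* t, QNode* tNext )
    {
        tail.compare_exchange_strong( t, tNext,
            std::memory_order_release, std::memory_order_relaxed );
        // Result ignored: failure means another thread advanced tail first.
    }
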
index eda9ce2092f43e05c18260131c83cebf0860c320..43421108e8b6079056c446f5355d0bb1076ab313 100644 (file)
@@ -117,8 +117,8 @@ namespace cds { namespace intrusive {
             */
             static void is_empty( const node_type * pNode )
             {
-                assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
-                assert( pNode->m_pPrev.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
+                assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr );
+                assert( pNode->m_pPrev.load( atomics::memory_order_relaxed ) == nullptr );
             }
         };
 
@@ -408,7 +408,7 @@ namespace cds { namespace intrusive {
                             fix_list( pTail, pHead );
                             continue;
                         }
-                        if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+                        if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
                             // dequeue success
                             break;
                         }
@@ -513,7 +513,7 @@ namespace cds { namespace intrusive {
             node_type * pTail = guards.protect( 0, m_pTail, node_to_value() )  ;   // Read the tail
             while( true ) {
                 pNew->m_pNext.store( pTail, memory_model::memory_order_release );
-                if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) {     // Try to CAS the tail
+                if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed ) ) {     // Try to CAS the tail
                     pTail->m_pPrev.store( pNew, memory_model::memory_order_release )     ;           // Success, write prev
                     ++m_ItemCounter;
                     m_Stat.onEnqueue();
index 05ff51c48697e3cba2839b1736dc5386efa3cfe8..1b61e12b6f2a1b4b01b0737101afa0170b349b3a 100644 (file)
@@ -209,14 +209,14 @@ namespace cds { namespace intrusive {
         // Segment
         struct segment: public boost::intrusive::slist_base_hook<>
         {
-            CDS_ATOMIC::atomic< cell > *    cells;  // Cell array of size \ref m_nQuasiFactor
+            atomics::atomic< cell > *    cells;  // Cell array of size \ref m_nQuasiFactor
             size_t   version;   // version tag (ABA prevention tag)
            // the cell array is placed here in one contiguous memory block
 
             // Initializes the segment
             segment( size_t nCellCount )
                 // MSVC warning C4355: 'this': used in base member initializer list
-                : cells( reinterpret_cast< CDS_ATOMIC::atomic< cell > * >( this + 1 ))
+                : cells( reinterpret_cast< atomics::atomic< cell > * >( this + 1 ))
                 , version( 0 )
             {
                 init( nCellCount );
@@ -224,17 +224,17 @@ namespace cds { namespace intrusive {
 
             void init( size_t nCellCount )
             {
-                CDS_ATOMIC::atomic< cell > * pLastCell = cells + nCellCount;
-                for ( CDS_ATOMIC::atomic< cell > * pCell = cells; pCell < pLastCell; ++pCell )
-                    pCell->store( cell(), CDS_ATOMIC::memory_order_relaxed );
-                CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release );
+                atomics::atomic< cell > * pLastCell = cells + nCellCount;
+                for ( atomics::atomic< cell > * pCell = cells; pCell < pLastCell; ++pCell )
+                    pCell->store( cell(), atomics::memory_order_relaxed );
+                atomics::atomic_thread_fence( memory_model::memory_order_release );
             }
 
         private:
             segment(); //=delete
         };
 
-        typedef typename opt::details::alignment_setter< CDS_ATOMIC::atomic<segment *>, options::alignment >::type aligned_segment_ptr;
+        typedef typename opt::details::alignment_setter< atomics::atomic<segment *>, options::alignment >::type aligned_segment_ptr;
         //@endcond
 
     protected:
@@ -300,8 +300,8 @@ namespace cds { namespace intrusive {
             bool populated( segment const& s ) const
             {
                 // The lock should be held
-                CDS_ATOMIC::atomic< cell > const * pLastCell = s.cells + quasi_factor();
-                for ( CDS_ATOMIC::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
+                atomics::atomic< cell > const * pLastCell = s.cells + quasi_factor();
+                for ( atomics::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
                     if ( !pCell->load( memory_model::memory_order_relaxed ).all() )
                         return false;
                 }
@@ -310,8 +310,8 @@ namespace cds { namespace intrusive {
             bool exhausted( segment const& s ) const
             {
                 // The lock should be held
-                CDS_ATOMIC::atomic< cell > const * pLastCell = s.cells + quasi_factor();
-                for ( CDS_ATOMIC::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
+                atomics::atomic< cell > const * pLastCell = s.cells + quasi_factor();
+                for ( atomics::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
                     if ( !pCell->load( memory_model::memory_order_relaxed ).bits() )
                         return false;
                 }
@@ -474,7 +474,7 @@ namespace cds { namespace intrusive {
                         // Empty cell found, try to enqueue here
                         cell nullCell;
                         if ( pTailSegment->cells[i].compare_exchange_strong( nullCell, cell( &val ),
-                            memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                            memory_model::memory_order_release, atomics::memory_order_relaxed ))
                         {
                             // Ok to push item
                             m_Stat.onPush();
@@ -641,7 +641,7 @@ namespace cds { namespace intrusive {
                         if ( !item.bits() ) {
                             // Try to mark the cell as deleted
                             if ( pHeadSegment->cells[i].compare_exchange_strong( item, item | 1,
-                                memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                                memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                             {
                                 --m_ItemCounter;
                                 m_Stat.onPop();
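
The push path above scans the tail segment's cell array for an empty cell and claims it with a release CAS; losing the race just moves the scan to the next cell, so the failure ordering is relaxed. A simplified sketch, ignoring the version tags (try_push is a hypothetical free function over a raw cell array):

    #include <atomic>
    #include <cstddef>

    // Claim the first empty cell in 'cells' for pVal.
    bool try_push( std::atomic<void*>* cells, std::size_t nCells, void* pVal )
    {
        for ( std::size_t i = 0; i < nCells; ++i ) {
            void* empty = nullptr;
            if ( cells[i].compare_exchange_strong( empty, pVal,
                    std::memory_order_release, std::memory_order_relaxed ))
                return true;   // cell claimed
            // Occupied: 'empty' now holds the occupant; try the next cell.
        }
        return false;  // segment full; the caller allocates a new segment
    }
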
index 324409abb8916eace35d83e1596955bb712f2145..4f557f33666869f0dc901c50194274a64baa08ba 100644 (file)
@@ -65,9 +65,9 @@ namespace cds { namespace intrusive {
 
                 while ( true ) {
                     node * pNext = aGuards.protect( 0, m_pNext );
-                    if ( pNext && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) {
+                    if ( pNext && pNext->m_bDeleted.load(atomics::memory_order_acquire) ) {
                         node * p = aGuards.protect( 1, pNext->m_pNext );
-                        m_pNext.compare_exchange_strong( pNext, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+                        m_pNext.compare_exchange_strong( pNext, p, atomics::memory_order_acquire, atomics::memory_order_relaxed );
                         continue;
                     }
                     else {
@@ -79,11 +79,11 @@ namespace cds { namespace intrusive {
             virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent )
             {
                 if ( bConcurrent ) {
-                    node * pNext = m_pNext.load(CDS_ATOMIC::memory_order_relaxed);
-                    do {} while ( !m_pNext.compare_exchange_weak( pNext, nullptr, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+                    node * pNext = m_pNext.load(atomics::memory_order_relaxed);
+                    do {} while ( !m_pNext.compare_exchange_weak( pNext, nullptr, atomics::memory_order_release, atomics::memory_order_relaxed ) );
                 }
                 else {
-                    m_pNext.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+                    m_pNext.store( nullptr, atomics::memory_order_relaxed );
                 }
             }
         };
@@ -166,7 +166,7 @@ namespace cds { namespace intrusive {
             */
             static void is_empty( const node_type * pNode )
             {
-                assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
+                assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr );
             }
         };
 
index 9e048e6e376898630e16045e3e4a3b2b49035dcb..57a5a85ffe05f8ca3558653b5034de7a1e40e134 100644 (file)
@@ -117,7 +117,7 @@ namespace cds { namespace intrusive {
             void clear()
             {
                 assert( m_arrNext == nullptr );
-                m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release );
+                m_pNext.store( marked_ptr(), atomics::memory_order_release );
             }
 
             //@cond
@@ -248,7 +248,7 @@ namespace cds { namespace intrusive {
         */
         class xorshift {
             //@cond
-            CDS_ATOMIC::atomic<unsigned int>    m_nSeed;
+            atomics::atomic<unsigned int>    m_nSeed;
             //@endcond
         public:
             /// The upper bound of generator's return value. The generator produces random number in range <tt>[0..c_nUpperBound)</tt>
@@ -257,7 +257,7 @@ namespace cds { namespace intrusive {
             /// Initializes the generator instance
             xorshift()
             {
-                m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), CDS_ATOMIC::memory_order_relaxed );
+                m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), atomics::memory_order_relaxed );
             }
 
             /// Main generator function
@@ -276,11 +276,11 @@ namespace cds { namespace intrusive {
                     return level;
                 }
                 */
-                unsigned int x = m_nSeed.load( CDS_ATOMIC::memory_order_relaxed );
+                unsigned int x = m_nSeed.load( atomics::memory_order_relaxed );
                 x ^= x << 13;
                 x ^= x >> 17;
                 x ^= x << 5;
-                m_nSeed.store( x, CDS_ATOMIC::memory_order_relaxed );
+                m_nSeed.store( x, atomics::memory_order_relaxed );
                 unsigned int nLevel = ((x & 0x00000001) != 0) ? 0 : cds::bitop::LSB( (~(x >> 1)) & 0x7FFFFFFF );
                 assert( nLevel < c_nUpperBound );
                 return nLevel;
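
The generator above scrambles a per-list seed with the classic 13/17/5 xorshift and then maps the word to a tower height so that level k is drawn with probability about 2^-(k+1), the geometric distribution a skip-list needs. A standalone sketch with a simplified bit-to-level mapping (the library maps bits via cds::bitop::LSB instead):

    #include <atomic>

    unsigned int random_level( std::atomic<unsigned int>& seed, unsigned int upperBound )
    {
        unsigned int x = seed.load( std::memory_order_relaxed );
        x ^= x << 13;   // xorshift scramble
        x ^= x >> 17;
        x ^= x << 5;
        seed.store( x, std::memory_order_relaxed );

        // Count trailing one-bits: P(level == k) ~ 2^-(k+1)
        unsigned int level = 0;
        while ( (x & 1) != 0 && level + 1 < upperBound ) {
            ++level;
            x >>= 1;
        }
        return level;
    }
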
@@ -298,7 +298,7 @@ namespace cds { namespace intrusive {
         class turbo_pascal
         {
             //@cond
-            CDS_ATOMIC::atomic<unsigned int>    m_nSeed;
+            atomics::atomic<unsigned int>    m_nSeed;
             //@endcond
         public:
             /// The upper bound of generator's return value. The generator produces random number in range <tt>[0..c_nUpperBound)</tt>
@@ -307,7 +307,7 @@ namespace cds { namespace intrusive {
             /// Initializes the generator instance
             turbo_pascal()
             {
-                m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), CDS_ATOMIC::memory_order_relaxed );
+                m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), atomics::memory_order_relaxed );
             }
 
             /// Main generator function
@@ -330,8 +330,8 @@ namespace cds { namespace intrusive {
                     upper 16 bits) so we traverse from highest bit down (i.e., test
                     sign), thus hardly ever use lower bits.
                 */
-                unsigned int x = m_nSeed.load( CDS_ATOMIC::memory_order_relaxed ) * 134775813 + 1;
-                m_nSeed.store( x, CDS_ATOMIC::memory_order_relaxed );
+                unsigned int x = m_nSeed.load( atomics::memory_order_relaxed ) * 134775813 + 1;
+                m_nSeed.store( x, atomics::memory_order_relaxed );
                 unsigned int nLevel = ( x & 0x80000000 ) ? (31 - cds::bitop::MSBnz( (x & 0x7FFFFFFF) | 1 )) : 0;
                 assert( nLevel < c_nUpperBound );
                 return nLevel;
@@ -588,7 +588,7 @@ namespace cds { namespace intrusive {
                 head_node( unsigned int nHeight )
                 {
                     for ( size_t i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i )
-                        m_Tower[i].store( typename node_type::marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+                        m_Tower[i].store( typename node_type::marked_ptr(), atomics::memory_order_relaxed );
 
                     node_type::make_tower( nHeight, m_Tower );
                 }
index a283915dadbcf9803f393415711144d5f8cd11df..67bcff6471c6e135ee4c4e4004c4e1fbb0be6a74 100644 (file)
@@ -40,7 +40,7 @@ namespace cds { namespace intrusive { namespace skip_list {
         ~node()
         {
             release_tower();
-            m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+            m_pNext.store( marked_ptr(), atomics::memory_order_relaxed );
         }
 
         /// Constructs a node of height \p nHeight
@@ -63,7 +63,7 @@ namespace cds { namespace intrusive { namespace skip_list {
                 m_arrNext = nullptr;
                 m_nHeight = 1;
                 for ( unsigned int i = 0; i < nHeight; ++i )
-                    pTower[i].store( marked_ptr(), CDS_ATOMIC::memory_order_release );
+                    pTower[i].store( marked_ptr(), atomics::memory_order_release );
             }
             return pTower;
         }
@@ -120,9 +120,9 @@ namespace cds { namespace intrusive { namespace skip_list {
                 while ( true ) {
                     marked_ptr pNextMarked( aGuards.protect( 0, next(i) ));
                     node * pNext = pNextMarked.ptr();
-                    if ( pNext && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) {
+                    if ( pNext && pNext->m_bDeleted.load(atomics::memory_order_acquire) ) {
                         marked_ptr p = aGuards.protect( 1, pNext->next(i) );
-                        next(i).compare_exchange_strong( pNextMarked, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+                        next(i).compare_exchange_strong( pNextMarked, p, atomics::memory_order_acquire, atomics::memory_order_relaxed );
                         continue;
                     }
                     else {
@@ -137,13 +137,13 @@ namespace cds { namespace intrusive { namespace skip_list {
             unsigned int const nHeight = height();
             if ( bConcurrent ) {
                 for (unsigned int i = 0; i < nHeight; ++i ) {
-                    marked_ptr pNext = next(i).load(CDS_ATOMIC::memory_order_relaxed);
-                    while ( !next(i).compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+                    marked_ptr pNext = next(i).load(atomics::memory_order_relaxed);
+                    while ( !next(i).compare_exchange_weak( pNext, marked_ptr(), atomics::memory_order_release, atomics::memory_order_relaxed ) );
                 }
             }
             else {
                 for (unsigned int i = 0; i < nHeight; ++i )
-                    next(i).store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+                    next(i).store( marked_ptr(), atomics::memory_order_relaxed );
             }
         }
     };
@@ -173,7 +173,7 @@ namespace cds { namespace intrusive { namespace skip_list {
                 : m_pHead( new head_tower() )
             {
                 for ( size_t i = 0; i < sizeof(m_pHead->m_Tower) / sizeof(m_pHead->m_Tower[0]); ++i )
-                    m_pHead->m_Tower[i].store( typename node_type::marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+                    m_pHead->m_Tower[i].store( typename node_type::marked_ptr(), atomics::memory_order_relaxed );
 
                 m_pHead->make_tower( nHeight, m_pHead->m_Tower );
             }
index b19fb52f8d3feb993816ffa3cc90d9b4874ac579..b58ba19cc22b98988e8b1f89e1aa80783b73edc6 100644 (file)
@@ -48,7 +48,7 @@ namespace cds { namespace intrusive {
                 back_off bkoff;
 
                 for (;;) {
-                    if ( m_pNode->next( m_pNode->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) {
+                    if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
                        // Current node is marked as deleted, so its next pointer can point to anything.
                        // In this case we interrupt the iteration and return the end() iterator.
                         *this = iterator();
@@ -62,7 +62,7 @@ namespace cds { namespace intrusive {
                         bkoff();
                         continue;
                     }
-                    else if ( pp && pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_relaxed ).bits() ) {
+                    else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) {
                         // p is marked as deleted. Spin waiting for physical removal
                         bkoff();
                         continue;
@@ -89,7 +89,7 @@ namespace cds { namespace intrusive {
 
                     node_type * pp = p.ptr();
                     // Logically deleted node is marked from highest level
-                    if ( !pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) {
+                    if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
                         m_pNode = pp;
                         break;
                     }
@@ -481,7 +481,7 @@ namespace cds { namespace intrusive {
 
         item_counter                m_ItemCounter       ;   ///< item counter
         random_level_generator      m_RandomLevelGen    ;   ///< random level generator instance
-        CDS_ATOMIC::atomic<unsigned int>    m_nHeight   ;   ///< estimated high level
+        atomics::atomic<unsigned int>    m_nHeight   ;   ///< estimated highest level
         mutable stat                m_Stat              ;   ///< internal statistics
 
     protected:
@@ -550,7 +550,7 @@ namespace cds { namespace intrusive {
                         // pCur is marked, i.e. logically deleted.
                         marked_node_ptr p( pCur.ptr() );
                         if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
-                            memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                            memory_model::memory_order_release, atomics::memory_order_relaxed ))
                         {
                             if ( nLevel == 0 ) {
                                 gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node );
@@ -618,7 +618,7 @@ namespace cds { namespace intrusive {
                         // pCur is marked, i.e. logically deleted.
                         marked_node_ptr p( pCur.ptr() );
                         if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
-                            memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                            memory_model::memory_order_release, atomics::memory_order_relaxed ))
                         {
                             if ( nLevel == 0 )
                                 gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node );
@@ -672,7 +672,7 @@ namespace cds { namespace intrusive {
                         // pCur is marked, i.e. logically deleted.
                         marked_node_ptr p( pCur.ptr() );
                         if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
-                            memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                            memory_model::memory_order_release, atomics::memory_order_relaxed ))
                         {
                             if ( nLevel == 0 )
                                 gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node );
@@ -708,7 +708,7 @@ namespace cds { namespace intrusive {
             {
                 marked_node_ptr p( pos.pSucc[0] );
                 pNode->next( 0 ).store( p, memory_model::memory_order_release );
-                if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) {
+                if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) {
                     return false;
                 }
                 cds::unref( f )( val );
@@ -718,7 +718,7 @@ namespace cds { namespace intrusive {
                 marked_node_ptr p;
                 while ( true ) {
                     marked_node_ptr q( pos.pSucc[ nLevel ]);
-                    if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+                    if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
                         // pNode has been marked as removed while we are inserting it
                         // Stop inserting
                         assert( p.bits() );
@@ -726,7 +726,7 @@ namespace cds { namespace intrusive {
                         return true;
                     }
                     p = q;
-                    if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) )
+                    if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) )
                         break;
 
                     // Renew insert position
@@ -754,7 +754,7 @@ namespace cds { namespace intrusive {
                 while ( true ) {
                     pSucc = gSucc.protect( pDel->next(nLevel), gc_protect );
                     if ( pSucc.bits() || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1,
-                         memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                         memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                     {
                         break;
                     }
@@ -765,7 +765,7 @@ namespace cds { namespace intrusive {
                 pSucc = gSucc.protect( pDel->next(0), gc_protect );
                 marked_node_ptr p( pSucc.ptr() );
                 if ( pDel->next(0).compare_exchange_strong( p, marked_node_ptr(p.ptr(), 1),
-                     memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                     memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                 {
                     cds::unref(f)( *node_traits::to_value_ptr( pDel ));
 
@@ -775,7 +775,7 @@ namespace cds { namespace intrusive {
                     for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
                         pSucc = gSucc.protect( pDel->next(nLevel), gc_protect );
                         if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( p, marked_node_ptr(pSucc.ptr()),
-                            memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed) )
+                            memory_model::memory_order_release, atomics::memory_order_relaxed) )
                         {
                             // Make slow erase
                             find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false );
@@ -1036,7 +1036,7 @@ namespace cds { namespace intrusive {
         {
             unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
             if ( nCur < nHeight )
-                m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
         }
         //@endcond
 
@@ -1055,7 +1055,7 @@ namespace cds { namespace intrusive {
             gc::check_available_guards( c_nHazardPtrCount );
 
             // Barrier for head node
-            CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release );
+            atomics::atomic_thread_fence( memory_model::memory_order_release );
         }
 
         /// Clears and destructs the skip-list
index b5e6c97d00c60270c750e51c4d9bcfe4cb145945..3062062b5d792c7c930f393fe34ceb576cc142d1 100644 (file)
@@ -22,7 +22,7 @@ namespace cds { namespace intrusive {
             typedef cds::gc::nogc   gc          ;   ///< Garbage collector
             typedef Tag             tag         ;   ///< tag
 
-            typedef CDS_ATOMIC::atomic<node * > atomic_ptr;
+            typedef atomics::atomic<node * > atomic_ptr;
             typedef atomic_ptr                  tower_item_type;
 
         protected:
@@ -103,12 +103,12 @@ namespace cds { namespace intrusive {
             void clear()
             {
                 assert( m_arrNext == nullptr );
-                m_pNext.store( nullptr, CDS_ATOMIC::memory_order_release );
+                m_pNext.store( nullptr, atomics::memory_order_release );
             }
 
             bool is_cleared() const
             {
-                return m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr
+                return m_pNext.load( atomics::memory_order_relaxed ) == nullptr
                     && m_arrNext == nullptr
                     && m_nHeight <= 1
 ;
@@ -137,7 +137,7 @@ namespace cds { namespace intrusive {
 
         public: // for internal use only!!!
             iterator( node_type& refHead )
-                : m_pNode( refHead[0].load( CDS_ATOMIC::memory_order_relaxed ) )
+                : m_pNode( refHead[0].load( atomics::memory_order_relaxed ) )
             {}
 
             static iterator from_node( node_type * pNode )
@@ -176,7 +176,7 @@ namespace cds { namespace intrusive {
             iterator& operator ++()
             {
                 if ( m_pNode )
-                    m_pNode = m_pNode->next(0).load( CDS_ATOMIC::memory_order_relaxed );
+                    m_pNode = m_pNode->next(0).load( atomics::memory_order_relaxed );
                 return *this;
             }
 
@@ -443,7 +443,7 @@ namespace cds { namespace intrusive {
             head_node( unsigned int nHeight )
             {
                 for ( size_t i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i )
-                    m_Tower[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+                    m_Tower[i].store( nullptr, atomics::memory_order_relaxed );
 
                 node_type::make_tower( nHeight, m_Tower );
             }
@@ -456,8 +456,8 @@ namespace cds { namespace intrusive {
             void clear()
             {
                 for (unsigned int i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i )
-                    m_Tower[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed );
-                node_type::m_pNext.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+                    m_Tower[i].store( nullptr, atomics::memory_order_relaxed );
+                node_type::m_pNext.store( nullptr, atomics::memory_order_relaxed );
             }
         };
         //@endcond
@@ -467,7 +467,7 @@ namespace cds { namespace intrusive {
 
         item_counter                m_ItemCounter       ;   ///< item counter
         random_level_generator      m_RandomLevelGen    ;   ///< random level generator instance
-        CDS_ATOMIC::atomic<unsigned int>    m_nHeight   ;   ///< estimated high level
+        atomics::atomic<unsigned int>    m_nHeight   ;   ///< estimated highest level
         mutable stat                m_Stat              ;   ///< internal statistics
 
     protected:
@@ -601,7 +601,7 @@ namespace cds { namespace intrusive {
         void increase_height( unsigned int nHeight )
         {
             unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
-            while ( nCur < nHeight && !m_nHeight.compare_exchange_weak( nCur, nHeight, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ) );
+            while ( nCur < nHeight && !m_nHeight.compare_exchange_weak( nCur, nHeight, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) );
         }
         //@endcond
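
increase_height() above is a monotonic-maximum update: raise m_nHeight to at least nHeight, giving up as soon as some thread has raised it further, since compare_exchange_weak reloads the current value on failure. The same shape in isolation (a sketch, not the library API):

    #include <atomic>

    // Raise 'h' to at least nHeight; exits once h >= nHeight.
    void raise_to( std::atomic<unsigned int>& h, unsigned int nHeight )
    {
        unsigned int cur = h.load( std::memory_order_relaxed );
        while ( cur < nHeight
             && !h.compare_exchange_weak( cur, nHeight,
                    std::memory_order_acquire, std::memory_order_relaxed ))
        {}  // 'cur' is refreshed on each failed CAS
    }
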
 
@@ -618,7 +618,7 @@ namespace cds { namespace intrusive {
             static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
 
             // Barrier for head node
-            CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release );
+            atomics::atomic_thread_fence( memory_model::memory_order_release );
         }
 
         /// Clears and destructs the skip-list
index 3fb6778cab61e93c365950245876a81f899f54b5..da58a7eab1d436d9d3296b714a0e4a037f287b2f 100644 (file)
@@ -29,7 +29,7 @@ namespace cds { namespace intrusive {
             //  bit 0 - the item is logically deleted
             //  bit 1 - the item is extracted (only for level 0)
             typedef cds::details::marked_ptr<node, 3>   marked_ptr          ;   ///< marked pointer
-            typedef CDS_ATOMIC::atomic< marked_ptr >    atomic_marked_ptr   ;   ///< atomic marked pointer
+            typedef atomics::atomic< marked_ptr >    atomic_marked_ptr   ;   ///< atomic marked pointer
             typedef atomic_marked_ptr                   tower_item_type;
 
         protected:
@@ -92,7 +92,7 @@ namespace cds { namespace intrusive {
             void clear_tower()
             {
                 for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel )
-                    next(nLevel).store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+                    next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed );
             }
 
             /// Access to element of next pointer array
@@ -135,7 +135,7 @@ namespace cds { namespace intrusive {
             void clear()
             {
                 assert( m_arrNext == nullptr );
-                m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release );
+                m_pNext.store( marked_ptr(), atomics::memory_order_release );
                 m_pDelChain = nullptr;
             }
 
@@ -180,21 +180,21 @@ namespace cds { namespace intrusive {
                 back_off bkoff;
 
                 for (;;) {
-                    if ( m_pNode->next( m_pNode->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) {
+                    if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
                        // Current node is marked as deleted, so its next pointer can point to anything.
                        // In this case we interrupt the iteration and return the end() iterator.
                         *this = iterator();
                         return;
                     }
 
-                    marked_ptr p = m_pNode->next(0).load( CDS_ATOMIC::memory_order_relaxed );
+                    marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed );
                     node_type * pp = p.ptr();
                     if ( p.bits() ) {
                         // p is marked as deleted. Spin waiting for physical removal
                         bkoff();
                         continue;
                     }
-                    else if ( pp && pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_relaxed ).bits() ) {
+                    else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) {
                         // p is marked as deleted. Spin waiting for physical removal
                         bkoff();
                         continue;
@@ -215,7 +215,7 @@ namespace cds { namespace intrusive {
                 back_off bkoff;
 
                 for (;;) {
-                    marked_ptr p = refHead.next(0).load( CDS_ATOMIC::memory_order_relaxed );
+                    marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed );
                     if ( !p.ptr() ) {
                         // empty skip-list
                         break;
@@ -223,7 +223,7 @@ namespace cds { namespace intrusive {
 
                     node_type * pp = p.ptr();
                     // Logically deleted node is marked from highest level
-                    if ( !pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) {
+                    if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
                         m_pNode = pp;
                         break;
                     }
@@ -653,8 +653,8 @@ namespace cds { namespace intrusive {
 
         item_counter                m_ItemCounter       ;   ///< item counter
         random_level_generator      m_RandomLevelGen    ;   ///< random level generator instance
-        CDS_ATOMIC::atomic<unsigned int>    m_nHeight   ;   ///< estimated high level
-        CDS_ATOMIC::atomic<node_type *>     m_pDeferredDelChain ;   ///< Deferred deleted node chain
+        atomics::atomic<unsigned int>    m_nHeight   ;   ///< estimated highest level
+        atomics::atomic<node_type *>     m_pDeferredDelChain ;   ///< Deferred deleted node chain
         mutable stat                m_Stat              ;   ///< internal statistics
 
     protected:
@@ -737,7 +737,7 @@ namespace cds { namespace intrusive {
                         // pCur is marked, i.e. logically deleted.
                         marked_node_ptr p( pCur.ptr() );
                         if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
-                             memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                             memory_model::memory_order_release, atomics::memory_order_relaxed ))
                         {
                             if ( nLevel == 0 ) {
 #                       ifdef _DEBUG
@@ -811,7 +811,7 @@ namespace cds { namespace intrusive {
                         // pCur is marked, i.e. logically deleted.
                         marked_node_ptr p( pCur.ptr() );
                         if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
-                            memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                            memory_model::memory_order_release, atomics::memory_order_relaxed ))
                         {
                             if ( nLevel == 0 ) {
 #                       ifdef _DEBUG
@@ -875,7 +875,7 @@ retry:
                         // pCur is marked, i.e. logically deleted.
                         marked_node_ptr p( pCur.ptr() );
                         if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
-                            memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                            memory_model::memory_order_release, atomics::memory_order_relaxed ))
                         {
                             if ( nLevel == 0 ) {
 #                       ifdef _DEBUG
@@ -922,7 +922,7 @@ retry:
             {
                 marked_node_ptr p( pos.pSucc[0] );
                 pNode->next( 0 ).store( p, memory_model::memory_order_release );
-                if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+                if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
                     return false;
                 }
 #       ifdef _DEBUG
@@ -935,7 +935,7 @@ retry:
                 marked_node_ptr p;
                 while ( true ) {
                     marked_node_ptr q( pos.pSucc[ nLevel ]);
-                    if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) {
+                    if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
                         // pNode has been marked as removed while we are inserting it
                         // Stop inserting
                         assert( p.bits() );
@@ -943,7 +943,7 @@ retry:
                         return true;
                     }
                     p = q;
-                    if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) )
+                    if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) )
                         break;
 
                     // Renew insert position
@@ -979,7 +979,7 @@ retry:
                 pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed );
                 while ( true ) {
                     if ( pSucc.bits()
-                      || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                      || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                     {
                         break;
                     }
@@ -992,7 +992,7 @@ retry:
                     return false;
 
                 int const nMask = bExtract ? 3 : 1;
-                if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+                if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                 {
                     cds::unref(f)( *node_traits::to_value_ptr( pDel ));
 
@@ -1002,7 +1002,7 @@ retry:
                     for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
                         if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc,
                             marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr() ),
-                            memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed) )
+                            memory_model::memory_order_release, atomics::memory_order_relaxed) )
                         {
                             // Do slow erase
                             find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false );
@@ -1366,7 +1366,7 @@ retry:
         {
             unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
             if ( nCur < nHeight )
-                m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
         }
 
         class deferred_list_iterator
@@ -1439,7 +1439,7 @@ retry:
                 node_type * pDeferList = m_pDeferredDelChain.load( memory_model::memory_order_relaxed );
                 do {
                     pTail->m_pDelChain = pDeferList;
-                } while ( !m_pDeferredDelChain.compare_exchange_weak( pDeferList, pHead, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ));
+                } while ( !m_pDeferredDelChain.compare_exchange_weak( pDeferList, pHead, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ));
 
                 pos.pDelChain = nullptr;
             }
@@ -1457,7 +1457,7 @@ retry:
             static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
 
             // Barrier for head node
-            CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release );
+            atomics::atomic_thread_fence( memory_model::memory_order_release );
         }
 
         /// Clears and destructs the skip-list
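
Every hunk above makes the same substitution in the failure argument of a compare-and-swap. For readers new to the two-order form: success must publish the new link, failure only refreshes the expected value. A minimal sketch of the idiom against plain std::atomic (marked-pointer packing and GC omitted; semantics assumed to match what the atomics namespace forwards to):

    #include <atomic>

    struct node { std::atomic<node*> next{ nullptr }; };

    // Swing pPred->next from pCur to pSucc (physical unlink of a marked node).
    // Success publishes the new link, so it needs release; failure merely
    // reloads the current value into 'expected', so relaxed is enough.
    bool unlink( node* pPred, node* pCur, node* pSucc )
    {
        node* expected = pCur;
        return pPred->next.compare_exchange_strong( expected, pSucc,
            std::memory_order_release,      // ordering on success
            std::memory_order_relaxed );    // ordering on failure
    }
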
index 98143326314a9647be91fc9a8f0b48908b6f70d7..790b5ecb567f86a9ad2bfb5362b8a4e096df6f8a 100644 (file)
@@ -346,7 +346,7 @@ namespace cds { namespace intrusive {
     protected:
         ordered_list_wrapper    m_List              ;   ///< Ordered list containing split-list items
         bucket_table            m_Buckets           ;   ///< bucket table
-        CDS_ATOMIC::atomic<size_t> m_nBucketCountLog2  ;   ///< log2( current bucket count )
+        atomics::atomic<size_t> m_nBucketCountLog2  ;   ///< log2( current bucket count )
         item_counter            m_ItemCounter       ;   ///< Item counter
         hash                    m_HashFunctor       ;   ///< Hash functor
 
@@ -371,7 +371,7 @@ namespace cds { namespace intrusive {
 
         size_t bucket_no( size_t nHash ) const
         {
-            return nHash & ( (1 << m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed)) - 1 );
+            return nHash & ( (1 << m_nBucketCountLog2.load(atomics::memory_order_relaxed)) - 1 );
         }
 
         static size_t parent_bucket( size_t nBucket )
@@ -449,10 +449,10 @@ namespace cds { namespace intrusive {
 
         void    inc_item_count()
         {
-            size_t sz = m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed);
+            size_t sz = m_nBucketCountLog2.load(atomics::memory_order_relaxed);
             if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() )
             {
-                m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed );
+                m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, atomics::memory_order_seq_cst, atomics::memory_order_relaxed );
             }
         }
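
For context, inc_item_count above encodes the split-list growth rule: when the average number of items per bucket exceeds the load factor, one thread doubles the bucket count by CAS-incrementing its log2. A standalone sketch of that rule (names and plumbing are illustrative, not the libcds API):

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> bucket_count_log2{ 3 };     // 2^3 = 8 buckets initially

    size_t bucket_no( size_t nHash )
    {
        // Relaxed is enough: a stale, smaller mask only routes the hash
        // to an ancestor bucket, which is still a valid list head.
        return nHash & (( size_t(1) << bucket_count_log2.load( std::memory_order_relaxed )) - 1 );
    }

    void maybe_grow( size_t nItemCount, size_t nLoadFactor, size_t nCapacity )
    {
        size_t sz = bucket_count_log2.load( std::memory_order_relaxed );
        if (( nItemCount >> sz ) > nLoadFactor && ( size_t(1) << sz ) < nCapacity ) {
            // At most one of the racing threads doubles the table.
            bucket_count_log2.compare_exchange_strong( sz, sz + 1,
                std::memory_order_seq_cst, std::memory_order_relaxed );
        }
    }
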
 
index a7bc10abca813e9e1e8e589fd7bf1d2e7c730062..7f366ddccb0ae009d2a4a90900f58828644d0bf5 100644 (file)
@@ -172,7 +172,7 @@ namespace cds { namespace intrusive {
         public:
             typedef GC      gc          ;   ///< Garbage collector
             typedef Node    node_type   ;   ///< Bucket node type
-            typedef CDS_ATOMIC::atomic<node_type *> table_entry ;   ///< Table entry type
+            typedef atomics::atomic<node_type *> table_entry ;   ///< Table entry type
 
             /// Bucket table allocator
             typedef cds::details::Allocator< table_entry, typename options::allocator >  bucket_table_allocator;
@@ -283,13 +283,13 @@ namespace cds { namespace intrusive {
         public:
             typedef GC      gc          ;   ///< Garbage collector
             typedef Node    node_type   ;   ///< Bucket node type
-            typedef CDS_ATOMIC::atomic<node_type *> table_entry ;   ///< Table entry type
+            typedef atomics::atomic<node_type *> table_entry ;   ///< Table entry type
 
             /// Memory model for atomic operations
             typedef typename options::memory_model     memory_model;
 
         protected:
-            typedef CDS_ATOMIC::atomic<table_entry *>   segment_type    ;   ///< Bucket table segment type
+            typedef atomics::atomic<table_entry *>   segment_type    ;   ///< Bucket table segment type
 
         public:
             /// Bucket table allocator
@@ -442,7 +442,7 @@ namespace cds { namespace intrusive {
                 if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
                     table_entry * pNewSegment = allocate_segment();
                     table_entry * pNull = nullptr;
-                    if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+                    if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
                         destroy_segment( pNewSegment );
                     }
                 }
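
The compare_exchange_strong in the hunk above resolves a race on lazy segment allocation: any thread that finds a null slot allocates a candidate segment, exactly one CAS installs it, and losers destroy their copy. A self-contained sketch of the pattern (types are placeholders, not the libcds bucket table):

    #include <atomic>
    #include <cstddef>

    using table_entry = std::atomic<void*>;

    table_entry* get_segment( std::atomic<table_entry*>& slot, size_t nSegmentSize )
    {
        table_entry* pSeg = slot.load( std::memory_order_acquire );
        if ( !pSeg ) {
            table_entry* pNew = new table_entry[ nSegmentSize ]();
            table_entry* pNull = nullptr;
            if ( slot.compare_exchange_strong( pNull, pNew,
                    std::memory_order_release, std::memory_order_relaxed ))
                pSeg = pNew;        // we installed the segment
            else {
                delete [] pNew;     // another thread won the race
                pSeg = pNull;       // the failed CAS loaded the winner into pNull
            }
        }
        return pSeg;
    }
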
index 8d6640967aed8e4c58fe883dea1abbcb644972d2..f5f585dd205e0a956ca22fcb1f50e586e2675dab 100644 (file)
@@ -140,7 +140,7 @@ namespace cds { namespace intrusive {
     protected:
         ordered_list_wrapper    m_List              ;   ///< Ordered list containing split-list items
         bucket_table            m_Buckets           ;   ///< bucket table
-        CDS_ATOMIC::atomic<size_t>  m_nBucketCountLog2  ;   ///< log2( current bucket count )
+        atomics::atomic<size_t>  m_nBucketCountLog2  ;   ///< log2( current bucket count )
         item_counter            m_ItemCounter       ;   ///< Item counter
         hash                    m_HashFunctor       ;   ///< Hash functor
 
@@ -165,7 +165,7 @@ namespace cds { namespace intrusive {
 
         size_t bucket_no( size_t nHash ) const
         {
-            return nHash & ( (1 << m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed)) - 1 );
+            return nHash & ( (1 << m_nBucketCountLog2.load(atomics::memory_order_relaxed)) - 1 );
         }
 
         static size_t parent_bucket( size_t nBucket )
@@ -243,10 +243,10 @@ namespace cds { namespace intrusive {
 
         void    inc_item_count()
         {
-            size_t sz = m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed);
+            size_t sz = m_nBucketCountLog2.load(atomics::memory_order_relaxed);
             if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() )
             {
-                m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed );
+                m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, atomics::memory_order_seq_cst, atomics::memory_order_relaxed );
             }
         }
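
parent_bucket, whose body is elided from these hunks, deserves a note: in a split-ordered list (Shalev & Shavit) the parent of bucket b is b with its most significant set bit cleared, which is why a stale, smaller bucket mask in bucket_no still lands on a valid ancestor bucket. A hypothetical illustration, not the libcds body:

    #include <cstddef>

    // Parent of bucket b = b with its highest set bit cleared,
    // e.g. parent(6 = 110b) = 2 = 010b, and parent(1) = 0.
    size_t parent_bucket( size_t nBucket )
    {
        size_t msb = 1;
        while (( msb << 1 ) <= nBucket )
            msb <<= 1;              // highest power of two <= nBucket
        return nBucket & ~msb;
    }
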
 
index 92be12f74f47eda09c6979915f805f33351f0211..4eeb2b0c0ceac3bbe3f3386cc15f90443f62afec 100644 (file)
@@ -235,7 +235,7 @@ namespace cds { namespace intrusive {
     protected:
         ordered_list_wrapper    m_List              ;   ///< Ordered list containing split-list items
         bucket_table            m_Buckets           ;   ///< bucket table
-        CDS_ATOMIC::atomic<size_t> m_nBucketCountLog2  ;   ///< log2( current bucket count )
+        atomics::atomic<size_t> m_nBucketCountLog2  ;   ///< log2( current bucket count )
         item_counter            m_ItemCounter       ;   ///< Item counter
         hash                    m_HashFunctor       ;   ///< Hash functor
 
@@ -260,7 +260,7 @@ namespace cds { namespace intrusive {
 
         size_t bucket_no( size_t nHash ) const
         {
-            return nHash & ( (1 << m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed)) - 1 );
+            return nHash & ( (1 << m_nBucketCountLog2.load(atomics::memory_order_relaxed)) - 1 );
         }
 
         static size_t parent_bucket( size_t nBucket )
@@ -338,10 +338,10 @@ namespace cds { namespace intrusive {
 
         void    inc_item_count()
         {
-            size_t sz = m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed);
+            size_t sz = m_nBucketCountLog2.load(atomics::memory_order_relaxed);
             if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() )
             {
-                m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed );
+                m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, atomics::memory_order_seq_cst, atomics::memory_order_relaxed );
             }
         }
 
index 1e3ec4c39b4630d3118fe25838be7ddbfbdef8af..f37986012820a7bfde6b720a2d7f610b6d14fda1 100644 (file)
@@ -153,8 +153,8 @@ namespace cds { namespace intrusive { namespace striped_set {
         static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1;
 
         lock_array_ptr                  m_arrLocks  ;   ///< Lock array. The capacity of array is specified in constructor.
-        CDS_ATOMIC::atomic< owner_t >   m_Owner     ;   ///< owner mark (thread id + boolean flag)
-        CDS_ATOMIC::atomic<size_t>      m_nCapacity ;   ///< Lock array capacity
+        atomics::atomic< owner_t >   m_Owner     ;   ///< owner mark (thread id + boolean flag)
+        atomics::atomic<size_t>      m_nCapacity ;   ///< Lock array capacity
         spinlock_type                   m_access    ;   ///< access to m_arrLocks
         //@endcond
 
@@ -169,7 +169,7 @@ namespace cds { namespace intrusive { namespace striped_set {
 
         lock_array_ptr create_lock_array( size_t nCapacity )
         {
-            m_nCapacity.store( nCapacity, CDS_ATOMIC::memory_order_relaxed );
+            m_nCapacity.store( nCapacity, atomics::memory_order_relaxed );
             return lock_array_ptr( lock_array_allocator().New( nCapacity ), lock_array_disposer() );
         }
 
@@ -182,7 +182,7 @@ namespace cds { namespace intrusive { namespace striped_set {
             while ( true ) {
                 // wait while resizing
                 while ( true ) {
-                    who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
+                    who = m_Owner.load( atomics::memory_order_acquire );
                     if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) )
                         break;
                     bkoff();
@@ -197,7 +197,7 @@ namespace cds { namespace intrusive { namespace striped_set {
                 lock_type& lock = pLocks->at( nHash & (pLocks->size() - 1));
                 lock.lock();
 
-                who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
+                who = m_Owner.load( atomics::memory_order_acquire );
                 if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks == pLocks )
                     return lock;
                 lock.unlock();
@@ -213,7 +213,7 @@ namespace cds { namespace intrusive { namespace striped_set {
             while ( true ) {
                 // wait while resizing
                 while ( true ) {
-                    who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
+                    who = m_Owner.load( atomics::memory_order_acquire );
                     if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) )
                         break;
                     bkoff();
@@ -227,7 +227,7 @@ namespace cds { namespace intrusive { namespace striped_set {
 
                 pLocks->lock_all();
 
-                who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
+                who = m_Owner.load( atomics::memory_order_acquire );
                 if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks == pLocks )
                     return pLocks;
 
@@ -247,7 +247,7 @@ namespace cds { namespace intrusive { namespace striped_set {
             back_off bkoff;
             for (unsigned int nAttempts = 0; nAttempts < 32; ++nAttempts ) {
                 owner_t ownNull = 0;
-                if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) {
+                if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acquire, atomics::memory_order_relaxed )) {
                     lock_array_ptr pOldLocks = m_arrLocks;
                     size_t const nLockCount = pOldLocks->size();
                     for ( size_t i = 0; i < nLockCount; ++i ) {
@@ -267,7 +267,7 @@ namespace cds { namespace intrusive { namespace striped_set {
 
         void release_resize()
         {
-            m_Owner.store( 0, CDS_ATOMIC::memory_order_release );
+            m_Owner.store( 0, atomics::memory_order_release );
         }
         //@endcond
     public:
@@ -338,7 +338,7 @@ namespace cds { namespace intrusive { namespace striped_set {
         */
         size_t lock_count() const
         {
-            return m_nCapacity.load( CDS_ATOMIC::memory_order_relaxed );
+            return m_nCapacity.load( atomics::memory_order_relaxed );
         }
 
         /// Resize for new capacity
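
The m_Owner protocol above packs two fields into one atomic word: bit 0 flags a resize in progress and the upper bits carry the resizing thread's id, so a reader can both detect a resize and recognize itself as the resizer. A reduced sketch of acquiring and releasing that word (thread-id plumbing and the owner mask assumed):

    #include <atomic>
    #include <cstdint>

    using owner_t = std::uint64_t;
    std::atomic<owner_t> owner{ 0 };

    bool try_acquire_resize( owner_t me )
    {
        owner_t ownNull = 0;
        // (me << 1) | 1 : thread id in the upper bits, busy flag in bit 0
        return owner.compare_exchange_strong( ownNull, ( me << 1 ) | 1,
            std::memory_order_acquire, std::memory_order_relaxed );
    }

    void release_resize()
    {
        owner.store( 0, std::memory_order_release );
    }

    bool busy_by_other( owner_t me )
    {
        owner_t who = owner.load( std::memory_order_acquire );
        return ( who & 1 ) && ( who >> 1 ) != me;
    }
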
index dfb549d53cfe8811dc5a045eddc91dd3078d81ce..9a580f040ef40e795e45c940c45b66703d17d87f 100644 (file)
@@ -32,7 +32,7 @@ namespace cds { namespace intrusive {
         {
             operation_id    idOp;   ///< Op id
             T *             pVal;   ///< for push: pointer to argument; for pop: accepts a return value
-            CDS_ATOMIC::atomic<unsigned int> nStatus; ///< Internal elimination status
+            atomics::atomic<unsigned int> nStatus; ///< Internal elimination status
 
             operation()
                 : pVal( nullptr )
@@ -165,7 +165,7 @@ namespace cds { namespace intrusive {
                 struct bkoff_predicate {
                     operation_desc * pOp;
                     bkoff_predicate( operation_desc * p ): pOp(p) {}
-                    bool operator()() { return pOp->nStatus.load( CDS_ATOMIC::memory_order_acquire ) != op_busy; }
+                    bool operator()() { return pOp->nStatus.load( atomics::memory_order_acquire ) != op_busy; }
                 };
 #           endif
 
@@ -212,7 +212,7 @@ namespace cds { namespace intrusive {
                 bool backoff( operation_desc& op, Stat& stat )
                 {
                     elimination_backoff_type bkoff;
-                    op.nStatus.store( op_busy, CDS_ATOMIC::memory_order_relaxed );
+                    op.nStatus.store( op_busy, atomics::memory_order_relaxed );
 
                     elimination_rec * myRec = cds::algo::elimination::init_record( op );
 
@@ -231,12 +231,12 @@ namespace cds { namespace intrusive {
                                 slot.pRec = nullptr;
                                 slot.lock.unlock();
 
-                                himOp->nStatus.store( op_collided, CDS_ATOMIC::memory_order_release );
+                                himOp->nStatus.store( op_collided, atomics::memory_order_release );
                                 cds::algo::elimination::clear_record();
                                 stat.onActiveCollision( op.idOp );
                                 return true;
                             }
-                            himOp->nStatus.store( op_free, CDS_ATOMIC::memory_order_release );
+                            himOp->nStatus.store( op_free, atomics::memory_order_release );
                         }
                         slot.pRec = myRec;
                         slot.lock.unlock();
@@ -245,13 +245,13 @@ namespace cds { namespace intrusive {
                     // Wait for colliding operation
 #               if defined(CDS_CXX11_LAMBDA_SUPPORT) && !(CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION == CDS_COMPILER_MSVC10)
                     // MSVC++ 2010 compiler error C2065: 'op_busy' : undeclared identifier
-                    bkoff( [&op]() -> bool { return op.nStatus.load( CDS_ATOMIC::memory_order_acquire ) != op_busy; } );
+                    bkoff( [&op]() -> bool { return op.nStatus.load( atomics::memory_order_acquire ) != op_busy; } );
 #               else
                    // Local structs are not supported by old compilers (for example, GCC 4.3)
                     //struct bkoff_predicate {
                     //    operation_desc * pOp;
                     //    bkoff_predicate( operation_desc * p ): pOp(p) {}
-                    //    bool operator()() { return pOp->nStatus.load( CDS_ATOMIC::memory_order_acquire ) != op_busy; }
+                    //    bool operator()() { return pOp->nStatus.load( atomics::memory_order_acquire ) != op_busy; }
                     //};
                     bkoff( bkoff_predicate(&op) );
 #               endif
@@ -262,7 +262,7 @@ namespace cds { namespace intrusive {
                             slot.pRec = nullptr;
                     }
 
-                    bool bCollided = op.nStatus.load( CDS_ATOMIC::memory_order_acquire ) == op_collided;
+                    bool bCollided = op.nStatus.load( atomics::memory_order_acquire ) == op_collided;
 
                     if ( !bCollided )
                         stat.onEliminationFailed();
@@ -584,7 +584,7 @@ namespace cds { namespace intrusive {
             node_type * t = m_Top.load(memory_model::memory_order_relaxed);
             while ( true ) {
                 pNew->m_pNext.store( t, memory_model::memory_order_relaxed );
-                if ( m_Top.compare_exchange_weak( t, pNew, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {     // #1 sync-with #2
+                if ( m_Top.compare_exchange_weak( t, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed )) {     // #1 sync-with #2
                     ++m_ItemCounter;
                     m_stat.onPush();
                     return true;
@@ -618,7 +618,7 @@ namespace cds { namespace intrusive {
                     return nullptr;    // stack is empty
 
                 node_type * pNext = t->m_pNext.load(memory_model::memory_order_relaxed);
-                if ( m_Top.compare_exchange_weak( t, pNext, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) {              // #2
+                if ( m_Top.compare_exchange_weak( t, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {              // #2
                     clear_links( t );
                     --m_ItemCounter;
                     m_stat.onPop();
@@ -656,7 +656,7 @@ namespace cds { namespace intrusive {
                 pTop = m_Top.load( memory_model::memory_order_relaxed );
                 if ( pTop == nullptr )
                     return;
-                if ( m_Top.compare_exchange_weak( pTop, nullptr, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ) ) {    // sync-with #1 and #2
+                if ( m_Top.compare_exchange_weak( pTop, nullptr, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ) ) {    // sync-with #1 and #2
                     m_ItemCounter.reset();
                     break;
                 }
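
Stripped of elimination back-off and statistics, the push/pop pair above is the classic Treiber stack; the #1/#2 comments mark the release/acquire pair that makes a popped node's payload visible to the popping thread. A minimal sketch without safe memory reclamation (the real container layers Hazard Pointers or RCU on top to make reading t->pNext safe):

    #include <atomic>

    struct node { node* pNext; };
    std::atomic<node*> top{ nullptr };

    void push( node* pNew )
    {
        node* t = top.load( std::memory_order_relaxed );
        do {
            pNew->pNext = t;
        } while ( !top.compare_exchange_weak( t, pNew,
                    std::memory_order_release,       // #1: publishes *pNew
                    std::memory_order_relaxed ));
    }

    node* pop()
    {
        node* t = top.load( std::memory_order_relaxed );
        while ( t && !top.compare_exchange_weak( t, t->pNext,
                    std::memory_order_acquire,       // #2: syncs with #1
                    std::memory_order_relaxed ))
            ;
        return t;    // nullptr if the stack was empty
    }
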
index 01822729b6a66e1604cf232942cc93fadc057eae..b734d7f60e63431231f73fa9fbb908dc61a70113 100644 (file)
@@ -98,10 +98,10 @@ namespace cds { namespace intrusive {
 
     protected:
         //@cond
-        typedef typename options::buffer::template rebind< CDS_ATOMIC::atomic<value_type *> >::other buffer;
+        typedef typename options::buffer::template rebind< atomics::atomic<value_type *> >::other buffer;
         typedef typename opt::details::alignment_setter< buffer, options::alignment >::type aligned_buffer;
         typedef size_t index_type;
-        typedef typename opt::details::alignment_setter< CDS_ATOMIC::atomic<index_type>, options::alignment >::type aligned_index;
+        typedef typename opt::details::alignment_setter< atomics::atomic<index_type>, options::alignment >::type aligned_index;
         //@endcond
 
     protected:
@@ -216,7 +216,7 @@ namespace cds { namespace intrusive {
                     }
 
                     // help the dequeue to update head
-                    m_nHead.compare_exchange_strong( temp, ate, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                    m_nHead.compare_exchange_strong( temp, ate, memory_model::memory_order_release, atomics::memory_order_relaxed );
                     continue;
                 }
 
@@ -226,9 +226,9 @@ namespace cds { namespace intrusive {
                     continue;
 
                 // get actual tail and try to enqueue new node
-                if ( m_buffer[ate].compare_exchange_strong( tt, pNewNode, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) {
+                if ( m_buffer[ate].compare_exchange_strong( tt, pNewNode, memory_model::memory_order_release, atomics::memory_order_relaxed ) ) {
                     if ( temp % 2 == 0 )
-                        m_nTail.compare_exchange_strong( te, temp, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                        m_nTail.compare_exchange_strong( te, temp, memory_model::memory_order_release, atomics::memory_order_relaxed );
                     ++m_ItemCounter;
                     return true;
                 }
@@ -275,7 +275,7 @@ namespace cds { namespace intrusive {
                 // check whether the queue is empty
                 if ( temp == m_nTail.load(memory_model::memory_order_acquire) ) {
                    // help the enqueue to update tail
-                    m_nTail.compare_exchange_strong( temp, (temp + 1) & nModulo, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                    m_nTail.compare_exchange_strong( temp, (temp + 1) & nModulo, memory_model::memory_order_release, atomics::memory_order_relaxed );
                     continue;
                 }
 
@@ -285,9 +285,9 @@ namespace cds { namespace intrusive {
                     continue;
 
                 // Get the actual head, null means empty
-                if ( m_buffer[temp].compare_exchange_strong( tt, pNull, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+                if ( m_buffer[temp].compare_exchange_strong( tt, pNull, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
                     if ( temp % 2 == 0 )
-                        m_nHead.compare_exchange_strong( th, temp, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                        m_nHead.compare_exchange_strong( th, temp, memory_model::memory_order_release, atomics::memory_order_relaxed );
                     --m_ItemCounter;
                     return reinterpret_cast<value_type *>(reinterpret_cast<intptr_t>( tt ) & ~intptr_t(1));
                 }
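
The reinterpret_cast closing the dequeue above strips a tag kept in bit 0 of a cell value: the Tsigas–Zhang queue distinguishes two "empty" encodings so a cell is not reused with the identical null value twice in a row (an ABA guard), and aligned object pointers never have bit 0 set, so the tag is free to use. A small sketch of that encoding (helper names are mine):

    #include <cstdint>

    template <typename T>
    T* strip_tag( T* p )    // recover the real pointer from a tagged cell value
    {
        return reinterpret_cast<T*>( reinterpret_cast<std::intptr_t>( p ) & ~std::intptr_t(1) );
    }

    template <typename T>
    T* set_tag( T* p )      // produce the alternate encoding with bit 0 set
    {
        return reinterpret_cast<T*>( reinterpret_cast<std::intptr_t>( p ) | std::intptr_t(1) );
    }
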
index daf9e9e893c6e4b65715b62d6924c43b7e24a673..0e5f457f43b5e6b550d4242a4af8c97e611fb592 100644 (file)
@@ -52,7 +52,7 @@ namespace cds {
         public:
             typedef        Backoff      backoff_strategy    ;        ///< back-off strategy type
         private:
-            CDS_ATOMIC::atomic<bool>    m_spin  ;       ///< Spin
+            atomics::atomic<bool>    m_spin  ;       ///< Spin flag: \p true means locked
 #    ifdef CDS_DEBUG
             typename OS::ThreadId       m_dbgOwnerId        ;       ///< Owner thread id (only for debug mode)
 #    endif
@@ -64,7 +64,7 @@ namespace cds {
                 :m_dbgOwnerId( OS::c_NullThreadId )
 #    endif
             {
-                m_spin.store( false, CDS_ATOMIC::memory_order_relaxed );
+                m_spin.store( false, atomics::memory_order_relaxed );
             }
 
             /// Construct spin-lock in specified state
@@ -76,7 +76,7 @@ namespace cds {
                 :m_dbgOwnerId( bLocked ? OS::getCurrentThreadId() : OS::c_NullThreadId )
 #    endif
             {
-                m_spin.store( bLocked, CDS_ATOMIC::memory_order_relaxed );
+                m_spin.store( bLocked, atomics::memory_order_relaxed );
             }
 
             /// Dummy copy constructor
@@ -95,13 +95,13 @@ namespace cds {
             /// Destructor. On debug time it checks whether spin-lock is free
             ~Spinlock()
             {
-                assert( !m_spin.load( CDS_ATOMIC::memory_order_relaxed ) );
+                assert( !m_spin.load( atomics::memory_order_relaxed ) );
             }
 
             /// Check if the spin is locked
             bool is_locked() const CDS_NOEXCEPT
             {
-                return m_spin.load( CDS_ATOMIC::memory_order_relaxed );
+                return m_spin.load( atomics::memory_order_relaxed );
             }
 
             /// Try to lock the object
@@ -120,7 +120,7 @@ namespace cds {
             bool tryLock() CDS_NOEXCEPT
             {
                 bool bCurrent = false;
-                m_spin.compare_exchange_strong( bCurrent, true, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+                m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );
 
                 CDS_DEBUG_DO(
                     if ( !bCurrent ) {
@@ -162,7 +162,7 @@ namespace cds {
 
                 // TATAS algorithm
                 while ( !tryLock() ) {
-                    while ( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) ) {
+                    while ( m_spin.load( atomics::memory_order_relaxed ) ) {
                         backoff();
                     }
                 }
@@ -172,12 +172,12 @@ namespace cds {
             /// Unlock the spin-lock. Debug version: deadlock may be detected
             void unlock() CDS_NOEXCEPT
             {
-                assert( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) );
+                assert( m_spin.load( atomics::memory_order_relaxed ) );
 
                 assert( m_dbgOwnerId == OS::getCurrentThreadId() );
                 CDS_DEBUG_DO( m_dbgOwnerId = OS::c_NullThreadId; )
 
-                m_spin.store( false, CDS_ATOMIC::memory_order_release );
+                m_spin.store( false, atomics::memory_order_release );
             }
         };
 
@@ -202,7 +202,7 @@ namespace cds {
             typedef Backoff         backoff_strategy    ; ///< The backoff type
 
         private:
-            CDS_ATOMIC::atomic<integral_type>   m_spin      ; ///< spin-lock atomic
+            atomics::atomic<integral_type>   m_spin      ; ///< spin-lock atomic
             thread_id                           m_OwnerId   ; ///< Owner thread id. If spin-lock is not locked it usually equals to OS::c_NullThreadId
 
         private:
@@ -225,7 +225,7 @@ namespace cds {
             bool    tryLockOwned( thread_id tid ) CDS_NOEXCEPT
             {
                 if ( isOwned( tid )) {
-                    m_spin.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+                    m_spin.fetch_add( 1, atomics::memory_order_relaxed );
                     return true;
                 }
                 return false;
@@ -234,7 +234,7 @@ namespace cds {
             bool tryAcquireLock() CDS_NOEXCEPT
             {
                 integral_type nCurrent = 0;
-                return m_spin.compare_exchange_weak( nCurrent, 1, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+                return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
             }
 
             bool tryAcquireLock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
@@ -254,7 +254,7 @@ namespace cds {
                 // TATAS algorithm
                 backoff_strategy bkoff;
                 while ( !tryAcquireLock() ) {
-                    while ( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) )
+                    while ( m_spin.load( atomics::memory_order_relaxed ) )
                         bkoff();
                 }
             }
@@ -294,7 +294,7 @@ namespace cds {
             */
             bool is_locked() const CDS_NOEXCEPT
             {
-                return !( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) == 0 || isOwned( cds::OS::getCurrentThreadId() ));
+                return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || isOwned( cds::OS::getCurrentThreadId() ));
             }
 
             /// Try to lock the spin-lock (synonym for \ref try_lock)
@@ -364,12 +364,12 @@ namespace cds {
             bool unlock() CDS_NOEXCEPT
             {
                 if ( isOwned( OS::getCurrentThreadId() ) ) {
-                    integral_type n = m_spin.load( CDS_ATOMIC::memory_order_relaxed );
+                    integral_type n = m_spin.load( atomics::memory_order_relaxed );
                     if ( n > 1 )
-                        m_spin.store( n - 1, CDS_ATOMIC::memory_order_relaxed );
+                        m_spin.store( n - 1, atomics::memory_order_relaxed );
                     else {
                         free();
-                        m_spin.store( 0, CDS_ATOMIC::memory_order_release );
+                        m_spin.store( 0, atomics::memory_order_release );
                     }
                     return true;
                 }
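
The lock path above is test-and-test-and-set: waiters spin on a relaxed load, which stays in the local cache line, and retry the acquiring CAS only once the flag reads free. Condensed to a free-standing class (debug owner tracking and back-off omitted):

    #include <atomic>

    class tatas_spinlock {
        std::atomic<bool> m_spin{ false };
    public:
        bool try_lock()
        {
            bool bCurrent = false;
            return m_spin.compare_exchange_strong( bCurrent, true,
                std::memory_order_acquire, std::memory_order_relaxed );
        }
        void lock()
        {
            while ( !try_lock() ) {
                // spin locally; the CAS is retried only after the flag drops
                while ( m_spin.load( std::memory_order_relaxed ))
                    ;
            }
        }
        void unlock()
        {
            m_spin.store( false, std::memory_order_release );
        }
    };
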
index d6ce92d7628ea31ac2b1feaa99a6c592a5f5e927..cbc9f4a766c42d760b2a0e17e13abbebf2d02a48 100644 (file)
@@ -786,7 +786,7 @@ namespace michael {
             : public options::free_list::item_hook
             , public options::partial_list::item_hook
         {
-            CDS_ATOMIC::atomic<anchor_tag>          anchor      ;   ///< anchor, see \ref anchor_tag
+            atomics::atomic<anchor_tag>          anchor      ;   ///< anchor, see \ref anchor_tag
             byte *              pSB         ;   ///< ptr to superblock
             processor_heap_base * pProcHeap ;   ///< pointer to owner processor heap
             unsigned int        nBlockSize  ;   ///< block size in bytes
@@ -1099,10 +1099,10 @@ namespace michael {
         /// Processor heap
         struct processor_heap_base
         {
-            CDS_DATA_ALIGNMENT(8) CDS_ATOMIC::atomic<active_tag> active;   ///< pointer to the descriptor of active superblock owned by processor heap
+            CDS_DATA_ALIGNMENT(8) atomics::atomic<active_tag> active;   ///< pointer to the descriptor of active superblock owned by processor heap
             processor_desc *    pProcDesc   ;   ///< pointer to parent processor descriptor
             const size_class *  pSizeClass  ;   ///< pointer to size class
-            CDS_ATOMIC::atomic<superblock_desc *>   pPartial    ;   ///< pointer to partial filled superblock (may be \p nullptr)
+            atomics::atomic<superblock_desc *>   pPartial    ;   ///< pointer to a partially filled superblock (may be \p nullptr)
            partial_list        partialList ;   ///< list of partially filled superblocks owned by the processor heap
             unsigned int        nPageIdx    ;   ///< page size-class index, \ref c_nPageSelfAllocation - "small page"
 
@@ -1130,13 +1130,13 @@ namespace michael {
             /// Get partial superblock owned by the processor heap
             superblock_desc * get_partial()
             {
-                superblock_desc * pDesc = pPartial.load(CDS_ATOMIC::memory_order_acquire);
+                superblock_desc * pDesc = pPartial.load(atomics::memory_order_acquire);
                 do {
                     if ( !pDesc ) {
                         pDesc =  partialList.pop();
                         break;
                     }
-                } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+                } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, atomics::memory_order_release, atomics::memory_order_relaxed ) );
 
                 //assert( pDesc == nullptr || free_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_free_list_hook *>(pDesc) ));
                 //assert( pDesc == nullptr || partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
@@ -1150,7 +1150,7 @@ namespace michael {
                 //assert( partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
 
                 superblock_desc * pCur = nullptr;
-                if ( !pPartial.compare_exchange_strong(pCur, pDesc, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed) )
+                if ( !pPartial.compare_exchange_strong(pCur, pDesc, atomics::memory_order_acq_rel, atomics::memory_order_relaxed) )
                     partialList.push( pDesc );
             }
 
@@ -1186,7 +1186,7 @@ namespace michael {
         system_heap         m_LargeHeap          ;  ///< Heap for large block
         aligned_heap        m_AlignedHeap        ;  ///< Internal aligned heap
         sizeclass_selector  m_SizeClassSelector  ;  ///< Size-class selector
-        CDS_ATOMIC::atomic<processor_desc *> *   m_arrProcDesc  ;  ///< array of pointers to the processor descriptors
+        atomics::atomic<processor_desc *> *   m_arrProcDesc  ;  ///< array of pointers to the processor descriptors
         unsigned int        m_nProcessorCount    ;  ///< Processor count
         bound_checker       m_BoundChecker       ;  ///< Bound checker
 
@@ -1213,7 +1213,7 @@ namespace michael {
             // Reserve block
             while ( true ) {
                 ++nCollision;
-                oldActive = pProcHeap->active.load(CDS_ATOMIC::memory_order_acquire);
+                oldActive = pProcHeap->active.load(atomics::memory_order_acquire);
                 if ( !oldActive.ptr() )
                     return nullptr;
                 unsigned int nCredits = oldActive.credits();
@@ -1222,7 +1222,7 @@ namespace michael {
                     newActive = oldActive;
                     newActive.credits( nCredits - 1 );
                 }
-                if ( pProcHeap->active.compare_exchange_strong( oldActive, newActive, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                if ( pProcHeap->active.compare_exchange_strong( oldActive, newActive, atomics::memory_order_release, atomics::memory_order_relaxed ))
                     break;
             }
 
@@ -1240,7 +1240,7 @@ namespace michael {
             nCollision = -1;
             do {
                 ++nCollision;
-                newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+                newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
 
                 assert( oldAnchor.avail < pDesc->nCapacity );
                 pAddr = pDesc->pSB + oldAnchor.avail * (unsigned long long) pDesc->nBlockSize;
@@ -1256,7 +1256,7 @@ namespace michael {
                         newAnchor.count -= nMoreCredits;
                     }
                 }
-            } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+            } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ));
 
             if ( nCollision )
                 pProcHeap->stat.incActiveAnchorCASFailureCount( nCollision );
@@ -1297,7 +1297,7 @@ namespace michael {
             do {
                 ++nCollision;
 
-                newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+                newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
                 if ( oldAnchor.state == SBSTATE_EMPTY ) {
                     free_superblock( pDesc );
                     goto retry;
@@ -1307,7 +1307,7 @@ namespace michael {
                 newAnchor.count -= nMoreCredits + 1;
                 newAnchor.state = (nMoreCredits > 0) ? SBSTATE_ACTIVE : SBSTATE_FULL;
                 newAnchor.tag += 1;
-            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed) );
+            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
 
             if ( nCollision )
                 pProcHeap->stat.incPartialDescCASFailureCount( nCollision );
@@ -1322,13 +1322,13 @@ namespace michael {
             do {
                 ++nCollision;
 
-                newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+                newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
 
                 assert( oldAnchor.avail < pDesc->nCapacity );
                 pAddr = pDesc->pSB + oldAnchor.avail * pDesc->nBlockSize;
                 newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
                 ++newAnchor.tag;
-            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed) );
+            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
 
             if ( nCollision )
                 pProcHeap->stat.incPartialAnchorCASFailureCount( nCollision );
@@ -1356,7 +1356,7 @@ namespace michael {
             assert( pDesc != nullptr );
             pDesc->pSB = new_superblock_buffer( pProcHeap );
 
-            anchor_tag anchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_relaxed);
+            anchor_tag anchor = pDesc->anchor.load(atomics::memory_order_relaxed);
             anchor.tag += 1;
 
             // Make single-linked list of free blocks in superblock
@@ -1374,10 +1374,10 @@ namespace michael {
 
             anchor.count = pDesc->nCapacity - 1 - (newActive.credits() + 1);
             anchor.state = SBSTATE_ACTIVE;
-            pDesc->anchor.store(anchor, CDS_ATOMIC::memory_order_relaxed);
+            pDesc->anchor.store(anchor, atomics::memory_order_relaxed);
 
             active_tag curActive;
-            if ( pProcHeap->active.compare_exchange_strong( curActive, newActive, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+            if ( pProcHeap->active.compare_exchange_strong( curActive, newActive, atomics::memory_order_release, atomics::memory_order_relaxed )) {
                 pProcHeap->stat.incAllocFromNew();
                 //reinterpret_cast<block_header *>( pDesc->pSB )->set( pDesc, 0 );
                 return reinterpret_cast<block_header *>( pDesc->pSB );
@@ -1398,11 +1398,11 @@ namespace michael {
             if ( nProcessorId >= m_nProcessorCount )
                 nProcessorId = 0;
 
-            processor_desc * pDesc = m_arrProcDesc[ nProcessorId ].load( CDS_ATOMIC::memory_order_relaxed );
+            processor_desc * pDesc = m_arrProcDesc[ nProcessorId ].load( atomics::memory_order_relaxed );
             while ( !pDesc ) {
 
                 processor_desc * pNewDesc = new_processor_desc( nProcessorId );
-                if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) {
+                if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, atomics::memory_order_release, atomics::memory_order_relaxed ) ) {
                     pDesc = pNewDesc;
                     break;
                 }
@@ -1421,7 +1421,7 @@ namespace michael {
             active_tag  newActive;
             newActive.set( pDesc, nCredits - 1 );
 
-            if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) )
+            if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
                 return;
 
             // Someone installed another active superblock.
@@ -1431,10 +1431,10 @@ namespace michael {
             anchor_tag  newAnchor;
 
             do {
-                newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+                newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
                 newAnchor.count += nCredits;
                 newAnchor.state = SBSTATE_PARTIAL;
-            } while ( !pDesc->anchor.compare_exchange_weak( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+            } while ( !pDesc->anchor.compare_exchange_weak( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ));
 
             pDesc->pProcHeap->add_partial( pDesc );
         }
@@ -1509,13 +1509,13 @@ namespace michael {
                     m_AlignedHeap.free( pDesc );
                 }
 
-                superblock_desc * pPartial = pProcHeap->pPartial.load(CDS_ATOMIC::memory_order_relaxed);
+                superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
                 if ( pPartial ) {
                     free( pPartial->pSB );
                     m_AlignedHeap.free( pPartial );
                 }
 
-                pDesc = pProcHeap->active.load(CDS_ATOMIC::memory_order_relaxed).ptr();
+                pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
                 if ( pDesc ) {
                     free( pDesc->pSB );
                     m_AlignedHeap.free( pDesc );
@@ -1530,13 +1530,13 @@ namespace michael {
                     m_AlignedHeap.free( pDesc );
                 }
 
-                superblock_desc * pPartial = pProcHeap->pPartial.load(CDS_ATOMIC::memory_order_relaxed);
+                superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
                 if ( pPartial ) {
                     pageHeap.free( pPartial->pSB );
                     m_AlignedHeap.free( pPartial );
                 }
 
-                pDesc = pProcHeap->active.load(CDS_ATOMIC::memory_order_relaxed).ptr();
+                pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
                 if ( pDesc ) {
                     pageHeap.free( pDesc->pSB );
                     m_AlignedHeap.free( pDesc );
@@ -1575,9 +1575,9 @@ namespace michael {
                 pDesc = new( m_AlignedHeap.alloc(sizeof(superblock_desc), c_nAlignment ) ) superblock_desc;
                 assert( (uptr_atomic_t(pDesc) & (c_nAlignment - 1)) == 0 );
 
-                anchor = pDesc->anchor.load( CDS_ATOMIC::memory_order_relaxed );
+                anchor = pDesc->anchor.load( atomics::memory_order_relaxed );
                 anchor.tag = 0;
-                pDesc->anchor.store( anchor, CDS_ATOMIC::memory_order_relaxed );
+                pDesc->anchor.store( anchor, atomics::memory_order_relaxed );
 
                 pProcHeap->stat.incDescAllocCount();
             }
@@ -1586,9 +1586,9 @@ namespace michael {
             assert( pDesc->nCapacity <= c_nMaxBlockInSuperBlock );
             pDesc->pProcHeap = pProcHeap;
 
-            anchor = pDesc->anchor.load( CDS_ATOMIC::memory_order_relaxed );
+            anchor = pDesc->anchor.load( atomics::memory_order_relaxed );
             anchor.avail = 1;
-            pDesc->anchor.store( anchor, CDS_ATOMIC::memory_order_relaxed );
+            pDesc->anchor.store( anchor, atomics::memory_order_relaxed );
 
             return pDesc;
         }
@@ -1663,7 +1663,7 @@ namespace michael {
 
             m_nProcessorCount = m_Topology.processor_count();
             m_arrProcDesc = new( m_AlignedHeap.alloc(sizeof(processor_desc *) * m_nProcessorCount, c_nAlignment ))
-                CDS_ATOMIC::atomic<processor_desc *>[ m_nProcessorCount ];
+                atomics::atomic<processor_desc *>[ m_nProcessorCount ];
             memset( m_arrProcDesc, 0, sizeof(processor_desc *) * m_nProcessorCount )    ;   // ?? memset for atomic<>
         }
 
@@ -1674,7 +1674,7 @@ namespace michael {
         ~Heap()
         {
             for ( unsigned int i = 0; i < m_nProcessorCount; ++i ) {
-                processor_desc * pDesc = m_arrProcDesc[i].load(CDS_ATOMIC::memory_order_relaxed);
+                processor_desc * pDesc = m_arrProcDesc[i].load(atomics::memory_order_relaxed);
                 if ( pDesc )
                     free_processor_desc( pDesc );
             }
@@ -1739,7 +1739,7 @@ namespace michael {
 
             pProcHeap->stat.incDeallocatedBytes( pDesc->nBlockSize );
 
-            oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+            oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
             do {
                 newAnchor = oldAnchor;
                 reinterpret_cast<free_block_header *>( pBlock )->nNextFree = oldAnchor.avail;
@@ -1758,7 +1758,7 @@ namespace michael {
                 }
                 else
                     newAnchor.count += 1;
-            } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+            } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ) );
 
             pProcHeap->stat.incFreeCount();
 
@@ -1897,7 +1897,7 @@ namespace michael {
         {
             size_t nProcHeapCount = m_SizeClassSelector.size();
             for ( unsigned int nProcessor = 0; nProcessor < m_nProcessorCount; ++nProcessor ) {
-                processor_desc * pProcDesc = m_arrProcDesc[nProcessor].load(CDS_ATOMIC::memory_order_relaxed);
+                processor_desc * pProcDesc = m_arrProcDesc[nProcessor].load(atomics::memory_order_relaxed);
                 if ( pProcDesc ) {
                     for ( unsigned int i = 0; i < nProcHeapCount; ++i ) {
                         processor_heap_base * pProcHeap = pProcDesc->arrProcHeap + i;
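
Nearly every loop in this allocator has the same shape: load the superblock anchor, compute a new anchor, and compare_exchange until no concurrent update intervenes, bumping a version tag to defeat ABA. A compressed model of one such loop (field widths are illustrative, not Michael's actual layout):

    #include <atomic>
    #include <cstdint>

    struct anchor_tag {
        std::uint64_t avail : 16;   // index of the first free block in the superblock
        std::uint64_t count : 16;   // number of free blocks
        std::uint64_t state : 2;    // ACTIVE / FULL / PARTIAL / EMPTY
        std::uint64_t tag   : 30;   // version counter against ABA
    };

    std::atomic<anchor_tag> anchor{ anchor_tag{} };   // fits one 64-bit word

    void release_block( unsigned nBlock )
    {
        anchor_tag oldAnchor = anchor.load( std::memory_order_acquire );
        anchor_tag newAnchor;
        do {
            newAnchor = oldAnchor;
            newAnchor.avail = nBlock;   // freed block becomes the free-list head
            newAnchor.count += 1;
            newAnchor.tag   += 1;
        } while ( !anchor.compare_exchange_strong( oldAnchor, newAnchor,
                    std::memory_order_release, std::memory_order_relaxed ));
    }
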
index 4d04dfb0bc2edc9276fb8d3d42662eb0b8a91531..52d861ec31c3dfdbaddee32b9d69604093abe5f4 100644 (file)
@@ -11,10 +11,10 @@ namespace cds { namespace memory { namespace michael {
     struct os_allocated_atomic
     {
         ///@cond
-        CDS_ATOMIC::atomic<size_t>              nAllocCount         ;   ///< Event count of large block allocation from %OS
-        CDS_ATOMIC::atomic<size_t>              nFreeCount          ;   ///< Event count of large block deallocation to %OS
-        CDS_ATOMIC::atomic<unsigned long long>  nBytesAllocated     ;   ///< Total size of allocated large blocks, in bytes
-        CDS_ATOMIC::atomic<unsigned long long>  nBytesDeallocated   ;   ///< Total size of deallocated large blocks, in bytes
+        atomics::atomic<size_t>              nAllocCount         ;   ///< Event count of large block allocation from %OS
+        atomics::atomic<size_t>              nFreeCount          ;   ///< Event count of large block deallocation to %OS
+        atomics::atomic<unsigned long long>  nBytesAllocated     ;   ///< Total size of allocated large blocks, in bytes
+        atomics::atomic<unsigned long long>  nBytesDeallocated   ;   ///< Total size of deallocated large blocks, in bytes
 
         os_allocated_atomic()
             : nAllocCount(0)
@@ -27,39 +27,39 @@ namespace cds { namespace memory { namespace michael {
         /// Adds \p nSize to nBytesAllocated counter
         void incBytesAllocated( size_t nSize )
         {
-            nAllocCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed);
-            nBytesAllocated.fetch_add( nSize, CDS_ATOMIC::memory_order_relaxed );
+            nAllocCount.fetch_add( 1, atomics::memory_order_relaxed);
+            nBytesAllocated.fetch_add( nSize, atomics::memory_order_relaxed );
         }
 
         /// Adds \p nSize to nBytesDeallocated counter
         void incBytesDeallocated( size_t nSize )
         {
-            nFreeCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
-            nBytesDeallocated.fetch_add( nSize, CDS_ATOMIC::memory_order_relaxed );
+            nFreeCount.fetch_add( 1, atomics::memory_order_relaxed );
+            nBytesDeallocated.fetch_add( nSize, atomics::memory_order_relaxed );
         }
 
         /// Returns count of \p alloc and \p alloc_aligned function calls (for large blocks allocated directly from the %OS)
         size_t allocCount() const
         {
-            return nAllocCount.load(CDS_ATOMIC::memory_order_relaxed);
+            return nAllocCount.load(atomics::memory_order_relaxed);
         }
 
         /// Returns count of \p free and \p free_aligned function calls (for large blocks allocated directly from the %OS)
         size_t freeCount() const
         {
-            return nFreeCount.load(CDS_ATOMIC::memory_order_relaxed);
+            return nFreeCount.load(atomics::memory_order_relaxed);
         }
 
         /// Returns current value of nBytesAllocated counter
         atomic64u_t allocatedBytes() const
         {
-            return nBytesAllocated.load(CDS_ATOMIC::memory_order_relaxed);
+            return nBytesAllocated.load(atomics::memory_order_relaxed);
         }
 
         /// Returns current value of nBytesDeallocated counter
         atomic64u_t deallocatedBytes() const
         {
-            return nBytesDeallocated.load(CDS_ATOMIC::memory_order_relaxed);
+            return nBytesDeallocated.load(atomics::memory_order_relaxed);
         }
     };
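
This file and the next use memory_order_relaxed for every statistics access, which is the right default for event counters: each increment must be atomic, but no ordering with surrounding operations is required. The idiom in miniature (member names are illustrative):

    #include <atomic>
    #include <cstddef>

    struct os_alloc_stat {
        std::atomic<size_t>             nAllocCount{ 0 };
        std::atomic<unsigned long long> nBytesAllocated{ 0 };

        void incBytesAllocated( size_t nSize )
        {
            // Relaxed: each add is atomic, but implies no happens-before.
            nAllocCount.fetch_add( 1, std::memory_order_relaxed );
            nBytesAllocated.fetch_add( nSize, std::memory_order_relaxed );
        }

        size_t allocCount() const
        {
            return nAllocCount.load( std::memory_order_relaxed );
        }
    };
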
 
index fa2466c54b698b11629cd6841806b02e6e268a4a..1200a8927d08a1d100165fe15513045f71947501 100644 (file)
@@ -19,21 +19,21 @@ namespace cds { namespace memory { namespace michael {
     class procheap_atomic_stat
     {
         //@cond
-        CDS_ATOMIC::atomic<size_t>      nAllocFromActive    ;  ///< Event count of allocation from active superblock
-        CDS_ATOMIC::atomic<size_t>      nAllocFromPartial   ;  ///< Event count of allocation from partial superblock
-        CDS_ATOMIC::atomic<size_t>      nAllocFromNew       ;  ///< Event count of allocation from new superblock
-        CDS_ATOMIC::atomic<size_t>      nFreeCount          ;  ///< \ref free function call count
-        CDS_ATOMIC::atomic<size_t>      nBlockCount         ;  ///< Count of superblock allocated
-        CDS_ATOMIC::atomic<size_t>      nBlockDeallocCount  ;  ///< Count of superblock deallocated
-        CDS_ATOMIC::atomic<size_t>      nDescAllocCount     ;  ///< Count of superblock descriptors
-        CDS_ATOMIC::atomic<size_t>      nDescFull           ;  ///< Count of full superblock
-        CDS_ATOMIC::atomic<unsigned long long> nBytesAllocated     ;  ///< Count of allocated bytes
-        CDS_ATOMIC::atomic<unsigned long long> nBytesDeallocated   ;  ///< Count of deallocated bytes
-
-        CDS_ATOMIC::atomic<size_t>      nActiveDescCASFailureCount ;   ///< CAS failure counter for active block of \p alloc_from_active Heap function
-        CDS_ATOMIC::atomic<size_t>      nActiveAnchorCASFailureCount;   ///< CAS failure counter for active block of \p alloc_from_active Heap function
-        CDS_ATOMIC::atomic<size_t>      nPartialDescCASFailureCount ;   ///< CAS failure counter for partial block of \p alloc_from_partial Heap function
-        CDS_ATOMIC::atomic<size_t>      nPartialAnchorCASFailureCount;   ///< CAS failure counter for partial block of \p alloc_from_partial Heap function
+        atomics::atomic<size_t>      nAllocFromActive    ;  ///< Event count of allocations from the active superblock
+        atomics::atomic<size_t>      nAllocFromPartial   ;  ///< Event count of allocations from a partial superblock
+        atomics::atomic<size_t>      nAllocFromNew       ;  ///< Event count of allocations from a new superblock
+        atomics::atomic<size_t>      nFreeCount          ;  ///< \ref free function call count
+        atomics::atomic<size_t>      nBlockCount         ;  ///< Count of allocated superblocks
+        atomics::atomic<size_t>      nBlockDeallocCount  ;  ///< Count of deallocated superblocks
+        atomics::atomic<size_t>      nDescAllocCount     ;  ///< Count of allocated superblock descriptors
+        atomics::atomic<size_t>      nDescFull           ;  ///< Count of full superblocks
+        atomics::atomic<unsigned long long> nBytesAllocated     ;  ///< Count of allocated bytes
+        atomics::atomic<unsigned long long> nBytesDeallocated   ;  ///< Count of deallocated bytes
+
+        atomics::atomic<size_t>      nActiveDescCASFailureCount ;   ///< CAS failure counter for the \p active descriptor pointer in \p alloc_from_active
+        atomics::atomic<size_t>      nActiveAnchorCASFailureCount;   ///< CAS failure counter for the superblock anchor in \p alloc_from_active
+        atomics::atomic<size_t>      nPartialDescCASFailureCount ;   ///< CAS failure counter for the partial descriptor pointer in \p alloc_from_partial
+        atomics::atomic<size_t>      nPartialAnchorCASFailureCount;   ///< CAS failure counter for the superblock anchor in \p alloc_from_partial
 
         //@endcond
 
@@ -59,134 +59,134 @@ namespace cds { namespace memory { namespace michael {
         /// Increment event counter of allocation from active superblock
         void incAllocFromActive()
         {
-            nAllocFromActive.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+            nAllocFromActive.fetch_add( 1, atomics::memory_order_relaxed );
         }
         /// Increment event counter of allocation from active superblock by \p n
         void incAllocFromActive( size_t n )
         {
-            nAllocFromActive.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+            nAllocFromActive.fetch_add( n, atomics::memory_order_relaxed );
         }
 
         /// Increment event counter of allocation from partial superblock
         void incAllocFromPartial()
         {
-            nAllocFromPartial.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+            nAllocFromPartial.fetch_add( 1, atomics::memory_order_relaxed );
         }
         /// Increment event counter of allocation from partial superblock by \p n
         void incAllocFromPartial( size_t n )
         {
-            nAllocFromPartial.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+            nAllocFromPartial.fetch_add( n, atomics::memory_order_relaxed );
         }
 
         /// Increment event count of allocation from new superblock
         void incAllocFromNew()
         {
-            nAllocFromNew.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+            nAllocFromNew.fetch_add( 1, atomics::memory_order_relaxed );
         }
         /// Increment event count of allocation from new superblock by \p n
         void incAllocFromNew( size_t n )
         {
-            nAllocFromNew.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+            nAllocFromNew.fetch_add( n, atomics::memory_order_relaxed );
         }
 
         /// Increment event counter of \p free calls
         void incFreeCount()
         {
-            nFreeCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+            nFreeCount.fetch_add( 1, atomics::memory_order_relaxed );
         }
         /// Increment event counter of \p free calls by \p n
         void incFreeCount( size_t n )
         {
-            nFreeCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+            nFreeCount.fetch_add( n, atomics::memory_order_relaxed );
         }
 
         /// Increment counter of superblocks allocated
         void incBlockAllocated()
         {
-            nBlockCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+            nBlockCount.fetch_add( 1, atomics::memory_order_relaxed );
         }
         /// Increment counter of superblocks allocated by \p n
         void incBlockAllocated( size_t n )
         {
-            nBlockCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+            nBlockCount.fetch_add( n, atomics::memory_order_relaxed );
         }
 
         /// Increment counter of superblocks deallocated
         void incBlockDeallocated()
         {
-            nBlockDeallocCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+            nBlockDeallocCount.fetch_add( 1, atomics::memory_order_relaxed );
         }
         /// Increment counter of superblocks deallocated by \p n
         void incBlockDeallocated( size_t n )
         {
-            nBlockDeallocCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+            nBlockDeallocCount.fetch_add( n, atomics::memory_order_relaxed );
         }
 
         /// Increment counter of superblock descriptors allocated
         void incDescAllocCount()
         {
-            nDescAllocCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+            nDescAllocCount.fetch_add( 1, atomics::memory_order_relaxed );
         }
         /// Increment counter of superblock descriptors allocated by \p n
         void incDescAllocCount( size_t n )
         {
-            nDescAllocCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+            nDescAllocCount.fetch_add( n, atomics::memory_order_relaxed );
         }
 
         /// Increment counter of full superblock descriptors
         void incDescFull()
         {
-            nDescFull.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+            nDescFull.fetch_add( 1, atomics::memory_order_relaxed );
         }
         /// Increment counter of full superblock descriptors by \p n
         void incDescFull( size_t n )
         {
-            nDescFull.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+            nDescFull.fetch_add( n, atomics::memory_order_relaxed );
         }
 
         /// Decrement counter of full superblock descriptors
         void decDescFull()
         {
-            nDescFull.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed );
+            nDescFull.fetch_sub( 1, atomics::memory_order_relaxed );
         }
         /// Decrement counter of full superblock descriptors by \p n
         void decDescFull(size_t n)
         {
-            nDescFull.fetch_sub( n, CDS_ATOMIC::memory_order_relaxed );
+            nDescFull.fetch_sub( n, atomics::memory_order_relaxed );
         }
         /// Add \p nBytes to allocated bytes counter
         void incAllocatedBytes( size_t nBytes )
         {
-            nBytesAllocated.fetch_add( nBytes, CDS_ATOMIC::memory_order_relaxed );
+            nBytesAllocated.fetch_add( nBytes, atomics::memory_order_relaxed );
         }
         /// Add \p nBytes to deallocated bytes counter
         void incDeallocatedBytes( size_t nBytes )
         {
-            nBytesDeallocated.fetch_add( nBytes, CDS_ATOMIC::memory_order_relaxed);
+            nBytesDeallocated.fetch_add( nBytes, atomics::memory_order_relaxed);
         }
 
         /// Add \p nCount to CAS failure counter of updating \p active field of active descriptor for \p alloc_from_active internal Heap function
         void incActiveDescCASFailureCount( int nCount )
         {
-            nActiveDescCASFailureCount.fetch_add( nCount, CDS_ATOMIC::memory_order_relaxed );
+            nActiveDescCASFailureCount.fetch_add( nCount, atomics::memory_order_relaxed );
         }
 
         /// Add \p nCount to CAS failure counter of updating \p anchor field of active descriptor for \p alloc_from_active internal Heap function
         void incActiveAnchorCASFailureCount( int nCount )
         {
-            nActiveAnchorCASFailureCount.fetch_add( nCount, CDS_ATOMIC::memory_order_relaxed );
+            nActiveAnchorCASFailureCount.fetch_add( nCount, atomics::memory_order_relaxed );
         }
 
         /// Add \p nCount to CAS failure counter of updating \p active field of partial descriptor for \p alloc_from_partial internal Heap function
         void incPartialDescCASFailureCount( int nCount )
         {
-            nPartialDescCASFailureCount.fetch_add( nCount, CDS_ATOMIC::memory_order_relaxed );
+            nPartialDescCASFailureCount.fetch_add( nCount, atomics::memory_order_relaxed );
         }
 
         /// Add \p nCount to CAS failure counter of updating \p anchor field of partial descriptor for \p alloc_from_partial internal Heap function
         void incPartialAnchorCASFailureCount( int nCount )
         {
-            nPartialAnchorCASFailureCount.fetch_add( nCount, CDS_ATOMIC::memory_order_relaxed );
+            nPartialAnchorCASFailureCount.fetch_add( nCount, atomics::memory_order_relaxed );
         }
 
         // -----------------------------------------------------------------
@@ -195,49 +195,49 @@ namespace cds { namespace memory { namespace michael {
         /// Read event counter of allocation from active superblock
         size_t allocFromActive() const
         {
-            return nAllocFromActive.load(CDS_ATOMIC::memory_order_relaxed);
+            return nAllocFromActive.load(atomics::memory_order_relaxed);
         }
 
         /// Read event counter of allocation from partial superblock
         size_t allocFromPartial() const
         {
-            return nAllocFromPartial.load(CDS_ATOMIC::memory_order_relaxed);
+            return nAllocFromPartial.load(atomics::memory_order_relaxed);
         }
 
         /// Read event count of allocation from new superblock
         size_t allocFromNew() const
         {
-            return nAllocFromNew.load(CDS_ATOMIC::memory_order_relaxed);
+            return nAllocFromNew.load(atomics::memory_order_relaxed);
         }
 
         /// Read event counter of \p free calls
         size_t freeCount() const
         {
-            return nFreeCount.load(CDS_ATOMIC::memory_order_relaxed);
+            return nFreeCount.load(atomics::memory_order_relaxed);
         }
 
         /// Read counter of superblocks allocated
         size_t blockAllocated() const
         {
-            return nBlockCount.load(CDS_ATOMIC::memory_order_relaxed);
+            return nBlockCount.load(atomics::memory_order_relaxed);
         }
 
         /// Read counter of superblocks deallocated
         size_t blockDeallocated() const
         {
-            return nBlockDeallocCount.load(CDS_ATOMIC::memory_order_relaxed);
+            return nBlockDeallocCount.load(atomics::memory_order_relaxed);
         }
 
         /// Read counter of superblock descriptors allocated
         size_t descAllocCount() const
         {
-            return nDescAllocCount.load(CDS_ATOMIC::memory_order_relaxed);
+            return nDescAllocCount.load(atomics::memory_order_relaxed);
         }
 
         /// Read counter of full superblock descriptors
         size_t descFull() const
         {
-            return nDescFull.load(CDS_ATOMIC::memory_order_relaxed);
+            return nDescFull.load(atomics::memory_order_relaxed);
         }
 
         /// Get counter of allocated bytes
@@ -249,7 +249,7 @@ namespace cds { namespace memory { namespace michael {
         */
         atomic64u_t allocatedBytes() const
         {
-            return nBytesAllocated.load(CDS_ATOMIC::memory_order_relaxed);
+            return nBytesAllocated.load(atomics::memory_order_relaxed);
         }
 
         /// Get counter of deallocated bytes
@@ -260,31 +260,31 @@ namespace cds { namespace memory { namespace michael {
         */
         atomic64u_t deallocatedBytes() const
         {
-            return nBytesDeallocated.load(CDS_ATOMIC::memory_order_relaxed);
+            return nBytesDeallocated.load(atomics::memory_order_relaxed);
         }
 
         /// Get CAS failure counter of updating \p active field of active descriptor for \p alloc_from_active internal Heap function
         size_t activeDescCASFailureCount() const
         {
-            return nActiveDescCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed);
+            return nActiveDescCASFailureCount.load(atomics::memory_order_relaxed);
         }
 
         /// Get CAS failure counter of updating \p anchor field of active descriptor for \p alloc_from_active internal Heap function
         size_t activeAnchorCASFailureCount() const
         {
-            return nActiveAnchorCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed);
+            return nActiveAnchorCASFailureCount.load(atomics::memory_order_relaxed);
         }
 
         /// Get CAS failure counter of updating \p active field of partial descriptor for \p alloc_from_partial internal Heap function
         size_t partialDescCASFailureCount() const
         {
-            return nPartialDescCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed);
+            return nPartialDescCASFailureCount.load(atomics::memory_order_relaxed);
         }
 
         /// Get CAS failure counter of updating \p anchor field of partial descriptor for \p alloc_from_partial internal Heap function
         size_t partialAnchorCASFailureCount() const
         {
-            return nPartialAnchorCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed);
+            return nPartialAnchorCASFailureCount.load(atomics::memory_order_relaxed);
         }
     };
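
Side note for readers of this hunk: every statistics member above is updated and read with memory_order_relaxed. That is deliberate: the counters are monotonic event counts used only for reporting, so no thread synchronizes through them. A minimal sketch of the same pattern in plain std::atomic (illustrative only; event_counter is an assumed name, not a libcds type):

    #include <atomic>
    #include <cstddef>

    // Relaxed event counter: the value is monotonic and read only for
    // reporting, so no acquire/release edge is required.
    struct event_counter {
        std::atomic<size_t> m_nCount{ 0 };

        void inc( size_t n = 1 )
        {
            m_nCount.fetch_add( n, std::memory_order_relaxed );
        }
        size_t value() const
        {
            return m_nCount.load( std::memory_order_relaxed );
        }
    };
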
 
index 184a241f742c2f06ba821ef7bf13bd665663ff72..9b34428adcc708f990dc16a4ed53c78ab6573a69 100644 (file)
@@ -450,12 +450,12 @@ namespace opt {
             //@cond
 
             // For new C++11 (cds-1.1.0)
-            static const CDS_ATOMIC::memory_order memory_order_relaxed    = CDS_ATOMIC::memory_order_relaxed;
-            static const CDS_ATOMIC::memory_order memory_order_consume    = CDS_ATOMIC::memory_order_consume;
-            static const CDS_ATOMIC::memory_order memory_order_acquire    = CDS_ATOMIC::memory_order_acquire;
-            static const CDS_ATOMIC::memory_order memory_order_release    = CDS_ATOMIC::memory_order_release;
-            static const CDS_ATOMIC::memory_order memory_order_acq_rel    = CDS_ATOMIC::memory_order_acq_rel;
-            static const CDS_ATOMIC::memory_order memory_order_seq_cst    = CDS_ATOMIC::memory_order_seq_cst;
+            static const atomics::memory_order memory_order_relaxed    = atomics::memory_order_relaxed;
+            static const atomics::memory_order memory_order_consume    = atomics::memory_order_consume;
+            static const atomics::memory_order memory_order_acquire    = atomics::memory_order_acquire;
+            static const atomics::memory_order memory_order_release    = atomics::memory_order_release;
+            static const atomics::memory_order memory_order_acq_rel    = atomics::memory_order_acq_rel;
+            static const atomics::memory_order memory_order_seq_cst    = atomics::memory_order_seq_cst;
             //@endcond
         };
 
@@ -469,12 +469,12 @@ namespace opt {
             //@cond
 
             // For new C++11 (cds-1.1.0)
-            static const CDS_ATOMIC::memory_order memory_order_relaxed    = CDS_ATOMIC::memory_order_seq_cst;
-            static const CDS_ATOMIC::memory_order memory_order_consume    = CDS_ATOMIC::memory_order_seq_cst;
-            static const CDS_ATOMIC::memory_order memory_order_acquire    = CDS_ATOMIC::memory_order_seq_cst;
-            static const CDS_ATOMIC::memory_order memory_order_release    = CDS_ATOMIC::memory_order_seq_cst;
-            static const CDS_ATOMIC::memory_order memory_order_acq_rel    = CDS_ATOMIC::memory_order_seq_cst;
-            static const CDS_ATOMIC::memory_order memory_order_seq_cst    = CDS_ATOMIC::memory_order_seq_cst;
+            static const atomics::memory_order memory_order_relaxed    = atomics::memory_order_seq_cst;
+            static const atomics::memory_order memory_order_consume    = atomics::memory_order_seq_cst;
+            static const atomics::memory_order memory_order_acquire    = atomics::memory_order_seq_cst;
+            static const atomics::memory_order memory_order_release    = atomics::memory_order_seq_cst;
+            static const atomics::memory_order memory_order_acq_rel    = atomics::memory_order_seq_cst;
+            static const atomics::memory_order memory_order_seq_cst    = atomics::memory_order_seq_cst;
             //@endcond
         };
     } // namespace v
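
The two trait classes above turn memory ordering into a compile-time policy: the first forwards each symbolic order unchanged, while the second pins them all to memory_order_seq_cst, which is useful when hunting ordering bugs. A hedged sketch of how a component might consume such a policy (relaxed_model, seq_cst_model and flag are illustrative stand-ins, not libcds names):

    #include <atomic>

    struct relaxed_model {
        static constexpr std::memory_order order_acquire = std::memory_order_acquire;
        static constexpr std::memory_order order_release = std::memory_order_release;
    };
    struct seq_cst_model {
        static constexpr std::memory_order order_acquire = std::memory_order_seq_cst;
        static constexpr std::memory_order order_release = std::memory_order_seq_cst;
    };

    // The component reads its orderings through the policy, so
    // flag<seq_cst_model> strengthens every operation without code changes.
    template <class MemoryModel>
    struct flag {
        std::atomic<bool> m_bFlag{ false };

        void set()          { m_bFlag.store( true, MemoryModel::order_release ); }
        bool is_set() const { return m_bFlag.load( MemoryModel::order_acquire ); }
    };
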
index c7798d23b8db29b21e33065c08b8ebe3b7563e70..3ccdfce27b249b789ba18a2da50b5e3c7142ddf4 100644 (file)
@@ -21,7 +21,7 @@ namespace cds {
     template <typename T>
     class ref_counter
     {
-        CDS_ATOMIC::atomic<T>   m_nRefCount    ;        ///< The reference counter
+        atomics::atomic<T>   m_nRefCount    ;        ///< The reference counter
 
     public:
         typedef T   ref_counter_type  ; ///< The reference counter type
@@ -35,7 +35,7 @@ namespace cds {
         /// Get current value of reference counter.
         T   value() const CDS_NOEXCEPT
         {
-            return m_nRefCount.load( CDS_ATOMIC::memory_order_relaxed );
+            return m_nRefCount.load( atomics::memory_order_relaxed );
         }
 
         /// Current value of reference counter
@@ -47,14 +47,14 @@ namespace cds {
         /// Atomic increment
         void    inc() CDS_NOEXCEPT
         {
-            m_nRefCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+            m_nRefCount.fetch_add( 1, atomics::memory_order_relaxed );
         }
 
         /// Atomic decrement. Returns \p true if the reference counter becomes 0, otherwise \p false
         bool    dec() CDS_NOEXCEPT
         {
-            if ( m_nRefCount.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed ) == 1 ) {
-                CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+            if ( m_nRefCount.fetch_sub( 1, atomics::memory_order_relaxed ) == 1 ) {
+                atomics::atomic_thread_fence( atomics::memory_order_release );
                 return true;
             }
             return false;
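
Note the ordering in dec() above: the decrement is relaxed and a release fence is issued only when the counter reaches zero. The more common intrusive-refcount idiom puts the release on the fetch_sub itself and adds an acquire fence before destruction; a sketch of that variant for comparison (an illustration of the general idiom, not a copy of cds::ref_counter):

    #include <atomic>

    struct ref_counted {
        std::atomic<long> m_nRefCount{ 1 };

        void add_ref()
        {
            m_nRefCount.fetch_add( 1, std::memory_order_relaxed );
        }

        // Returns true when the caller dropped the last reference.
        // release: prior writes to the object happen-before the deleter;
        // acquire: the deleting thread then sees all of them.
        bool release()
        {
            if ( m_nRefCount.fetch_sub( 1, std::memory_order_release ) == 1 ) {
                std::atomic_thread_fence( std::memory_order_acquire );
                return true;    // safe to destroy the object now
            }
            return false;
        }
    };
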
index 8c295d85dde0ea606be65c485b88e5238c36dd75..1d60b66e865c28cfd08d8abbdd481b2da25b9f7e 100644 (file)
@@ -141,7 +141,7 @@ namespace cds {
             cds::algo::elimination::record   m_EliminationRec;
 
             //@cond
-            static CDS_EXPORT_API CDS_ATOMIC::atomic<size_t> s_nLastUsedProcNo;
+            static CDS_EXPORT_API atomics::atomic<size_t>    s_nLastUsedProcNo;
             static CDS_EXPORT_API size_t                     s_nProcCount;
             //@endcond
 
@@ -154,7 +154,7 @@ namespace cds {
                 , m_pSHBRCU( nullptr )
                 , m_pSHTRCU( nullptr )
 #endif
-                , m_nFakeProcessorNumber( s_nLastUsedProcNo.fetch_add(1, CDS_ATOMIC::memory_order_relaxed) % s_nProcCount )
+                , m_nFakeProcessorNumber( s_nLastUsedProcNo.fetch_add(1, atomics::memory_order_relaxed) % s_nProcCount )
                 , m_nAttachCount(0)
             {
                 if (cds::gc::HP::isUsed() )
index 0ba1359c7282323ba68c2aa525c639086818b253..5443acb03a5a7cdc4a41ec990b33c58193cba96e 100644 (file)
@@ -287,12 +287,12 @@ namespace cds {
             {
             public:
 #       ifdef CDS_CXX11_TEMPLATE_ALIAS_SUPPORT
-                template <typename MarkedPtr> using atomic_marked_ptr = CDS_ATOMIC::atomic<MarkedPtr>;
+                template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
 #       else
                 template <typename MarkedPtr>
-                class atomic_marked_ptr: public CDS_ATOMIC::atomic<MarkedPtr>
+                class atomic_marked_ptr: public atomics::atomic<MarkedPtr>
                 {
-                    typedef CDS_ATOMIC::atomic<MarkedPtr> base_class;
+                    typedef atomics::atomic<MarkedPtr> base_class;
                 public:
 #           ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
                     atomic_marked_ptr() CDS_NOEXCEPT_DEFAULTED_( noexcept(base_class()) ) = default;
@@ -316,7 +316,7 @@ namespace cds {
             template <typename ThreadData>
             struct thread_list_record {
                 ThreadData *    m_pNext ;  ///< Next item in thread list
-                CDS_ATOMIC::atomic<OS::ThreadId>    m_idOwner   ; ///< Owner thread id; 0 - the record is free (not owned)
+                atomics::atomic<OS::ThreadId>    m_idOwner   ; ///< Owner thread id; 0 - the record is free (not owned)
 
                 thread_list_record()
                     : m_pNext( nullptr )
@@ -336,7 +336,7 @@ namespace cds {
                 typedef cds::details::Allocator< thread_record, Alloc >   allocator_type;
 
             private:
-                CDS_ATOMIC::atomic<thread_record *>   m_pHead;
+                atomics::atomic<thread_record *>   m_pHead;
 
             public:
                 thread_list()
@@ -355,9 +355,9 @@ namespace cds {
                     cds::OS::ThreadId const curThreadId  = cds::OS::getCurrentThreadId();
 
                     // First try to reuse a retired (non-active) HP record
-                    for ( pRec = m_pHead.load( CDS_ATOMIC::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
+                    for ( pRec = m_pHead.load( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
                         cds::OS::ThreadId thId = nullThreadId;
-                        if ( !pRec->m_list.m_idOwner.compare_exchange_strong( thId, curThreadId, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) )
+                        if ( !pRec->m_list.m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
                             continue;
                         return pRec;
                     }
@@ -365,14 +365,14 @@ namespace cds {
                     // No records available for reuse
                     // Allocate and push a new record
                     pRec = allocator_type().New();
-                    pRec->m_list.m_idOwner.store( curThreadId, CDS_ATOMIC::memory_order_relaxed );
+                    pRec->m_list.m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
 
-                    CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+                    atomics::atomic_thread_fence( atomics::memory_order_release );
 
-                    thread_record * pOldHead = m_pHead.load( CDS_ATOMIC::memory_order_acquire );
+                    thread_record * pOldHead = m_pHead.load( atomics::memory_order_acquire );
                     do {
                         pRec->m_list.m_pNext = pOldHead;
-                    } while ( !m_pHead.compare_exchange_weak( pOldHead, pRec, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+                    } while ( !m_pHead.compare_exchange_weak( pOldHead, pRec, atomics::memory_order_release, atomics::memory_order_relaxed ));
 
                     return pRec;
                 }
@@ -380,7 +380,7 @@ namespace cds {
                 void retire( thread_record * pRec )
                 {
                     assert( pRec != nullptr );
-                    pRec->m_list.m_idOwner.store( cds::OS::c_NullThreadId, CDS_ATOMIC::memory_order_release );
+                    pRec->m_list.m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
                 }
 
                 void detach_all()
@@ -388,15 +388,15 @@ namespace cds {
                     thread_record * pNext = nullptr;
                     cds::OS::ThreadId const nullThreadId = cds::OS::c_NullThreadId;
 
-                    for ( thread_record * pRec = m_pHead.load(CDS_ATOMIC::memory_order_acquire); pRec; pRec = pNext ) {
+                    for ( thread_record * pRec = m_pHead.load(atomics::memory_order_acquire); pRec; pRec = pNext ) {
                         pNext = pRec->m_list.m_pNext;
-                        if ( pRec->m_list.m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) != nullThreadId ) {
+                        if ( pRec->m_list.m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
                             retire( pRec );
                         }
                     }
                 }
 
-                thread_record * head( CDS_ATOMIC::memory_order mo ) const
+                thread_record * head( atomics::memory_order mo ) const
                 {
                     return m_pHead.load( mo );
                 }
@@ -408,13 +408,13 @@ namespace cds {
                     CDS_DEBUG_DO( cds::OS::ThreadId const nullThreadId = cds::OS::c_NullThreadId; )
                     CDS_DEBUG_DO( cds::OS::ThreadId const mainThreadId = cds::OS::getCurrentThreadId() ;)
 
-                    thread_record * p = m_pHead.exchange( nullptr, CDS_ATOMIC::memory_order_seq_cst );
+                    thread_record * p = m_pHead.exchange( nullptr, atomics::memory_order_seq_cst );
                     while ( p ) {
                         thread_record * pNext = p->m_list.m_pNext;
 
-                        assert( p->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == nullThreadId
-                            || p->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == mainThreadId
-                            || !cds::OS::isThreadAlive( p->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) )
+                        assert( p->m_list.m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
+                            || p->m_list.m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
+                            || !cds::OS::isThreadAlive( p->m_list.m_idOwner.load( atomics::memory_order_relaxed ) )
                             );
 
                         al.Delete( p );
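
The record-allocation path above combines two standard lock-free list moves: a strong CAS on m_idOwner to claim a retired record, and a weak-CAS loop on m_pHead to publish a freshly allocated one. The publish loop, distilled to its essentials (node and push are illustrative names, not libcds API):

    #include <atomic>

    struct node {
        node * m_pNext = nullptr;
    };

    std::atomic<node *> g_pHead{ nullptr };

    void push( node * p )
    {
        node * pOld = g_pHead.load( std::memory_order_acquire );
        do {
            p->m_pNext = pOld;  // link first, publish second
            // On success the release CAS makes p->m_pNext visible to readers;
            // on failure pOld is reloaded and we relink.
        } while ( !g_pHead.compare_exchange_weak( pOld, p,
                    std::memory_order_release, std::memory_order_relaxed ));
    }
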
index 181e22ac2d2b7a432e6ad31e6660e57e24a2cc77..26c8ab8c553d5e449adae84883eff90d4ae1bd8b 100644 (file)
@@ -37,15 +37,15 @@ namespace cds { namespace urcu { namespace details {
         thread_record * pRec = get_thread_record();
         assert( pRec != nullptr );
 
-        uint32_t tmp = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed );
+        uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
         if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
-            pRec->m_nAccessControl.store( gp_singleton<RCUtag>::instance()->global_control_word(CDS_ATOMIC::memory_order_relaxed),
-                CDS_ATOMIC::memory_order_relaxed );
-            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
+            pRec->m_nAccessControl.store( gp_singleton<RCUtag>::instance()->global_control_word(atomics::memory_order_relaxed),
+                atomics::memory_order_relaxed );
+            atomics::atomic_thread_fence( atomics::memory_order_acquire );
             //CDS_COMPILER_RW_BARRIER;
         }
         else {
-            pRec->m_nAccessControl.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+            pRec->m_nAccessControl.fetch_add( 1, atomics::memory_order_relaxed );
         }
     }
 
@@ -56,7 +56,7 @@ namespace cds { namespace urcu { namespace details {
         assert( pRec != nullptr );
 
         //CDS_COMPILER_RW_BARRIER;
-        pRec->m_nAccessControl.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
+        pRec->m_nAccessControl.fetch_sub( 1, atomics::memory_order_release );
     }
 
     template <typename RCUtag>
@@ -65,7 +65,7 @@ namespace cds { namespace urcu { namespace details {
         thread_record * pRec = get_thread_record();
         assert( pRec != nullptr );
 
-        return (pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
+        return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
     }
 
 
@@ -73,9 +73,9 @@ namespace cds { namespace urcu { namespace details {
     template <typename RCUtag>
     inline bool gp_singleton<RCUtag>::check_grace_period( typename gp_singleton<RCUtag>::thread_record * pRec ) const
     {
-        uint32_t const v = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed );
+        uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
         return (v & general_purpose_rcu::c_nNestMask)
-            && ((( v ^ m_nGlobalControl.load( CDS_ATOMIC::memory_order_relaxed )) & ~general_purpose_rcu::c_nNestMask ));
+            && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~general_purpose_rcu::c_nNestMask ));
     }
 
     template <typename RCUtag>
@@ -83,10 +83,10 @@ namespace cds { namespace urcu { namespace details {
     inline void gp_singleton<RCUtag>::flip_and_wait( Backoff& bkoff )
     {
         OS::ThreadId const nullThreadId = OS::c_NullThreadId;
-        m_nGlobalControl.fetch_xor( general_purpose_rcu::c_nControlBit, CDS_ATOMIC::memory_order_seq_cst );
+        m_nGlobalControl.fetch_xor( general_purpose_rcu::c_nControlBit, atomics::memory_order_seq_cst );
 
-        for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
-            while ( pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire) != nullThreadId && check_grace_period( pRec ) ) {
+        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
+            while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire) != nullThreadId && check_grace_period( pRec ) ) {
                 bkoff();
                 CDS_COMPILER_RW_BARRIER;
             }
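
The reader-side pair above deserves a restatement: the low bits of the per-thread word count critical-section nesting, and on the outermost lock the thread copies the global control word so the writer can tell in which grace-period phase the reader entered. A condensed sketch under those assumptions (function and constant names are illustrative; thread_local stands in for the per-thread record, and the bit layout is inferred from the c_nNestMask usage):

    #include <atomic>
    #include <cstdint>

    const uint32_t c_nControlBit = 1u << 31;          // grace-period phase bit
    const uint32_t c_nNestMask   = c_nControlBit - 1; // nesting-depth bits

    std::atomic<uint32_t> g_nGlobalControl{ 1 };      // phase 0, base count 1
    thread_local std::atomic<uint32_t> t_nAccessControl{ 0 };

    void reader_lock()
    {
        uint32_t tmp = t_nAccessControl.load( std::memory_order_relaxed );
        if ( (tmp & c_nNestMask) == 0 ) {
            // Outermost lock: snapshot the global word, then fence so the
            // critical section cannot float above the snapshot.
            t_nAccessControl.store(
                g_nGlobalControl.load( std::memory_order_relaxed ),
                std::memory_order_relaxed );
            std::atomic_thread_fence( std::memory_order_acquire );
        }
        else
            t_nAccessControl.fetch_add( 1, std::memory_order_relaxed );
    }

    void reader_unlock()
    {
        t_nAccessControl.fetch_sub( 1, std::memory_order_release );
    }
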
index f764e727b38951781f52b3a2326dc8114d2904a7..1c8705c9cad842bbb25b3330e7db48ea60784ce8 100644 (file)
@@ -15,7 +15,7 @@ namespace cds { namespace urcu { namespace details {
     // that is not so efficient
 #   define CDS_GPURCU_DECLARE_THREAD_DATA(tag_) \
     template <> struct thread_data<tag_> { \
-        CDS_ATOMIC::atomic<uint32_t>        m_nAccessControl ; \
+        atomics::atomic<uint32_t>           m_nAccessControl ; \
         thread_list_record< thread_data >   m_list ; \
         thread_data(): m_nAccessControl(0) {} \
         ~thread_data() {} \
@@ -101,7 +101,7 @@ namespace cds { namespace urcu { namespace details {
         typedef gp_singleton_instance< rcu_tag >    rcu_instance;
 
     protected:
-        CDS_ATOMIC::atomic<uint32_t>    m_nGlobalControl;
+        atomics::atomic<uint32_t>       m_nGlobalControl;
         thread_list< rcu_tag >          m_ThreadList;
 
     protected:
@@ -137,7 +137,7 @@ namespace cds { namespace urcu { namespace details {
             m_ThreadList.retire( pRec );
         }
 
-        uint32_t global_control_word( CDS_ATOMIC::memory_order mo ) const
+        uint32_t global_control_word( atomics::memory_order mo ) const
         {
             return m_nGlobalControl.load( mo );
         }
@@ -163,7 +163,7 @@ namespace cds { namespace urcu { namespace details {
         static rcu_singleton * instance() { assert( rcu_instance::s_pRCU ); return static_cast<rcu_singleton *>( rcu_instance::s_pRCU ); } \
         static thread_record * attach_thread() { return instance()->attach_thread() ; } \
         static void detach_thread( thread_record * pRec ) { return instance()->detach_thread( pRec ) ; } \
-        static uint32_t global_control_word( CDS_ATOMIC::memory_order mo ) { return instance()->global_control_word( mo ) ; } \
+        static uint32_t global_control_word( atomics::memory_order mo ) { return instance()->global_control_word( mo ) ; } \
     }
 
     CDS_GP_RCU_DECLARE_SINGLETON( general_instant_tag  );
index e75ada611616583658509f02dca9869f26c37695..c822268df02cd9db3eecbd3df9f08656d32b7b2c 100644 (file)
@@ -70,7 +70,7 @@ namespace cds { namespace urcu {
     protected:
         //@cond
         buffer_type                     m_Buffer;
-        CDS_ATOMIC::atomic<uint64_t>    m_nCurEpoch;
+        atomics::atomic<uint64_t>       m_nCurEpoch;
         lock_type                       m_Lock;
         size_t const                    m_nCapacity;
         //@endcond
@@ -166,7 +166,7 @@ namespace cds { namespace urcu {
         virtual void retire_ptr( retired_ptr& p )
         {
             if ( p.m_p ) {
-                epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ));
+                epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_relaxed ));
                 push_buffer( ep );
             }
         }
@@ -175,7 +175,7 @@ namespace cds { namespace urcu {
         template <typename ForwardIterator>
         void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
         {
-            uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed );
+            uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
             while ( itFirst != itLast ) {
                 epoch_retired_ptr ep( *itFirst, nEpoch );
                 ++itFirst;
@@ -186,7 +186,7 @@ namespace cds { namespace urcu {
         /// Wait to finish a grace period and then clear the buffer
         void synchronize()
         {
-            epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ));
+            epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( atomics::memory_order_relaxed ));
             synchronize( ep );
         }
 
@@ -194,17 +194,17 @@ namespace cds { namespace urcu {
         bool synchronize( epoch_retired_ptr& ep )
         {
             uint64_t nEpoch;
-            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
+            atomics::atomic_thread_fence( atomics::memory_order_acquire );
             {
                 cds::lock::scoped_lock<lock_type> sl( m_Lock );
                 if ( ep.m_p && m_Buffer.push( ep ) )
                     return false;
-                nEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+                nEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_relaxed );
                 flip_and_wait();
                 flip_and_wait();
             }
             clear_buffer( nEpoch );
-            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+            atomics::atomic_thread_fence( atomics::memory_order_release );
             return true;
         }
         //@endcond
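
The buffered flavor above amortizes grace periods: retire_ptr() merely stamps the pointer with the current epoch and queues it, and only a full buffer or an explicit synchronize() pays for the two flip_and_wait() passes. A condensed sketch of the idea, with a mutex-guarded std::deque standing in for the lock-free buffer and the grace-period wait stubbed out (all names assumed, not libcds API):

    #include <atomic>
    #include <cstdint>
    #include <deque>
    #include <mutex>

    struct retired {
        void *   p;
        void   (*free_fn)( void * );
        uint64_t epoch;
    };

    class buffered_rcu {
        std::atomic<uint64_t> m_nCurEpoch{ 0 };
        std::mutex            m_Lock;
        std::deque<retired>   m_Buffer;

        void wait_for_grace_period()
        {
            // Stand-in for the real writer-side protocol:
            // flip_and_wait(); flip_and_wait();
        }

    public:
        void retire_ptr( void * p, void (*free_fn)( void * ))
        {
            std::lock_guard<std::mutex> g( m_Lock );
            m_Buffer.push_back(
                { p, free_fn, m_nCurEpoch.load( std::memory_order_relaxed ) } );
        }

        void synchronize()
        {
            std::lock_guard<std::mutex> g( m_Lock );
            uint64_t nEpoch = m_nCurEpoch.fetch_add( 1, std::memory_order_relaxed );
            wait_for_grace_period();
            // Everything retired in epochs <= nEpoch is now unreachable.
            while ( !m_Buffer.empty() && m_Buffer.front().epoch <= nEpoch ) {
                retired r = m_Buffer.front();
                m_Buffer.pop_front();
                r.free_fn( r.p );
            }
        }
    };
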
index 0f0de150b8876c5b65925c2100c923e363b31af5..f4eee83eea2256619b77a0a802ba60cd96ad833b 100644 (file)
@@ -134,13 +134,13 @@ namespace cds { namespace urcu {
         /// Waits to finish a grace period
         void synchronize()
         {
-            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
+            atomics::atomic_thread_fence( atomics::memory_order_acquire );
             {
                 cds::lock::scoped_lock<lock_type> sl( m_Lock );
                 flip_and_wait();
                 flip_and_wait();
             }
-            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+            atomics::atomic_thread_fence( atomics::memory_order_release );
         }
 
         //@cond
index 2fb8e114198bafc0ecad420ffc50117b1a5a0ac4..31e03c212ef6a3a0095b6081be8feb35bbe0526f 100644 (file)
@@ -77,7 +77,7 @@ namespace cds { namespace urcu {
     protected:
         //@cond
         buffer_type                     m_Buffer;
-        CDS_ATOMIC::atomic<uint64_t>    m_nCurEpoch;
+        atomics::atomic<uint64_t>       m_nCurEpoch;
         lock_type                       m_Lock;
         size_t const                    m_nCapacity;
         disposer_thread                 m_DisposerThread;
@@ -152,7 +152,7 @@ namespace cds { namespace urcu {
                 if ( bDetachAll )
                     pThis->m_ThreadList.detach_all();
 
-                pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ));
+                pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( atomics::memory_order_acquire ));
 
                 delete pThis;
                 singleton_ptr::s_pRCU = nullptr;
@@ -170,7 +170,7 @@ namespace cds { namespace urcu {
         virtual void retire_ptr( retired_ptr& p )
         {
             if ( p.m_p ) {
-                epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ) );
+                epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_acquire ) );
                 push_buffer( ep );
             }
         }
@@ -179,7 +179,7 @@ namespace cds { namespace urcu {
         template <typename ForwardIterator>
         void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
         {
-            uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed );
+            uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
             while ( itFirst != itLast ) {
                 epoch_retired_ptr p( *itFirst, nEpoch );
                 ++itFirst;
@@ -196,9 +196,9 @@ namespace cds { namespace urcu {
         //@cond
         void synchronize( bool bSync )
         {
-            uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_release );
+            uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_release );
 
-            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
+            atomics::atomic_thread_fence( atomics::memory_order_acquire );
             {
                 cds::lock::scoped_lock<lock_type> sl( m_Lock );
                 flip_and_wait();
@@ -206,7 +206,7 @@ namespace cds { namespace urcu {
 
                 m_DisposerThread.dispose( m_Buffer, nPrevEpoch, bSync );
             }
-            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+            atomics::atomic_thread_fence( atomics::memory_order_release );
         }
         void force_dispose()
         {
index 6ef1fada12e9f6de3a1ca98dc27f038090db074d..e4d0aed2f7a07ce4a8b3a24dc12396bfe2f247d5 100644 (file)
@@ -40,15 +40,15 @@ namespace cds { namespace urcu { namespace details {
         thread_record * pRec = get_thread_record();
         assert( pRec != nullptr );
 
-        uint32_t tmp = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed );
+        uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
         if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
             pRec->m_nAccessControl.store(
-                sh_singleton<RCUtag>::instance()->global_control_word(CDS_ATOMIC::memory_order_acquire),
-                CDS_ATOMIC::memory_order_release
+                sh_singleton<RCUtag>::instance()->global_control_word(atomics::memory_order_acquire),
+                atomics::memory_order_release
             );
         }
         else {
-            pRec->m_nAccessControl.fetch_add( 1, CDS_ATOMIC::memory_order_release );
+            pRec->m_nAccessControl.fetch_add( 1, atomics::memory_order_release );
         }
         CDS_COMPILER_RW_BARRIER;
     }
@@ -60,7 +60,7 @@ namespace cds { namespace urcu { namespace details {
         assert( pRec != nullptr);
 
         CDS_COMPILER_RW_BARRIER;
-        pRec->m_nAccessControl.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
+        pRec->m_nAccessControl.fetch_sub( 1, atomics::memory_order_release );
     }
 
     template <typename RCUtag>
@@ -69,7 +69,7 @@ namespace cds { namespace urcu { namespace details {
         thread_record * pRec = get_thread_record();
         assert( pRec != nullptr);
 
-        return (pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
+        return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
     }
 
 
@@ -99,9 +99,9 @@ namespace cds { namespace urcu { namespace details {
     {
         thread_record * pRec = cds::threading::getRCU<RCUtag>();
         if ( pRec ) {
-            CDS_ATOMIC::atomic_signal_fence( CDS_ATOMIC::memory_order_acquire );
-            pRec->m_bNeedMemBar.store( false, CDS_ATOMIC::memory_order_relaxed );
-            CDS_ATOMIC::atomic_signal_fence( CDS_ATOMIC::memory_order_release );
+            atomics::atomic_signal_fence( atomics::memory_order_acquire );
+            pRec->m_bNeedMemBar.store( false, atomics::memory_order_relaxed );
+            atomics::atomic_signal_fence( atomics::memory_order_release );
         }
     }
 
@@ -118,21 +118,21 @@ namespace cds { namespace urcu { namespace details {
         OS::ThreadId const nullThreadId = OS::c_NullThreadId;
 
         // Send "need membar" signal to all RCU threads
-        for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
-            OS::ThreadId tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire);
+        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
+            OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire);
             if ( tid != nullThreadId ) {
-                pRec->m_bNeedMemBar.store( true, CDS_ATOMIC::memory_order_release );
+                pRec->m_bNeedMemBar.store( true, atomics::memory_order_release );
                 raise_signal( tid );
             }
         }
 
         // Wait while all RCU threads process the signal
-        for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
-            OS::ThreadId tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire);
+        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
+            OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire);
             if ( tid != nullThreadId ) {
                 bkOff.reset();
-                while ( (tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire )) != nullThreadId
-                     && pRec->m_bNeedMemBar.load( CDS_ATOMIC::memory_order_acquire ))
+                while ( (tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire )) != nullThreadId
+                     && pRec->m_bNeedMemBar.load( atomics::memory_order_acquire ))
                 {
                    // Some OS versions can lose signals,
                    // so we resend the signal
@@ -146,9 +146,9 @@ namespace cds { namespace urcu { namespace details {
     template <typename RCUtag>
     bool sh_singleton<RCUtag>::check_grace_period( thread_record * pRec ) const
     {
-        uint32_t const v = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_acquire );
+        uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_acquire );
         return (v & signal_handling_rcu::c_nNestMask)
-            && ((( v ^ m_nGlobalControl.load( CDS_ATOMIC::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask ));
+            && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask ));
     }
 
     template <typename RCUtag>
@@ -157,8 +157,8 @@ namespace cds { namespace urcu { namespace details {
     {
         OS::ThreadId const nullThreadId = OS::c_NullThreadId;
 
-        for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
-            while ( pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire) != nullThreadId && check_grace_period( pRec ))
+        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
+            while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire) != nullThreadId && check_grace_period( pRec ))
                 bkOff();
         }
     }
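
The "need membar" broadcast above is the core trick of signal-handling RCU: readers never execute a heavy barrier themselves; the writer forces one by signalling each reader and spinning until the per-thread m_bNeedMemBar flag clears. A minimal sketch assuming POSIX signals and a single reader thread (the real code keeps one flag per thread record, backs off while polling, and resends lost signals):

    #include <atomic>
    #include <csignal>
    #include <pthread.h>

    std::atomic<bool> g_bNeedMemBar{ false };

    extern "C" void membar_handler( int /*sig*/ )
    {
        // The signal fences order the handler against the interrupted code.
        std::atomic_signal_fence( std::memory_order_acquire );
        g_bNeedMemBar.store( false, std::memory_order_relaxed );
        std::atomic_signal_fence( std::memory_order_release );
    }

    void install_handler()
    {
        struct sigaction sa = {};
        sa.sa_handler = membar_handler;
        sigaction( SIGUSR1, &sa, nullptr );
    }

    // Writer side: ask thread tid to execute a barrier, then wait for the ack.
    void force_membar( pthread_t tid )
    {
        g_bNeedMemBar.store( true, std::memory_order_release );
        pthread_kill( tid, SIGUSR1 );
        while ( g_bNeedMemBar.load( std::memory_order_acquire ) )
            ;
    }
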
index 9fc21c7a6ecbc4b60d82baaf88c592a3d367a31f..55e649ef1ec701b4ecc75e84abae3d4fd270cd1f 100644 (file)
@@ -19,8 +19,8 @@ namespace cds { namespace urcu { namespace details {
     // that is not so efficient
 #   define CDS_SHURCU_DECLARE_THREAD_DATA(tag_) \
     template <> struct thread_data<tag_> { \
-        CDS_ATOMIC::atomic<uint32_t>        m_nAccessControl ; \
-        CDS_ATOMIC::atomic<bool>            m_bNeedMemBar    ; \
+        atomics::atomic<uint32_t>           m_nAccessControl ; \
+        atomics::atomic<bool>               m_bNeedMemBar    ; \
         thread_list_record< thread_data >   m_list ; \
         thread_data(): m_nAccessControl(0), m_bNeedMemBar(false) {} \
         ~thread_data() {} \
@@ -103,7 +103,7 @@ namespace cds { namespace urcu { namespace details {
         typedef sh_singleton_instance< rcu_tag >    rcu_instance;
 
     protected:
-        CDS_ATOMIC::atomic<uint32_t>    m_nGlobalControl;
+        atomics::atomic<uint32_t>       m_nGlobalControl;
         thread_list< rcu_tag >          m_ThreadList;
         int const                       m_nSigNo;
 
@@ -150,7 +150,7 @@ namespace cds { namespace urcu { namespace details {
             m_ThreadList.retire( pRec );
         }
 
-        uint32_t global_control_word( CDS_ATOMIC::memory_order mo ) const
+        uint32_t global_control_word( atomics::memory_order mo ) const
         {
             return m_nGlobalControl.load( mo );
         }
@@ -166,7 +166,7 @@ namespace cds { namespace urcu { namespace details {
 
         void switch_next_epoch()
         {
-            m_nGlobalControl.fetch_xor( rcu_tag::c_nControlBit, CDS_ATOMIC::memory_order_seq_cst );
+            m_nGlobalControl.fetch_xor( rcu_tag::c_nControlBit, atomics::memory_order_seq_cst );
         }
         bool check_grace_period( thread_record * pRec ) const;
 
@@ -188,7 +188,7 @@ namespace cds { namespace urcu { namespace details {
         static rcu_singleton * instance() { assert( rcu_instance::s_pRCU ); return static_cast<rcu_singleton *>( rcu_instance::s_pRCU ); } \
         static thread_record * attach_thread() { return instance()->attach_thread() ; } \
         static void detach_thread( thread_record * pRec ) { return instance()->detach_thread( pRec ) ; } \
-        static uint32_t global_control_word( CDS_ATOMIC::memory_order mo ) { return instance()->global_control_word( mo ) ; } \
+        static uint32_t global_control_word( atomics::memory_order mo ) { return instance()->global_control_word( mo ) ; } \
     }
 
     CDS_SIGRCU_DECLARE_SINGLETON( signal_buffered_tag  );
index 4f90037b233dd2fd0ce180a1cfb217eaa1c267a2..69530feb4016d702db4022d1e723abe7dbc47267 100644 (file)
@@ -72,7 +72,7 @@ namespace cds { namespace urcu {
     protected:
         //@cond
         buffer_type                     m_Buffer;
-        CDS_ATOMIC::atomic<uint64_t>    m_nCurEpoch;
+        atomics::atomic<uint64_t>       m_nCurEpoch;
         lock_type                       m_Lock;
         size_t const                    m_nCapacity;
         //@endcond
@@ -164,7 +164,7 @@ namespace cds { namespace urcu {
         virtual void retire_ptr( retired_ptr& p )
         {
             if ( p.m_p ) {
-                epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ));
+                epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_relaxed ));
                 push_buffer( ep );
             }
         }
@@ -173,7 +173,7 @@ namespace cds { namespace urcu {
         template <typename ForwardIterator>
         void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
         {
-            uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed );
+            uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
             while ( itFirst != itLast ) {
                 epoch_retired_ptr ep( *itFirst, nEpoch );
                 ++itFirst;
@@ -184,7 +184,7 @@ namespace cds { namespace urcu {
         /// Wait to finish a grace period and then clear the buffer
         void synchronize()
         {
-            epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ));
+            epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( atomics::memory_order_relaxed ));
             synchronize( ep );
         }
 
@@ -192,12 +192,12 @@ namespace cds { namespace urcu {
         bool synchronize( epoch_retired_ptr& ep )
         {
             uint64_t nEpoch;
-            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
+            atomics::atomic_thread_fence( atomics::memory_order_acquire );
             {
                 cds::lock::scoped_lock<lock_type> sl( m_Lock );
                 if ( ep.m_p && m_Buffer.push( ep ) && m_Buffer.size() < capacity())
                     return false;
-                nEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+                nEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_relaxed );
 
                 back_off bkOff;
                 base_class::force_membar_all_threads( bkOff );
index dddcf595183f2456a4647e1333b44579bb7cc01a..ca3795e8f2b999a62e21715ea35bc7c25f5ef776 100644 (file)
@@ -79,7 +79,7 @@ namespace cds { namespace urcu {
     protected:
         //@cond
         buffer_type                     m_Buffer;
-        CDS_ATOMIC::atomic<uint64_t>    m_nCurEpoch;
+        atomics::atomic<uint64_t>       m_nCurEpoch;
         lock_type                       m_Lock;
         size_t const                    m_nCapacity;
         disposer_thread                 m_DisposerThread;
@@ -151,7 +151,7 @@ namespace cds { namespace urcu {
                 if ( bDetachAll )
                     pThis->m_ThreadList.detach_all();
 
-                pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ));
+                pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( atomics::memory_order_acquire ));
 
                 delete pThis;
                 singleton_ptr::s_pRCU = nullptr;
@@ -169,7 +169,7 @@ namespace cds { namespace urcu {
         virtual void retire_ptr( retired_ptr& p )
         {
             if ( p.m_p ) {
-                epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ) );
+                epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_acquire ) );
                 push_buffer( ep );
             }
         }
@@ -178,7 +178,7 @@ namespace cds { namespace urcu {
         template <typename ForwardIterator>
         void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
         {
-            uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed );
+            uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
             while ( itFirst != itLast ) {
                 epoch_retired_ptr p( *itFirst, nEpoch );
                 ++itFirst;
@@ -195,9 +195,9 @@ namespace cds { namespace urcu {
         //@cond
         void synchronize( bool bSync )
         {
-            uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_release );
+            uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_release );
 
-            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
+            atomics::atomic_thread_fence( atomics::memory_order_acquire );
             {
                 cds::lock::scoped_lock<lock_type> sl( m_Lock );
 
index 12a37df6844419cf62982da870e78355a4a73da3..f6f7877e54d4414e5a4983c9e482d775e9af87cd 100644 (file)
@@ -36,9 +36,9 @@ namespace cds { namespace gc {
 
         GarbageCollector::~GarbageCollector()
         {
-            thread_list_node * pNode = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
+            thread_list_node * pNode = m_pListHead.load( atomics::memory_order_relaxed );
             while ( pNode ) {
-                assert( pNode->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == cds::OS::c_NullThreadId );
+                assert( pNode->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::c_NullThreadId );
                 clearHRCThreadDesc( pNode );
                 thread_list_node * pNext = pNode->m_pNext;
                 deleteHRCThreadDesc( pNode );
@@ -103,10 +103,10 @@ namespace cds { namespace gc {
             assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
             ContainerNode * pItem;
             for ( size_t n = 0; n < pNode->m_arrRetired.capacity(); ++n ) {
-                if ( (pItem = pNode->m_arrRetired[n].m_pNode.load( CDS_ATOMIC::memory_order_relaxed )) != nullptr ) {
+                if ( (pItem = pNode->m_arrRetired[n].m_pNode.load( atomics::memory_order_relaxed )) != nullptr ) {
                     pNode->m_arrRetired[n].m_funcFree( pItem );
                     //pItem->destroy();
-                    pNode->m_arrRetired[n].m_pNode.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+                    pNode->m_arrRetired[n].m_pNode.store( nullptr, atomics::memory_order_relaxed );
                 }
             }
             assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
@@ -117,8 +117,8 @@ namespace cds { namespace gc {
             thread_list_node * hprec;
             const cds::OS::ThreadId curThreadId  = cds::OS::getCurrentThreadId();
 
-            for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
-                if ( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_acquire ) == curThreadId ) {
+            for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
+                if ( hprec->m_idOwner.load( atomics::memory_order_acquire ) == curThreadId ) {
                     assert( !hprec->m_bFree );
                     return hprec;
                 }
@@ -135,9 +135,9 @@ namespace cds { namespace gc {
             const cds::OS::ThreadId curThreadId  = cds::OS::getCurrentThreadId();
 
             // First try to reuse a retired (non-active) HP record
-            for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
+            for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
                 cds::OS::ThreadId expectedThreadId = nullThreadId;
-                if ( !hprec->m_idOwner.compare_exchange_strong( expectedThreadId, curThreadId, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ) )
+                if ( !hprec->m_idOwner.compare_exchange_strong( expectedThreadId, curThreadId, atomics::memory_order_acq_rel, atomics::memory_order_relaxed ) )
                     continue;
                 hprec->m_pOwner = pThreadGC;
                 hprec->m_bFree = false;
@@ -149,15 +149,15 @@ namespace cds { namespace gc {
             // Allocate and push a new HP record
             hprec = newHRCThreadDesc();
             assert( hprec->m_hzp.size() == hprec->m_hzp.capacity() );
-            hprec->m_idOwner.store( curThreadId, CDS_ATOMIC::memory_order_relaxed );
+            hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
             hprec->m_pOwner = pThreadGC;
             hprec->m_bFree = false;
             thread_list_node * pOldHead;
 
-            pOldHead = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
+            pOldHead = m_pListHead.load( atomics::memory_order_relaxed );
             do {
                 hprec->m_pNext = pOldHead;
-            } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+            } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
 
             assert( hprec->m_hzp.size() == hprec->m_hzp.capacity() );
             return hprec;
@@ -176,9 +176,9 @@ namespace cds { namespace gc {
                 if the destruction of thread object is called by the destructor
                 after thread termination
             */
-            assert( pNode->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) != cds::OS::c_NullThreadId );
+            assert( pNode->m_idOwner.load( atomics::memory_order_relaxed ) != cds::OS::c_NullThreadId );
             pNode->m_pOwner = nullptr;
-            pNode->m_idOwner.store( cds::OS::c_NullThreadId, CDS_ATOMIC::memory_order_release );
+            pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
             assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
         }
 
@@ -189,19 +189,19 @@ namespace cds { namespace gc {
             typedef std::vector< ContainerNode * > hazard_ptr_list;
 
             details::thread_descriptor * pRec = pThreadGC->m_pDesc;
-            assert( static_cast< thread_list_node *>( pRec )->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
+            assert( static_cast< thread_list_node *>( pRec )->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
 
             // Step 1: mark all pRec->m_arrRetired items as "traced"
             {
                 details::retired_vector::const_iterator itEnd = pRec->m_arrRetired.end();
 
                 for ( details::retired_vector::const_iterator it = pRec->m_arrRetired.begin() ; it != itEnd; ++it ) {
-                    ContainerNode * pNode = it->m_pNode.load( CDS_ATOMIC::memory_order_acquire );
+                    ContainerNode * pNode = it->m_pNode.load( atomics::memory_order_acquire );
                     if ( pNode ) {
                         if ( pNode->m_RC.value() == 0 ) {
-                            pNode->m_bTrace.store( true, CDS_ATOMIC::memory_order_release );
+                            pNode->m_bTrace.store( true, atomics::memory_order_release );
                             if ( pNode->m_RC.value() != 0 )
-                                pNode->m_bTrace.store( false, CDS_ATOMIC::memory_order_release );
+                                pNode->m_bTrace.store( false, atomics::memory_order_release );
                         }
                     }
                 }
@@ -214,7 +214,7 @@ namespace cds { namespace gc {
 
             // Step 2: Scan HP list and insert non-null values to plist
             {
-                thread_list_node * pNode = m_pListHead.load( CDS_ATOMIC::memory_order_acquire );
+                thread_list_node * pNode = m_pListHead.load( atomics::memory_order_acquire );
 
                 while ( pNode ) {
                     for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
@@ -241,17 +241,17 @@ namespace cds { namespace gc {
 
                 for ( size_t nRetired = 0; it != itEnd; ++nRetired, ++it ) {
                     details::retired_node& node = *it;
-                    ContainerNode * pNode = node.m_pNode.load(CDS_ATOMIC::memory_order_acquire);
+                    ContainerNode * pNode = node.m_pNode.load(atomics::memory_order_acquire);
                     if ( !pNode )
                         continue;
 
-                    if ( pNode->m_RC.value() == 0 && pNode->m_bTrace.load(CDS_ATOMIC::memory_order_acquire) && !std::binary_search( itHPBegin, itHPEnd, pNode ) ) {
+                    if ( pNode->m_RC.value() == 0 && pNode->m_bTrace.load(atomics::memory_order_acquire) && !std::binary_search( itHPBegin, itHPEnd, pNode ) ) {
                         // pNode may be destructed safely
 
-                        node.m_bDone.store( true, CDS_ATOMIC::memory_order_release );
-                        if ( node.m_nClaim.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) {
+                        node.m_bDone.store( true, atomics::memory_order_release );
+                        if ( node.m_nClaim.load( atomics::memory_order_acquire ) == 0 ) {
                             pNode->terminate( pThreadGC, false );
-                            pNode->clean( CDS_ATOMIC::memory_order_relaxed );
+                            pNode->clean( atomics::memory_order_relaxed );
                             node.m_funcFree( pNode );
 
                             arr.pop( nRetired );
@@ -260,7 +260,7 @@ namespace cds { namespace gc {
                         }
 
                         pNode->terminate( pThreadGC, true );
-                        //node.m_bDone.store( true, CDS_ATOMIC::memory_order_release );
+                        //node.m_bDone.store( true, atomics::memory_order_release );
                         CDS_HRC_STATISTIC( ++m_Stat.m_ScanClaimGuarded );
                     }
                     else {
@@ -280,11 +280,11 @@ namespace cds { namespace gc {
             const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
             const cds::OS::ThreadId curThreadId  = cds::OS::getCurrentThreadId();
 
-            for ( thread_list_node * pRec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_pNext )
+            for ( thread_list_node * pRec = m_pListHead.load(atomics::memory_order_acquire); pRec; pRec = pRec->m_pNext )
             {
                 // If threadDesc is free then own it
                 cds::OS::ThreadId expectedThreadId = nullThreadId;
-                if ( !pRec->m_idOwner.compare_exchange_strong(expectedThreadId, curThreadId, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed) )
+                if ( !pRec->m_idOwner.compare_exchange_strong(expectedThreadId, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed) )
                 {
                     continue;
                 }
@@ -303,10 +303,10 @@ namespace cds { namespace gc {
                     details::retired_vector::iterator it = src.begin();
 
                     for ( size_t nRetired = 0; it != itEnd; ++nRetired, ++it ) {
-                        if ( it->m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr )
+                        if ( it->m_pNode.load( atomics::memory_order_relaxed ) == nullptr )
                             continue;
 
-                        dest.push( it->m_pNode.load(CDS_ATOMIC::memory_order_relaxed), it->m_funcFree );
+                        dest.push( it->m_pNode.load(atomics::memory_order_relaxed), it->m_funcFree );
                         src.pop( nRetired );
 
                         while ( dest.isFull() ) {
@@ -321,7 +321,7 @@ namespace cds { namespace gc {
                     }
                     pRec->m_bFree = true;
                 }
-                pRec->m_idOwner.store( nullThreadId, CDS_ATOMIC::memory_order_release );
+                pRec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
             }
         }
 
@@ -330,19 +330,19 @@ namespace cds { namespace gc {
             CDS_HRC_STATISTIC( ++m_Stat.m_CleanUpAllCalls );
 
             //const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
-            thread_list_node * pThread = m_pListHead.load(CDS_ATOMIC::memory_order_acquire);
+            thread_list_node * pThread = m_pListHead.load(atomics::memory_order_acquire);
             while ( pThread ) {
                 for ( size_t i = 0; i < pThread->m_arrRetired.capacity(); ++i ) {
                     details::retired_node& rRetiredNode = pThread->m_arrRetired[i];
-                    ContainerNode * pNode = rRetiredNode.m_pNode.load(CDS_ATOMIC::memory_order_acquire);
-                    if ( pNode && !rRetiredNode.m_bDone.load(CDS_ATOMIC::memory_order_acquire) ) {
-                        rRetiredNode.m_nClaim.fetch_add( 1, CDS_ATOMIC::memory_order_release );
-                        if ( !rRetiredNode.m_bDone.load(CDS_ATOMIC::memory_order_acquire)
-                            && pNode == rRetiredNode.m_pNode.load(CDS_ATOMIC::memory_order_acquire) )
+                    ContainerNode * pNode = rRetiredNode.m_pNode.load(atomics::memory_order_acquire);
+                    if ( pNode && !rRetiredNode.m_bDone.load(atomics::memory_order_acquire) ) {
+                        rRetiredNode.m_nClaim.fetch_add( 1, atomics::memory_order_release );
+                        if ( !rRetiredNode.m_bDone.load(atomics::memory_order_acquire)
+                            && pNode == rRetiredNode.m_pNode.load(atomics::memory_order_acquire) )
                         {
                             pNode->cleanUp( pThis );
                         }
-                        rRetiredNode.m_nClaim.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
+                        rRetiredNode.m_nClaim.fetch_sub( 1, atomics::memory_order_release );
                     }
                 }
                 pThread = pThread->m_pNext;
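
CleanUpAll and Scan cooperate through m_nClaim, a tiny reader count on a retired node: a helper increments it before calling cleanUp and decrements it afterwards, while Scan frees a node only when m_bDone is set and the claim count is zero. A condensed std::atomic sketch of that handshake (all names hypothetical):

    #include <atomic>

    struct retired_sketch {
        std::atomic<bool>     bDone { false };  // node processed, must no longer be touched
        std::atomic<unsigned> nClaim { 0 };     // helpers currently inspecting the node
    };

    void helper_side( retired_sketch& r )       // the CleanUpAll role
    {
        if ( !r.bDone.load( std::memory_order_acquire )) {
            r.nClaim.fetch_add( 1, std::memory_order_release );
            if ( !r.bDone.load( std::memory_order_acquire )) {
                // ... run cleanUp() on the node while the claim is held ...
            }
            r.nClaim.fetch_sub( 1, std::memory_order_release );
        }
    }

    bool reclaimer_side( retired_sketch& r )    // the Scan role: true => safe to free
    {
        r.bDone.store( true, std::memory_order_release );
        return r.nClaim.load( std::memory_order_acquire ) == 0;
    }
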
@@ -363,7 +363,7 @@ namespace cds { namespace gc {
                 stat.nRetiredPtrInFreeHRCRecs = 0;
 
             // Walk through HRC records
-            for ( thread_list_node *hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = hprec->m_pNext ) {
+            for ( thread_list_node *hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNext ) {
                 ++stat.nHRCRecAllocated;
                 size_t nRetiredNodeCount = hprec->m_arrRetired.retiredNodeCount();
                 if ( hprec->m_bFree ) {
index 5f38bafcd3831f93393f4a5334e6bfdc431cfaa8..c61315646cbbc623738fb56a872346fcaef2ecda 100644 (file)
@@ -61,14 +61,14 @@ namespace cds { namespace gc {
             CDS_DEBUG_DO( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
             CDS_DEBUG_DO( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;)
 
-            hplist_node * pHead = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
-            m_pListHead.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+            hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
+            m_pListHead.store( nullptr, atomics::memory_order_relaxed );
 
             hplist_node * pNext = nullptr;
             for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
-                assert( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == nullThreadId
-                    || hprec->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == mainThreadId
-                    || !cds::OS::isThreadAlive( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) )
+                assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
+                    || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
+                    || !cds::OS::isThreadAlive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
                 );
                 details::retired_vector& vect = hprec->m_arrRetired;
                 details::retired_vector::iterator itRetired = vect.begin();
@@ -79,7 +79,7 @@ namespace cds { namespace gc {
                 }
                 vect.clear();
                 pNext = hprec->m_pNextNode;
-                hprec->m_bFree.store( true, CDS_ATOMIC::memory_order_relaxed );
+                hprec->m_bFree.store( true, atomics::memory_order_relaxed );
                 DeleteHPRec( hprec );
             }
         }
@@ -112,26 +112,26 @@ namespace cds { namespace gc {
             const cds::OS::ThreadId curThreadId  = cds::OS::getCurrentThreadId();
 
             // First try to reuse a retired (non-active) HP record
-            for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
+            for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
                 cds::OS::ThreadId thId = nullThreadId;
-                if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) )
+                if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
                     continue;
-                hprec->m_bFree.store( false, CDS_ATOMIC::memory_order_release );
+                hprec->m_bFree.store( false, atomics::memory_order_release );
                 return hprec;
             }
 
             // No HP records available for reuse
             // Allocate and push a new HP record
             hprec = NewHPRec();
-            hprec->m_idOwner.store( curThreadId, CDS_ATOMIC::memory_order_relaxed );
-            hprec->m_bFree.store( false, CDS_ATOMIC::memory_order_relaxed );
+            hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
+            hprec->m_bFree.store( false, atomics::memory_order_relaxed );
 
-            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+            atomics::atomic_thread_fence( atomics::memory_order_release );
 
-            hplist_node * pOldHead = m_pListHead.load( CDS_ATOMIC::memory_order_acquire );
+            hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
             do {
                 hprec->m_pNextNode = pOldHead;
-            } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+            } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
 
             return hprec;
         }
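
The tail of AllocateHPRec is the classic lock-free list push: link the fresh record to the observed head, then publish it with compare_exchange_weak; on failure the CAS reloads the head into pOldHead and the loop re-links. Self-contained sketch (type name hypothetical):

    #include <atomic>

    struct hp_rec_sketch {
        hp_rec_sketch* pNextNode = nullptr;
    };

    std::atomic<hp_rec_sketch*> g_ListHead { nullptr };

    void push_rec( hp_rec_sketch* rec )
    {
        hp_rec_sketch* pOldHead = g_ListHead.load( std::memory_order_acquire );
        do {
            rec->pNextNode = pOldHead;          // link before publication
        } while ( !g_ListHead.compare_exchange_weak( pOldHead, rec,
                      std::memory_order_release, std::memory_order_relaxed ));
    }
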
@@ -144,16 +144,16 @@ namespace cds { namespace gc {
             pRec->clear();
             Scan( pRec );
             hplist_node * pNode = static_cast<hplist_node *>( pRec );
-            pNode->m_idOwner.store( cds::OS::c_NullThreadId, CDS_ATOMIC::memory_order_release );
+            pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
         }
 
         void GarbageCollector::detachAllThread()
         {
             hplist_node * pNext = nullptr;
             const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
-            for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = pNext ) {
+            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
                 pNext = hprec->m_pNextNode;
-                if ( hprec->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) != nullThreadId ) {
+                if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
                     RetireHPRec( hprec );
                 }
             }
@@ -169,7 +169,7 @@ namespace cds { namespace gc {
 
             // Stage 1: Scan HP list and insert non-null values into plist
 
-            hplist_node * pNode = m_pListHead.load(CDS_ATOMIC::memory_order_acquire);
+            hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
 
             while ( pNode ) {
                 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
@@ -230,7 +230,7 @@ namespace cds { namespace gc {
 
             // Search for guarded pointers in the retired array
 
-            hplist_node * pNode = m_pListHead.load(CDS_ATOMIC::memory_order_acquire);
+            hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
 
             while ( pNode ) {
                 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
@@ -269,27 +269,27 @@ namespace cds { namespace gc {
         {
             CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount );
 
-            assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
+            assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
 
             const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
             const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
-            for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
+            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
 
                 // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to scan it
-                if ( hprec->m_bFree.load(CDS_ATOMIC::memory_order_acquire) )
+                if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
                     continue;
 
                 // Own hprec if it is empty.
                 // Several threads may contend for it concurrently, so we rely on atomic operations only.
                 {
-                    cds::OS::ThreadId curOwner = hprec->m_idOwner.load(CDS_ATOMIC::memory_order_acquire);
+                    cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
                     if ( curOwner == nullThreadId || !cds::OS::isThreadAlive( curOwner )) {
-                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
                             continue;
                     }
                     else {
                         curOwner = nullThreadId;
-                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
                             continue;
                     }
                 }
@@ -311,8 +311,8 @@ namespace cds { namespace gc {
                 }
                 src.clear();
 
-                hprec->m_bFree.store(true, CDS_ATOMIC::memory_order_release);
-                hprec->m_idOwner.store( nullThreadId, CDS_ATOMIC::memory_order_release );
+                hprec->m_bFree.store(true, atomics::memory_order_release);
+                hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
             }
         }
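
HelpScan may drain a record only after winning the m_idOwner CAS, so two helpers can never privatize the same retired list; the record is handed back by storing the null thread id with release ordering. A condensed sketch of the adoption step (is_thread_alive is a stand-in for cds::OS::isThreadAlive, stubbed here):

    #include <atomic>
    #include <thread>

    // Stub for the sketch; the library consults the OS here.
    static bool is_thread_alive( std::thread::id ) { return false; }

    // Returns true if the calling thread now owns the record.
    bool try_adopt( std::atomic<std::thread::id>& idOwner, std::thread::id self )
    {
        std::thread::id cur = idOwner.load( std::memory_order_acquire );
        if ( cur != std::thread::id() && is_thread_alive( cur ))
            return false;                       // still owned by a live thread
        // CAS from the exact value observed: a null id or a dead owner's id.
        return idOwner.compare_exchange_strong( cur, self,
                   std::memory_order_release, std::memory_order_relaxed );
    }
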
 
@@ -329,11 +329,11 @@ namespace cds { namespace gc {
                 stat.nTotalRetiredPtrCount   =
                 stat.nRetiredPtrInFreeHPRecs = 0;
 
-            for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
+            for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
                 ++stat.nHPRecAllocated;
                 stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
 
-                if ( hprec->m_bFree.load(CDS_ATOMIC::memory_order_relaxed) ) {
+                if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
                     // Free HP record
                     stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
                 }
index 7168e6c52d624967c3f09af2c843ea335991f3fc..4ca8614ec5f37577a2d345d98607b5048cb2b5bb 100644 (file)
@@ -21,7 +21,7 @@
 
 namespace cds {
 
-    CDS_EXPORT_API CDS_ATOMIC::atomic<size_t> threading::ThreadData::s_nLastUsedProcNo(0);
+    CDS_EXPORT_API atomics::atomic<size_t> threading::ThreadData::s_nLastUsedProcNo(0);
     CDS_EXPORT_API size_t threading::ThreadData::s_nProcCount = 1;
 
 #if CDS_OS_INTERFACE == CDS_OSI_WINDOWS
@@ -45,17 +45,17 @@ namespace cds {
 #endif
 
     namespace details {
-        static CDS_ATOMIC::atomic<size_t> s_nInitCallCount(0);
+        static atomics::atomic<size_t> s_nInitCallCount(0);
 
         bool CDS_EXPORT_API init_first_call()
         {
-            return s_nInitCallCount.fetch_add(1, CDS_ATOMIC::memory_order_relaxed) == 0;
+            return s_nInitCallCount.fetch_add(1, atomics::memory_order_relaxed) == 0;
         }
 
         bool CDS_EXPORT_API fini_last_call()
         {
-            if ( s_nInitCallCount.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed ) == 1 ) {
-                CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+            if ( s_nInitCallCount.fetch_sub( 1, atomics::memory_order_relaxed ) == 1 ) {
+                atomics::atomic_thread_fence( atomics::memory_order_release );
                 return true;
             }
             return false;
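
init_first_call and fini_last_call form a plain atomic reference count around library setup: only the 0 -> 1 transition reports "first", only the 1 -> 0 transition reports "last", and the release fence in the last call orders the preceding teardown work. In miniature:

    #include <atomic>
    #include <cstddef>

    static std::atomic<size_t> s_nCallCount( 0 );

    bool first_call()   // true exactly once, for the caller that must initialize
    {
        return s_nCallCount.fetch_add( 1, std::memory_order_relaxed ) == 0;
    }

    bool last_call()    // true exactly once, for the caller that must finalize
    {
        if ( s_nCallCount.fetch_sub( 1, std::memory_order_relaxed ) == 1 ) {
            std::atomic_thread_fence( std::memory_order_release );
            return true;
        }
        return false;
    }
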
index bce912568210fb38d5084383040afb9d3bc52d14..2301649865be3513d4142280d783d58f68f2d8bb 100644 (file)
@@ -168,7 +168,7 @@ namespace cds { namespace gc { namespace ptb {
         details::retired_ptr_node * pHead = nullptr;
         details::retired_ptr_node * pTail = nullptr;
 
-        for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(CDS_ATOMIC::memory_order_relaxed)) {
+        for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_relaxed)) {
             details::guard_data::handoff_ptr h = pGuard->pHandOff;
             pGuard->pHandOff  = nullptr;
             while ( h ) {
@@ -192,7 +192,7 @@ namespace cds { namespace gc { namespace ptb {
         details::retired_ptr_buffer::privatize_result retiredList = m_RetiredBuffer.privatize();
         if ( retiredList.first ) {
 
-            size_t nLiberateThreshold = m_nLiberateThreshold.load(CDS_ATOMIC::memory_order_relaxed);
+            size_t nLiberateThreshold = m_nLiberateThreshold.load(atomics::memory_order_relaxed);
             details::liberate_set set( beans::ceil2( retiredList.second > nLiberateThreshold ? retiredList.second : nLiberateThreshold ) );
 
             // Get list of retired pointers
@@ -205,10 +205,10 @@ namespace cds { namespace gc { namespace ptb {
             }
 
             // Liberate cycle
-            for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(CDS_ATOMIC::memory_order_acquire) )
+            for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) )
             {
                 // get guarded pointer
-                details::guard_data::guarded_ptr  valGuarded = pGuard->pPost.load(CDS_ATOMIC::memory_order_acquire);
+                details::guard_data::guarded_ptr  valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
 
                 if ( valGuarded ) {
                     details::retired_ptr_node * pRetired = set.erase( valGuarded );
@@ -237,7 +237,7 @@ namespace cds { namespace gc { namespace ptb {
             }
             else {
                 // liberate cycle did not free any retired pointer - double the liberate threshold
-                m_nLiberateThreshold.compare_exchange_strong( nLiberateThreshold, nLiberateThreshold * 2, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+                m_nLiberateThreshold.compare_exchange_strong( nLiberateThreshold, nLiberateThreshold * 2, atomics::memory_order_release, atomics::memory_order_relaxed );
             }
         }
     }
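
When a liberate pass frees nothing, every retired pointer is still guarded and the pass was wasted work, so the threshold is doubled to batch more garbage before the next pass. Using a CAS rather than a plain store keeps two liberators that both observed the old threshold from doubling it twice. Sketch:

    #include <atomic>
    #include <cstddef>

    static std::atomic<size_t> s_nLiberateThreshold( 1024 );

    void on_fruitless_pass( size_t nObserved )  // nObserved: threshold read at pass start
    {
        // A no-op if another liberator already advanced the threshold.
        s_nLiberateThreshold.compare_exchange_strong( nObserved, nObserved * 2,
            std::memory_order_release, std::memory_order_relaxed );
    }
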
@@ -247,10 +247,10 @@ namespace cds { namespace gc { namespace ptb {
     {
         details::guard_data::handoff_ptr const nullHandOff = nullptr;
 
-        for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(CDS_ATOMIC::memory_order_acquire) )
+        for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) )
         {
             // get guarded pointer
-            details::guard_data::guarded_ptr  valGuarded = pGuard->pPost.load(CDS_ATOMIC::memory_order_acquire);
+            details::guard_data::guarded_ptr  valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
             details::guard_data::handoff_ptr h;
 
             if ( valGuarded ) {
@@ -263,7 +263,7 @@ namespace cds { namespace gc { namespace ptb {
 
                     // Now, try to set retired node pRetired as a hand-off node for the guard
                     cds::lock::Auto<details::guard_data::handoff_spin> al( pGuard->spinHandOff );
-                    if ( valGuarded == pGuard->pPost.load(CDS_ATOMIC::memory_order_acquire) ) {
+                    if ( valGuarded == pGuard->pPost.load(atomics::memory_order_acquire) ) {
                         if ( pGuard->pHandOff && pGuard->pHandOff->m_ptr.m_p == pRetired->m_ptr.m_p ) {
                             h = nullHandOff;    // i.e. nullptr
                             details::retired_ptr_node * pTail = pGuard->pHandOff;
index 4c066b90952290adcc531aa4614d7bb8649a206c..0cf4da764e27cc0fc507923373da2b796a2c31a4 100644 (file)
@@ -27,7 +27,7 @@ namespace CppUnitMini {
         ThreadPool&         m_Pool;
         boost::thread *     m_pThread;
         cds::OS::Timer      m_Timer;
-        CDS_ATOMIC::atomic<bool>    m_bTimeElapsed;
+        atomics::atomic<bool>    m_bTimeElapsed;
 
     public:
         double              m_nDuration;
@@ -60,11 +60,11 @@ namespace CppUnitMini {
         virtual void fini() {}
         void stop()
         {
-            m_bTimeElapsed.store( true, CDS_ATOMIC::memory_order_release );
+            m_bTimeElapsed.store( true, atomics::memory_order_release );
         }
         bool time_elapsed() const
         {
-            return m_bTimeElapsed.load( CDS_ATOMIC::memory_order_acquire );
+            return m_bTimeElapsed.load( atomics::memory_order_acquire );
         }
 
         bool check_timeout( size_t nMaxDuration )
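
The pool thread's timer flag is the usual release/acquire stop-flag idiom: the controller publishes true once, and workers poll with acquire loads until they see it. Equivalent std::atomic sketch:

    #include <atomic>

    class stop_flag_sketch {
        std::atomic<bool> m_bStop { false };
    public:
        void stop()               { m_bStop.store( true, std::memory_order_release ); }
        bool time_elapsed() const { return m_bStop.load( std::memory_order_acquire ); }
    };
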
index 681fe8fc90311233a151c9ca8e03492a0ad172d5..7637b72c95a4c4e00428d1afed0dfa47a3d03b9a 100644 (file)
@@ -11,9 +11,9 @@ namespace misc {
     class cxx11_atomic_class: public CppUnitMini::TestCase
     {
         template <typename AtomicFlag>
-        void do_test_atomic_flag_mo( AtomicFlag& f, CDS_ATOMIC::memory_order order )
+        void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order )
         {
-            CDS_ATOMIC::memory_order mo_clear = convert_to_store_order(order);
+            atomics::memory_order mo_clear = convert_to_store_order(order);
             for ( int i = 0; i < 5; ++i ) {
                 CPPUNIT_ASSERT( !f.test_and_set( order ));
                 CPPUNIT_ASSERT( f.test_and_set( order ) );
@@ -32,12 +32,12 @@ namespace misc {
                 f.clear();
             }
 
-            do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_relaxed );
-            do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_consume );
-            do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_acquire );
-            do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_release );
-            do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_acq_rel );
-            do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_seq_cst );
+            do_test_atomic_flag_mo( f, atomics::memory_order_relaxed );
+            do_test_atomic_flag_mo( f, atomics::memory_order_consume );
+            do_test_atomic_flag_mo( f, atomics::memory_order_acquire );
+            do_test_atomic_flag_mo( f, atomics::memory_order_release );
+            do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel );
+            do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst );
         }
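
convert_to_store_order and convert_to_load_order come from tests/test-hdr/misc/cxx11_convert_memory_order.h and clamp an arbitrary order to one that is legal for a pure store or pure load (memory_order_acquire, for instance, is not valid for store()). A sketch of what such helpers look like; the authoritative mapping lives in that header:

    #include <atomic>

    inline std::memory_order convert_to_store_order( std::memory_order order )
    {
        switch ( order ) {
        case std::memory_order_acquire:
        case std::memory_order_consume:
            return std::memory_order_relaxed;   // load-only orders
        case std::memory_order_acq_rel:
            return std::memory_order_release;   // keep the store half
        default:
            return order;
        }
    }

    inline std::memory_order convert_to_load_order( std::memory_order order )
    {
        switch ( order ) {
        case std::memory_order_release:
            return std::memory_order_relaxed;   // store-only order
        case std::memory_order_acq_rel:
            return std::memory_order_acquire;   // keep the load half
        default:
            return order;
        }
    }
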
 
         template <class Atomic, typename Integral>
@@ -185,12 +185,12 @@ namespace misc {
         }
 
         template <class Atomic, typename Integral>
-        void do_test_atomic_type( Atomic& a, CDS_ATOMIC::memory_order order )
+        void do_test_atomic_type( Atomic& a, atomics::memory_order order )
         {
             typedef Integral    integral_type;
 
-            const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order );
-            const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order );
+            const atomics::memory_order oLoad = convert_to_load_order( order );
+            const atomics::memory_order oStore = convert_to_store_order( order );
 
             CPPUNIT_ASSERT( a.is_lock_free() );
             a.store((integral_type) 0, oStore );
@@ -210,9 +210,9 @@ namespace misc {
                 integral_type n = integral_type(42) << (nByte * 8);
                 integral_type expected = prev;
 
-                CPPUNIT_ASSERT( a.compare_exchange_weak( expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+                CPPUNIT_ASSERT( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
                 CPPUNIT_ASSERT( expected  == prev );
-                CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+                CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
                 CPPUNIT_ASSERT( expected  == n );
 
                 prev = n;
@@ -226,9 +226,9 @@ namespace misc {
                 integral_type n = integral_type(42) << (nByte * 8);
                 integral_type expected = prev;
 
-                CPPUNIT_ASSERT( a.compare_exchange_strong( expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+                CPPUNIT_ASSERT( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
                 CPPUNIT_ASSERT( expected  == prev );
-                CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+                CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
                 CPPUNIT_ASSERT( expected  == n );
 
                 prev = n;
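
Both loops lean on the compare_exchange contract: on success, expected still holds the value that was there before; on failure it is rewritten with the value actually found, which is exactly what the deliberately failing second call asserts. In miniature:

    #include <atomic>
    #include <cassert>

    void cas_contract()
    {
        std::atomic<int> a( 1 );
        int expected = 1;
        assert( a.compare_exchange_strong( expected, 2 ));  // succeeds: a becomes 2
        assert( expected == 1 );                            // untouched on success
        assert( !a.compare_exchange_strong( expected, 3 )); // fails: a holds 2, not 1
        assert( expected == 2 );                            // rewritten with the current value
    }
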
@@ -239,14 +239,14 @@ namespace misc {
         }
 
         template <class Atomic, typename Integral>
-        void do_test_atomic_integral( Atomic& a, CDS_ATOMIC::memory_order order )
+        void do_test_atomic_integral( Atomic& a, atomics::memory_order order )
         {
             do_test_atomic_type< Atomic, Integral >( a, order );
 
             typedef Integral    integral_type;
 
-            const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order );
-            const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order );
+            const atomics::memory_order oLoad = convert_to_load_order( order );
+            const atomics::memory_order oStore = convert_to_store_order( order );
 
             // fetch_xxx testing
             a.store( (integral_type) 0, oStore );
@@ -298,18 +298,18 @@ namespace misc {
         {
             do_test_atomic_integral<Atomic, Integral >(a);
 
-            do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_relaxed );
-            do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_consume );
-            do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_acquire );
-            do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_release );
-            do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_acq_rel );
-            do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_seq_cst );
+            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_relaxed );
+            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_consume );
+            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acquire );
+            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_release );
+            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acq_rel );
+            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_seq_cst );
         }
 
         template <typename Integral>
         void test_atomic_integral()
         {
-            typedef CDS_ATOMIC::atomic<Integral> atomic_type;
+            typedef atomics::atomic<Integral> atomic_type;
 
             atomic_type a[8];
             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
@@ -319,7 +319,7 @@ namespace misc {
         template <typename Integral>
         void test_atomic_integral_volatile()
         {
-            typedef CDS_ATOMIC::atomic<Integral> volatile atomic_type;
+            typedef atomics::atomic<Integral> volatile atomic_type;
 
             atomic_type a[8];
             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
@@ -361,10 +361,10 @@ namespace misc {
         }
 
         template <class AtomicBool>
-        void do_test_atomic_bool( AtomicBool& a, CDS_ATOMIC::memory_order order )
+        void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order )
         {
-            const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order );
-            const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order );
+            const atomics::memory_order oLoad = convert_to_load_order( order );
+            const atomics::memory_order oStore = convert_to_store_order( order );
 
             CPPUNIT_ASSERT( a.is_lock_free() );
             a.store( false, oStore );
@@ -377,9 +377,9 @@ namespace misc {
             CPPUNIT_ASSERT( a.load( oLoad ) == false );
 
             bool expected = false;
-            CPPUNIT_ASSERT( a.compare_exchange_weak( expected, true, order, CDS_ATOMIC::memory_order_relaxed));
+            CPPUNIT_ASSERT( a.compare_exchange_weak( expected, true, order, atomics::memory_order_relaxed));
             CPPUNIT_ASSERT( expected  == false );
-            CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, false, order, CDS_ATOMIC::memory_order_relaxed));
+            CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, false, order, atomics::memory_order_relaxed));
             CPPUNIT_ASSERT( expected  == true );
             CPPUNIT_ASSERT( a.load( oLoad ) == true );
 
@@ -387,9 +387,9 @@ namespace misc {
             a.store( false, oStore );
 
             expected = false;
-            CPPUNIT_ASSERT( a.compare_exchange_strong( expected, true, order, CDS_ATOMIC::memory_order_relaxed));
+            CPPUNIT_ASSERT( a.compare_exchange_strong( expected, true, order, atomics::memory_order_relaxed));
             CPPUNIT_ASSERT( expected  == false );
-            CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, false, order, CDS_ATOMIC::memory_order_relaxed));
+            CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, false, order, atomics::memory_order_relaxed));
             CPPUNIT_ASSERT( expected  == true );
 
             CPPUNIT_ASSERT( a.load( oLoad ) == true );
@@ -399,27 +399,27 @@ namespace misc {
 
 
         template <typename Atomic>
-        void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, CDS_ATOMIC::memory_order order )
+        void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
         {
-            CDS_ATOMIC::memory_order oLoad = convert_to_load_order(order);
-            CDS_ATOMIC::memory_order oStore = convert_to_store_order(order);
+            atomics::memory_order oLoad = convert_to_load_order(order);
+            atomics::memory_order oStore = convert_to_store_order(order);
             void *  p;
 
             a.store( (void *) arr, oStore );
             CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load( oLoad )) == 1 );
 
             p = arr;
-            CPPUNIT_ASSERT( a.compare_exchange_weak( p, (void *)(arr + 5), order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( a.compare_exchange_weak( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 0 );
             CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 1 );
-            CPPUNIT_ASSERT( !a.compare_exchange_weak( p, (void *)(arr + 3), order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( !a.compare_exchange_weak( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 5 );
             CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 6 );
 
-            CPPUNIT_ASSERT( a.compare_exchange_strong( p, (void *)(arr + 3), order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( a.compare_exchange_strong( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 5 );
             CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 6 );
-            CPPUNIT_ASSERT( !a.compare_exchange_strong( p, (void *)(arr + 5), order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( !a.compare_exchange_strong( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 3 );
             CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 4 );
 
@@ -443,7 +443,7 @@ namespace misc {
         template <bool Volatile>
         void do_test_atomic_pointer_void()
         {
-            typedef typename add_volatile<CDS_ATOMIC::atomic< void *>, Volatile>::type    atomic_pointer;
+            typedef typename add_volatile<atomics::atomic< void *>, Volatile>::type    atomic_pointer;
 
             char   arr[8];
             const char aSize = sizeof(arr)/sizeof(arr[0]);
@@ -497,37 +497,37 @@ namespace misc {
                 CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load()) == i - 1 );
             }
 
-            do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_relaxed );
-            do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_consume );
-            do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_acquire );
-            do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_release );
-            do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_acq_rel );
-            do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_seq_cst );
+            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
+            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_consume );
+            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
+            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
+            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel );
+            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst );
         }
 
         template <typename Atomic, typename Integral>
-        void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, CDS_ATOMIC::memory_order order )
+        void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order )
         {
             typedef Integral integral_type;
-            CDS_ATOMIC::memory_order oLoad = convert_to_load_order(order);
-            CDS_ATOMIC::memory_order oStore = convert_to_store_order(order);
+            atomics::memory_order oLoad = convert_to_load_order(order);
+            atomics::memory_order oStore = convert_to_store_order(order);
             integral_type *  p;
 
             a.store( arr, oStore );
             CPPUNIT_ASSERT( *a.load( oLoad ) == 1 );
 
             p = arr;
-            CPPUNIT_ASSERT( a.compare_exchange_weak( p, arr + 5, order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( a.compare_exchange_weak( p, arr + 5, order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 0 );
             CPPUNIT_ASSERT( *p == 1 );
-            CPPUNIT_ASSERT( !a.compare_exchange_weak( p, arr + 3, order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( !a.compare_exchange_weak( p, arr + 3, order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 5 );
             CPPUNIT_ASSERT( *p == 6 );
 
-            CPPUNIT_ASSERT( a.compare_exchange_strong( p, arr + 3, order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( a.compare_exchange_strong( p, arr + 3, order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 5 );
             CPPUNIT_ASSERT( *p == 6 );
-            CPPUNIT_ASSERT( !a.compare_exchange_strong( p, arr + 5, order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( !a.compare_exchange_strong( p, arr + 5, order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 3 );
             CPPUNIT_ASSERT( *p == 4 );
 
@@ -554,7 +554,7 @@ namespace misc {
         void test_atomic_pointer_for()
         {
             typedef Integral integral_type;
-            typedef typename add_volatile<CDS_ATOMIC::atomic< integral_type *>, Volatile>::type    atomic_pointer;
+            typedef typename add_volatile<atomics::atomic< integral_type *>, Volatile>::type    atomic_pointer;
 
             integral_type   arr[8];
             const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
@@ -602,12 +602,12 @@ namespace misc {
                 CPPUNIT_ASSERT( *a.load() == i - 1 );
             }
 
-            test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_relaxed );
-            test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_consume );
-            test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_acquire );
-            test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_release );
-            test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_acq_rel );
-            test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_seq_cst );
+            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed );
+            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_consume );
+            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire );
+            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release );
+            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel );
+            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst );
         }
 
     public:
@@ -615,7 +615,7 @@ namespace misc {
         {
             // Array to test different alignments
 
-            CDS_ATOMIC::atomic_flag flags[8];
+            atomics::atomic_flag flags[8];
             for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
                 do_test_atomic_flag( flags[i] );
         }
@@ -624,7 +624,7 @@ namespace misc {
         {
             // Array to test different alignments
 
-            CDS_ATOMIC::atomic_flag volatile flags[8];
+            atomics::atomic_flag volatile flags[8];
             for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
                 do_test_atomic_flag( flags[i] );
         }
@@ -638,22 +638,22 @@ namespace misc {
             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
                 do_test_atomic_bool( a[i] );
 
-                do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_relaxed );
-                do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_consume );
-                do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_acquire );
-                do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_release );
-                do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_acq_rel );
-                do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_seq_cst );
+                do_test_atomic_bool( a[i], atomics::memory_order_relaxed );
+                do_test_atomic_bool( a[i], atomics::memory_order_consume );
+                do_test_atomic_bool( a[i], atomics::memory_order_acquire );
+                do_test_atomic_bool( a[i], atomics::memory_order_release );
+                do_test_atomic_bool( a[i], atomics::memory_order_acq_rel );
+                do_test_atomic_bool( a[i], atomics::memory_order_seq_cst );
             }
         }
 
         void test_atomic_bool()
         {
-            test_atomic_bool_< CDS_ATOMIC::atomic<bool> >();
+            test_atomic_bool_< atomics::atomic<bool> >();
         }
         void test_atomic_bool_volatile()
         {
-            test_atomic_bool_< CDS_ATOMIC::atomic<bool> volatile >();
+            test_atomic_bool_< atomics::atomic<bool> volatile >();
         }
 
         void test_atomic_char()                 { test_atomic_integral<char>(); }
index 84298b06bc05152fdaa8571fb60673ff4cb9d9a9..724670e3e391dc2ab457c7149bc69fabb478a33a 100644 (file)
@@ -15,17 +15,17 @@ namespace misc {
     class cxx11_atomic_func: public CppUnitMini::TestCase
     {
         template <typename AtomicFlag>
-        void do_test_atomic_flag_mo( AtomicFlag& f, CDS_ATOMIC::memory_order order )
+        void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order )
         {
-            CDS_ATOMIC::memory_order mo_clear = convert_to_store_order(order);
+            atomics::memory_order mo_clear = convert_to_store_order(order);
 
             f.clear( convert_to_store_order(order) );
 
             for ( int i = 0; i < 5; ++i ) {
-                CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_flag_test_and_set_explicit( &f, order ));
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_flag_test_and_set_explicit( &f, order ) );
-                CDS_ATOMIC::atomic_flag_clear_explicit( &f, mo_clear );
-                CDS_ATOMIC::atomic_flag_clear_explicit( &f, mo_clear );
+                CPPUNIT_ASSERT( !atomics::atomic_flag_test_and_set_explicit( &f, order ));
+                CPPUNIT_ASSERT( atomics::atomic_flag_test_and_set_explicit( &f, order ) );
+                atomics::atomic_flag_clear_explicit( &f, mo_clear );
+                atomics::atomic_flag_clear_explicit( &f, mo_clear );
             }
             //CPPUNIT_ASSERT( f.m_Flag == 0 );
         }
@@ -37,22 +37,22 @@ namespace misc {
 
             for ( int i = 0; i < 5; ++i ) {
                 //CPPUNIT_ASSERT( f.m_Flag == 0 );
-                CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_flag_test_and_set( &f ));
+                CPPUNIT_ASSERT( !atomics::atomic_flag_test_and_set( &f ));
                 //CPPUNIT_ASSERT( f.m_Flag != 0 );
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_flag_test_and_set( &f ) );
+                CPPUNIT_ASSERT( atomics::atomic_flag_test_and_set( &f ) );
                 //CPPUNIT_ASSERT( f.m_Flag != 0 );
-                CDS_ATOMIC::atomic_flag_clear(&f);
+                atomics::atomic_flag_clear(&f);
                 //CPPUNIT_ASSERT( f.m_Flag == 0 );
-                CDS_ATOMIC::atomic_flag_clear(&f);
+                atomics::atomic_flag_clear(&f);
             }
             //CPPUNIT_ASSERT( f.m_Flag == 0 );
 
-            do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_relaxed );
-            do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_consume );
-            do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_acquire );
-            do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_release );
-            do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_acq_rel );
-            do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_seq_cst );
+            do_test_atomic_flag_mo( f, atomics::memory_order_relaxed );
+            do_test_atomic_flag_mo( f, atomics::memory_order_consume );
+            do_test_atomic_flag_mo( f, atomics::memory_order_acquire );
+            do_test_atomic_flag_mo( f, atomics::memory_order_release );
+            do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel );
+            do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst );
         }
 
         template <class Atomic, typename Integral>
@@ -60,51 +60,51 @@ namespace misc {
         {
             typedef Integral    integral_type;
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_is_lock_free( &a ) );
-            CDS_ATOMIC::atomic_store( &a, (integral_type) 0 );
+            CPPUNIT_ASSERT( atomics::atomic_is_lock_free( &a ) );
+            atomics::atomic_store( &a, (integral_type) 0 );
             CPPUNIT_ASSERT( a == 0 );
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == 0 );
+            CPPUNIT_ASSERT( atomics::atomic_load( &a ) == 0 );
 
             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                 integral_type n = integral_type(42) << (nByte * 8);
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, n ) == 0 );
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == n );
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, (integral_type) 0 ) == n );
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == 0 );
+                CPPUNIT_ASSERT( atomics::atomic_exchange( &a, n ) == 0 );
+                CPPUNIT_ASSERT( atomics::atomic_load( &a ) == n );
+                CPPUNIT_ASSERT( atomics::atomic_exchange( &a, (integral_type) 0 ) == n );
+                CPPUNIT_ASSERT( atomics::atomic_load( &a ) == 0 );
             }
 
-            integral_type prev = CDS_ATOMIC::atomic_load( &a );
+            integral_type prev = atomics::atomic_load( &a );
             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                 integral_type n = integral_type(42) << (nByte * 8);
                 integral_type expected = prev;
 
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak( &a, &expected, n));
+                CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak( &a, &expected, n));
                 CPPUNIT_ASSERT( expected  == prev );
                 CPPUNIT_ASSERT( expected  != n );
-                CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak( &a, &expected, n) );
+                CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak( &a, &expected, n) );
                 CPPUNIT_ASSERT( expected  == n );
 
                 prev = n;
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == n );
+                CPPUNIT_ASSERT( atomics::atomic_load( &a ) == n );
             }
 
-            CDS_ATOMIC::atomic_store( &a, (integral_type) 0 );
+            atomics::atomic_store( &a, (integral_type) 0 );
 
-            prev = CDS_ATOMIC::atomic_load( &a );
+            prev = atomics::atomic_load( &a );
             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                 integral_type n = integral_type(42) << (nByte * 8);
                 integral_type expected = prev;
 
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong( &a, &expected, n));
+                CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong( &a, &expected, n));
                 CPPUNIT_ASSERT( expected  == prev );
-                CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong( &a, &expected, n));
+                CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong( &a, &expected, n));
                 CPPUNIT_ASSERT( expected  == n );
 
                 prev = n;
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == n );
+                CPPUNIT_ASSERT( atomics::atomic_load( &a ) == n );
             }
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, (integral_type) 0 ) == prev );
+            CPPUNIT_ASSERT( atomics::atomic_exchange( &a, (integral_type) 0 ) == prev );
         }
 
         template <class Atomic, typename Integral>
@@ -115,152 +115,152 @@ namespace misc {
             typedef Integral    integral_type;
 
             // fetch_xxx testing
-            CDS_ATOMIC::atomic_store( &a, (integral_type) 0 );
+            atomics::atomic_store( &a, (integral_type) 0 );
 
             // fetch_add
             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
             {
-                integral_type prev = CDS_ATOMIC::atomic_load( &a );
+                integral_type prev = atomics::atomic_load( &a );
                 integral_type n = integral_type(42) << (nByte * 8);
 
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add( &a, n) == prev);
+                CPPUNIT_ASSERT( atomics::atomic_fetch_add( &a, n) == prev);
             }
 
             // fetch_sub
             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
             {
-                integral_type prev = CDS_ATOMIC::atomic_load( &a );
+                integral_type prev = atomics::atomic_load( &a );
                 integral_type n = integral_type(42) << ((nByte - 1) * 8);
 
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub( &a, n) == prev);
+                CPPUNIT_ASSERT( atomics::atomic_fetch_sub( &a, n) == prev);
             }
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == 0 );
+            CPPUNIT_ASSERT( atomics::atomic_load( &a ) == 0 );
 
             // fetch_or / fetch_xor / fetch_and
             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
             {
-                integral_type prev = CDS_ATOMIC::atomic_load( &a );
+                integral_type prev = atomics::atomic_load( &a );
                 integral_type mask = 1 << nBit;
 
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_or( &a, mask ) == prev );
-                prev = CDS_ATOMIC::atomic_load( &a );
+                CPPUNIT_ASSERT( atomics::atomic_fetch_or( &a, mask ) == prev );
+                prev = atomics::atomic_load( &a );
                 CPPUNIT_ASSERT( ( prev & mask)  == mask);
 
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_and( &a, (integral_type) ~mask ) == prev );
-                prev = CDS_ATOMIC::atomic_load( &a );
+                CPPUNIT_ASSERT( atomics::atomic_fetch_and( &a, (integral_type) ~mask ) == prev );
+                prev = atomics::atomic_load( &a );
                 CPPUNIT_ASSERT_EX( integral_type(prev & mask) == integral_type(0), "prev=" << std::hex << prev << ", mask=" << std::hex << mask);
 
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_xor( &a, mask ) == prev );
-                prev = CDS_ATOMIC::atomic_load( &a );
+                CPPUNIT_ASSERT( atomics::atomic_fetch_xor( &a, mask ) == prev );
+                prev = atomics::atomic_load( &a );
                 CPPUNIT_ASSERT( ( prev & mask)  == mask);
             }
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == (integral_type) -1 );
+            CPPUNIT_ASSERT( atomics::atomic_load( &a ) == (integral_type) -1 );
         }
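
Per bit, the loop ends on the xor that turns the bit back on, so after the walk every bit is set and the final load compares equal to (integral_type)-1. A compact demonstration:

    #include <atomic>
    #include <cassert>

    void bit_walk()
    {
        std::atomic<unsigned char> a( 0 );
        for ( unsigned nBit = 0; nBit < 8; ++nBit ) {
            unsigned char mask = static_cast<unsigned char>( 1u << nBit );
            a.fetch_or( mask );                                 // set the bit
            a.fetch_and( static_cast<unsigned char>( ~mask ));  // clear it
            a.fetch_xor( mask );                                // toggle it back on
        }
        assert( a.load() == static_cast<unsigned char>( -1 )); // all bits set
    }
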
 
         template <class Atomic, typename Integral>
-        void do_test_atomic_type( Atomic& a, CDS_ATOMIC::memory_order order )
+        void do_test_atomic_type( Atomic& a, atomics::memory_order order )
         {
             typedef Integral    integral_type;
 
-            const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order );
-            const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order );
+            const atomics::memory_order oLoad = convert_to_load_order( order );
+            const atomics::memory_order oStore = convert_to_store_order( order );
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_is_lock_free( &a ) );
-            CDS_ATOMIC::atomic_store_explicit( &a, (integral_type) 0, oStore );
+            CPPUNIT_ASSERT( atomics::atomic_is_lock_free( &a ) );
+            atomics::atomic_store_explicit( &a, (integral_type) 0, oStore );
             CPPUNIT_ASSERT( a == 0 );
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == 0 );
+            CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == 0 );
 
             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                 integral_type n = integral_type(42) << (nByte * 8);
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, n, order ) == 0 );
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == n );
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, (integral_type) 0, order ) == n );
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == 0 );
+                CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, n, order ) == 0 );
+                CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == n );
+                CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, (integral_type) 0, order ) == n );
+                CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == 0 );
             }
 
-            integral_type prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+            integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                 integral_type n = integral_type(42) << (nByte * 8);
                 integral_type expected = prev;
 
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+                CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
                 CPPUNIT_ASSERT( expected  == prev );
-                CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+                CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
                 CPPUNIT_ASSERT( expected  == n );
 
                 prev = n;
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == n );
+                CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == n );
             }
 
-            CDS_ATOMIC::atomic_store_explicit( &a, (integral_type) 0, oStore );
+            atomics::atomic_store_explicit( &a, (integral_type) 0, oStore );
 
-            prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+            prev = atomics::atomic_load_explicit( &a, oLoad );
             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                 integral_type n = integral_type(42) << (nByte * 8);
                 integral_type expected = prev;
 
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+                CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
                 CPPUNIT_ASSERT( expected  == prev );
-                CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+                CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
                 CPPUNIT_ASSERT( expected  == n );
 
                 prev = n;
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == n );
+                CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == n );
             }
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, (integral_type) 0, order ) == prev );
+            CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, (integral_type) 0, order ) == prev );
         }
 
         template <class Atomic, typename Integral>
-        void do_test_atomic_integral( Atomic& a, CDS_ATOMIC::memory_order order )
+        void do_test_atomic_integral( Atomic& a, atomics::memory_order order )
         {
             do_test_atomic_type< Atomic, Integral >( a, order );
             typedef Integral    integral_type;
 
-            const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order );
-            const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order );
+            const atomics::memory_order oLoad = convert_to_load_order( order );
+            const atomics::memory_order oStore = convert_to_store_order( order );
 
             // fetch_xxx testing
-            CDS_ATOMIC::atomic_store_explicit( &a, (integral_type) 0, oStore );
+            atomics::atomic_store_explicit( &a, (integral_type) 0, oStore );
 
             // fetch_add
             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
             {
-                integral_type prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+                integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
                 integral_type n = integral_type(42) << (nByte * 8);
 
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add_explicit( &a, n, order) == prev);
+                CPPUNIT_ASSERT( atomics::atomic_fetch_add_explicit( &a, n, order) == prev);
             }
 
             // fetch_sub
             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
             {
-                integral_type prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+                integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
                 integral_type n = integral_type(42) << ((nByte - 1) * 8);
 
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub_explicit( &a, n, order ) == prev);
+                CPPUNIT_ASSERT( atomics::atomic_fetch_sub_explicit( &a, n, order ) == prev);
             }
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == 0 );
+            CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == 0 );
 
             // fetch_or / fetch_xor / fetch_and
             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
             {
-                integral_type prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad )  ;;
+                integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
                 integral_type mask = integral_type(1) << nBit;
 
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_or_explicit( &a, mask, order ) == prev );
-                prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+                CPPUNIT_ASSERT( atomics::atomic_fetch_or_explicit( &a, mask, order ) == prev );
+                prev = atomics::atomic_load_explicit( &a, oLoad );
                 CPPUNIT_ASSERT( ( prev & mask)  == mask);
 
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_and_explicit( &a, (integral_type) ~mask, order ) == prev );
-                prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+                CPPUNIT_ASSERT( atomics::atomic_fetch_and_explicit( &a, (integral_type) ~mask, order ) == prev );
+                prev = atomics::atomic_load_explicit( &a, oLoad );
                 CPPUNIT_ASSERT( ( prev & mask)  == 0);
 
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_xor_explicit( &a, mask, order ) == prev );
-                prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+                CPPUNIT_ASSERT( atomics::atomic_fetch_xor_explicit( &a, mask, order ) == prev );
+                prev = atomics::atomic_load_explicit( &a, oLoad );
                 CPPUNIT_ASSERT( ( prev & mask)  == mask);
             }
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == (integral_type) -1 );
+            CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == (integral_type) -1 );
         }
 
         template <typename Atomic, typename Integral>
@@ -268,18 +268,18 @@ namespace misc {
         {
             do_test_atomic_integral<Atomic, Integral >(a);
 
-            do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_relaxed );
-            do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_consume );
-            do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_acquire );
-            do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_release );
-            do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_acq_rel );
-            do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_seq_cst );
+            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_relaxed );
+            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_consume );
+            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acquire );
+            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_release );
+            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acq_rel );
+            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_seq_cst );
         }
 
         template <typename Integral>
         void test_atomic_integral()
         {
-            typedef CDS_ATOMIC::atomic<Integral>    atomic_type;
+            typedef atomics::atomic<Integral>    atomic_type;
             atomic_type a[8];
             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
                 test_atomic_integral_<atomic_type, Integral>( a[i] );
@@ -288,7 +288,7 @@ namespace misc {
         template <typename Integral>
         void test_atomic_integral_volatile()
         {
-            typedef CDS_ATOMIC::atomic<Integral> volatile atomic_type;
+            typedef atomics::atomic<Integral> volatile atomic_type;
             atomic_type a[8];
             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
                 test_atomic_integral_<atomic_type, Integral>( a[i] );
@@ -298,114 +298,114 @@ namespace misc {
         template <class AtomicBool>
         void do_test_atomic_bool(AtomicBool& a)
         {
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_is_lock_free( &a ) );
-            CDS_ATOMIC::atomic_store( &a, false );
+            CPPUNIT_ASSERT( atomics::atomic_is_lock_free( &a ) );
+            atomics::atomic_store( &a, false );
             CPPUNIT_ASSERT( a == false );
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == false );
+            CPPUNIT_ASSERT( atomics::atomic_load( &a ) == false );
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, true ) == false );
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == true );
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, false ) == true );
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == false );
+            CPPUNIT_ASSERT( atomics::atomic_exchange( &a, true ) == false );
+            CPPUNIT_ASSERT( atomics::atomic_load( &a ) == true );
+            CPPUNIT_ASSERT( atomics::atomic_exchange( &a, false ) == true );
+            CPPUNIT_ASSERT( atomics::atomic_load( &a ) == false );
 
             bool expected = false;
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak( &a, &expected, true));
+            CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak( &a, &expected, true));
             CPPUNIT_ASSERT( expected  == false );
-            CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak( &a, &expected, false));
+            CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak( &a, &expected, false));
             CPPUNIT_ASSERT( expected  == true );
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == true );
+            CPPUNIT_ASSERT( atomics::atomic_load( &a ) == true );
 
-            CDS_ATOMIC::atomic_store( &a, false );
+            atomics::atomic_store( &a, false );
 
             expected = false;
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong( &a, &expected, true));
+            CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong( &a, &expected, true));
             CPPUNIT_ASSERT( expected  == false );
-            CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong( &a, &expected, false));
+            CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong( &a, &expected, false));
             CPPUNIT_ASSERT( expected  == true );
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == true );
+            CPPUNIT_ASSERT( atomics::atomic_load( &a ) == true );
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, false ) == true );
+            CPPUNIT_ASSERT( atomics::atomic_exchange( &a, false ) == true );
         }
 
         template <class AtomicBool>
-        void do_test_atomic_bool( AtomicBool& a, CDS_ATOMIC::memory_order order )
+        void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order )
         {
-            const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order );
-            const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order );
+            const atomics::memory_order oLoad = convert_to_load_order( order );
+            const atomics::memory_order oStore = convert_to_store_order( order );
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_is_lock_free( &a ) );
-            CDS_ATOMIC::atomic_store_explicit( &a, false, oStore );
+            CPPUNIT_ASSERT( atomics::atomic_is_lock_free( &a ) );
+            atomics::atomic_store_explicit( &a, false, oStore );
             CPPUNIT_ASSERT( a == false );
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == false );
+            CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == false );
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, true, order ) == false );
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == true );
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, false, order ) == true );
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == false );
+            CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, true, order ) == false );
+            CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == true );
+            CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, false, order ) == true );
+            CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == false );
 
             bool expected = false;
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &expected, true, order, CDS_ATOMIC::memory_order_relaxed));
+            CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak_explicit( &a, &expected, true, order, atomics::memory_order_relaxed));
             CPPUNIT_ASSERT( expected  == false );
-            CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &expected, false, order, CDS_ATOMIC::memory_order_relaxed));
+            CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak_explicit( &a, &expected, false, order, atomics::memory_order_relaxed));
             CPPUNIT_ASSERT( expected  == true );
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == true );
+            CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == true );
 
-            CDS_ATOMIC::atomic_store( &a, false );
+            atomics::atomic_store( &a, false );
 
             expected = false;
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &expected, true, order, CDS_ATOMIC::memory_order_relaxed));
+            CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong_explicit( &a, &expected, true, order, atomics::memory_order_relaxed));
             CPPUNIT_ASSERT( expected  == false );
-            CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &expected, false, order, CDS_ATOMIC::memory_order_relaxed));
+            CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong_explicit( &a, &expected, false, order, atomics::memory_order_relaxed));
             CPPUNIT_ASSERT( expected  == true );
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == true );
+            CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == true );
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, false, order ) == true );
+            CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, false, order ) == true );
         }
 
         template <typename Atomic, typename Integral>
-        void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, CDS_ATOMIC::memory_order order )
+        void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order )
         {
             typedef Integral integral_type;
-            CDS_ATOMIC::memory_order oLoad = convert_to_load_order(order);
-            CDS_ATOMIC::memory_order oStore = convert_to_store_order(order);
+            atomics::memory_order oLoad = convert_to_load_order(order);
+            atomics::memory_order oStore = convert_to_store_order(order);
             integral_type *  p;
 
-            CDS_ATOMIC::atomic_store_explicit( &a, arr, oStore );
-            CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == 1 );
+            atomics::atomic_store_explicit( &a, arr, oStore );
+            CPPUNIT_ASSERT( *atomics::atomic_load_explicit( &a, oLoad ) == 1 );
 
             p = arr;
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &p, arr + 5, order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak_explicit( &a, &p, arr + 5, order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 0 );
             CPPUNIT_ASSERT( *p == 1 );
-            CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &p, arr + 3, order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak_explicit( &a, &p, arr + 3, order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 5 );
             CPPUNIT_ASSERT( *p == 6 );
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &p, arr + 3, order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong_explicit( &a, &p, arr + 3, order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 5 );
             CPPUNIT_ASSERT( *p == 6 );
-            CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &p, arr + 5, order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong_explicit( &a, &p, arr + 5, order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 3 );
             CPPUNIT_ASSERT( *p == 4 );
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, arr, order ) == arr + 3 );
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == arr );
-            CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == 1 );
+            CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, arr, order ) == arr + 3 );
+            CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == arr );
+            CPPUNIT_ASSERT( *atomics::atomic_load_explicit( &a, oLoad ) == 1 );
 
             for ( integral_type i = 1; i < aSize; ++i ) {
-                integral_type * p = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+                integral_type * p = atomics::atomic_load_explicit( &a, oLoad );
                 CPPUNIT_ASSERT( *p == i );
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add_explicit( &a, 1, order ) == p );
-                CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == i + 1 );
+                CPPUNIT_ASSERT( atomics::atomic_fetch_add_explicit( &a, 1, order ) == p );
+                CPPUNIT_ASSERT( *atomics::atomic_load_explicit( &a, oLoad ) == i + 1 );
             }
 
             for ( integral_type i = aSize; i > 1; --i ) {
-                integral_type * p = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+                integral_type * p = atomics::atomic_load_explicit( &a, oLoad );
                 CPPUNIT_ASSERT( *p == i  );
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub_explicit( &a, 1, order ) == p );
-                CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == i - 1 );
+                CPPUNIT_ASSERT( atomics::atomic_fetch_sub_explicit( &a, 1, order ) == p );
+                CPPUNIT_ASSERT( *atomics::atomic_load_explicit( &a, oLoad ) == i - 1 );
             }
         }
 
@@ -413,7 +413,7 @@ namespace misc {
         void test_atomic_pointer_for()
         {
             typedef Integral integral_type;
-            typedef typename add_volatile<CDS_ATOMIC::atomic< integral_type *>, Volatile>::type    atomic_pointer;
+            typedef typename add_volatile<atomics::atomic< integral_type *>, Volatile>::type    atomic_pointer;
 
             integral_type   arr[8];
             const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
@@ -424,93 +424,93 @@ namespace misc {
             atomic_pointer  a;
             integral_type *  p;
 
-            CDS_ATOMIC::atomic_store( &a, arr );
-            CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load( &a ) == 1 );
+            atomics::atomic_store( &a, arr );
+            CPPUNIT_ASSERT( *atomics::atomic_load( &a ) == 1 );
 
             p = arr;
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak( &a, &p, arr + 5 ));
+            CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak( &a, &p, arr + 5 ));
             CPPUNIT_ASSERT( p == arr + 0 );
-            CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak( &a, &p, arr + 3 ));
+            CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak( &a, &p, arr + 3 ));
             CPPUNIT_ASSERT( p == arr + 5 );
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong( &a, &p, arr + 3 ));
+            CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong( &a, &p, arr + 3 ));
             CPPUNIT_ASSERT( p == arr + 5 );
-            CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong( &a, &p, arr + 5 ));
+            CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong( &a, &p, arr + 5 ));
             CPPUNIT_ASSERT( p == arr + 3 );
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, arr ) == arr + 3 );
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == arr );
-            CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load( &a ) == 1 );
+            CPPUNIT_ASSERT( atomics::atomic_exchange( &a, arr ) == arr + 3 );
+            CPPUNIT_ASSERT( atomics::atomic_load( &a ) == arr );
+            CPPUNIT_ASSERT( *atomics::atomic_load( &a ) == 1 );
 
             for ( integral_type i = 1; i < aSize; ++i ) {
-                integral_type * p = CDS_ATOMIC::atomic_load( &a );
+                integral_type * p = atomics::atomic_load( &a );
                 CPPUNIT_ASSERT( *p == i );
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add( &a, 1 ) == p );
-                CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load( &a ) == i + 1 );
+                CPPUNIT_ASSERT( atomics::atomic_fetch_add( &a, 1 ) == p );
+                CPPUNIT_ASSERT( *atomics::atomic_load( &a ) == i + 1 );
             }
 
             for ( integral_type i = aSize; i > 1; --i ) {
-                integral_type * p = CDS_ATOMIC::atomic_load( &a );
+                integral_type * p = atomics::atomic_load( &a );
                 CPPUNIT_ASSERT( *p == i  );
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub( &a, 1 ) == p );
-                CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load( &a ) == i - 1 );
+                CPPUNIT_ASSERT( atomics::atomic_fetch_sub( &a, 1 ) == p );
+                CPPUNIT_ASSERT( *atomics::atomic_load( &a ) == i - 1 );
             }
 
-            test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_relaxed );
-            test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_consume );
-            test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_acquire );
-            test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_release );
-            test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_acq_rel );
-            test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_seq_cst );
+            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed );
+            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_consume );
+            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire );
+            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release );
+            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel );
+            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst );
 
         }
 
         template <typename Atomic>
-        void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, CDS_ATOMIC::memory_order order )
+        void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
         {
-            CDS_ATOMIC::memory_order oLoad = convert_to_load_order(order);
-            CDS_ATOMIC::memory_order oStore = convert_to_store_order(order);
+            atomics::memory_order oLoad = convert_to_load_order(order);
+            atomics::memory_order oStore = convert_to_store_order(order);
             char *  p;
 
-            CDS_ATOMIC::atomic_store_explicit( &a, (void *) arr, oStore );
-            CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == 1 );
+            atomics::atomic_store_explicit( &a, (void *) arr, oStore );
+            CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )) == 1 );
 
             p = arr;
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, (void **) &p, (void *)(arr + 5), order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak_explicit( &a, (void **) &p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 0 );
             CPPUNIT_ASSERT( *p == 1 );
-            CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, (void **) &p, (void *)(arr + 3), order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak_explicit( &a, (void **) &p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 5 );
             CPPUNIT_ASSERT( *p == 6 );
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, (void **) &p, (void *)(arr + 3), order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong_explicit( &a, (void **) &p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 5 );
             CPPUNIT_ASSERT( *p == 6 );
-            CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, (void **) &p, (void *)(arr + 5), order, CDS_ATOMIC::memory_order_relaxed ));
+            CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong_explicit( &a, (void **) &p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
             CPPUNIT_ASSERT( p == arr + 3 );
             CPPUNIT_ASSERT( *p == 4 );
 
-            CPPUNIT_ASSERT( reinterpret_cast<char *>(CDS_ATOMIC::atomic_exchange_explicit( &a, (void *) arr, order )) == arr + 3 );
-            CPPUNIT_ASSERT( reinterpret_cast<char *>(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == arr );
-            CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == 1 );
+            CPPUNIT_ASSERT( reinterpret_cast<char *>(atomics::atomic_exchange_explicit( &a, (void *) arr, order )) == arr + 3 );
+            CPPUNIT_ASSERT( reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )) == arr );
+            CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )) == 1 );
 
             for ( char i = 1; i < aSize; ++i ) {
-                CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == i );
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add_explicit( &a, 1, order ));
-                CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == i + 1 );
+                CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )) == i );
+                CPPUNIT_ASSERT( atomics::atomic_fetch_add_explicit( &a, 1, order ));
+                CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )) == i + 1 );
             }
 
             for ( char i = aSize; i > 1; --i ) {
-                CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == i  );
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub_explicit( &a, 1, order ));
-                CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == i - 1 );
+                CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )) == i  );
+                CPPUNIT_ASSERT( atomics::atomic_fetch_sub_explicit( &a, 1, order ));
+                CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )) == i - 1 );
             }
         }
 
         template <bool Volatile>
         void do_test_atomic_pointer_void()
         {
-            typedef typename add_volatile<CDS_ATOMIC::atomic< void *>, Volatile>::type    atomic_pointer;
+            typedef typename add_volatile<atomics::atomic< void *>, Volatile>::type    atomic_pointer;
 
             char   arr[8];
             const char aSize = sizeof(arr)/sizeof(arr[0]);
@@ -521,54 +521,54 @@ namespace misc {
             atomic_pointer  a;
             char *  p;
 
-            CDS_ATOMIC::atomic_store( &a, (void *) arr );
-            CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load( &a )) == 1 );
+            atomics::atomic_store( &a, (void *) arr );
+            CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load( &a )) == 1 );
 
             p = arr;
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 5) ));
+            CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 5) ));
             CPPUNIT_ASSERT( p == arr + 0 );
-            CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 3) ));
+            CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 3) ));
             CPPUNIT_ASSERT( p == arr + 5 );
 
-            CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 3) ));
+            CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 3) ));
             CPPUNIT_ASSERT( p == arr + 5 );
-            CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 5) ));
+            CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 5) ));
             CPPUNIT_ASSERT( p == arr + 3 );
 
-            CPPUNIT_ASSERT( reinterpret_cast<char *>( CDS_ATOMIC::atomic_exchange( &a, (void *) arr )) == arr + 3 );
-            CPPUNIT_ASSERT( reinterpret_cast<char *>( CDS_ATOMIC::atomic_load( &a )) == arr );
-            CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load( &a )) == 1 );
+            CPPUNIT_ASSERT( reinterpret_cast<char *>( atomics::atomic_exchange( &a, (void *) arr )) == arr + 3 );
+            CPPUNIT_ASSERT( reinterpret_cast<char *>( atomics::atomic_load( &a )) == arr );
+            CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load( &a )) == 1 );
 
             for ( char i = 1; i < aSize; ++i ) {
-                CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load( &a )) == i );
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add( &a, 1 ));
-                CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load( &a )) == i + 1 );
+                CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load( &a )) == i );
+                CPPUNIT_ASSERT( atomics::atomic_fetch_add( &a, 1 ));
+                CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load( &a )) == i + 1 );
             }
 
             for ( char i = aSize; i > 1; --i ) {
-                CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load( &a )) == i  );
-                CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub( &a, 1 ));
-                CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load( &a )) == i - 1 );
+                CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load( &a )) == i  );
+                CPPUNIT_ASSERT( atomics::atomic_fetch_sub( &a, 1 ));
+                CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load( &a )) == i - 1 );
             }
 
-            do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_relaxed );
-            do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_consume );
-            do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_acquire );
-            do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_release );
-            do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_acq_rel );
-            do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_seq_cst );
+            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
+            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_consume );
+            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
+            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
+            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel );
+            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst );
         }
 
     public:
         void test_atomic_flag()
         {
-            CDS_ATOMIC::atomic_flag flags[8];
+            atomics::atomic_flag flags[8];
             for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
                 do_test_atomic_flag( flags[i] );
         }
         void test_atomic_flag_volatile()
         {
-            CDS_ATOMIC::atomic_flag volatile flags[8];
+            atomics::atomic_flag volatile flags[8];
             for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
                 do_test_atomic_flag( flags[i] );
         }
@@ -580,22 +580,22 @@ namespace misc {
             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
                 do_test_atomic_bool( a[i] );
 
-                do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_relaxed );
-                do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_consume );
-                do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_acquire );
-                do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_release );
-                do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_acq_rel );
-                do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_seq_cst );
+                do_test_atomic_bool( a[i], atomics::memory_order_relaxed );
+                do_test_atomic_bool( a[i], atomics::memory_order_consume );
+                do_test_atomic_bool( a[i], atomics::memory_order_acquire );
+                do_test_atomic_bool( a[i], atomics::memory_order_release );
+                do_test_atomic_bool( a[i], atomics::memory_order_acq_rel );
+                do_test_atomic_bool( a[i], atomics::memory_order_seq_cst );
             }
         }
 
         void test_atomic_bool()
         {
-            test_atomic_bool_<CDS_ATOMIC::atomic<bool> >();
+            test_atomic_bool_<atomics::atomic<bool> >();
         }
         void test_atomic_bool_volatile()
         {
-            test_atomic_bool_<CDS_ATOMIC::atomic<bool> volatile >();
+            test_atomic_bool_<atomics::atomic<bool> volatile >();
         }
 
         void test_atomic_char()                 { test_atomic_integral<char>(); }
@@ -656,19 +656,19 @@ namespace misc {
 
         void test_atomic_fence()
         {
-            CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_relaxed );
-            CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_consume );
-            CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_acquire );
-            CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_release );
-            CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_acq_rel );
-            CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_seq_cst );
-
-            CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_relaxed );
-            CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_consume );
-            CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_acquire );
-            CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_release );
-            CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_acq_rel );
-            CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_seq_cst );
+            atomics::atomic_thread_fence(atomics::memory_order_relaxed );
+            atomics::atomic_thread_fence(atomics::memory_order_consume );
+            atomics::atomic_thread_fence(atomics::memory_order_acquire );
+            atomics::atomic_thread_fence(atomics::memory_order_release );
+            atomics::atomic_thread_fence(atomics::memory_order_acq_rel );
+            atomics::atomic_thread_fence(atomics::memory_order_seq_cst );
+
+            atomics::atomic_signal_fence(atomics::memory_order_relaxed );
+            atomics::atomic_signal_fence(atomics::memory_order_consume );
+            atomics::atomic_signal_fence(atomics::memory_order_acquire );
+            atomics::atomic_signal_fence(atomics::memory_order_release );
+            atomics::atomic_signal_fence(atomics::memory_order_acq_rel );
+            atomics::atomic_signal_fence(atomics::memory_order_seq_cst );
         }
 
     public:
index 8c0083d6686d33be488daf0929690bd7b0a5bfc5..cf6be7250a21f98f143cf4af5bf5cf0245b5cfac 100644 (file)
@@ -4,26 +4,26 @@
 
 namespace misc {
 
-    static inline CDS_ATOMIC::memory_order convert_to_store_order( CDS_ATOMIC::memory_order order )
+    static inline atomics::memory_order convert_to_store_order( atomics::memory_order order )
     {
         switch ( order ) {
-            case CDS_ATOMIC::memory_order_acquire:
-            case CDS_ATOMIC::memory_order_consume:
-                return CDS_ATOMIC::memory_order_relaxed;
-            case CDS_ATOMIC::memory_order_acq_rel:
-                return CDS_ATOMIC::memory_order_release;
+            case atomics::memory_order_acquire:
+            case atomics::memory_order_consume:
+                return atomics::memory_order_relaxed;
+            case atomics::memory_order_acq_rel:
+                return atomics::memory_order_release;
             default:
                 return order;
         }
     }
 
-    static inline CDS_ATOMIC::memory_order convert_to_load_order( CDS_ATOMIC::memory_order order )
+    static inline atomics::memory_order convert_to_load_order( atomics::memory_order order )
     {
         switch ( order ) {
-            case CDS_ATOMIC::memory_order_release:
-                return CDS_ATOMIC::memory_order_relaxed;
-            case CDS_ATOMIC::memory_order_acq_rel:
-                return CDS_ATOMIC::memory_order_acquire;
+            case atomics::memory_order_release:
+                return atomics::memory_order_relaxed;
+            case atomics::memory_order_acq_rel:
+                return atomics::memory_order_acquire;
             default:
                 return order;
         }
index 6791b8e6ca71a86d175e6cc93ec6ab502edaeed8..6a8e3a48d9b0fbb3df46994c66fb59f5b2e0fbdd 100644 (file)
@@ -147,7 +147,7 @@ namespace map2 {
         typedef size_t      value_type;
         typedef std::pair<key_type const, value_type> pair_type;
 
-        CDS_ATOMIC::atomic<size_t>      m_nInsThreadCount;
+        atomics::atomic<size_t>      m_nInsThreadCount;
 
         // Inserts keys from [0..N)
         template <class Map>
@@ -210,7 +210,7 @@ namespace map2 {
                     }
                 }
 
-                getTest().m_nInsThreadCount.fetch_sub( 1, CDS_ATOMIC::memory_order_acquire );
+                getTest().m_nInsThreadCount.fetch_sub( 1, atomics::memory_order_acquire );
             }
         };
 
@@ -296,7 +296,7 @@ namespace map2 {
                                     ++m_nDeleteFailed;
                             }
                         }
-                        if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+                        if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
                             break;
                     }
                 }
@@ -310,7 +310,7 @@ namespace map2 {
                                     ++m_nDeleteFailed;
                             }
                         }
-                        if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+                        if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
                             break;
                     }
                 }
@@ -369,7 +369,7 @@ namespace map2 {
                                     ++m_nDeleteFailed;
                             }
                         }
-                        if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+                        if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
                             break;
                     }
                 }
@@ -383,7 +383,7 @@ namespace map2 {
                                     ++m_nDeleteFailed;
                             }
                         }
-                        if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+                        if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
                             break;
                     }
                 }
@@ -456,7 +456,7 @@ namespace map2 {
                                 }
                             }
                         }
-                        if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+                        if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
                             break;
                     }
                 }
@@ -484,7 +484,7 @@ namespace map2 {
                                 }
                             }
                         }
-                        if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+                        if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
                             break;
                     }
                 }
@@ -512,7 +512,7 @@ namespace map2 {
             typedef InsertThread<Map> insert_thread;
             typedef DeleteThread<Map> delete_thread;
 
-            m_nInsThreadCount.store( c_nInsThreadCount, CDS_ATOMIC::memory_order_release );
+            m_nInsThreadCount.store( c_nInsThreadCount, atomics::memory_order_release );
 
             CppUnitMini::ThreadPool pool( *this );
             pool.add( new insert_thread( pool, testMap ), c_nInsThreadCount );
@@ -554,7 +554,7 @@ namespace map2 {
             typedef DeleteThread<Map> delete_thread;
             typedef ExtractThread< typename Map::gc, Map > extract_thread;
 
-            m_nInsThreadCount.store( c_nInsThreadCount, CDS_ATOMIC::memory_order_release );
+            m_nInsThreadCount.store( c_nInsThreadCount, atomics::memory_order_release );
 
             CppUnitMini::ThreadPool pool( *this );
             pool.add( new insert_thread( pool, testMap ), c_nInsThreadCount );
index d47a6bea456e6704b3ae611ed4c31a96239f7ff0..765db471a25586eb01f6ff8ad4b7aedff5c86eda 100644 (file)
@@ -31,8 +31,8 @@ namespace map2 {
         struct value_type {
             size_t      nKey;
             size_t      nData;
-            CDS_ATOMIC::atomic<size_t> nEnsureCall;
-            CDS_ATOMIC::atomic<bool>   bInitialized;
+            atomics::atomic<size_t> nEnsureCall;
+            atomics::atomic<bool>   bInitialized;
             cds::OS::ThreadId          threadId     ;   // insert thread id
 
             typedef cds::lock::Spinlock< cds::backoff::pause >   lock_type;
@@ -49,8 +49,8 @@ namespace map2 {
             value_type( value_type const& s )
                 : nKey(s.nKey)
                 , nData(s.nData)
-                , nEnsureCall(s.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed))
-                , bInitialized( s.bInitialized.load(CDS_ATOMIC::memory_order_relaxed) )
+                , nEnsureCall(s.nEnsureCall.load(atomics::memory_order_relaxed))
+                , bInitialized( s.bInitialized.load(atomics::memory_order_relaxed) )
                 , threadId( cds::OS::getCurrentThreadId() )
             {}
 
@@ -59,8 +59,8 @@ namespace map2 {
             {
                 nKey = v.nKey;
                 nData = v.nData;
-                nEnsureCall.store( v.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
-                bInitialized.store(v.bInitialized.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed);
+                nEnsureCall.store( v.nEnsureCall.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
+                bInitialized.store(v.bInitialized.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed);
 
                 return *this;
             }
@@ -95,7 +95,7 @@ namespace map2 {
                     val.second.nData = val.first * 8;
 
                     ++nTestFunctorRef;
-                    val.second.bInitialized.store( true, CDS_ATOMIC::memory_order_relaxed);
+                    val.second.bInitialized.store( true, atomics::memory_order_relaxed);
                 }
             };
 
@@ -187,10 +187,10 @@ namespace map2 {
                         ++nCreated;
                         val.second.nKey = val.first;
                         val.second.nData = val.first * 8;
-                        val.second.bInitialized.store( true, CDS_ATOMIC::memory_order_relaxed);
+                        val.second.bInitialized.store( true, atomics::memory_order_relaxed);
                     }
                     else {
-                        val.second.nEnsureCall.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+                        val.second.nEnsureCall.fetch_add( 1, atomics::memory_order_relaxed );
                         ++nModified;
                     }
                 }
@@ -304,7 +304,7 @@ namespace map2 {
                 void operator ()( pair_type& item )
                 {
                     while ( true ) {
-                        if ( item.second.bInitialized.load( CDS_ATOMIC::memory_order_relaxed )) {
+                        if ( item.second.bInitialized.load( atomics::memory_order_relaxed )) {
                             cds::lock::scoped_lock< typename value_type::lock_type>    ac( item.second.m_access );
 
                             if ( m_cnt.nKeyExpected == item.second.nKey && m_cnt.nKeyExpected * 8 == item.second.nData )
index 01cf3936192b313bbca844d8312d78a1b4720627..1c36934e061006a81a117e3a0391f1bc1a2abd6f 100644 (file)
@@ -94,7 +94,7 @@ namespace queue {
                 }
 
                 m_fTime = m_Timer.duration() - m_fTime;
-                getTest().m_nProducerCount.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
+                getTest().m_nProducerCount.fetch_sub( 1, atomics::memory_order_release );
             }
         };
 
@@ -177,7 +177,7 @@ namespace queue {
                     }
                     else {
                         ++m_nPopEmpty;
-                        if ( getTest().m_nProducerCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 && m_Queue.empty() )
+                        if ( getTest().m_nProducerCount.load( atomics::memory_order_acquire ) == 0 && m_Queue.empty() )
                             break;
                     }
                 }
@@ -206,7 +206,7 @@ namespace queue {
 
     protected:
         size_t                  m_nThreadPushCount;
-        CDS_ATOMIC::atomic<size_t>     m_nProducerCount;
+        atomics::atomic<size_t>     m_nProducerCount;
         static CDS_CONSTEXPR_CONST size_t c_nBadConsumer = 0xbadc0ffe;
 
     protected:
@@ -318,7 +318,7 @@ namespace queue {
 
             CppUnitMini::ThreadPool pool( *this );
 
-            m_nProducerCount.store( s_nWriterThreadCount, CDS_ATOMIC::memory_order_release );
+            m_nProducerCount.store( s_nWriterThreadCount, atomics::memory_order_release );
 
             // Writers must be first
             pool.add( new Producer<Queue>( pool, testQueue ), s_nWriterThreadCount );
index ea36000a9e4ca3d2d73b6464cce0843618bc69b0..93e9b5f49c612bd94ad69a9d0da733b13a1dbfbe 100644 (file)
@@ -178,7 +178,7 @@ namespace queue {
 
     protected:
         size_t                  m_nThreadPushCount;
-        CDS_ATOMIC::atomic<size_t>     m_nWriterDone;
+        atomics::atomic<size_t>     m_nWriterDone;
 
     protected:
         template <class Queue>
index b4d9298a6eeb2aa33d64dd1568a6cfad9de6cc69..d5dab928fe38cd314bc9d4c949020971c40d8b99 100644 (file)
@@ -147,7 +147,7 @@ namespace set2 {
         typedef key_thread  key_type;
         typedef size_t      value_type;
 
-        CDS_ATOMIC::atomic<size_t>      m_nInsThreadCount;
+        atomics::atomic<size_t>      m_nInsThreadCount;
 
         // Inserts keys from [0..N)
         template <class Set>
@@ -210,7 +210,7 @@ namespace set2 {
                     }
                 }
 
-                getTest().m_nInsThreadCount.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
+                getTest().m_nInsThreadCount.fetch_sub( 1, atomics::memory_order_release );
             }
         };
 
@@ -336,7 +336,7 @@ namespace set2 {
                                     ++m_nDeleteFailed;
                             }
                         }
-                        if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+                        if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
                             break;
                     }
                 }
@@ -350,7 +350,7 @@ namespace set2 {
                                     ++m_nDeleteFailed;
                             }
                         }
-                        if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+                        if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
                             break;
                     }
                 }
@@ -409,7 +409,7 @@ namespace set2 {
                                     ++m_nExtractFailed;
                             }
                         }
-                        if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+                        if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
                             break;
                     }
                 }
@@ -423,7 +423,7 @@ namespace set2 {
                                     ++m_nExtractFailed;
                             }
                         }
-                        if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+                        if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
                             break;
                     }
                 }
@@ -491,7 +491,7 @@ namespace set2 {
                                 xp.release();
                             }
                         }
-                        if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+                        if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
                             break;
                     }
                 }
@@ -515,7 +515,7 @@ namespace set2 {
                                 xp.release();
                             }
                         }
-                        if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+                        if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
                             break;
                     }
                 }
@@ -545,7 +545,7 @@ namespace set2 {
             typedef InsertThread<Set> insert_thread;
             typedef DeleteThread<Set> delete_thread;
 
-            m_nInsThreadCount.store( c_nInsThreadCount, CDS_ATOMIC::memory_order_release );
+            m_nInsThreadCount.store( c_nInsThreadCount, atomics::memory_order_release );
 
             CppUnitMini::ThreadPool pool( *this );
             pool.add( new insert_thread( pool, testSet ), c_nInsThreadCount );
@@ -586,7 +586,7 @@ namespace set2 {
             typedef DeleteThread<Set> delete_thread;
             typedef ExtractThread< typename Set::gc, Set > extract_thread;
 
-            m_nInsThreadCount.store( c_nInsThreadCount, CDS_ATOMIC::memory_order_release );
+            m_nInsThreadCount.store( c_nInsThreadCount, atomics::memory_order_release );
 
             CppUnitMini::ThreadPool pool( *this );
             pool.add( new insert_thread( pool, testSet ), c_nInsThreadCount );
index 1f0bdf7d0c7c62663173f5dd45c1005d406e27d8..999bae050e2ccc5bb66595d4bd41f59a798756b1 100644 (file)
@@ -28,7 +28,7 @@ namespace set2 {
         struct value_type {
             size_t      nKey;
             size_t      nData;
-            CDS_ATOMIC::atomic<size_t> nEnsureCall;
+            atomics::atomic<size_t> nEnsureCall;
             bool volatile   bInitialized;
             cds::OS::ThreadId          threadId     ;   // insert thread id
 
@@ -46,7 +46,7 @@ namespace set2 {
             value_type( value_type const& s )
                 : nKey(s.nKey)
                 , nData(s.nData)
-                , nEnsureCall(s.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed))
+                , nEnsureCall(s.nEnsureCall.load(atomics::memory_order_relaxed))
                 , bInitialized( s.bInitialized )
                 , threadId( cds::OS::getCurrentThreadId() )
             {}
@@ -56,7 +56,7 @@ namespace set2 {
             {
                 nKey = v.nKey;
                 nData = v.nData;
-                nEnsureCall.store( v.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
+                nEnsureCall.store( v.nEnsureCall.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                 bInitialized = v.bInitialized;
 
                 return *this;
@@ -196,7 +196,7 @@ namespace set2 {
                         ++nCreated;
                     }
                     else {
-                        val.val.nEnsureCall.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+                        val.val.nEnsureCall.fetch_add( 1, atomics::memory_order_relaxed );
                         ++nModified;
                     }
                 }
index c4d6fad20741671233034d31f70f2985d1f9b0c0..ec6039e590ed61b17dd523fa880c4b385cb612fd 100644 (file)
@@ -40,7 +40,7 @@ namespace istack {
 
     class IntrusiveStack_PushPop: public CppUnitMini::TestCase
     {
-        CDS_ATOMIC::atomic<size_t>  m_nWorkingProducers;
+        atomics::atomic<size_t>  m_nWorkingProducers;
         static CDS_CONSTEXPR_CONST size_t c_nValArraySize = 1024;
         static CDS_CONSTEXPR_CONST size_t c_nBadConsumer = 0xbadc0ffe;
 
@@ -99,7 +99,7 @@ namespace istack {
                         ++m_nPushError;
                 }
 
-                getTest().m_nWorkingProducers.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
+                getTest().m_nWorkingProducers.fetch_sub( 1, atomics::memory_order_release );
             }
         };
 
@@ -147,7 +147,7 @@ namespace istack {
                 m_nDirtyPop = 0;
                 memset( m_arrPop, 0, sizeof(m_arrPop));
 
-                while ( !(getTest().m_nWorkingProducers.load(CDS_ATOMIC::memory_order_acquire) == 0 && m_Stack.empty()) ) {
+                while ( !(getTest().m_nWorkingProducers.load(atomics::memory_order_acquire) == 0 && m_Stack.empty()) ) {
                     typename Stack::value_type * p = m_Stack.pop();
                     if ( p ) {
                         p->nConsumer = m_nThreadNo;
@@ -236,7 +236,7 @@ namespace istack {
         template <class Stack>
         void test( Stack& testStack, value_array<typename Stack::value_type>& arrValue )
         {
-            m_nWorkingProducers.store( s_nPushThreadCount, CDS_ATOMIC::memory_order_release );
+            m_nWorkingProducers.store( s_nPushThreadCount, atomics::memory_order_release );
             size_t const nPushCount = s_nStackSize / s_nPushThreadCount;
 
             typename Stack::value_type * pValStart = arrValue.get();
index 30ecdf2b0520946b5086b25ae5ed7529312eeda5..7d1f8e6614654e6660b56459784c7d6fc27ea147 100644 (file)
@@ -29,7 +29,7 @@ namespace stack {
 
     class Stack_PushPop: public CppUnitMini::TestCase
     {
-        CDS_ATOMIC::atomic<size_t>  m_nWorkingProducers;
+        atomics::atomic<size_t>  m_nWorkingProducers;
         static size_t const c_nValArraySize = 1024;
 
         template <class Stack>
@@ -85,7 +85,7 @@ namespace stack {
                 }
 
 
-                getTest().m_nWorkingProducers.fetch_sub(1, CDS_ATOMIC::memory_order_release);
+                getTest().m_nWorkingProducers.fetch_sub(1, atomics::memory_order_release);
             }
         };
 
@@ -134,7 +134,7 @@ namespace stack {
                 memset( m_arrPop, 0, sizeof(m_arrPop));
 
                 SimpleValue v;
-                while ( !(getTest().m_nWorkingProducers.load(CDS_ATOMIC::memory_order_acquire) == 0 && m_Stack.empty()) ) {
+                while ( !(getTest().m_nWorkingProducers.load(atomics::memory_order_acquire) == 0 && m_Stack.empty()) ) {
                     if ( m_Stack.pop( v )) {
                         ++m_nPopCount;
                         if ( v.nNo < sizeof(m_arrPop)/sizeof(m_arrPop[0]) )
@@ -236,7 +236,7 @@ namespace stack {
         template <class Stack>
         void test( Stack& testStack )
         {
-            m_nWorkingProducers.store(s_nPushThreadCount, CDS_ATOMIC::memory_order_release);
+            m_nWorkingProducers.store(s_nPushThreadCount, atomics::memory_order_release);
             size_t const nPushCount = s_nStackSize / s_nPushThreadCount;
 
             CppUnitMini::ThreadPool pool( *this );