Remove the CDS_RVALUE_SUPPORT and CDS_MOVE_SEMANTICS_SUPPORT macros and their emulation code
[libcds.git] cds/memory/michael/allocator.h
index f571ed1010bd66774befbc04c248cb190727c451..8681913a55d208ccd3452334dde3fa202a522ab8 100644
@@ -140,7 +140,7 @@ namespace michael {
         struct make_null_ptr {
             void operator ()(void *& p)
             {
-                p = null_ptr<void *>();
+                p = nullptr;
             }
         };
 #endif
@@ -332,7 +332,7 @@ namespace michael {
         {
             auto_lock al(m_access);
             if ( base_class::empty() )
-                return null_ptr<T *>();
+                return nullptr;
             T& rDesc = base_class::front();
             base_class::pop_front();
             assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc) ) );
@@ -386,7 +386,7 @@ namespace michael {
         {
             auto_lock al( m_access );
             if ( base_class::empty() )
-                return null_ptr<T *>();
+                return nullptr;
             T& rDesc = base_class::front();
             base_class::pop_front();
             assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc) ) );
@@ -396,7 +396,7 @@ namespace michael {
         /// Removes \p pDesc descriptor from the free-list
         bool unlink( T * pDesc )
         {
-            assert( pDesc != null_ptr<T *>() );
+            assert(pDesc != nullptr);
             auto_lock al( m_access );
             // !inited(pDesc) is equal to "pDesc is being linked to partial list"
             if ( !base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc) ) ) {
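
The null_ptr<T>() calls rewritten in the hunks above come from libcds's pre-C++11 null-pointer emulation. A minimal sketch of the kind of helper being retired here, simplified and not the verbatim libcds definition:

    // Pre-C++11 idiom: a typed null for compilers without the
    // nullptr keyword (illustrative sketch, not the exact helper).
    template <typename T>
    static inline T null_ptr()
    {
        return reinterpret_cast<T>( NULL );
    }

With C++11 required, a plain nullptr converts implicitly to any pointer type, so every call site above collapses to the literal.
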
@@ -688,22 +688,7 @@ namespace michael {
 
         \endcode
     */
-#ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT
     template <typename... Options>
-#else
-    template <
-        typename O1 = opt::none,
-        typename O2 = opt::none,
-        typename O3 = opt::none,
-        typename O4 = opt::none,
-        typename O5 = opt::none,
-        typename O6 = opt::none,
-        typename O7 = opt::none,
-        typename O8 = opt::none,
-        typename O9 = opt::none,
-        typename O10= opt::none
-    >
-#endif
     class Heap {
     protected:
 
@@ -727,11 +712,7 @@ namespace michael {
 
     protected:
         //@cond
-#ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT
         typedef typename opt::make_options<default_options, Options...>::type   options;
-#else
-        typedef typename opt::make_options<default_options, O1, O2, O3, O4, O5, O6, O7, O8, O9, O10 >::type   options;
-#endif
         //@endcond
 
         //@cond
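
The two hunks above replace the fixed ten-parameter option list (O1..O10, each defaulting to opt::none) with a true variadic pack. A minimal sketch of how such an option pack folds into a single options type, assuming a simplified make_options rather than the real cds::opt machinery:

    // Simplified option-pack fold (illustrative, not cds::opt::make_options):
    // each option carries a pack<Base> that overrides one trait of the
    // accumulated defaults.
    struct defaults { enum { alignment = 8 }; };

    template <unsigned N> struct opt_alignment {
        template <class Base> struct pack : Base {
            enum { alignment = N };
        };
    };

    template <class Base, typename... Options> struct make_options;
    template <class Base> struct make_options<Base> {
        typedef Base type;
    };
    template <class Base, typename O, typename... Rest>
    struct make_options<Base, O, Rest...> {
        typedef typename make_options<
            typename O::template pack<Base>, Rest...
        >::type type;
    };

    typedef make_options< defaults, opt_alignment<64> >::type options;
    static_assert( options::alignment == 64, "option overrides default" );

The emulation needed opt::none placeholders precisely because the arity was fixed; the parameter pack makes them unnecessary.
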
@@ -786,7 +767,7 @@ namespace michael {
             : public options::free_list::item_hook
             , public options::partial_list::item_hook
         {
-            CDS_ATOMIC::atomic<anchor_tag>          anchor      ;   ///< anchor, see \ref anchor_tag
+            atomics::atomic<anchor_tag>          anchor      ;   ///< anchor, see \ref anchor_tag
             byte *              pSB         ;   ///< ptr to superblock
             processor_heap_base * pProcHeap ;   ///< pointer to owner processor heap
             unsigned int        nBlockSize  ;   ///< block size in bytes
@@ -794,8 +775,8 @@ namespace michael {
 
             //@cond
             superblock_desc()
-                : pSB( null_ptr<byte *>() )
-                , pProcHeap( null_ptr<processor_heap_base *>() )
+                : pSB(nullptr)
+                , pProcHeap( nullptr )
             {}
             //@endcond
         };
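
superblock_desc keeps its anchor word in an atomics::atomic<anchor_tag>. For that to be a single lock-free CAS target, anchor_tag must be trivially copyable and word-sized. A sketch of such a packed anchor, with illustrative field widths rather than the real libcds layout:

    #include <atomic>
    #include <cstdint>

    // Word-sized, trivially-copyable anchor: one CAS updates all
    // fields at once. Field widths are illustrative.
    struct anchor {
        uint64_t avail : 16;   // index of first free block in the superblock
        uint64_t count : 16;   // number of free blocks
        uint64_t state : 2;    // ACTIVE / FULL / PARTIAL / EMPTY
        uint64_t tag   : 30;   // version counter (ABA protection)
    };
    static_assert( sizeof(anchor) == sizeof(uint64_t), "must fit one word" );

    std::atomic<anchor> a;   // valid compare_exchange_* target
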
@@ -961,7 +942,7 @@ namespace michael {
         /// Processor heap's \p active field
         /**
             The \p active field in the processor heap structure is primarily a pointer to the descriptor
-            of the active superblock owned by the processor heap. If the value of \p active is not \p NULL, it is
+            of the active superblock owned by the processor heap. If the value of \p active is not \p nullptr, it is
             guaranteed that the active superblock has at least one block available for reservation.
             Since the addresses of superblock descriptors can be guaranteed to be aligned to some power
             of 2 (e.g., 64), as an optimization, we can carve a credits subfield to hold the number
@@ -969,7 +950,7 @@ namespace michael {
             of credits is n, then the active superblock contains n+1 blocks available for reservation
             through the \p active field. Note that the number of blocks in a superblock is not limited
             to the maximum reservations that can be held in the credits subfield. In a typical malloc operation
-            (i.e., when \p active != \p NULL and \p credits > 0), the thread reads \p active and then
+            (i.e., when \p active != \p nullptr and \p credits > 0), the thread reads \p active and then
             atomically decrements credits while validating that the active superblock is still valid.
         */
         class active_tag {
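
A sketch of the credits trick described in the comment above: with descriptors aligned to 64 bytes, the low six bits of the pointer are free to store up to 63 credits. Names here are illustrative; the real active_tag follows below:

    #include <cstdint>

    struct superblock_desc;   // opaque here

    // Pointer and credits packed into one word; 64-byte alignment
    // guarantees the low 6 bits of a descriptor address are zero.
    struct active_ptr {
        uintptr_t bits;

        superblock_desc * ptr() const
        { return reinterpret_cast<superblock_desc *>( bits & ~uintptr_t(63) ); }

        unsigned credits() const
        { return unsigned( bits & 63 ); }

        void set( superblock_desc * p, unsigned nCredits )
        { bits = reinterpret_cast<uintptr_t>( p ) | nCredits; }
    };
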
@@ -982,19 +963,17 @@ namespace michael {
 
         public:
             CDS_CONSTEXPR active_tag() CDS_NOEXCEPT
-                : pDesc(null_ptr<superblock_desc *>())
+                : pDesc( nullptr )
                 , nCredits(0)
             {}
 
-#   ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
             active_tag( active_tag const& ) CDS_NOEXCEPT_DEFAULTED = default;
             ~active_tag() CDS_NOEXCEPT_DEFAULTED = default;
             active_tag& operator=(active_tag const& ) CDS_NOEXCEPT_DEFAULTED = default;
-#       if defined(CDS_MOVE_SEMANTICS_SUPPORT) && !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR)
+#       if !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR)
             active_tag( active_tag&& ) CDS_NOEXCEPT_DEFAULTED = default;
             active_tag& operator=(active_tag&&) CDS_NOEXCEPT_DEFAULTED = default;
 #       endif
-#   endif
 
             /// Returns pointer to superblock descriptor
             superblock_desc * ptr() const
@@ -1020,7 +999,7 @@ namespace michael {
 
             void clear()
             {
-                pDesc = null_ptr<superblock_desc *>();
+                pDesc = nullptr;
                 nCredits = 0;
             }
 
@@ -1043,19 +1022,17 @@ namespace michael {
 
         public:
             active_tag() CDS_NOEXCEPT
-                : pDesc( null_ptr<superblock_desc *>() )
+                : pDesc( nullptr )
             {}
-#   ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
             // Clang 3.1: error: first argument to atomic operation must be a pointer to a trivially-copyable type
             //active_tag() CDS_NOEXCEPT_DEFAULTED = default;
             active_tag( active_tag const& ) CDS_NOEXCEPT_DEFAULTED = default;
             ~active_tag() CDS_NOEXCEPT_DEFAULTED = default;
             active_tag& operator=(active_tag const&) CDS_NOEXCEPT_DEFAULTED = default;
-#       if defined(CDS_MOVE_SEMANTICS_SUPPORT) && !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR)
+#       if !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR)
             active_tag( active_tag&& ) CDS_NOEXCEPT_DEFAULTED = default;
             active_tag& operator=(active_tag&&) CDS_NOEXCEPT_DEFAULTED = default;
 #       endif
-#   endif
             superblock_desc *    ptr() const
             {
                 return pDesc.ptr();
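
The Clang 3.1 note above is an instance of the general rule that atomic<T> requires a trivially copyable T. The constraint can be made explicit; a suggestion with a stand-in type, not present in the original header:

    #include <atomic>
    #include <type_traits>

    struct active_tag { void * pDesc; };   // stand-in for the class above

    static_assert( std::is_trivially_copyable<active_tag>::value,
                   "atomic<active_tag> requires a trivially-copyable type" );
    std::atomic<active_tag> active;   // compiles only if the assert holds
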
@@ -1099,10 +1076,10 @@ namespace michael {
         /// Processor heap
         struct processor_heap_base
         {
-            CDS_DATA_ALIGNMENT(8) CDS_ATOMIC::atomic<active_tag> active;   ///< pointer to the descriptor of active superblock owned by processor heap
+            CDS_DATA_ALIGNMENT(8) atomics::atomic<active_tag> active;   ///< pointer to the descriptor of active superblock owned by processor heap
             processor_desc *    pProcDesc   ;   ///< pointer to parent processor descriptor
             const size_class *  pSizeClass  ;   ///< pointer to size class
-            CDS_ATOMIC::atomic<superblock_desc *>   pPartial    ;   ///< pointer to partial filled superblock (may be NULL)
+            atomics::atomic<superblock_desc *>   pPartial    ;   ///< pointer to partially filled superblock (may be \p nullptr)
             partial_list        partialList ;   ///< list of partially filled superblocks owned by the processor heap
             unsigned int        nPageIdx    ;   ///< page size-class index, \ref c_nPageSelfAllocation - "small page"
 
@@ -1119,9 +1096,9 @@ namespace michael {
 
             //@cond
             processor_heap_base() CDS_NOEXCEPT
-                : pProcDesc( null_ptr<processor_desc *>() )
-                , pSizeClass( null_ptr<size_class *>() )
-                , pPartial( null_ptr<superblock_desc *>() )
+                : pProcDesc( nullptr )
+                , pSizeClass( nullptr )
+                , pPartial( nullptr )
             {
                 assert( (reinterpret_cast<uptr_atomic_t>(this) & (c_nAlignment - 1)) == 0 );
             }
@@ -1130,16 +1107,16 @@ namespace michael {
             /// Get partial superblock owned by the processor heap
             superblock_desc * get_partial()
             {
-                superblock_desc * pDesc = pPartial.load(CDS_ATOMIC::memory_order_acquire);
+                superblock_desc * pDesc = pPartial.load(atomics::memory_order_acquire);
                 do {
                     if ( !pDesc ) {
                         pDesc =  partialList.pop();
                         break;
                     }
-                } while ( !pPartial.compare_exchange_weak( pDesc, null_ptr<superblock_desc *>(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed));
+                } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, atomics::memory_order_release, atomics::memory_order_relaxed ) );
 
-                //assert( pDesc == NULL || free_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_free_list_hook *>(pDesc) ));
-                //assert( pDesc == NULL || partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
+                //assert( pDesc == nullptr || free_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_free_list_hook *>(pDesc) ));
+                //assert( pDesc == nullptr || partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
                 return pDesc;
             }
 
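
get_partial() above detaches the cached partial superblock with a weak-CAS loop, falling back to the shared partial list when the slot is empty. The same pattern in isolation, with hypothetical names:

    #include <atomic>

    // Take exclusive ownership of a cached item, or fall back to a
    // shared container. Mirrors the get_partial() loop above.
    template <typename T, typename List>
    T * detach_cached( std::atomic<T *>& slot, List& fallback )
    {
        T * p = slot.load( std::memory_order_acquire );
        do {
            if ( !p )
                return fallback.pop();   // slot already empty
        } while ( !slot.compare_exchange_weak( p, nullptr,
                      std::memory_order_release, std::memory_order_relaxed ));
        return p;   // slot swapped to nullptr; the item is ours
    }
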
@@ -1149,8 +1126,8 @@ namespace michael {
                 assert( pPartial != pDesc );
                 //assert( partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
 
-                superblock_desc * pCur = null_ptr<superblock_desc *>();
-                if ( !pPartial.compare_exchange_strong(pCur, pDesc, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed) )
+                superblock_desc * pCur = nullptr;
+                if ( !pPartial.compare_exchange_strong(pCur, pDesc, atomics::memory_order_acq_rel, atomics::memory_order_relaxed) )
                     partialList.push( pDesc );
             }
 
@@ -1174,8 +1151,8 @@ namespace michael {
 
             //@cond
             processor_desc()
-                : arrProcHeap( null_ptr<processor_heap *>() )
-                , pageHeaps( null_ptr<page_heap *>() )
+                : arrProcHeap( nullptr )
+                , pageHeaps( nullptr )
             {}
             //@endcond
         };
@@ -1186,7 +1163,7 @@ namespace michael {
         system_heap         m_LargeHeap          ;  ///< Heap for large block
         aligned_heap        m_AlignedHeap        ;  ///< Internal aligned heap
         sizeclass_selector  m_SizeClassSelector  ;  ///< Size-class selector
-        CDS_ATOMIC::atomic<processor_desc *> *   m_arrProcDesc  ;  ///< array of pointers to the processor descriptors
+        atomics::atomic<processor_desc *> *   m_arrProcDesc  ;  ///< array of pointers to the processor descriptors
         unsigned int        m_nProcessorCount    ;  ///< Processor count
         bound_checker       m_BoundChecker       ;  ///< Bound checker
 
@@ -1213,16 +1190,16 @@ namespace michael {
             // Reserve block
             while ( true ) {
                 ++nCollision;
-                oldActive = pProcHeap->active.load(CDS_ATOMIC::memory_order_acquire);
+                oldActive = pProcHeap->active.load(atomics::memory_order_acquire);
                 if ( !oldActive.ptr() )
-                    return null_ptr<block_header *>();
+                    return nullptr;
                 unsigned int nCredits = oldActive.credits();
                 active_tag  newActive   ; // default = 0
                 if ( nCredits != 0 ) {
                     newActive = oldActive;
                     newActive.credits( nCredits - 1 );
                 }
-                if ( pProcHeap->active.compare_exchange_strong( oldActive, newActive, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+                if ( pProcHeap->active.compare_exchange_strong( oldActive, newActive, atomics::memory_order_release, atomics::memory_order_relaxed ))
                     break;
             }
 
@@ -1240,7 +1217,7 @@ namespace michael {
             nCollision = -1;
             do {
                 ++nCollision;
-                newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+                newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
 
                 assert( oldAnchor.avail < pDesc->nCapacity );
                 pAddr = pDesc->pSB + oldAnchor.avail * (unsigned long long) pDesc->nBlockSize;
@@ -1256,7 +1233,7 @@ namespace michael {
                         newAnchor.count -= nMoreCredits;
                     }
                 }
-            } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+            } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ));
 
             if ( nCollision )
                 pProcHeap->stat.incActiveAnchorCASFailureCount( nCollision );
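
The loop above is the anchor read-modify-CAS pattern used throughout this allocator: copy the packed anchor, unlink the head of the in-superblock free list, bump the tag so that a concurrent pop/push cycle cannot leave the anchor bitwise-identical (ABA), then publish with one CAS. A reduced stand-alone version over a simplified two-field anchor:

    #include <atomic>
    #include <cstdint>

    struct anchor {
        uint32_t avail;   // head index of the free-block list
        uint32_t tag;     // version counter
    };

    // Pop the head free-block index; next[] holds each block's successor.
    uint32_t pop_free_index( std::atomic<anchor>& a, const uint32_t * next )
    {
        anchor oldA = a.load( std::memory_order_acquire ), newA;
        do {
            newA = oldA;
            newA.avail = next[ oldA.avail ];   // unlink head
            ++newA.tag;                        // defeat ABA
        } while ( !a.compare_exchange_strong( oldA, newA,
                      std::memory_order_release, std::memory_order_relaxed ));
        return oldA.avail;
    }
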
@@ -1285,7 +1262,7 @@ namespace michael {
         retry:
             superblock_desc * pDesc = pProcHeap->get_partial();
             if ( !pDesc )
-                return null_ptr<block_header *>();
+                return nullptr;
 
             // reserve blocks
             anchor_tag  oldAnchor;
@@ -1297,7 +1274,7 @@ namespace michael {
             do {
                 ++nCollision;
 
-                newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+                newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
                 if ( oldAnchor.state == SBSTATE_EMPTY ) {
                     free_superblock( pDesc );
                     goto retry;
@@ -1307,7 +1284,7 @@ namespace michael {
                 newAnchor.count -= nMoreCredits + 1;
                 newAnchor.state = (nMoreCredits > 0) ? SBSTATE_ACTIVE : SBSTATE_FULL;
                 newAnchor.tag += 1;
-            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed) );
+            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
 
             if ( nCollision )
                 pProcHeap->stat.incPartialDescCASFailureCount( nCollision );
@@ -1322,13 +1299,13 @@ namespace michael {
             do {
                 ++nCollision;
 
-                newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+                newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
 
                 assert( oldAnchor.avail < pDesc->nCapacity );
                 pAddr = pDesc->pSB + oldAnchor.avail * pDesc->nBlockSize;
                 newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
                 ++newAnchor.tag;
-            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed) );
+            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
 
             if ( nCollision )
                 pProcHeap->stat.incPartialAnchorCASFailureCount( nCollision );
@@ -1353,10 +1330,10 @@ namespace michael {
         block_header * alloc_from_new_superblock( processor_heap * pProcHeap )
         {
             superblock_desc * pDesc = new_superblock_desc( pProcHeap );
-            assert( pDesc != null_ptr<superblock_desc *>() );
+            assert( pDesc != nullptr );
             pDesc->pSB = new_superblock_buffer( pProcHeap );
 
-            anchor_tag anchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_relaxed);
+            anchor_tag anchor = pDesc->anchor.load(atomics::memory_order_relaxed);
             anchor.tag += 1;
 
             // Make single-linked list of free blocks in superblock
@@ -1374,17 +1351,17 @@ namespace michael {
 
             anchor.count = pDesc->nCapacity - 1 - (newActive.credits() + 1);
             anchor.state = SBSTATE_ACTIVE;
-            pDesc->anchor.store(anchor, CDS_ATOMIC::memory_order_relaxed);
+            pDesc->anchor.store(anchor, atomics::memory_order_relaxed);
 
             active_tag curActive;
-            if ( pProcHeap->active.compare_exchange_strong( curActive, newActive, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+            if ( pProcHeap->active.compare_exchange_strong( curActive, newActive, atomics::memory_order_release, atomics::memory_order_relaxed )) {
                 pProcHeap->stat.incAllocFromNew();
                 //reinterpret_cast<block_header *>( pDesc->pSB )->set( pDesc, 0 );
                 return reinterpret_cast<block_header *>( pDesc->pSB );
             }
 
             free_superblock( pDesc );
-            return null_ptr<block_header *>();
+            return nullptr;
         }
 
         /// Find appropriate processor heap based on size-class selected
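
The "make single-linked list of free blocks" step inside alloc_from_new_superblock() above threads the free list through the blocks themselves: each free block's first bytes hold the index of the next free block. A stand-alone sketch of that loop (the final link points one past the last block, but the anchor's free count keeps it from ever being followed):

    #include <cstdint>
    #include <cstring>

    void link_free_blocks( unsigned char * pSB, unsigned nBlockSize,
                           unsigned nCapacity )
    {
        for ( unsigned i = 0; i < nCapacity; ++i ) {
            uint32_t nNextFree = i + 1;       // block i -> block i+1
            std::memcpy( pSB + size_t(i) * nBlockSize,
                         &nNextFree, sizeof(nNextFree) );
        }
    }
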
@@ -1398,11 +1375,11 @@ namespace michael {
             if ( nProcessorId >= m_nProcessorCount )
                 nProcessorId = 0;
 
-            processor_desc * pDesc = m_arrProcDesc[ nProcessorId ].load( CDS_ATOMIC::memory_order_relaxed );
+            processor_desc * pDesc = m_arrProcDesc[ nProcessorId ].load( atomics::memory_order_relaxed );
             while ( !pDesc ) {
 
                 processor_desc * pNewDesc = new_processor_desc( nProcessorId );
-                if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) {
+                if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, atomics::memory_order_release, atomics::memory_order_relaxed ) ) {
                     pDesc = pNewDesc;
                     break;
                 }
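
find_heap() above initializes a processor descriptor lazily: the first thread to install one wins the compare_exchange_strong; the hunk elides the loser's path, which presumably reclaims the redundant descriptor. The pattern in general form, with hypothetical names:

    #include <atomic>

    template <typename T, typename Factory, typename Dispose>
    T * get_or_create( std::atomic<T *>& slot, Factory make, Dispose destroy )
    {
        T * p = slot.load( std::memory_order_acquire );
        while ( !p ) {
            T * pNew = make();
            if ( slot.compare_exchange_strong( p, pNew,
                     std::memory_order_release, std::memory_order_relaxed ))
                return pNew;    // we installed our candidate
            destroy( pNew );    // lost the race; p now holds the winner's
        }
        return p;
    }
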
@@ -1421,7 +1398,7 @@ namespace michael {
             active_tag  newActive;
             newActive.set( pDesc, nCredits - 1 );
 
-            if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) )
+            if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
                 return;
 
             // Someone installed another active superblock.
@@ -1431,10 +1408,10 @@ namespace michael {
             anchor_tag  newAnchor;
 
             do {
-                newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+                newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
                 newAnchor.count += nCredits;
                 newAnchor.state = SBSTATE_PARTIAL;
-            } while ( !pDesc->anchor.compare_exchange_weak( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+            } while ( !pDesc->anchor.compare_exchange_weak( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ));
 
             pDesc->pProcHeap->add_partial( pDesc );
         }
@@ -1509,13 +1486,13 @@ namespace michael {
                     m_AlignedHeap.free( pDesc );
                 }
 
-                superblock_desc * pPartial = pProcHeap->pPartial.load(CDS_ATOMIC::memory_order_relaxed);
+                superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
                 if ( pPartial ) {
                     free( pPartial->pSB );
                     m_AlignedHeap.free( pPartial );
                 }
 
-                pDesc = pProcHeap->active.load(CDS_ATOMIC::memory_order_relaxed).ptr();
+                pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
                 if ( pDesc ) {
                     free( pDesc->pSB );
                     m_AlignedHeap.free( pDesc );
@@ -1530,13 +1507,13 @@ namespace michael {
                     m_AlignedHeap.free( pDesc );
                 }
 
-                superblock_desc * pPartial = pProcHeap->pPartial.load(CDS_ATOMIC::memory_order_relaxed);
+                superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
                 if ( pPartial ) {
                     pageHeap.free( pPartial->pSB );
                     m_AlignedHeap.free( pPartial );
                 }
 
-                pDesc = pProcHeap->active.load(CDS_ATOMIC::memory_order_relaxed).ptr();
+                pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
                 if ( pDesc ) {
                     pageHeap.free( pDesc->pSB );
                     m_AlignedHeap.free( pDesc );
@@ -1560,7 +1537,7 @@ namespace michael {
                 (pDesc->pageHeaps + i)->page_heap::~page_heap();
 
             //m_IntHeap.free( pDesc->pageHeaps );
-            pDesc->pageHeaps = null_ptr<page_heap *>();
+            pDesc->pageHeaps = nullptr;
 
             pDesc->processor_desc::~processor_desc();
             m_AlignedHeap.free( pDesc );
@@ -1571,13 +1548,13 @@ namespace michael {
         {
             anchor_tag anchor;
             superblock_desc * pDesc = pProcHeap->pProcDesc->listSBDescFree.pop();
-            if ( pDesc == null_ptr<superblock_desc *>() ) {
+            if ( pDesc == nullptr ) {
                 pDesc = new( m_AlignedHeap.alloc(sizeof(superblock_desc), c_nAlignment ) ) superblock_desc;
                 assert( (uptr_atomic_t(pDesc) & (c_nAlignment - 1)) == 0 );
 
-                anchor = pDesc->anchor.load( CDS_ATOMIC::memory_order_relaxed );
+                anchor = pDesc->anchor.load( atomics::memory_order_relaxed );
                 anchor.tag = 0;
-                pDesc->anchor.store( anchor, CDS_ATOMIC::memory_order_relaxed );
+                pDesc->anchor.store( anchor, atomics::memory_order_relaxed );
 
                 pProcHeap->stat.incDescAllocCount();
             }
@@ -1586,9 +1563,9 @@ namespace michael {
             assert( pDesc->nCapacity <= c_nMaxBlockInSuperBlock );
             pDesc->pProcHeap = pProcHeap;
 
-            anchor = pDesc->anchor.load( CDS_ATOMIC::memory_order_relaxed );
+            anchor = pDesc->anchor.load( atomics::memory_order_relaxed );
             anchor.avail = 1;
-            pDesc->anchor.store( anchor, CDS_ATOMIC::memory_order_relaxed );
+            pDesc->anchor.store( anchor, atomics::memory_order_relaxed );
 
             return pDesc;
         }
@@ -1639,17 +1616,17 @@ namespace michael {
                 if ( !pProcHeap )
                     return alloc_from_OS( nSize );
 
-                if ( (pBlock = alloc_from_active( pProcHeap )) != null_ptr<block_header *>() )
+                if ( (pBlock = alloc_from_active( pProcHeap )) != nullptr )
                     break;
-                if ( (pBlock = alloc_from_partial( pProcHeap )) != null_ptr<block_header *>() )
+                if ( (pBlock = alloc_from_partial( pProcHeap )) != nullptr )
                     break;
-                if ( (pBlock = alloc_from_new_superblock( pProcHeap )) != null_ptr<block_header *>() )
+                if ( (pBlock = alloc_from_new_superblock( pProcHeap )) != nullptr )
                     break;
             }
 
             pProcHeap->stat.incAllocatedBytes( pProcHeap->pSizeClass->nBlockSize );
 
-            assert( pBlock != null_ptr<block_header *>() );
+            assert( pBlock != nullptr );
             return pBlock;
         }
 
@@ -1663,7 +1640,7 @@ namespace michael {
 
             m_nProcessorCount = m_Topology.processor_count();
             m_arrProcDesc = new( m_AlignedHeap.alloc(sizeof(processor_desc *) * m_nProcessorCount, c_nAlignment ))
-                CDS_ATOMIC::atomic<processor_desc *>[ m_nProcessorCount ];
+                atomics::atomic<processor_desc *>[ m_nProcessorCount ];
             memset( m_arrProcDesc, 0, sizeof(processor_desc *) * m_nProcessorCount )    ;   // ?? memset for atomic<>
         }
 
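
The "?? memset for atomic<>" comment above flags a real smell: zeroing atomics through memset bypasses the atomic API, even if it happens to work for pointer atomics before any other thread runs. A sketch of one cleaner alternative, constructing each element in place and storing through it (names simplified):

    #include <atomic>
    #include <new>

    void init_slots( void * raw, unsigned n )
    {
        typedef std::atomic<void *> slot;
        slot * arr = static_cast<slot *>( raw );
        for ( unsigned i = 0; i < n; ++i ) {
            new( arr + i ) slot;                                 // construct
            arr[i].store( nullptr, std::memory_order_relaxed );  // zero via API
        }
    }
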
@@ -1674,7 +1651,7 @@ namespace michael {
         ~Heap()
         {
             for ( unsigned int i = 0; i < m_nProcessorCount; ++i ) {
-                processor_desc * pDesc = m_arrProcDesc[i].load(CDS_ATOMIC::memory_order_relaxed);
+                processor_desc * pDesc = m_arrProcDesc[i].load(atomics::memory_order_relaxed);
                 if ( pDesc )
                     free_processor_desc( pDesc );
             }
@@ -1739,7 +1716,7 @@ namespace michael {
 
             pProcHeap->stat.incDeallocatedBytes( pDesc->nBlockSize );
 
-            oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+            oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
             do {
                 newAnchor = oldAnchor;
                 reinterpret_cast<free_block_header *>( pBlock )->nNextFree = oldAnchor.avail;
@@ -1758,7 +1735,7 @@ namespace michael {
                 }
                 else
                     newAnchor.count += 1;
-            } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+            } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ) );
 
             pProcHeap->stat.incFreeCount();
 
@@ -1767,7 +1744,7 @@ namespace michael {
                     free_superblock( pDesc );
             }
             else if (oldAnchor.state == SBSTATE_FULL ) {
-                assert( pProcHeap != null_ptr<processor_heap_base *>() );
+                assert( pProcHeap != nullptr );
                 pProcHeap->stat.decDescFull();
                 pProcHeap->add_partial( pDesc );
             }
@@ -1776,13 +1753,13 @@ namespace michael {
         /// Reallocate memory block
         /**
             If \p nNewSize is zero, then the block pointed to by \p pMemory is freed;
-            the return value is \p NULL, and \p pMemory is left pointing at a freed block.
+            the return value is \p nullptr, and \p pMemory is left pointing at a freed block.
 
             If there is not enough available memory to expand the block to the given size,
-            the original block is left unchanged, and \p NULL is returned.
+            the original block is left unchanged, and \p nullptr is returned.
 
             Aligned memory block cannot be realloc'ed: if \p pMemory has been allocated by \ref alloc_aligned,
-            then the return value is \p NULL and the original block is left unchanged.
+            then the return value is \p nullptr and the original block is left unchanged.
         */
         void * realloc(
             void *  pMemory,    ///< Pointer to previously allocated memory block
@@ -1791,7 +1768,7 @@ namespace michael {
         {
             if ( nNewSize == 0 ) {
                 free( pMemory );
-                return null_ptr<void *>();
+                return nullptr;
             }
 
             const size_t nOrigSize = nNewSize;
@@ -1802,7 +1779,7 @@ namespace michael {
             // Reallocation of aligned block is not possible
             if ( pBlock->isAligned() ) {
                 assert( false );
-                return null_ptr<void *>();
+                return nullptr;
             }
 
             if ( pBlock->isOSAllocated() ) {
@@ -1840,7 +1817,7 @@ namespace michael {
                 return pNew;
 
-            return null_ptr<void *>();
+            return nullptr;
         }
 
         /// Allocate aligned memory block
@@ -1897,7 +1874,7 @@ namespace michael {
         {
             size_t nProcHeapCount = m_SizeClassSelector.size();
             for ( unsigned int nProcessor = 0; nProcessor < m_nProcessorCount; ++nProcessor ) {
-                processor_desc * pProcDesc = m_arrProcDesc[nProcessor].load(CDS_ATOMIC::memory_order_relaxed);
+                processor_desc * pProcDesc = m_arrProcDesc[nProcessor].load(atomics::memory_order_relaxed);
                 if ( pProcDesc ) {
                     for ( unsigned int i = 0; i < nProcHeapCount; ++i ) {
                         processor_heap_base * pProcHeap = pProcDesc->arrProcHeap + i;