HP and DHP SMR totally refactored
author    khizmax <libcds.dev@gmail.com>
Sun, 15 Jan 2017 20:15:22 +0000 (23:15 +0300)
committer    khizmax <libcds.dev@gmail.com>
Sun, 15 Jan 2017 20:15:22 +0000 (23:15 +0300)
145 files changed:
CMakeLists.txt
cds/compiler/clang/defs.h
cds/compiler/gcc/defs.h
cds/compiler/icl/defs.h
cds/compiler/vc/defs.h
cds/details/throw_exception.h [new file with mode: 0644]
cds/gc/details/dhp.h [deleted file]
cds/gc/details/hp.h [deleted file]
cds/gc/details/hp_alloc.h [deleted file]
cds/gc/details/hp_common.h [new file with mode: 0644]
cds/gc/details/hp_type.h [deleted file]
cds/gc/dhp.h
cds/gc/dhp_smr.h [new file with mode: 0644]
cds/gc/hp.h
cds/gc/hp_smr.h [new file with mode: 0644]
cds/gc/impl/dhp_decl.h [deleted file]
cds/gc/impl/dhp_impl.h [deleted file]
cds/gc/impl/hp_decl.h [deleted file]
cds/gc/impl/hp_impl.h [deleted file]
cds/threading/details/_common.h
cds/threading/details/cxx11_manager.h
cds/threading/details/gcc_manager.h
cds/threading/details/msvc_manager.h
cds/threading/details/pthread_manager.h
cds/threading/details/wintls_manager.h
cds/threading/model.h
change.log
doxygen/cds.doxy
projects/Win/vc14/cds.vcxproj
projects/Win/vc14/cds.vcxproj.filters
projects/Win/vc14/gtest-deque.vcxproj
projects/Win/vc14/gtest-ilist-iterable.vcxproj
projects/Win/vc14/gtest-ilist-lazy.vcxproj
projects/Win/vc14/gtest-ilist-michael.vcxproj
projects/Win/vc14/gtest-iset-feldman.vcxproj
projects/Win/vc14/gtest-iset-michael-iterable.vcxproj
projects/Win/vc14/gtest-iset-michael-lazy.vcxproj
projects/Win/vc14/gtest-iset-michael.vcxproj
projects/Win/vc14/gtest-iset-skip.vcxproj
projects/Win/vc14/gtest-iset-split-iterable.vcxproj
projects/Win/vc14/gtest-iset-split-lazy.vcxproj
projects/Win/vc14/gtest-iset-split-michael.vcxproj
projects/Win/vc14/gtest-list-iterable.vcxproj
projects/Win/vc14/gtest-list-lazy.vcxproj
projects/Win/vc14/gtest-list-michael.vcxproj
projects/Win/vc14/gtest-map-feldman.vcxproj
projects/Win/vc14/gtest-map-michael-iterable.vcxproj
projects/Win/vc14/gtest-map-michael-lazy.vcxproj
projects/Win/vc14/gtest-map-michael.vcxproj
projects/Win/vc14/gtest-map-skip.vcxproj
projects/Win/vc14/gtest-map-split-iterable.vcxproj
projects/Win/vc14/gtest-map-split-lazy.vcxproj
projects/Win/vc14/gtest-map-split-michael.vcxproj
projects/Win/vc14/gtest-misc.vcxproj
projects/Win/vc14/gtest-pqueue.vcxproj
projects/Win/vc14/gtest-queue.vcxproj
projects/Win/vc14/gtest-set-feldman.vcxproj
projects/Win/vc14/gtest-set-michael-iterable.vcxproj
projects/Win/vc14/gtest-set-michael-lazy.vcxproj
projects/Win/vc14/gtest-set-michael.vcxproj
projects/Win/vc14/gtest-set-skip.vcxproj
projects/Win/vc14/gtest-set-split-iterable.vcxproj
projects/Win/vc14/gtest-set-split-lazy.vcxproj
projects/Win/vc14/gtest-set-split-michael.vcxproj
projects/Win/vc14/gtest-stack.vcxproj
projects/Win/vc14/gtest-tree-bronson.vcxproj
projects/Win/vc14/gtest-tree-ellen.vcxproj
projects/Win/vc14/stress-framework.vcxproj
projects/Win/vc14/stress-freelist.vcxproj
projects/Win/vc14/stress-map-delodd.vcxproj
projects/Win/vc14/stress-map-find_int.vcxproj
projects/Win/vc14/stress-map-find_string.vcxproj
projects/Win/vc14/stress-map-insdel-func.vcxproj
projects/Win/vc14/stress-map-insdel-int.vcxproj
projects/Win/vc14/stress-map-insdel-item-int.vcxproj
projects/Win/vc14/stress-map-insdel-string.vcxproj
projects/Win/vc14/stress-map-insdelfind.vcxproj
projects/Win/vc14/stress-map-insfind-int.vcxproj
projects/Win/vc14/stress-pqueue.vcxproj
projects/Win/vc14/stress-queue.vcxproj
projects/Win/vc14/stress-set-delodd.vcxproj
projects/Win/vc14/stress-set-insdel_func.vcxproj
projects/Win/vc14/stress-set-insdel_string.vcxproj
projects/Win/vc14/stress-set-insdelfind.vcxproj
projects/Win/vc14/stress-set-iteration.vcxproj
projects/Win/vc14/stress-stack.vcxproj
src/dhp.cpp [new file with mode: 0644]
src/dhp_gc.cpp [deleted file]
src/hp.cpp [new file with mode: 0644]
src/hp_gc.cpp [deleted file]
src/init.cpp
src/thread_data.cpp [new file with mode: 0644]
test/stress/data/test-debug.conf
test/stress/data/test-express-x86.conf
test/stress/data/test-express.conf
test/stress/data/test.conf
test/stress/main.cpp
test/unit/intrusive-list/intrusive_iterable_dhp.cpp
test/unit/intrusive-list/intrusive_lazy_dhp.cpp
test/unit/intrusive-list/intrusive_michael_dhp.cpp
test/unit/intrusive-set/intrusive_feldman_hashset_dhp.cpp
test/unit/intrusive-set/intrusive_michael_iterable_dhp.cpp
test/unit/intrusive-set/intrusive_michael_lazy_dhp.cpp
test/unit/intrusive-set/intrusive_michael_michael_dhp.cpp
test/unit/intrusive-set/intrusive_skiplist_dhp.cpp
test/unit/intrusive-set/intrusive_split_iterable_dhp.cpp
test/unit/intrusive-set/intrusive_split_lazy_dhp.cpp
test/unit/intrusive-set/intrusive_split_michael_dhp.cpp
test/unit/list/iterable_dhp.cpp
test/unit/list/kv_iterable_dhp.cpp
test/unit/list/kv_lazy_dhp.cpp
test/unit/list/kv_michael_dhp.cpp
test/unit/list/lazy_dhp.cpp
test/unit/list/michael_dhp.cpp
test/unit/map/feldman_hashmap_dhp.cpp
test/unit/map/michael_iterable_dhp.cpp
test/unit/map/michael_lazy_dhp.cpp
test/unit/map/michael_michael_dhp.cpp
test/unit/map/skiplist_dhp.cpp
test/unit/map/split_iterable_dhp.cpp
test/unit/map/split_lazy_dhp.cpp
test/unit/map/split_michael_dhp.cpp
test/unit/queue/basket_queue_dhp.cpp
test/unit/queue/intrusive_basket_queue_dhp.cpp
test/unit/queue/intrusive_moirqueue_dhp.cpp
test/unit/queue/intrusive_msqueue_dhp.cpp
test/unit/queue/intrusive_optqueue_dhp.cpp
test/unit/queue/intrusive_segmented_queue_dhp.cpp
test/unit/queue/moirqueue_dhp.cpp
test/unit/queue/msqueue_dhp.cpp
test/unit/queue/optimistic_queue_dhp.cpp
test/unit/queue/segmented_queue_dhp.cpp
test/unit/set/feldman_hashset_dhp.cpp
test/unit/set/michael_iterable_dhp.cpp
test/unit/set/michael_lazy_dhp.cpp
test/unit/set/michael_michael_dhp.cpp
test/unit/set/skiplist_dhp.cpp
test/unit/set/split_iterable_dhp.cpp
test/unit/set/split_lazy_dhp.cpp
test/unit/set/split_michael_dhp.cpp
test/unit/stack/intrusive_treiber_stack_dhp.cpp
test/unit/stack/treiber_stack_dhp.cpp
test/unit/tree/ellen_bintree_map_dhp.cpp
test/unit/tree/ellen_bintree_set_dhp.cpp
test/unit/tree/intrusive_ellenbintree_dhp.cpp

index 4848054..0843a2c 100644 (file)
@@ -109,11 +109,12 @@ include_directories(${PROJECT_SOURCE_DIR})
 set(LIBRARIES_COMPONENT lib)
 set(HEADERS_COMPONENT devel)
 
-set(SOURCES src/hp_gc.cpp
-            src/init.cpp
-            src/dhp_gc.cpp
+set(SOURCES src/init.cpp
+            src/hp.cpp
+            src/dhp.cpp
             src/urcu_gp.cpp
             src/urcu_sh.cpp
+            src/thread_data.cpp
             src/topology_hpux.cpp
             src/topology_linux.cpp
             src/topology_osx.cpp
index 3c43ef0..daec9c7 100644 (file)
 #   define CDS_DEPRECATED( reason ) __attribute__((deprecated( reason )))
 #endif
 
+#define CDS_NORETURN __attribute__((__noreturn__))
+
 // *************************************************
 // Features
 #if defined(__has_feature) && __has_feature(thread_sanitizer)
 #define cds_likely( expr )   __builtin_expect( !!( expr ), 1 )
 #define cds_unlikely( expr ) __builtin_expect( !!( expr ), 0 )
 
+// Exceptions
+
+#if defined( __EXCEPTIONS ) && __EXCEPTIONS == 1
+#   define CDS_EXCEPTION_ENABLED
+#endif
+
+
 // double-width CAS support - only for libc++
 #ifdef _LIBCPP_VERSION
 #   if CDS_BUILD_BITS == 64
index 32b02c8..f7e3c40 100644 (file)
 #   define CDS_DEPRECATED( reason ) __attribute__((deprecated( reason )))
 #endif
 
+#define CDS_NORETURN __attribute__((__noreturn__))
+
 // likely/unlikely
 
 #define cds_likely( expr )   __builtin_expect( !!( expr ), 1 )
 #define cds_unlikely( expr ) __builtin_expect( !!( expr ), 0 )
 
+// Exceptions
+
+#if defined( __EXCEPTIONS ) && __EXCEPTIONS == 1
+#   define CDS_EXCEPTION_ENABLED
+#endif
+
 // double-width CAS support
 // note: gcc-4.8 does not support double-word atomics
 //       gcc-4.9: many crashes when DCAS is used
index dde93b9..f0e0729 100644 (file)
 // Attributes
 #if CDS_OS_INTERFACE == CDS_OSI_WINDOWS
 #   define CDS_DEPRECATED( reason ) __declspec(deprecated( reason ))
+#   define CDS_NORETURN __declspec(noreturn)
 #else
 #   define CDS_DEPRECATED( reason ) __attribute__((deprecated( reason )))
+#   define CDS_NORETURN __attribute__((__noreturn__))
 #endif
 
+// Exceptions
+
+#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS
+#   if defined( _CPPUNWIND )
+#       define CDS_EXCEPTION_ENABLED
+#   endif
+#else
+#   if defined( __EXCEPTIONS ) && __EXCEPTIONS == 1
+#       define CDS_EXCEPTION_ENABLED
+#   endif
+#endif
+
+
 #include <cds/compiler/icl/compiler_barriers.h>
 
 //@endcond
index d3425dd..f665acf 100644 (file)
 #   define CDS_DEPRECATED( reason ) __declspec(deprecated( reason ))
 #endif
 
+#define CDS_NORETURN __declspec(noreturn)
+
+// Exceptions
+
+#if defined( _CPPUNWIND )
+#   define CDS_EXCEPTION_ENABLED
+#endif
+
+
 // double-width CAS support
 //#define CDS_DCAS_SUPPORT
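
Taken together, the four compiler-specific hunks above introduce CDS_NORETURN and a single CDS_EXCEPTION_ENABLED feature macro (derived from __EXCEPTIONS on gcc/clang and _CPPUNWIND on MSVC/ICL). A minimal sketch of how client code might branch on the detected macro; the report_fatal() helper is hypothetical and not part of libcds:

    #include <cds/details/defs.h>   // brings in the compiler defs shown above
    #include <cstdio>
    #include <cstdlib>
    #include <stdexcept>

    // Hypothetical helper: throw if the library detected exception support,
    // otherwise print the message and terminate.
    CDS_NORETURN void report_fatal( char const* msg )
    {
    #if defined( CDS_EXCEPTION_ENABLED )
        throw std::runtime_error( msg );
    #else
        std::fprintf( stderr, "fatal: %s\n", msg );
        std::abort();
    #endif
    }

This is essentially the pattern the new cds/details/throw_exception.h header (below) packages for the library itself.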
 
diff --git a/cds/details/throw_exception.h b/cds/details/throw_exception.h
new file mode 100644 (file)
index 0000000..cd708df
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+    This file is a part of libcds - Concurrent Data Structures library
+
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
+
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_DETAILS_THROW_EXCEPTION_H
+#define CDSLIB_DETAILS_THROW_EXCEPTION_H
+
+#include <cds/details/defs.h>
+#if !defined( CDS_EXCEPTION_ENABLED ) && !defined( CDS_USER_DEFINED_THROW_EXCEPTION )
+#   include <stdio.h>
+#   include <stdlib.h>  // abort()
+#endif
+
+namespace cds {
+
+#if !defined( CDS_USER_DEFINED_THROW_EXCEPTION )
+#if defined( CDS_EXCEPTION_ENABLED )
+    /// Function to throw an exception
+    /**
+        If your code is compiled with exceptions enabled, the \p %throw_exception() function
+        throws \p exception.
+
+        If exceptions are disabled, \p %throw_exception() prints the exception message to
+        standard output and calls \p abort().
+
+        You may supply your own \p %cds::throw_exception() function;
+        to do so, specify \p -DCDS_USER_DEFINED_THROW_EXCEPTION
+        on the compiler command line.
+
+        @note \p %throw_exception() never returns. If the user-defined \p %throw_exception() returns,
+        the behavior is undefined.
+    */
+    template <typename E>
+    CDS_NORETURN static inline void throw_exception(
+        E&& exception,       ///< Exception to throw
+        char const* file,   ///< Source filename
+        int line            ///< File line
+    )
+    {
+        CDS_UNUSED( file );
+        CDS_UNUSED( line );
+
+        throw exception;
+    }
+#else
+    template <typename E>
+    CDS_NORETURN static inline void throw_exception( E&& exception, char const* file, int line )
+    {
+        printf( "file %s, line %d: %s\n", file, line, exception.what() );
+        abort();
+    }
+#endif
+//#else
+    // User-provided cds::throw_exception()
+#endif
+
+#define CDS_THROW_EXCEPTION( exception ) ::cds::throw_exception( exception, __FILE__, __LINE__ )
+
+} // namespace cds
+
+
+#endif // #ifndef CDSLIB_DETAILS_THROW_EXCEPTION_H
+
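
A usage sketch for the new header; the function, the limit check, and the message are illustrative only:

    #include <cstddef>
    #include <stdexcept>
    #include <cds/details/throw_exception.h>

    void set_capacity( std::size_t n, std::size_t max_capacity )
    {
        if ( n > max_capacity ) {
            // With CDS_EXCEPTION_ENABLED this throws std::overflow_error;
            // otherwise it prints "file ..., line ...: ..." and calls abort().
            CDS_THROW_EXCEPTION( std::overflow_error( "requested capacity is too large" ));
        }
        // ... perform the resize ...
    }

Defining CDS_USER_DEFINED_THROW_EXCEPTION on the compiler command line suppresses both built-in branches so that a user-supplied cds::throw_exception() is used instead.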
diff --git a/cds/gc/details/dhp.h b/cds/gc/details/dhp.h
deleted file mode 100644 (file)
index da9113d..0000000
+++ /dev/null
@@ -1,874 +0,0 @@
-/*
-    This file is a part of libcds - Concurrent Data Structures library
-
-    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
-
-    Source code repo: http://github.com/khizmax/libcds/
-    Download: http://sourceforge.net/projects/libcds/files/
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are met:
-
-    * Redistributions of source code must retain the above copyright notice, this
-      list of conditions and the following disclaimer.
-
-    * Redistributions in binary form must reproduce the above copyright notice,
-      this list of conditions and the following disclaimer in the documentation
-      and/or other materials provided with the distribution.
-
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef CDSLIB_GC_DETAILS_DHP_H
-#define CDSLIB_GC_DETAILS_DHP_H
-
-#include <mutex>        // unique_lock
-#include <cds/algo/atomic.h>
-#include <cds/algo/int_algo.h>
-#include <cds/gc/details/retired_ptr.h>
-#include <cds/details/aligned_allocator.h>
-#include <cds/details/allocator.h>
-#include <cds/sync/spinlock.h>
-
-#if CDS_COMPILER == CDS_COMPILER_MSVC
-#   pragma warning(push)
-#   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
-#endif
-
-//@cond
-namespace cds { namespace gc {
-
-    /// Dynamic Hazard Pointer reclamation schema
-    /**
-        The cds::gc::dhp namespace and its members are the internal representation of the GC and should not be used directly.
-        Use the cds::gc::DHP class in your code.
-
-        The Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of the DHP schema is
-        the GC class and its nested classes. Before using any DHP-related class you must initialize the DHP garbage collector
-        by constructing a cds::gc::DHP object at the beginning of your main().
-        See cds::gc::DHP class for explanation.
-
-        \par Implementation issues
-            The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
-            This should not introduce a significant performance bottleneck, because each thread has its own set
-            of guards allocated from the global list of free guards, and the global list is accessed only when
-            all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
-            Guards allocated for a thread are pushed back to the global list only when the thread terminates.
-    */
-    namespace dhp {
-
-        // Forward declarations
-        class Guard;
-        template <size_t Count> class GuardArray;
-        class ThreadGC;
-        class GarbageCollector;
-
-        /// Retired pointer type
-        typedef cds::gc::details::retired_ptr retired_ptr;
-
-        using cds::gc::details::free_retired_ptr_func;
-
-        /// Details of Dynamic Hazard Pointer algorithm
-        namespace details {
-
-            // Forward declaration
-            class liberate_set;
-
-            /// Retired pointer buffer node
-            struct retired_ptr_node {
-                retired_ptr         m_ptr   ;   ///< retired pointer
-                atomics::atomic<retired_ptr_node *>  m_pNext     ;   ///< next retired pointer in buffer
-                atomics::atomic<retired_ptr_node *>  m_pNextFree ;   ///< next item in free list of \p retired_ptr_node
-            };
-
-            /// Internal guard representation
-            struct guard_data {
-                typedef void * guarded_ptr;  ///< type of value guarded
-
-                atomics::atomic<guarded_ptr>  pPost;       ///< pointer guarded
-                atomics::atomic<guard_data *> pGlobalNext; ///< next item of global list of allocated guards
-                atomics::atomic<guard_data *> pNextFree;   ///< pointer to the next item in global or thread-local free-list
-
-                guard_data * pThreadNext; ///< next item of thread's local list of guards
-
-                guard_data() CDS_NOEXCEPT
-                    : pPost( nullptr )
-                    , pGlobalNext( nullptr )
-                    , pNextFree( nullptr )
-                    , pThreadNext( nullptr )
-                {}
-
-                void init() CDS_NOEXCEPT
-                {
-                    pPost.store( nullptr, atomics::memory_order_relaxed );
-                }
-
-                /// Checks if the guard is free, that is, it does not contain any pointer guarded
-                bool isFree() const CDS_NOEXCEPT
-                {
-                    return pPost.load( atomics::memory_order_acquire ) == nullptr;
-                }
-
-                guarded_ptr get( atomics::memory_order order = atomics::memory_order_acquire )
-                {
-                    return pPost.load( order );
-                }
-
-                void set( guarded_ptr p, atomics::memory_order order = atomics::memory_order_release )
-                {
-                    pPost.store( p, order );
-                }
-            };
-
-            /// Guard allocator
-            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
-            class guard_allocator
-            {
-                cds::details::Allocator<details::guard_data>  m_GuardAllocator;   ///< guard allocator
-
-                atomics::atomic<guard_data *>  m_GuardList;     ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
-                atomics::atomic<guard_data *>  m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
-                cds::sync::spin                m_freeListLock;  ///< Access to m_FreeGuardList
-
-                /*
-                    Unfortunately, access to the list of free guards is lock-based.
-                    Lock-free manipulation of the guard free-list is ABA-prone.
-                    TODO: work with m_FreeGuardList in a lock-free manner.
-                */
-
-            private:
-                /// Allocates new guard from the heap. The function uses aligned allocator
-                guard_data * allocNew()
-                {
-                    //TODO: the allocator should make block allocation
-
-                    details::guard_data * pGuard = m_GuardAllocator.New();
-
-                    // Link guard to the list
-                    // m_GuardList is an accumulating list and it cannot support concurrent deletion,
-                    // so, ABA problem is impossible for it
-                    details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
-                    do {
-                        pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
-                        // pHead is changed by compare_exchange_weak
-                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_acq_rel, atomics::memory_order_acquire ));
-
-                    pGuard->init();
-                    return pGuard;
-                }
-
-            public:
-                // Default ctor
-                guard_allocator() CDS_NOEXCEPT
-                    : m_GuardList( nullptr )
-                    , m_FreeGuardList( nullptr )
-                {}
-
-                // Destructor
-                ~guard_allocator()
-                {
-                    guard_data * pNext;
-                    for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
-                        pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
-                        m_GuardAllocator.Delete( pData );
-                    }
-                }
-
-                /// Allocates a guard from free list or from heap if free list is empty
-                guard_data* alloc()
-                {
-                    // Try to pop a guard from free-list
-                    details::guard_data * pGuard;
-
-                    {
-                        std::unique_lock<cds::sync::spin> al( m_freeListLock );
-                        pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
-                        if ( pGuard )
-                            m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
-                    }
-                    if ( !pGuard )
-                        return allocNew();
-
-                    pGuard->init();
-                    return pGuard;
-                }
-
-                /// Frees guard \p pGuard
-                /**
-                    The function places the guard \p pGuard into free-list
-                */
-                void free( guard_data* pGuard ) CDS_NOEXCEPT
-                {
-                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
-
-                    std::unique_lock<cds::sync::spin> al( m_freeListLock );
-                    pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
-                    m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
-                }
-
-                /// Allocates list of guard
-                /**
-                    The list returned is linked by guard's \p pThreadNext and \p pNextFree fields.
-
-                    cds::gc::dhp::ThreadGC supporting method
-                */
-                guard_data * allocList( size_t nCount )
-                {
-                    assert( nCount != 0 );
-
-                    guard_data * pHead;
-                    guard_data * pLast;
-
-                    pHead =
-                        pLast = alloc();
-
-                    // The guard list allocated is private for the thread,
-                    // so, we can use relaxed memory order
-                    while ( --nCount ) {
-                        guard_data * p = alloc();
-                        pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
-                        pLast = p;
-                    }
-
-                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
-
-                    return pHead;
-                }
-
-                /// Frees list of guards
-                /**
-                    The list \p pList is linked by guard's \p pThreadNext field.
-
-                    cds::gc::dhp::ThreadGC supporting method
-                */
-                void freeList( guard_data * pList ) CDS_NOEXCEPT
-                {
-                    assert( pList != nullptr );
-
-                    guard_data * pLast = pList;
-                    while ( pLast->pThreadNext ) {
-                        pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
-                        guard_data * p;
-                        pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
-                        pLast = p;
-                    }
-
-                    std::unique_lock<cds::sync::spin> al( m_freeListLock );
-                    pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
-                    m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
-                }
-
-                /// Returns the list's head of guards allocated
-                guard_data * begin() CDS_NOEXCEPT
-                {
-                    return m_GuardList.load(atomics::memory_order_acquire);
-                }
-            };
-
-            /// Retired pointer buffer
-            /**
-                The buffer of retired nodes ready for liberating.
-                When the buffer size exceeds a threshold, the GC calls the \p scan() procedure to free
-                retired nodes.
-            */
-            class retired_ptr_buffer
-            {
-                atomics::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
-                atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count
-
-            public:
-                retired_ptr_buffer() CDS_NOEXCEPT
-                    : m_pHead( nullptr )
-                    , m_nItemCount(0)
-                {}
-
-                ~retired_ptr_buffer() CDS_NOEXCEPT
-                {
-                    assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
-                }
-
-                /// Pushes new node into the buffer. Returns current buffer size
-                size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
-                {
-                    retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
-                    do {
-                        node.m_pNext.store( pHead, atomics::memory_order_relaxed );
-                        // pHead is changed by compare_exchange_weak
-                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_acquire ));
-
-                    return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
-                }
-
-                /// Pushes [pFirst, pLast] list linked by pNext field.
-                size_t push_list( retired_ptr_node* pFirst, retired_ptr_node* pLast, size_t nSize )
-                {
-                    assert( pFirst );
-                    assert( pLast );
-
-                    retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
-                    do {
-                        pLast->m_pNext.store( pHead, atomics::memory_order_relaxed );
-                        // pHead is changed by compare_exchange_weak
-                    } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_acquire ));
-
-                    return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + 1;
-                }
-
-                /// Result of \ref dhp_gc_privatize "privatize" function.
-                /**
-                    The \p privatize function returns retired node list as \p first and the size of that list as \p second.
-                */
-                typedef std::pair<retired_ptr_node *, size_t> privatize_result;
-
-                /// Gets current list of retired pointer and clears the list
-                /**@anchor dhp_gc_privatize
-                */
-                privatize_result privatize() CDS_NOEXCEPT
-                {
-                    privatize_result res;
-
-                    // Item counter is needed only as a threshold for \p scan() function
-                    // So, we may clear the item counter without synchronization with m_pHead
-                    res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
-                    res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
-                    return res;
-                }
-
-                /// Returns current size of buffer (approximate)
-                size_t size() const CDS_NOEXCEPT
-                {
-                    return m_nItemCount.load(atomics::memory_order_relaxed);
-                }
-            };
-
-            /// Pool of retired pointers
-            /**
-                The class acts as an allocator of retired node.
-                Retired pointers are linked in the lock-free list.
-            */
-            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
-            class retired_ptr_pool {
-                /// Pool item
-                typedef retired_ptr_node    item;
-
-                /// Count of items in block
-                static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
-
-                /// Pool block
-                struct block {
-                    atomics::atomic<block *> pNext;     ///< next block
-                    item        items[m_nItemPerBlock]; ///< item array
-                };
-
-                atomics::atomic<block *> m_pBlockListHead;   ///< head of the allocated block list
-
-                // To solve ABA problem we use epoch-based approach
-                unsigned int const m_nEpochBitmask;             ///< Epoch bitmask (log2( m_nEpochCount))
-                atomics::atomic<unsigned int> m_nCurEpoch;      ///< Current epoch
-                atomics::atomic<item *>* m_pEpochFree;          ///< List of free item per epoch
-                atomics::atomic<item *>  m_pGlobalFreeHead;     ///< Head of unallocated item list
-
-                typedef cds::details::Allocator< block, Alloc > block_allocator;
-                typedef cds::details::Allocator< atomics::atomic<item *>, Alloc > epoch_array_alloc;
-
-            private:
-                void allocNewBlock()
-                {
-                    // allocate new block
-                    block * pNew = block_allocator().New();
-
-                    // link items within the block
-                    item * pLastItem = pNew->items + m_nItemPerBlock - 1;
-                    for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
-                        pItem->m_pNextFree.store( pItem + 1, atomics::memory_order_release );
-                        CDS_STRICT_DO( pItem->m_pNext.store( nullptr, atomics::memory_order_relaxed ));
-                    }
-
-                    // links new block to the block list
-                    {
-                        block * pHead = m_pBlockListHead.load(atomics::memory_order_relaxed);
-                        do {
-                            pNew->pNext.store( pHead, atomics::memory_order_relaxed );
-                            // pHead is changed by compare_exchange_weak
-                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_acquire ));
-                    }
-
-                    // links block's items to the free list
-                    {
-                        item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_relaxed);
-                        do {
-                            pLastItem->m_pNextFree.store( pHead, atomics::memory_order_release );
-                            // pHead is changed by compare_exchange_weak
-                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_acquire ));
-                    }
-                }
-
-                unsigned int current_epoch() const CDS_NOEXCEPT
-                {
-                    return m_nCurEpoch.load(atomics::memory_order_acquire) & m_nEpochBitmask;
-                }
-
-                unsigned int next_epoch() const CDS_NOEXCEPT
-                {
-                    return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & m_nEpochBitmask;
-                }
-
-            public:
-                retired_ptr_pool( unsigned int nEpochCount = 8 )
-                    : m_pBlockListHead( nullptr )
-                    , m_nEpochBitmask( static_cast<unsigned int>(beans::ceil2(nEpochCount)) - 1 )
-                    , m_nCurEpoch(0)
-                    , m_pEpochFree( epoch_array_alloc().NewArray( m_nEpochBitmask + 1))
-                    , m_pGlobalFreeHead( nullptr )
-                {
-
-
-                    for (unsigned int i = 0; i <= m_nEpochBitmask; ++i )
-                        m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );
-
-                    allocNewBlock();
-                }
-
-                ~retired_ptr_pool()
-                {
-                    block_allocator a;
-                    block * p;
-                    for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
-                        p = pBlock->pNext.load( atomics::memory_order_relaxed );
-                        a.Delete( pBlock );
-                    }
-
-                    epoch_array_alloc().Delete( m_pEpochFree, m_nEpochBitmask + 1 );
-                }
-
-                /// Increments current epoch
-                void inc_epoch() CDS_NOEXCEPT
-                {
-                    m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
-                }
-
-                /// Allocates the new retired pointer
-                retired_ptr_node&  alloc()
-                {
-                    unsigned int nEpoch;
-                    item * pItem;
-                    for (;;) {
-                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
-                        if ( !pItem )
-                            goto retry;
-                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem,
-                                                                         pItem->m_pNextFree.load(atomics::memory_order_acquire),
-                                                                         atomics::memory_order_acquire, atomics::memory_order_relaxed ))
-                        {
-                            goto success;
-                        }
-                    }
-
-                    // Epoch free list is empty
-                    // Alloc from global free list
-                retry:
-                    pItem = m_pGlobalFreeHead.load( atomics::memory_order_relaxed );
-                    do {
-                        if ( !pItem ) {
-                            allocNewBlock();
-                            goto retry;
-                        }
-                        // pItem is changed by compare_exchange_weak
-                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem,
-                                                                        pItem->m_pNextFree.load(atomics::memory_order_acquire),
-                                                                        atomics::memory_order_acquire, atomics::memory_order_acquire ));
-
-                success:
-                    CDS_STRICT_DO( pItem->m_pNextFree.store( nullptr, atomics::memory_order_relaxed ));
-                    return *pItem;
-                }
-
-                /// Allocates and initializes new retired pointer
-                retired_ptr_node& alloc( const retired_ptr& p )
-                {
-                    retired_ptr_node& node = alloc();
-                    node.m_ptr = p;
-                    return node;
-                }
-
-                /// Places the list [pHead, pTail] of retired pointers to pool (frees retired pointers)
-                /**
-                    The list is linked on the m_pNextFree field
-                */
-                void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
-                {
-                    assert( pHead != nullptr );
-                    assert( pTail != nullptr );
-
-                    unsigned int nEpoch;
-                    item * pCurHead;
-                    do {
-                        pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
-                        pTail->m_pNextFree.store( pCurHead, atomics::memory_order_release );
-                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
-                }
-            };
-        } // namespace details
-
-        /// Memory manager (Garbage collector)
-        class CDS_EXPORT_API GarbageCollector
-        {
-        private:
-            friend class ThreadGC;
-
-            /// Internal GC statistics
-            struct internal_stat
-            {
-                atomics::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
-                atomics::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guard
-
-                internal_stat()
-                    : m_nGuardCount(0)
-                    , m_nFreeGuardCount(0)
-                {}
-            };
-
-        public:
-            /// Exception "No GarbageCollector object is created"
-            class not_initialized : public std::runtime_error
-            {
-            public:
-                //@cond
-                not_initialized()
-                    : std::runtime_error( "Global DHP GarbageCollector is not initialized" )
-                {}
-                //@endcond
-            };
-
-            /// Internal GC statistics
-            struct InternalState
-            {
-                size_t m_nGuardCount       ;   ///< Total guard count
-                size_t m_nFreeGuardCount   ;   ///< Count of free guard
-
-                //@cond
-                InternalState()
-                    : m_nGuardCount(0)
-                    , m_nFreeGuardCount(0)
-                {}
-
-                InternalState& operator =( internal_stat const& s )
-                {
-                    m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
-                    m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);
-
-                    return *this;
-                }
-                //@endcond
-            };
-
-        private:
-            static GarbageCollector * m_pManager    ;   ///< GC global instance
-
-            atomics::atomic<size_t>  m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call \p scan()
-            const size_t             m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
-
-            details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
-            details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
-            details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating
-
-            internal_stat   m_stat  ;   ///< Internal statistics
-            bool            m_bStatEnabled  ;   ///< Internal Statistics enabled
-
-        public:
-            /// Initializes DHP memory manager singleton
-            /**
-                This member function creates and initializes DHP global object.
-                The function should be called before using CDS data structures based on the cds::gc::DHP GC. Usually,
-                this member function is called in the \p main() function. See cds::gc::dhp for an example.
-                After calling this function you may use CDS data structures based on cds::gc::DHP.
-
-                \par Parameters
-                - \p nLiberateThreshold - \p scan() threshold. When count of retired pointers reaches this value,
-                    the \ref dhp_gc_liberate "scan()" member function would be called for freeing retired pointers.
-                    If \p nLiberateThreshold <= 1, \p scan() would be called after each \ref dhp_gc_retirePtr "retirePtr" call.
-                - \p nInitialThreadGuardCount - initial count of guards allocated for ThreadGC. When a thread
-                    is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
-                    If needed, the thread's local guard pool grows automatically from the common pool.
-                    When the thread terminates, its guard pool is returned to the common GC pool.
-                - \p nEpochCount: internally, DHP memory manager uses epoch-based schema to solve
-                    ABA problem for internal data. \p nEpochCount specifies the epoch count,
-                    i.e. the count of simultaneously working threads that remove the elements
-                    of DHP-based concurrent data structure. Default value is 16.
-            */
-            static void CDS_STDCALL Construct(
-                size_t nLiberateThreshold = 1024
-                , size_t nInitialThreadGuardCount = 8
-                , size_t nEpochCount = 16
-            );
-
-            /// Destroys DHP memory manager
-            /**
-                The member function destroys the DHP global object. After calling this function you may \b NOT
-                use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
-                at the end of your \p main(). See cds::gc::dhp for example.
-            */
-            static void CDS_STDCALL Destruct();
-
-            /// Returns pointer to GarbageCollector instance
-            /**
-                If DHP GC is not initialized, \p not_initialized exception is thrown
-            */
-            static GarbageCollector&   instance()
-            {
-                if ( m_pManager == nullptr )
-                    throw not_initialized();
-                return *m_pManager;
-            }
-
-            /// Checks if global GC object is constructed and may be used
-            static bool isUsed() CDS_NOEXCEPT
-            {
-                return m_pManager != nullptr;
-            }
-
-        public:
-            //@{
-            /// Internal interface
-
-            /// Allocates a guard
-            details::guard_data * allocGuard()
-            {
-                return m_GuardPool.alloc();
-            }
-
-            /// Frees guard \p g for reusing in future
-            void freeGuard(details::guard_data * pGuard )
-            {
-                m_GuardPool.free( pGuard );
-            }
-
-            /// Allocates guard list for a thread.
-            details::guard_data* allocGuardList( size_t nCount )
-            {
-                return m_GuardPool.allocList( nCount );
-            }
-
-            /// Frees thread's guard list pointed by \p pList
-            void freeGuardList( details::guard_data * pList )
-            {
-                m_GuardPool.freeList( pList );
-            }
-
-            /// Places retired pointer \p p and its deleter \p pFunc into the thread's array of retired pointers for deferred reclamation
-            /**@anchor dhp_gc_retirePtr
-            */
-            template <typename T>
-            void retirePtr( T * p, void (* pFunc)(T *))
-            {
-                retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc )));
-            }
-
-            /// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
-            void retirePtr( retired_ptr const& p )
-            {
-                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed))
-                    scan();
-            }
-
-        protected:
-            /// Liberate function
-            /** @anchor dhp_gc_liberate
-                The main function of Dynamic Hazard Pointer algorithm. It tries to free retired pointers if they are not
-                trapped by any guard.
-            */
-            void scan();
-            //@}
-
-        public:
-            /// Get internal statistics
-            InternalState& getInternalState(InternalState& stat) const
-            {
-                return stat = m_stat;
-            }
-
-            /// Checks if internal statistics enabled
-            bool              isStatisticsEnabled() const
-            {
-                return m_bStatEnabled;
-            }
-
-            /// Enables/disables internal statistics
-            bool  enableStatistics( bool bEnable )
-            {
-                bool bEnabled = m_bStatEnabled;
-                m_bStatEnabled = bEnable;
-                return bEnabled;
-            }
-
-        private:
-            GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount, size_t nEpochCount );
-            ~GarbageCollector();
-        };
-
-        /// Thread GC
-        /**
-            To use the Dynamic Hazard Pointer reclamation schema, each thread must be linked with a ThreadGC object
-            that interacts with the global GarbageCollector object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
-            at the start of each thread that uses the DHP GC. Before a thread linked to the DHP GC terminates, it is necessary to call
-            \ref cds_threading "cds::threading::Manager::detachThread()".
-
-            The ThreadGC object maintains two lists:
-            \li Thread guard list: the list of thread-local guards (linked by \p pThreadNext field)
-            \li Free guard list: the list of thread-local free guards (linked by \p pNextFree field)
-            Free guard list is a subset of thread guard list.
-        */
-        class ThreadGC
-        {
-            GarbageCollector&        m_gc;      ///< reference to GC singleton
-            details::guard_data *    m_pList;   ///< Local list of guards owned by the thread
-            details::guard_data *    m_pFree;   ///< The list of free guard from m_pList
-
-        public:
-            /// Default constructor
-            ThreadGC()
-                : m_gc( GarbageCollector::instance())
-                , m_pList( nullptr )
-                , m_pFree( nullptr )
-            {}
-
-            /// The object is not copy-constructible
-            ThreadGC( ThreadGC const& ) = delete;
-
-            /// Dtor calls fini()
-            ~ThreadGC()
-            {
-                fini();
-            }
-
-            /// Initialization. Repeat call is available
-            void init()
-            {
-                if ( !m_pList ) {
-                    m_pList =
-                        m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
-                }
-            }
-
-            /// Finalization. Repeat call is available
-            void fini()
-            {
-                if ( m_pList ) {
-                    m_gc.freeGuardList( m_pList );
-                    m_pList =
-                        m_pFree = nullptr;
-                }
-            }
-
-        public:
-            /// Allocates new guard
-            dhp::details::guard_data* allocGuard()
-            {
-                assert( m_pList != nullptr );
-
-                dhp::details::guard_data* ret;
-                if ( cds_likely( m_pFree )) {
-                    ret = m_pFree;
-                    m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
-                }
-                else {
-                    ret = m_gc.allocGuard();
-                    ret->pThreadNext = m_pList;
-                    m_pList = ret;
-                }
-                return ret;
-            }
-
-            /// Frees guard \p g
-            void freeGuard( dhp::details::guard_data* g )
-            {
-                assert( m_pList != nullptr );
-                if ( cds_likely( g )) {
-                    g->pPost.store( nullptr, atomics::memory_order_relaxed );
-                    g->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
-                    m_pFree = g;
-                }
-            }
-
-            /// Guard array
-            template <size_t Count>
-            using guard_array = dhp::details::guard_data* [Count];
-
-            /// Initializes guard array \p arr
-            template <size_t Count>
-            void allocGuard( guard_array<Count>& arr )
-            {
-                assert( m_pList != nullptr );
-                size_t nCount = 0;
-
-                while ( m_pFree && nCount < Count ) {
-                    arr[nCount] = m_pFree;
-                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
-                    ++nCount;
-                }
-
-                while ( nCount < Count ) {
-                    dhp::details::guard_data*& g = arr[nCount];
-                    g = m_gc.allocGuard();
-                    g->pThreadNext = m_pList;
-                    m_pList = g;
-                    ++nCount;
-                }
-            }
-
-            /// Frees guard array \p arr
-            template <size_t Count>
-            void freeGuard( guard_array<Count>& arr )
-            {
-                assert( m_pList != nullptr );
-
-                details::guard_data* first = nullptr;
-                details::guard_data* last;
-                for ( size_t i = 0; i < Count; ++i ) {
-                    details::guard_data* guard = arr[i];
-                    if ( cds_likely( guard )) {
-                        guard->pPost.store( nullptr, atomics::memory_order_relaxed );
-                        if ( first )
-                            last->pNextFree.store( guard, atomics::memory_order_relaxed );
-                        else
-                            first = guard;
-                        last = guard;
-                    }
-                }
-                if ( first ) {
-                    last->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
-                    m_pFree = first;
-                }
-            }
-
-            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
-            template <typename T>
-            void retirePtr( T * p, void (* pFunc)(T *))
-            {
-                m_gc.retirePtr( p, pFunc );
-            }
-
-            /// Run retiring cycle
-            void scan()
-            {
-                m_gc.scan();
-            }
-        };
-    }   // namespace dhp
-}}  // namespace cds::gc
-//@endcond
-
-#if CDS_COMPILER == CDS_COMPILER_MSVC
-#   pragma warning(pop)
-#endif
-
-#endif // #ifndef CDSLIB_GC_DETAILS_DHP_H
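
The doc comments in the removed dhp.h above describe the guard-management design: each thread holds a private list of free guards and touches the spin-lock-protected global pool only to refill that list or to return it on thread exit, while all guards stay linked on a global accumulating list so that scan() can inspect them. A standalone sketch of the thread-local/global split under those assumptions; the names are hypothetical (std::mutex stands in for cds::sync::spin), not the libcds API:

    #include <cstddef>
    #include <mutex>

    struct guard {
        void*  protected_ptr = nullptr;   // the pointer this guard currently protects
        guard* next_free     = nullptr;   // free-list link
    };

    class global_guard_pool {
        std::mutex lock_;                 // the removed code uses cds::sync::spin here
        guard*     free_head_ = nullptr;
    public:
        // Hands out a block of 'count' guards, reusing returned guards first.
        guard* take_block( std::size_t count )
        {
            std::lock_guard<std::mutex> hold( lock_ );
            guard* head = nullptr;
            for ( std::size_t i = 0; i < count; ++i ) {
                guard* g = free_head_;
                if ( g )
                    free_head_ = g->next_free;
                else
                    g = new guard;
                g->next_free = head;
                head = g;
            }
            return head;
        }

        // Accepts a list of guards back when a thread terminates.
        void give_back( guard* list )
        {
            std::lock_guard<std::mutex> hold( lock_ );
            while ( list ) {
                guard* next = list->next_free;
                list->protected_ptr = nullptr;
                list->next_free = free_head_;
                free_head_ = list;
                list = next;
            }
        }
    };

    class thread_guard_cache {
        global_guard_pool& pool_;
        guard*             free_ = nullptr;   // thread-private free list
    public:
        explicit thread_guard_cache( global_guard_pool& pool ) : pool_( pool ) {}

        // Simplified: only the currently free guards go back to the pool.
        ~thread_guard_cache() { pool_.give_back( free_ ); }

        guard* alloc()
        {
            if ( !free_ )
                free_ = pool_.take_block( 8 );   // refill from the global pool
            guard* g = free_;
            free_ = g->next_free;
            return g;
        }

        void release( guard* g )
        {
            g->protected_ptr = nullptr;          // clear the hazard before reuse
            g->next_free = free_;
            free_ = g;
        }
    };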
diff --git a/cds/gc/details/hp.h b/cds/gc/details/hp.h
deleted file mode 100644 (file)
index 21e83f7..0000000
+++ /dev/null
@@ -1,680 +0,0 @@
-/*
-    This file is a part of libcds - Concurrent Data Structures library
-
-    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
-
-    Source code repo: http://github.com/khizmax/libcds/
-    Download: http://sourceforge.net/projects/libcds/files/
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are met:
-
-    * Redistributions of source code must retain the above copyright notice, this
-      list of conditions and the following disclaimer.
-
-    * Redistributions in binary form must reproduce the above copyright notice,
-      this list of conditions and the following disclaimer in the documentation
-      and/or other materials provided with the distribution.
-
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef CDSLIB_GC_DETAILS_HP_H
-#define CDSLIB_GC_DETAILS_HP_H
-
-#include <cds/algo/atomic.h>
-#include <cds/os/thread.h>
-#include <cds/details/bounded_array.h>
-#include <cds/user_setup/cache_line.h>
-
-#include <cds/gc/details/hp_type.h>
-#include <cds/gc/details/hp_alloc.h>
-
-#if CDS_COMPILER == CDS_COMPILER_MSVC
-#   pragma warning(push)
-    // warning C4251: 'cds::gc::hp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomic::atomic<T>'
-    // needs to have dll-interface to be used by clients of class 'cds::gc::hp::GarbageCollector'
-#   pragma warning(disable: 4251)
-#endif
-
-/*
-    Editions:
-        2007.12.24  khizmax Add statistics and CDS_GATHER_HAZARDPTR_STAT macro
-        2008.03.06  khizmax Refactoring: implementation of HazardPtrMgr is moved to hazardptr.cpp
-        2008.03.08  khizmax Remove HazardPtrMgr singleton. Now you must initialize/destroy HazardPtrMgr calling
-                            HazardPtrMgr::Construct / HazardPtrMgr::Destruct before use (usually in main() function).
-        2008.12.06  khizmax Refactoring. Changes class name, namespace hierarchy, all helper defs have been moved to details namespace
-        2010.01.27  khizmax Introducing memory order constraint
-*/
-
-//@cond
-namespace cds {
-    /// Different safe memory reclamation schemas (garbage collectors)
-    /** @ingroup cds_garbage_collector
-
-        This namespace specifies different safe memory reclamation (SMR) algorithms.
-        See \ref cds_garbage_collector "Garbage collectors"
-    */
-    namespace gc {
-
-    /// Michael's Hazard Pointers reclamation schema
-    /**
-    \par Sources:
-        - [2002] Maged M. Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
-        - [2003] Maged M. Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
-        - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
-
-        The \p cds::gc::hp namespace and its members are the internal representation of the Hazard Pointer GC and should not be used directly.
-        Use the \p cds::gc::HP class in your code.
-
-        The Hazard Pointer garbage collector is a singleton. The main user-level part of the Hazard Pointer schema is
-        the GC class and its nested classes. Before using any HP-related class you must initialize the HP garbage collector
-        by constructing a \p cds::gc::HP object at the beginning of your \p main().
-        See \p cds::gc::HP class for explanation.
-    */
-    namespace hp {
-
-        // forwards
-        class GarbageCollector;
-        class ThreadGC;
-
-        namespace details {
-
-            /// Retired pointer
-            typedef cds::gc::details::retired_ptr   retired_ptr;
-
-            /// Array of retired pointers
-            /**
-                The vector of retired pointers ready to be deleted.
-
-                The Hazard Pointer schema is built on thread-static arrays. For each HP-enabled thread the HP manager allocates
-                an array of retired pointers. The array belongs to the thread: the owner thread writes to the array; other threads
-                only read it.
-            */
-            class retired_vector {
-                /// Underlying vector implementation
-                typedef cds::details::bounded_array<retired_ptr>    retired_vector_impl;
-
-                retired_vector_impl m_arr   ;   ///< the array of retired pointers
-                size_t              m_nSize ;   ///< Current size of \p m_arr
-
-            public:
-                /// Iterator
-                typedef retired_vector_impl::iterator  iterator;
-
-                /// Constructor
-                explicit retired_vector( const cds::gc::hp::GarbageCollector& HzpMgr ); // inline
-                ~retired_vector()
-                {}
-
-                /// Vector capacity.
-                /**
-                    The capacity is constant for any thread. It is defined by cds::gc::hp::GarbageCollector.
-                */
-                size_t capacity() const CDS_NOEXCEPT
-                {
-                    return m_arr.capacity();
-                }
-
-                /// Current vector size (count of retired pointers in the vector)
-                size_t size() const CDS_NOEXCEPT
-                {
-                    return m_nSize;
-                }
-
-                /// Sets the vector size. Used internally
-                void size( size_t nSize )
-                {
-                    assert( nSize <= capacity());
-                    m_nSize = nSize;
-                }
-
-                /// Pushes retired pointer to the vector
-                void push( retired_ptr const& p )
-                {
-                    assert( m_nSize < capacity());
-                    m_arr[ m_nSize ] = p;
-                    ++m_nSize;
-                }
-
-                /// Checks if the vector is full (size() == capacity())
-                bool isFull() const CDS_NOEXCEPT
-                {
-                    return m_nSize >= capacity();
-                }
-
-                /// Begin iterator
-                iterator    begin() CDS_NOEXCEPT
-                {
-                    return m_arr.begin();
-                }
-
-                /// End iterator
-                iterator    end() CDS_NOEXCEPT
-                {
-                    return m_arr.begin() +  m_nSize;
-                }
-
-                /// Clears the vector. After clearing, size() == 0
-                void clear() CDS_NOEXCEPT
-                {
-                    m_nSize = 0;
-                }
-            };
-
-            /// Hazard pointer record of the thread
-            /**
-                The structure is of "single writer - multiple readers" type: only the owner thread may write to this structure;
-                other threads have read-only access.
-            */
-            struct hp_record {
-                hp_allocator<>    m_hzp;         ///< array of hazard pointers. Implicit \ref CDS_DEFAULT_ALLOCATOR dependency
-                retired_vector    m_arrRetired ; ///< Retired pointer array
-
-                char padding[cds::c_nCacheLineSize];
-                atomics::atomic<unsigned int> m_nSync; ///< dummy var to introduce synchronizes-with relationship between threads
-                char padding2[cds::c_nCacheLineSize];
-
-                /// Ctor
-                explicit hp_record( const cds::gc::hp::GarbageCollector& HzpMgr ); // inline
-                ~hp_record()
-                {}
-
-                /// Clears all hazard pointers
-                void clear()
-                {
-                    m_hzp.clear();
-                }
-
-                void sync()
-                {
-                    m_nSync.fetch_add( 1, atomics::memory_order_acq_rel );
-                }
-            };
-        }    // namespace details
-
-        /// GarbageCollector::Scan phase strategy
-        /**
-            See GarbageCollector::Scan for explanation
-        */
-        enum scan_type {
-            classic,    ///< classic scan as described in Michael's works (see GarbageCollector::classic_scan)
-            inplace     ///< inplace scan without allocation (see GarbageCollector::inplace_scan)
-        };
-
-        /// Hazard Pointer singleton
-        /**
-            Safe memory reclamation schema by Michael "Hazard Pointers"
-
-        \par Sources:
-            \li [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
-            \li [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
-            \li [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
-
-        */
-        class CDS_EXPORT_API GarbageCollector
-        {
-        public:
-            typedef cds::atomicity::event_counter  event_counter   ;   ///< event counter type
-
-            /// Internal GC statistics
-            struct InternalState {
-                size_t              nHPCount                ;   ///< HP count per thread (const)
-                size_t              nMaxThreadCount         ;   ///< Max thread count (const)
-                size_t              nMaxRetiredPtrCount     ;   ///< Max retired pointer count per thread (const)
-                size_t              nHPRecSize              ;   ///< Size of HP record, bytes (const)
-
-                size_t              nHPRecAllocated         ;   ///< Count of HP record allocations
-                size_t              nHPRecUsed              ;   ///< Count of HP records used
-                size_t              nTotalRetiredPtrCount   ;   ///< Current total count of retired pointers
-                size_t              nRetiredPtrInFreeHPRecs ;   ///< Count of retired pointers in free (unused) HP records
-
-                event_counter::value_type   evcAllocHPRec   ;   ///< Count of \p hp_record allocations
-                event_counter::value_type   evcRetireHPRec  ;   ///< Count of \p hp_record retire events
-                event_counter::value_type   evcAllocNewHPRec;   ///< Count of new \p hp_record allocations from heap
-                event_counter::value_type   evcDeleteHPRec  ;   ///< Count of \p hp_record deletions
-
-                event_counter::value_type   evcScanCall     ;   ///< Count of Scan calls
-                event_counter::value_type   evcHelpScanCall ;   ///< Count of HelpScan calls
-                event_counter::value_type   evcScanFromHelpScan;///< Count of Scan calls from HelpScan
-
-                event_counter::value_type   evcDeletedNode  ;   ///< Count of retired object deletions
-                event_counter::value_type   evcDeferredNode ;   ///< Count of objects that cannot be deleted in the Scan phase because a hazard pointer guards them
-            };
-
-            /// No GarbageCollector object is created
-            class not_initialized : public std::runtime_error
-            {
-            public:
-                not_initialized()
-                    : std::runtime_error( "Global Hazard Pointer GarbageCollector is not initialized" )
-                {}
-            };
-
-            /// Not enough hazard pointers
-            class too_many_hazard_ptr : public std::length_error
-            {
-            public:
-                too_many_hazard_ptr()
-                    : std::length_error( "Not enough Hazard Pointer" )
-                {}
-            };
-
-        private:
-            /// Internal GC statistics
-            struct Statistics {
-                event_counter  m_AllocHPRec            ;    ///< Count of \p hp_record allocations
-                event_counter  m_RetireHPRec            ;    ///< Count of \p hp_record retire events
-                event_counter  m_AllocNewHPRec            ;    ///< Count of new \p hp_record allocations from heap
-                event_counter  m_DeleteHPRec            ;    ///< Count of \p hp_record deletions
-
-                event_counter  m_ScanCallCount            ;    ///< Count of Scan calls
-                event_counter  m_HelpScanCallCount        ;    ///< Count of HelpScan calls
-                event_counter  m_CallScanFromHelpScan    ;    ///< Count of Scan calls from HelpScan
-
-                event_counter  m_DeletedNode            ;    ///< Count of retired object deletions
-                event_counter  m_DeferredNode            ;    ///< Count of objects that cannot be deleted in the Scan phase because a hazard pointer guards them
-            };
-
-            /// Node of the internal list of cds::gc::hp::details::hp_record
-            struct hplist_node : public details::hp_record
-            {
-                atomics::atomic<hplist_node*>    m_pNextNode; ///< next hazard ptr record in list
-                atomics::atomic<OS::ThreadId>    m_idOwner;   ///< Owner thread id; 0 - the record is free (not owned)
-                atomics::atomic<bool>            m_bFree;     ///< true if record is free (not owned)
-
-                explicit hplist_node( const GarbageCollector& HzpMgr )
-                    : hp_record( HzpMgr ),
-                    m_pNextNode( nullptr ),
-                    m_idOwner( OS::c_NullThreadId ),
-                    m_bFree( true )
-                {}
-
-                hplist_node( const GarbageCollector& HzpMgr, OS::ThreadId owner )
-                    : hp_record( HzpMgr ),
-                    m_pNextNode( nullptr ),
-                    m_idOwner( owner ),
-                    m_bFree( false )
-                {}
-
-                ~hplist_node()
-                {
-                    assert( m_idOwner.load( atomics::memory_order_relaxed ) == OS::c_NullThreadId );
-                    assert( m_bFree.load(atomics::memory_order_relaxed));
-                }
-            };
-
-            atomics::atomic<hplist_node *>   m_pListHead  ;  ///< Head of GC list
-
-            static GarbageCollector *    m_pHZPManager  ;   ///< GC instance pointer
-
-            Statistics              m_Stat              ;   ///< Internal statistics
-            bool                    m_bStatEnabled      ;   ///< true - statistics enabled
-
-            const size_t            m_nHazardPointerCount   ;   ///< max count of hazard pointers per thread
-            const size_t            m_nMaxThreadCount       ;   ///< max count of threads
-            const size_t            m_nMaxRetiredPtrCount   ;   ///< max count of retired pointers per thread
-            scan_type               m_nScanType             ;   ///< scan type (see \ref scan_type enum)
-
-
-        private:
-            /// Ctor
-            GarbageCollector(
-                size_t nHazardPtrCount = 0,         ///< Hazard pointer count per thread
-                size_t nMaxThreadCount = 0,         ///< Max count of threads
-                size_t nMaxRetiredPtrCount = 0,     ///< Capacity of the array of retired objects
-                scan_type nScanType = inplace       ///< Scan type (see \ref scan_type enum)
-            );
-
-            /// Dtor
-            ~GarbageCollector();
-
-            /// Allocate new HP record
-            hplist_node * NewHPRec( OS::ThreadId owner );
-
-            /// Permanently deletes HP record \p pNode
-            /**
-                Caveat: for performance reasons this function is defined as inline and cannot be called directly
-            */
-            void                DeleteHPRec( hplist_node * pNode );
-
-            void detachAllThread();
-
-        public:
-            /// Creates GarbageCollector singleton
-            /**
-                GC is a singleton. If the GC instance does not exist yet, the function creates it.
-                Otherwise it does nothing.
-
-                Michael's HP reclamation schema depends on three parameters:
-
-                \p nHazardPtrCount - HP pointer count per thread. Usually it is a small number (2-4) depending on
-                                     the data structure algorithms. By default, if \p nHazardPtrCount = 0,
-                                     the function uses the maximum HP count required by the CDS library.
-
-                \p nMaxThreadCount - max count of threads using HP GC in your application. Default is 100.
-
-                \p nMaxRetiredPtrCount - capacity of the array of retired pointers for each thread. Must be greater than
-                                    \p nHazardPtrCount * \p nMaxThreadCount.
-                                    Default is 2 * \p nHazardPtrCount * \p nMaxThreadCount.
-            */
-            static void    CDS_STDCALL Construct(
-                size_t nHazardPtrCount = 0,     ///< Hazard pointer count per thread
-                size_t nMaxThreadCount = 0,     ///< Max count of simultaneously working threads in your application
-                size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
-                scan_type nScanType = inplace   ///< Scan type (see \ref scan_type enum)
-            );
-
-            /// Destroys global instance of GarbageCollector
-            /**
-                The parameter \p bDetachAll should be used carefully: if its value is \p true,
-                the GC being destroyed automatically detaches all attached threads. This feature
-                can be useful when you have no control over thread termination, for example,
-                when \p libcds is injected into an existing external thread.
-            */
-            static void CDS_STDCALL Destruct(
-                bool bDetachAll = false     ///< Detach all threads
-            );
-
-            /// Returns pointer to GarbageCollector instance
-            static GarbageCollector&   instance()
-            {
-                if ( !m_pHZPManager )
-                    throw not_initialized();
-                return *m_pHZPManager;
-            }
-
-            /// Checks if global GC object is constructed and may be used
-            static bool isUsed() CDS_NOEXCEPT
-            {
-                return m_pHZPManager != nullptr;
-            }
-
-            /// Returns max Hazard Pointer count defined at construction time
-            size_t            getHazardPointerCount() const CDS_NOEXCEPT
-            {
-                return m_nHazardPointerCount;
-            }
-
-            /// Returns max thread count defined at construction time
-            size_t            getMaxThreadCount() const CDS_NOEXCEPT
-            {
-                return m_nMaxThreadCount;
-            }
-
-            /// Returns max size of the retired objects array. It is defined at construction time
-            size_t            getMaxRetiredPtrCount() const CDS_NOEXCEPT
-            {
-                return m_nMaxRetiredPtrCount;
-            }
-
-            // Internal statistics
-
-            /// Get internal statistics
-            InternalState& getInternalState(InternalState& stat) const;
-
-            /// Checks if internal statistics enabled
-            bool              isStatisticsEnabled() const { return m_bStatEnabled; }
-
-            /// Enables/disables internal statistics
-            bool              enableStatistics( bool bEnable )
-            {
-                bool bEnabled = m_bStatEnabled;
-                m_bStatEnabled = bEnable;
-                return bEnabled;
-            }
-
-            /// Checks that required hazard pointer count \p nRequiredCount is less than or equal to the max hazard pointer count
-            /**
-                If \p nRequiredCount > getHazardPointerCount() then the exception \p too_many_hazard_ptr is thrown
-            */
-            static void checkHPCount( unsigned int nRequiredCount )
-            {
-                if ( instance().getHazardPointerCount() < nRequiredCount )
-                    throw too_many_hazard_ptr();
-            }
-
-            /// Get current scan strategy
-            scan_type getScanType() const
-            {
-                return m_nScanType;
-            }
-
-            /// Set current scan strategy
-            /** @anchor hzp_gc_setScanType
-                Scan strategy changing is allowed on the fly.
-            */
-            void setScanType(
-                scan_type nScanType     ///< new scan strategy
-            )
-            {
-                m_nScanType = nScanType;
-            }
-
-        public:    // Internals for threads
-
-            /// Allocates Hazard Pointer GC record. For internal use only
-            details::hp_record* alloc_hp_record();
-
-            /// Free HP record. For internal use only
-            void free_hp_record( details::hp_record* pRec );
-
-            /// The main garbage collecting function
-            /**
-                This function is called internally by the ThreadGC object when the upper bound of the thread's list of reclaimed pointers
-                is reached.
-
-                The following scan algorithms are available:
-                - \ref hzp_gc_classic_scan "classic_scan" allocates memory for internal use
-                - \ref hzp_gc_inplace_scan "inplace_scan" does not allocate any memory
-
-                Use the \ref hzp_gc_setScanType "setScanType" member function to set up the appropriate scan algorithm.
-            */
-            void Scan( details::hp_record * pRec )
-            {
-                switch ( m_nScanType ) {
-                    case inplace:
-                        inplace_scan( pRec );
-                        break;
-                    default:
-                        assert(false)   ;   // Forgotten something?..
-                    case classic:
-                        classic_scan( pRec );
-                        break;
-                }
-            }
-
-            /// Helper scan routine
-            /**
-                The function guarantees that every node that is eligible for reuse is eventually freed, barring
-                thread failures. To do so, after executing Scan, a thread executes HelpScan,
-                where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
-                to its own list of reclaimed pointers.
-
-                The function is called internally by Scan.
-            */
-            void HelpScan( details::hp_record * pThis );
-
-        protected:
-            /// Classic scan algorithm
-            /** @anchor hzp_gc_classic_scan
-                Classical scan algorithm as described in Michael's paper.
-
-                A scan includes four stages. The first stage involves scanning the HP array for non-null values.
-                Whenever a non-null value is encountered, it is inserted into a local list of currently protected pointers.
-                Only stage 1 accesses shared variables. The following stages operate only on private variables.
-
-                The second stage of a scan involves sorting the local list of protected pointers to allow
-                binary search in the third stage.
-
-                The third stage of a scan involves checking each reclaimed node
-                against the pointers in the local list of protected pointers. If the binary search yields
-                no match, the node is freed. Otherwise, it cannot be deleted now and must be kept in the thread's list
-                of reclaimed pointers.
-
-                The fourth stage builds the new thread-private list of reclaimed pointers
-                that could not be freed during the current scan; they remain there until the next scan.
-                (A standalone sketch of this four-stage scan follows the class definition below.)
-
-                This algorithm allocates memory for an internal HP array.
-
-                This function is called internally by the ThreadGC object when the upper bound of the thread's list of reclaimed pointers
-                is reached.
-            */
-            void classic_scan( details::hp_record * pRec );
-
-            /// In-place scan algorithm
-            /** @anchor hzp_gc_inplace_scan
-                Unlike the \ref hzp_gc_classic_scan "classic_scan" algorithm, \p inplace_scan does not allocate any memory.
-                All operations are performed in-place.
-            */
-            void inplace_scan( details::hp_record * pRec );
-        };
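The following is a standalone sketch of the four-stage classic scan described above. It is not libcds code: the g_hazard_slot registry, the retired_node type and the fixed slot count are illustrative assumptions; the real GarbageCollector walks the hplist_node list and the per-thread retired_vector defined in this file.

    #include <algorithm>
    #include <atomic>
    #include <cstddef>
    #include <vector>

    // Toy global registry of hazard pointer slots (illustrative; stage 1 reads these).
    constexpr std::size_t kSlots = 8;
    std::atomic<void*> g_hazard_slot[kSlots] = {};

    struct retired_node {
        void* ptr;
        void (*free_fn)(void*);
    };

    // Classic scan: collect non-null HPs, sort them, binary-search each retired node.
    void classic_scan(std::vector<retired_node>& retired)
    {
        // Stage 1: collect non-null hazard pointers (the only shared-memory access).
        std::vector<void*> plist;
        for (std::size_t i = 0; i < kSlots; ++i) {
            void* hp = g_hazard_slot[i].load(std::memory_order_acquire);
            if (hp)
                plist.push_back(hp);
        }

        // Stage 2: sort the local list to enable binary search.
        std::sort(plist.begin(), plist.end());

        // Stages 3-4: free unguarded nodes, keep guarded ones for the next scan.
        std::vector<retired_node> still_retired;
        for (retired_node& r : retired) {
            if (std::binary_search(plist.begin(), plist.end(), r.ptr))
                still_retired.push_back(r);   // guarded by some thread: defer
            else
                r.free_fn(r.ptr);             // not guarded: reclaim now
        }
        retired.swap(still_retired);
    }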
-
-        /// Thread's hazard pointer manager
-        /**
-            To use the Hazard Pointer reclamation schema, each thread must be linked with a ThreadGC object
-            that interacts with the global GarbageCollector object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
-            at the start of each thread that uses HP GC. Before a thread linked to HP GC terminates, it is necessary to call
-            \ref cds_threading "cds::threading::Manager::detachThread()". A usage sketch follows the class definition below.
-        */
-        class ThreadGC
-        {
-            GarbageCollector&   m_HzpManager; ///< Hazard Pointer GC singleton
-            details::hp_record* m_pHzpRec;    ///< Pointer to thread's HZP record
-
-        public:
-            /// Default constructor
-            ThreadGC()
-                : m_HzpManager( GarbageCollector::instance()),
-                m_pHzpRec( nullptr )
-            {}
-
-            /// The object is not copy-constructible
-            ThreadGC( ThreadGC const& ) = delete;
-
-            ~ThreadGC()
-            {
-                fini();
-            }
-
-            /// Checks if thread GC is initialized
-            bool isInitialized() const   { return m_pHzpRec != nullptr; }
-
-            /// Initialization. Repeated calls are allowed
-            void init()
-            {
-                if ( !m_pHzpRec )
-                    m_pHzpRec = m_HzpManager.alloc_hp_record();
-            }
-
-            /// Finalization. Repeated calls are allowed
-            void fini()
-            {
-                if ( m_pHzpRec ) {
-                    details::hp_record* pRec = m_pHzpRec;
-                    m_pHzpRec = nullptr;
-                    m_HzpManager.free_hp_record( pRec );
-                }
-            }
-
-            /// Allocates an HP guard
-            details::hp_guard* allocGuard()
-            {
-                assert( m_pHzpRec );
-                return m_pHzpRec->m_hzp.alloc();
-            }
-
-            /// Frees HP guard \p guard
-            void freeGuard( details::hp_guard* guard )
-            {
-                assert( m_pHzpRec );
-                m_pHzpRec->m_hzp.free( guard );
-            }
-
-            /// Allocates HP guards for array \p arr
-            template <size_t Count>
-            size_t allocGuard( details::hp_array<Count>& arr )
-            {
-                assert( m_pHzpRec );
-                return m_pHzpRec->m_hzp.alloc( arr );
-            }
-
-            /// Frees HP guard array \p arr
-            template <size_t Count>
-            void freeGuard( details::hp_array<Count>& arr )
-            {
-                assert( m_pHzpRec );
-                m_pHzpRec->m_hzp.free( arr );
-            }
-
-            /// Places retired pointer \p p and its deleter \p pFunc into the thread's array of retired pointers for deferred reclamation
-            template <typename T>
-            void retirePtr( T * p, void (* pFunc)(T *))
-            {
-                retirePtr( details::retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc )));
-            }
-
-            /// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
-            void retirePtr( details::retired_ptr const& p )
-            {
-                m_pHzpRec->m_arrRetired.push( p );
-
-                if ( m_pHzpRec->m_arrRetired.isFull()) {
-                    // Max retired pointer count is reached. Do scan
-                    scan();
-                }
-            }
-
-            /// Run retiring scan cycle
-            void scan()
-            {
-                m_HzpManager.Scan( m_pHzpRec );
-                m_HzpManager.HelpScan( m_pHzpRec );
-            }
-
-            void sync()
-            {
-                assert( m_pHzpRec != nullptr );
-                m_pHzpRec->sync();
-            }
-        };
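A minimal sketch of the thread lifecycle described above, using the public facade rather than the internal classes in this file. The constructor defaults and exact header names are assumptions; treat it as an illustration of the attach/detach protocol, not as canonical libcds documentation.

    #include <cds/init.h>             // cds::Initialize / cds::Terminate
    #include <cds/gc/hp.h>            // cds::gc::HP
    #include <cds/threading/model.h>  // cds::threading::Manager
    #include <thread>

    int main()
    {
        cds::Initialize();                                  // library infrastructure
        {
            cds::gc::HP hpGC;                               // construct the HP singleton (defaults assumed)

            cds::threading::Manager::attachThread();        // main() also acts as an HP thread

            std::thread worker( []{
                cds::threading::Manager::attachThread();    // link this thread to the HP GC
                // ... use HP-based containers here ...
                cds::threading::Manager::detachThread();    // must run before the thread exits
            });
            worker.join();

            cds::threading::Manager::detachThread();
        }   // hpGC destructor destroys the HP singleton
        cds::Terminate();
    }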
-
-    }   // namespace hp
-}}  // namespace cds::gc
-//@endcond
-
-//@cond
-// Inlines
-namespace cds {
-    namespace gc { namespace hp { namespace details {
-
-        inline retired_vector::retired_vector( const cds::gc::hp::GarbageCollector& HzpMgr )
-            : m_arr( HzpMgr.getMaxRetiredPtrCount()),
-            m_nSize(0)
-        {}
-
-        inline hp_record::hp_record( const cds::gc::hp::GarbageCollector& HzpMgr )
-            : m_hzp( HzpMgr.getHazardPointerCount())
-            , m_arrRetired( HzpMgr )
-            , m_nSync( 0 )
-        {}
-
-    }}} // namespace gc::hp::details
-} // namespace cds
-//@endcond
-
-
-#if CDS_COMPILER == CDS_COMPILER_MSVC
-#   pragma warning(pop)
-#endif
-
-#endif  // #ifndef CDSLIB_GC_DETAILS_HP_H
diff --git a/cds/gc/details/hp_alloc.h b/cds/gc/details/hp_alloc.h
deleted file mode 100644 (file)
index 978a342..0000000
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
-    This file is a part of libcds - Concurrent Data Structures library
-
-    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
-
-    Source code repo: http://github.com/khizmax/libcds/
-    Download: http://sourceforge.net/projects/libcds/files/
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are met:
-
-    * Redistributions of source code must retain the above copyright notice, this
-      list of conditions and the following disclaimer.
-
-    * Redistributions in binary form must reproduce the above copyright notice,
-      this list of conditions and the following disclaimer in the documentation
-      and/or other materials provided with the distribution.
-
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef CDSLIB_GC_DETAILS_HP_ALLOC_H
-#define CDSLIB_GC_DETAILS_HP_ALLOC_H
-
-#include <cds/algo/atomic.h>
-#include <cds/details/allocator.h>
-#include <cds/gc/details/hp_type.h>
-#include <string.h> // memset
-
-//@cond
-namespace cds {
-    namespace gc { namespace hp {
-        // forwards
-        class GarbageCollector;
-        class ThreadGC;
-
-    /// Hazard Pointer schema implementation details
-    namespace details {
-
-        /// Hazard pointer guard
-        /**
-            It is unsafe to use this class directly.
-            Instead, the \p hp::guard class should be used.
-        */
-        class hp_guard : protected atomics::atomic < hazard_pointer >
-        {
-            template <class Allocator> friend class hp_allocator;
-
-        public:
-            typedef hazard_pointer hazard_ptr;///< Hazard pointer type
-
-        private:
-            typedef atomics::atomic<hazard_ptr> atomic_hazard_ptr;
-
-            atomic_hazard_ptr m_hp;
-            hp_guard*         m_next; // next free guard
-
-        public:
-            hp_guard() CDS_NOEXCEPT
-                : m_hp( nullptr )
-                , m_next( nullptr )
-            {}
-
-            ~hp_guard() CDS_NOEXCEPT
-            {}
-
-            /// Sets HP value. Guards pointer \p p from reclamation.
-            /**
-                Storing has release semantics.
-            */
-            template <typename T>
-            T * operator =(T * p) CDS_NOEXCEPT
-            {
-                // We use atomic store with explicit memory order because other threads may read this hazard pointer concurrently
-                set( p );
-                return p;
-            }
-
-            std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
-            {
-                clear();
-                return nullptr;
-            }
-
-            /// Returns current value of hazard pointer
-            /**
-                Loading has acquire semantics
-            */
-            hazard_ptr get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
-            {
-                return m_hp.load( order );
-            }
-
-            template <typename T>
-            void set( T * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
-            {
-                m_hp.store( reinterpret_cast<hazard_ptr>(p), order );
-            }
-
-            /// Clears HP
-            /**
-                Clearing uses \p release semantics by default.
-            */
-            void clear( atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
-            {
-                // memory order is not necessary here
-                m_hp.store( nullptr, order );
-            }
-        };
-
-        /// Array of hazard pointers.
-        /**
-            Array of hazard-pointer. Placing a pointer into this array guards the pointer against reclamation.
-            Template parameter \p Count defines the size of hazard pointer array. \p Count parameter should not exceed
-            GarbageCollector::getHazardPointerCount().
-
-            It is unsafe to use this class directly. Instead, the \p hp::array should be used.
-
-            When an object of the \p hp_array class is created, an array of \p Count hazard pointers is reserved by
-            the HP manager of the current thread. The object's destructor clears all reserved hazard pointers and
-            returns them to the HP pool of ThreadGC.
-
-            Usually it is not necessary to create an object of this class directly. The ThreadGC object contains
-            an \p hp_array object and implements the interface for setting and freeing HPs.
-
-            Template parameter:
-                \li Count - capacity of array
-        */
-        template <size_t Count>
-        class hp_array
-        {
-            template <class Allocator> friend class hp_allocator;
-
-        public:
-            typedef hazard_pointer  hazard_ptr;   ///< Hazard pointer type
-            static CDS_CONSTEXPR const size_t c_nCapacity = Count ;   ///< Capacity of the array
-
-        public:
-            /// Constructs uninitialized array.
-            hp_array() CDS_NOEXCEPT
-            {
-                memset( m_arr, 0, sizeof( m_arr ));
-            }
-
-            /// Destructs object
-            ~hp_array() CDS_NOEXCEPT
-            {}
-
-            /// Returns max count of hazard pointer for this array
-            CDS_CONSTEXPR size_t capacity() const
-            {
-                return c_nCapacity;
-            }
-
-            /// Set hazard pointer \p nIndex. 0 <= \p nIndex < \p Count
-            void set( size_t nIndex, hazard_ptr hptr ) CDS_NOEXCEPT
-            {
-                assert( nIndex < capacity());
-                assert( m_arr[nIndex] != nullptr );
-
-                *m_arr[nIndex] = hptr;
-            }
-
-            /// Returns pointer to hazard pointer of index \p nIndex (0 <= \p nIndex < \p Count)
-            hp_guard* operator []( size_t nIndex ) CDS_NOEXCEPT
-            {
-                assert( nIndex < capacity());
-                return m_arr[nIndex];
-            }
-
-            /// Returns pointer to hazard pointer of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
-            hp_guard* operator []( size_t nIndex ) const CDS_NOEXCEPT
-            {
-                assert( nIndex < capacity());
-                return m_arr[nIndex];
-            }
-
-            /// Clears (sets to \p nullptr) hazard pointer \p nIndex
-            void clear( size_t nIndex ) CDS_NOEXCEPT
-            {
-                assert( nIndex < capacity());
-                assert( m_arr[nIndex] != nullptr );
-
-                m_arr[ nIndex ]->clear();
-            }
-
-            hp_guard* release( size_t nIndex ) CDS_NOEXCEPT
-            {
-                assert( nIndex < capacity());
-
-                hp_guard* p = m_arr[ nIndex ];
-                m_arr[ nIndex ] = nullptr;
-                return p;
-            }
-
-
-        private:
-            hp_guard* m_arr[c_nCapacity]; ///< Hazard pointer array of size = \p Count
-        };
-
-        /// Allocator of hazard pointers for the thread
-        /**
-            The hazard pointer array is the free list of unused hazard pointers for the thread.
-            The array is managed as a stack: allocation pops a guard from the list head, freeing pushes it back.
-            The max size (capacity) of the array is defined at construction time and cannot be changed during the object's lifetime.
-            (A standalone sketch of this stack-managed free list follows the class definition below.)
-
-            Each allocator object is thread-private.
-
-            Template parameters:
-                \li Allocator - memory allocator class, default is \ref CDS_DEFAULT_ALLOCATOR
-
-            This helper class should not be used directly.
-        */
-        template <class Allocator = CDS_DEFAULT_ALLOCATOR >
-        class hp_allocator
-        {
-        public:
-            typedef hazard_pointer  hazard_ptr;     ///< type of hazard pointer
-            typedef Allocator       allocator_type; ///< allocator type
-
-        private:
-            typedef cds::details::Allocator< hp_guard, allocator_type > allocator_impl;
-
-            hp_guard*    m_arrHazardPtr; ///< Array of hazard pointers
-            hp_guard*    m_FreeListHead; ///< List of free hp guards
-            size_t const m_nCapacity;    ///< Array capacity
-
-        public:
-            /// Ctor
-            explicit hp_allocator(
-                size_t  nCapacity ///< max count of hazard pointer per thread
-            )
-            : m_arrHazardPtr( alloc_array( nCapacity ))
-            , m_FreeListHead( m_arrHazardPtr )
-            , m_nCapacity( nCapacity )
-            {
-                build_free_list();
-            }
-
-            /// Dtor
-            ~hp_allocator()
-            {
-                allocator_impl().Delete( m_arrHazardPtr, capacity());
-            }
-
-            /// Get capacity of array
-            size_t capacity() const CDS_NOEXCEPT
-            {
-                return m_nCapacity;
-            }
-
-            /// Get size of array. The size is equal to the capacity of array
-            size_t size() const CDS_NOEXCEPT
-            {
-                return capacity();
-            }
-
-            /// Checks if all items are allocated
-            bool full() const CDS_NOEXCEPT
-            {
-                return m_FreeListHead == nullptr;
-            }
-
-            /// Allocates hazard pointer
-            hp_guard* alloc()
-            {
-                assert( !full());
-
-                hp_guard* p = m_FreeListHead;
-                m_FreeListHead = m_FreeListHead->m_next;
-                return p;
-            }
-
-            /// Frees previously allocated hazard pointer
-            void free( hp_guard* hp ) CDS_NOEXCEPT
-            {
-                if ( hp ) {
-                    hp->clear();
-                    hp->m_next = m_FreeListHead;
-                    m_FreeListHead = hp;
-                }
-            }
-
-            /// Allocates hazard pointers array
-            /**
-                Allocates up to \p Count hazard pointers from the free list
-                and initializes \p arr with them.
-
-                @return actual number of allocated hazard pointers.
-            */
-            template <size_t Count>
-            size_t alloc( hp_array<Count>& arr )
-            {
-                size_t i;
-                hp_guard* p = m_FreeListHead;
-                for ( i = 0; i < Count && p; ++i ) {
-                    arr.m_arr[i] = p;
-                    p = p->m_next;
-                }
-                size_t ret = i;
-                for ( ; i < Count; ++i )
-                    arr.m_arr[i] = nullptr;
-                m_FreeListHead = p;
-                return ret;
-            }
-
-            /// Frees hazard pointer array
-            /**
-                Frees the array of hazard pointers allocated by a previous call to \p this->alloc().
-            */
-            template <size_t Count>
-            void free( hp_array<Count> const& arr ) CDS_NOEXCEPT
-            {
-                hp_guard* pList = m_FreeListHead;
-                for ( size_t i = 0; i < Count; ++i ) {
-                    hp_guard* p = arr[i];
-                    if ( p ) {
-                        p->clear();
-                        p->m_next = pList;
-                        pList = p;
-                    }
-                }
-                m_FreeListHead = pList;
-            }
-
-            /// Clears all hazard pointers in the array
-            void clear() CDS_NOEXCEPT
-            {
-                for ( size_t i = 0; i < capacity(); ++i )
-                    m_arrHazardPtr[i].clear();
-            }
-
-            /// Returns i-th hazard pointer
-            hp_guard& operator []( size_t i ) CDS_NOEXCEPT
-            {
-                assert( i < capacity());
-                return m_arrHazardPtr[i];
-            }
-
-        private:
-            hp_guard* alloc_array( size_t nCapacity )
-            {
-                return allocator_impl().NewArray( nCapacity );
-            }
-
-            void build_free_list()
-            {
-                hp_guard* first = m_arrHazardPtr;
-                hp_guard* last = m_arrHazardPtr + capacity();
-                hp_guard* prev = first;
-                for ( ++first; first < last; ++first ) {
-                    prev->m_next = first;
-                    prev = first;
-                }
-                prev->m_next = nullptr;
-                m_FreeListHead = m_arrHazardPtr;
-            }
-        };
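For reference, a standalone sketch of the stack-managed free-list technique used above (and reused per thread by the new DHP code). The slot type and the fixed pool are illustrative assumptions, not libcds classes.

    #include <cassert>
    #include <cstddef>

    struct slot {
        void* value = nullptr;
        slot* next  = nullptr;   // intrusive link, meaningful only while the slot is free
    };

    class slot_pool {
    public:
        explicit slot_pool( std::size_t capacity )
            : arr_( new slot[capacity] )
            , capacity_( capacity )
        {
            // Build the initial free list: each slot points to the next one; the last stays nullptr.
            for ( std::size_t i = 0; i + 1 < capacity_; ++i )
                arr_[i].next = &arr_[i + 1];
            free_head_ = arr_;
        }

        ~slot_pool() { delete[] arr_; }

        slot* alloc()                    // pop from the stack of free slots
        {
            assert( free_head_ != nullptr );
            slot* s = free_head_;
            free_head_ = s->next;
            return s;
        }

        void free( slot* s )             // push back onto the stack
        {
            s->value = nullptr;
            s->next = free_head_;
            free_head_ = s;
        }

    private:
        slot*       arr_;
        slot*       free_head_ = nullptr;
        std::size_t capacity_;
    };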
-
-    }}} // namespace gc::hp::details
-}   // namespace cds
-//@endcond
-
-#endif // #ifndef CDSLIB_GC_DETAILS_HP_ALLOC_H
diff --git a/cds/gc/details/hp_common.h b/cds/gc/details/hp_common.h
new file mode 100644 (file)
index 0000000..bfdccef
--- /dev/null
@@ -0,0 +1,184 @@
+/*
+    This file is a part of libcds - Concurrent Data Structures library
+
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
+
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_GC_DETAILS_HP_COMMON_H
+#define CDSLIB_GC_DETAILS_HP_COMMON_H
+
+#include <cds/algo/atomic.h>
+#include <cds/gc/details/retired_ptr.h>
+
+#ifdef CDS_ENABLE_HPSTAT
+#   define CDS_HPSTAT( expr ) expr
+#else
+#   define CDS_HPSTAT( expr )
+#endif
+
+//@cond
+namespace cds { namespace gc { namespace hp { namespace common {
+
+    /// Hazard pointer type
+    typedef void*   hazard_ptr;
+
+    /// Retired pointer
+    using cds::gc::details::retired_ptr;
+    using cds::gc::make_retired_ptr;
+
+    /// Hazard pointer guard
+    class guard
+    {
+    public:
+        guard() noexcept
+            : hp_( nullptr )
+            , next_( nullptr )
+        {}
+
+        template <typename T>
+        T* operator=( T* ptr ) noexcept
+        {
+            set( ptr );
+            return ptr;
+        }
+
+        std::nullptr_t operator=( std::nullptr_t ) noexcept
+        {
+            clear();
+            return nullptr;
+        }
+
+        hazard_ptr get() const noexcept
+        {
+            return hp_.load( atomics::memory_order_acquire );
+        }
+
+        hazard_ptr get( atomics::memory_order order ) const noexcept
+        {
+            return hp_.load( order );
+        }
+
+        template <typename T>
+        T* get_as() const noexcept
+        {
+            return reinterpret_cast<T*>( get() );
+        }
+
+        template <typename T>
+        void set( T* ptr ) noexcept
+        {
+            hp_.store( reinterpret_cast<hazard_ptr>( ptr ), atomics::memory_order_release );
+        }
+
+        void clear( atomics::memory_order order ) noexcept
+        {
+            hp_.store( nullptr, order );
+        }
+
+        void clear() noexcept
+        {
+            clear( atomics::memory_order_release );
+        }
+
+    private:
+        atomics::atomic<hazard_ptr>   hp_;
+
+    public:
+        guard* next_;   // free guard list
+    };
+
+    /// Array of guards
+    template <size_t Capacity>
+    class guard_array
+    {
+    public:
+        static size_t const c_nCapacity = Capacity;
+
+    public:
+        guard_array()
+            : arr_{ nullptr }
+        {}
+
+        constexpr size_t capacity() const
+        {
+            return c_nCapacity;
+        }
+
+        guard* operator[]( size_t idx ) const noexcept
+        {
+            assert( idx < capacity() );
+            return arr_[idx];
+        }
+
+        template <typename T>
+        void set( size_t idx, T* ptr ) noexcept
+        {
+            assert( idx < capacity());
+            assert( arr_[idx] != nullptr );
+
+            arr_[idx]->set( ptr );
+        }
+
+        void clear( size_t idx ) noexcept
+        {
+            assert( idx < capacity() );
+            assert( arr_[idx] != nullptr );
+
+            arr_[idx]->clear();
+        }
+
+        guard* release( size_t idx ) noexcept
+        {
+            assert( idx < capacity() );
+
+            guard* g = arr_[idx];
+            arr_[idx] = nullptr;
+            return g;
+        }
+
+        void reset( size_t idx, guard* g ) noexcept
+        {
+            assert( idx < capacity() );
+            assert( arr_[idx] == nullptr );
+
+            arr_[idx] = g;
+        }
+
+    private:
+        guard*  arr_[c_nCapacity];
+    };
+
+
+    /// Retired pointer disposer
+    typedef void ( *disposer_func )( void* );
+
+}}}} // namespace cds::gc::hp::common
+//@endcond
+
+#endif // #ifndef CDSLIB_GC_DETAILS_HP_COMMON_H
+
+
diff --git a/cds/gc/details/hp_type.h b/cds/gc/details/hp_type.h
deleted file mode 100644 (file)
index 03da8ad..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-    This file is a part of libcds - Concurrent Data Structures library
-
-    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
-
-    Source code repo: http://github.com/khizmax/libcds/
-    Download: http://sourceforge.net/projects/libcds/files/
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are met:
-
-    * Redistributions of source code must retain the above copyright notice, this
-      list of conditions and the following disclaimer.
-
-    * Redistributions in binary form must reproduce the above copyright notice,
-      this list of conditions and the following disclaimer in the documentation
-      and/or other materials provided with the distribution.
-
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef CDSLIB_GC_DETAILS_HP_TYPE_H
-#define CDSLIB_GC_DETAILS_HP_TYPE_H
-
-#include <cds/gc/details/retired_ptr.h> // free_retired_ptr_func
-
-//@cond
-namespace cds {
-    namespace gc {
-        namespace hp {
-
-            /// Hazard pointer
-            typedef void *    hazard_pointer;
-
-            /// Pointer to function to free (destruct and deallocate) retired pointer of specific type
-            typedef cds::gc::details::free_retired_ptr_func free_retired_ptr_func;
-        }
-    }
-}
-//@endcond
-
-#endif // #ifndef CDSLIB_GC_DETAILS_HP_TYPE_H
-
-
index d29a4b8..02a6d91 100644 (file)
@@ -31,8 +31,8 @@
 #ifndef CDSLIB_GC_DHP_H
 #define CDSLIB_GC_DHP_H
 
-#include <cds/gc/impl/dhp_decl.h>
-#include <cds/gc/impl/dhp_impl.h>
+#include <cds/gc/dhp_smr.h>
 #include <cds/details/lib.h>
+#include <cds/threading/model.h>
 
 #endif // #ifndef CDSLIB_GC_DHP_H
diff --git a/cds/gc/dhp_smr.h b/cds/gc/dhp_smr.h
new file mode 100644 (file)
index 0000000..92d9b53
--- /dev/null
@@ -0,0 +1,1355 @@
+/*
+    This file is a part of libcds - Concurrent Data Structures library
+
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
+
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_GC_DHP_SMR_H
+#define CDSLIB_GC_DHP_SMR_H
+
+#include <exception>
+#include <cds/gc/details/hp_common.h>
+#include <cds/intrusive/free_list_selector.h>
+#include <cds/details/throw_exception.h>
+#include <cds/details/static_functor.h>
+#include <cds/details/marked_ptr.h>
+#include <cds/user_setup/cache_line.h>
+
+namespace cds { namespace gc {
+    namespace dhp {
+        using namespace cds::gc::hp::common;
+
+        /// Exception "Dynamic Hazard Pointer SMR is not initialized"
+        class not_initialized: public std::runtime_error
+        {
+        public:
+            not_initialized()
+                : std::runtime_error( "Global DHP SMR object is not initialized" )
+            {}
+        };
+
+        struct guard_block: public cds::intrusive::FreeListImpl::node
+        {
+            guard_block*    next_;  // next block in the thread list
+
+            guard_block()
+                : next_( nullptr )
+            {}
+
+            guard* first()
+            {
+                return reinterpret_cast<guard*>( this + 1 );
+            }
+        };
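guard_block::first() relies on the guards being laid out immediately after the block header within a single allocation (this + 1). A hedged standalone sketch of that layout trick follows; the header and payload types are illustrative, and it assumes the payload alignment does not exceed the header alignment, as is the case for pointer-sized guards.

    #include <cstddef>
    #include <new>

    struct block_header {
        block_header* next = nullptr;
        // payload items start right after the header
        void** first() { return reinterpret_cast<void**>( this + 1 ); }
    };

    // Allocates one chunk holding a header followed by `count` payload slots.
    block_header* make_block( std::size_t count )
    {
        void* raw = ::operator new( sizeof( block_header ) + count * sizeof( void* ));
        block_header* h = new ( raw ) block_header();   // construct the header in place
        void** items = h->first();
        for ( std::size_t i = 0; i < count; ++i )
            new ( items + i ) void*( nullptr );         // begin the lifetime of each payload slot
        return h;
    }

    void destroy_block( block_header* h )
    {
        h->~block_header();
        ::operator delete( h );
    }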
+
+        /// \p guard_block allocator (global object)
+        class hp_allocator
+        {
+            friend class smr;
+        public:
+            static hp_allocator& instance();
+
+            CDS_EXPORT_API guard_block*    alloc();
+            void            free( guard_block* block )
+            {
+                free_list_.put( block );
+            }
+
+        private:
+            hp_allocator()
+            {}
+            CDS_EXPORT_API ~hp_allocator();
+
+        private:
+            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p guard_block
+        };
+
+        /// Per-thread hazard pointer storage
+        class thread_hp_storage 
+        {
+            friend class smr;
+        public:
+            thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
+                : free_head_( arr )
+                , extended_list_( nullptr )
+                , array_( arr )
+                , initial_capacity_( nSize )
+            {}
+
+            thread_hp_storage() = delete;
+            thread_hp_storage( thread_hp_storage const& ) = delete;
+            thread_hp_storage( thread_hp_storage&& ) = delete;
+
+            ~thread_hp_storage()
+            {
+                clear();
+            }
+
+            guard* alloc()
+            {
+                if ( cds_unlikely( free_head_ == nullptr )) {
+                    extend();
+                    assert( free_head_ != nullptr );
+                }
+
+                guard* g = free_head_;
+                free_head_ = g->next_;
+                return g;
+            }
+
+            void free( guard* g ) CDS_NOEXCEPT
+            {
+                if ( g ) {
+                    g->clear();
+                    g->next_ = free_head_;
+                    free_head_ = g;
+                }
+            }
+
+            template< size_t Capacity>
+            size_t alloc( guard_array<Capacity>& arr )
+            {
+                for ( size_t i = 0; i < Capacity; ++i ) {
+                    if ( cds_unlikely( free_head_ == nullptr ))
+                        extend();
+                    arr.reset( i, free_head_ );
+                    free_head_ = free_head_->next_;
+                }
+                return Capacity;
+            }
+
+            template <size_t Capacity>
+            void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
+            {
+                guard* gList = free_head_;
+                for ( size_t i = 0; i < Capacity; ++i ) {
+                    guard* g = arr[i];
+                    if ( g ) {
+                        g->clear();
+                        g->next_ = gList;
+                        gList = g;
+                    }
+                }
+                free_head_ = gList;
+            }
+
+            void clear()
+            {
+                // clear array_
+                for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur )
+                    cur->clear();
+
+                // free all extended blocks
+                hp_allocator& alloc = hp_allocator::instance();
+                for ( guard_block* p = extended_list_; p; ) {
+                    guard_block* next = p->next_;
+                    alloc.free( p );
+                    p = next;
+                }
+
+                extended_list_ = nullptr;
+            }
+
+            void init()
+            {
+                assert( extended_list_ == nullptr );
+
+                guard* p = array_;
+                for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
+                    p->next_ = p + 1;
+                p->next_ = nullptr;
+                free_head_ = array_;
+            }
+
+        private:
+            void extend()
+            {
+                assert( free_head_ == nullptr );
+
+                guard_block* block = hp_allocator::instance().alloc();
+                block->next_ = extended_list_;
+                extended_list_ = block;
+                free_head_ = block->first();
+            }
+
+        private:
+            guard*          free_head_;        ///< Head of free guard list
+            guard_block*    extended_list_;    ///< Head of extended guard blocks allocated for the thread
+            guard* const    array_;            ///< initial HP array
+            size_t const    initial_capacity_; ///< Capacity of \p array_
+        };
+
+        struct retired_block: public cds::intrusive::FreeListImpl::node
+        {
+            retired_block*  next_;  ///< Next block in thread-private retired array
+
+            static size_t const c_capacity = 256;
+
+            retired_block()
+                : next_( nullptr )
+            {}
+
+            retired_ptr*    first()
+            {
+                return reinterpret_cast<retired_ptr*>( this + 1 );
+            }
+
+            retired_ptr*    last()
+            {
+                return first() + c_capacity;
+            }
+        };
+
+        class retired_allocator
+        {
+            friend class smr;
+        public:
+            static retired_allocator& instance();
+
+            CDS_EXPORT_API retired_block* alloc();
+            void free( retired_block* block )
+            {
+                block->next_ = nullptr;
+                free_list_.put( block );
+            }
+
+        private:
+            retired_allocator()
+            {}
+            CDS_EXPORT_API ~retired_allocator();
+
+        private:
+            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p retired_block
+        };
+
+        /// Per-thread retired array
+        class retired_array
+        {
+            friend class smr;
+        public:
+            retired_array() CDS_NOEXCEPT
+                : current_block_( nullptr )
+                , current_cell_( nullptr )
+                , list_head_( nullptr )
+                , list_tail_( nullptr )
+                , block_count_(0)
+            {}
+
+            retired_array( retired_array const& ) = delete;
+            retired_array( retired_array&& ) = delete;
+
+            ~retired_array()
+            {
+                assert( empty());
+                fini();
+            }
+
+            bool push( retired_ptr const& p ) CDS_NOEXCEPT
+            {
+                assert( current_block_ != nullptr );
+                assert( current_block_->first() <= current_cell_ );
+                assert( current_cell_ < current_block_->last() );
+                //assert( &p != current_cell_ );
+
+                *current_cell_ = p;
+                if ( ++current_cell_ == current_block_->last() ) {
+                    // goto next block if exists
+                    if ( current_block_->next_ ) {
+                        current_block_ = current_block_->next_;
+                        current_cell_ = current_block_->first();
+                        return true;
+                    }
+
+                    // no free block
+                    // smr::scan() extends retired_array if needed
+                    return false;
+                }
+
+                return true;
+            }
+
+            bool safe_push( retired_ptr* p ) CDS_NOEXCEPT
+            {                
+                bool ret = push( *p );
+                assert( ret );
+                return ret;
+            }
+
+        private: // called by smr
+            void init()
+            {
+                if ( list_head_ == nullptr ) {
+                    retired_block* block = retired_allocator::instance().alloc();
+                    assert( block->next_ == nullptr );
+
+                    current_block_ =
+                        list_head_ =
+                        list_tail_ = block;
+                    current_cell_ = block->first();
+
+                    block_count_ = 1;
+                }
+            }
+
+            void fini()
+            {
+                retired_allocator& alloc = retired_allocator::instance();
+                for ( retired_block* p = list_head_; p; ) {
+                    retired_block* next = p->next_;
+                    alloc.free( p );
+                    p = next;
+                }
+
+                current_block_ =
+                    list_head_ =
+                    list_tail_ = nullptr;
+                current_cell_ = nullptr;
+
+                block_count_ = 0;
+            }
+
+            void extend()
+            {
+                assert( list_head_ != nullptr );
+                assert( current_block_ == list_tail_ );
+                assert( current_cell_ == current_block_->last() );
+
+                retired_block* block = retired_allocator::instance().alloc();
+                assert( block->next_ == nullptr );
+
+                list_tail_ = list_tail_->next_ = block;
+                current_cell_ = block->first();
+                ++block_count_;
+            }
+
+            bool empty() const
+            {
+                return current_block_ == nullptr
+                    || ( current_block_ == list_head_ && current_cell_ == current_block_->first());
+            }
+
+        private:
+            retired_block*          current_block_;
+            retired_ptr*            current_cell_;  // in current_block_
+
+            retired_block*          list_head_;
+            retired_block*          list_tail_;
+            size_t                  block_count_;
+        };
+
+        /// Per-thread data
+        struct thread_data {
+            thread_hp_storage   hazards_;   ///< Hazard pointers private to the thread
+            retired_array       retired_;   ///< Retired data private to the thread
+
+            char pad1_[cds::c_nCacheLineSize];
+            atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
+            char pad2_[cds::c_nCacheLineSize];
+
+            thread_data( guard* guards, size_t guard_count )
+                : hazards_( guards, guard_count )
+                , sync_( 0 )
+            {}
+
+            thread_data() = delete;
+            thread_data( thread_data const& ) = delete;
+            thread_data( thread_data&& ) = delete;
+
+            void sync()
+            {
+                sync_.fetch_add( 1, atomics::memory_order_acq_rel );
+            }
+        };
+
+        // Hazard Pointer SMR (Safe Memory Reclamation)
+        class smr
+        {
+            struct thread_record;
+
+        public:
+            /// Returns the instance of Hazard Pointer \ref smr
+            static smr& instance()
+            {
+#       ifdef CDS_DISABLE_SMR_EXCEPTION
+                assert( instance_ != nullptr );
+#       else
+                if ( !instance_ )
+                    CDS_THROW_EXCEPTION( not_initialized() );
+#       endif
+                return *instance_;
+            }
+
+            /// Creates Dynamic Hazard Pointer SMR singleton
+            /**
+                Dynamic Hazard Pointer SMR is a singleton. If the DHP instance is not yet initialized, the function creates it.
+                Otherwise it does nothing.
+
+                Unlike classic Hazard Pointer SMR, DHP has no hard limits on the number of hazard pointers or threads.
+                The only tuning parameter is \p nInitialHazardPtrCount - the initial number of hazard pointers allocated
+                for each attached thread. The per-thread pool of hazard pointers grows automatically from the common
+                pool when more guards are needed, so this value affects only the initial allocation.
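+
+                For example (illustrative sketch; usually the singleton is created indirectly by constructing
+                a \p cds::gc::DHP object in \p main()):
+                \code
+                cds::gc::dhp::smr::construct( 16 );     // 16 hazard pointers per thread initially
+                // ... use DHP-based containers ...
+                cds::gc::dhp::smr::destruct();          // at the end of the program
+                \endcode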
+            */
+            static CDS_EXPORT_API void construct(
+                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
+            );
+
+            //@cond
+            // for back-compatibility
+            static void Construct(
+                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
+            )
+            {
+                construct( nInitialHazardPtrCount );
+            }
+            //@endcond
+
+            /// Destroys global instance of \ref smr
+            /**
+                The parameter \p bDetachAll should be used carefully: if its value is \p true,
+                then the object being destroyed automatically detaches all attached threads. This feature
+                can be useful when you have no control over thread termination, for example,
+                when \p libcds is injected into an existing external thread.
+            */
+            static CDS_EXPORT_API void destruct(
+                bool bDetachAll = false     ///< Detach all threads
+            );
+
+            //@cond
+            // for back-compatibility
+            static void Destruct(
+                bool bDetachAll = false     ///< Detach all threads
+            )
+            {
+                destruct( bDetachAll );
+            }
+            //@endcond
+
+            /// Checks if global SMR object is constructed and may be used
+            static bool isUsed() CDS_NOEXCEPT
+            {
+                return instance_ != nullptr;
+            }
+
+            /// Set memory management functions
+            /**
+                @note This function may be called <b>BEFORE</b> creating an instance
+                of Dynamic Hazard Pointer SMR
+
+                The SMR object allocates some memory for thread-specific data and for
+                the SMR object itself.
+                By default, the standard \p new and \p delete operators are used for this.
+            */
+            static CDS_EXPORT_API void set_memory_allocator(
+                void* ( *alloc_func )( size_t size ),
+                void( *free_func )( void * p )
+            );
+
+            /// Returns thread-local data for the current thread
+            static CDS_EXPORT_API thread_data* tls();
+
+            static CDS_EXPORT_API void attach_thread();
+            static CDS_EXPORT_API void detach_thread();
+
+        public: // for internal use only
+            /// The main garbage collecting function
+            CDS_EXPORT_API void scan( thread_data* pRec );
+
+            /// Helper scan routine
+            /**
+                The function guarantees that every node that is eligible for reuse is eventually freed, barring
+                thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
+                where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
+                to thread's list of reclaimed pointers.
+
+                The function is called internally by \p scan().
+            */
+            CDS_EXPORT_API void help_scan( thread_data* pThis );
+
+            hp_allocator& get_hp_allocator()
+            {
+                return hp_allocator_;
+            }
+
+            retired_allocator& get_retired_allocator()
+            {
+                return retired_allocator_;
+            }
+
+        private:
+            CDS_EXPORT_API smr(
+                size_t nInitialHazardPtrCount
+            );
+
+            CDS_EXPORT_API ~smr();
+
+            CDS_EXPORT_API void detach_all_thread();
+
+        private:
+            //@cond
+            CDS_EXPORT_API thread_record* create_thread_data();
+            CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
+
+            /// Allocates Hazard Pointer SMR thread private data
+            CDS_EXPORT_API thread_record* alloc_thread_data();
+
+            /// Free HP SMR thread-private data
+            CDS_EXPORT_API void free_thread_data( thread_record* pRec );
+            //@endcond
+
+        private:
+            static CDS_EXPORT_API smr* instance_;
+
+            atomics::atomic< thread_record*>    thread_list_;   ///< Head of thread list
+            size_t const        initial_hazard_count_;  ///< initial number of hazard pointers per thread
+            hp_allocator        hp_allocator_;
+            retired_allocator   retired_allocator_;
+
+            // temporaries
+            std::atomic<size_t> last_plist_size_;   ///< HP array size in last scan() call
+        };
+
+        // for backward compatibility
+        typedef smr GarbageCollector;
+
+
+        // inlines
+        inline hp_allocator& hp_allocator::instance()
+        {
+            return smr::instance().get_hp_allocator();
+        }
+
+        inline retired_allocator& retired_allocator::instance()
+        {
+            return smr::instance().get_retired_allocator();
+        }
+
+    } // namespace dhp
+
+
+    /// Dynamic Hazard Pointer garbage collector
+    /**  @ingroup cds_garbage_collector
+        @headerfile cds/gc/dhp.h
+
+        Implementation of Dynamic Hazard Pointer garbage collector.
+
+        Sources:
+            - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
+            - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
+            - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
+
+        Dynamic Hazard Pointer SMR (safe memory reclamation) provides an unbounded number of hazard pointers per thread,
+        unlike classic Hazard Pointer SMR in which the count of hazard pointers per thread is limited.
+
+        See \ref cds_how_to_use "How to use" section for details on how to apply the garbage collector.
+    */
+    class DHP
+    {
+    public:
+        /// Native guarded pointer type
+        typedef void* guarded_pointer;
+
+        /// Atomic reference
+        template <typename T> using atomic_ref = atomics::atomic<T *>;
+
+        /// Atomic type
+        /**
+            @headerfile cds/gc/dhp.h
+        */
+        template <typename T> using atomic_type = atomics::atomic<T>;
+
+        /// Atomic marked pointer
+        template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
+
+
+        /// Dynamic Hazard Pointer guard
+        /**
+            A guard is a hazard pointer.
+            Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer.
+
+            A \p %Guard object is movable but not copyable.
+
+            The guard object can be in two states:
+            - unlinked - the guard is not linked with any internal hazard pointer.
+              In this state no operation except \p link() and move assignment is supported.
+            - linked (default) - the guard allocates an internal hazard pointer and is fully operable.
+
+            For performance reasons the implementation does not check the state of the guard at runtime.
+
+            @warning Move assignment can leave the guard in the unlinked state; use with care.
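+
+            A minimal usage sketch (illustrative only; \p item and the atomic \p head_ are hypothetical names):
+            \code
+            cds::gc::DHP::Guard g;          // allocates a hazard pointer (linked state)
+            item* p = g.protect( head_ );   // head_ is atomics::atomic<item*>
+            // p cannot be reclaimed while g protects it
+            g.clear();                      // the item may be reclaimed again
+            \endcode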
+        */
+        class Guard
+        {
+        public:
+            /// Default ctor allocates a guard (hazard pointer) from thread-private storage
+            Guard() CDS_NOEXCEPT
+                : guard_( dhp::smr::tls()->hazards_.alloc() )
+            {}
+
+            /// Initializes an unlinked guard, i.e. the guard contains no hazard pointer. Used for move semantics support
+            explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
+                : guard_( nullptr )
+            {}
+
+            /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
+            Guard( Guard&& src ) CDS_NOEXCEPT
+                : guard_( src.guard_ )
+            {
+                src.guard_ = nullptr;
+            }
+
+            /// Move assignment: the internal guards are swapped between \p src and \p this
+            /**
+                @warning \p src will be in the unlinked state if \p this was unlinked on entry.
+            */
+            Guard& operator=( Guard&& src ) CDS_NOEXCEPT
+            {
+                std::swap( guard_, src.guard_ );
+                return *this;
+            }
+
+            /// Copy ctor is prohibited - the guard is not copyable
+            Guard( Guard const& ) = delete;
+
+            /// Copy assignment is prohibited
+            Guard& operator=( Guard const& ) = delete;
+
+            /// Frees the internal hazard pointer if the guard is in linked state
+            ~Guard()
+            {
+                unlink();
+            }
+
+            /// Checks if the guard object is linked with an internal hazard pointer
+            bool is_linked() const
+            {
+                return guard_ != nullptr;
+            }
+
+            /// Links the guard with an internal hazard pointer if the guard is in the unlinked state
+            void link()
+            {
+                if ( !guard_ )
+                    guard_ = dhp::smr::tls()->hazards_.alloc();
+            }
+
+            /// Unlinks the guard from the internal hazard pointer; the guard enters the unlinked state
+            void unlink()
+            {
+                if ( guard_ ) {
+                    dhp::smr::tls()->hazards_.free( guard_ );
+                    guard_ = nullptr;
+                }
+            }
+
+            /// Protects a pointer of type <tt> atomic<T*> </tt>
+            /**
+                Returns the value of \p toGuard.
+
+                The function tries to load \p toGuard and to store it
+                to the HP slot repeatedly until the guard's value equals \p toGuard.
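+
+                For example (illustrative sketch; \p node and the atomic \p head_ are hypothetical names):
+                \code
+                Guard g;
+                node* p = g.protect( head_ );   // head_ is atomics::atomic<node*>
+                // p is protected from reclamation while g holds it
+                \endcode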
+            */
+            template <typename T>
+            T protect( atomics::atomic<T> const& toGuard )
+            {
+                assert( guard_ != nullptr );
+
+                T pCur = toGuard.load(atomics::memory_order_acquire);
+                T pRet;
+                do {
+                    pRet = assign( pCur );
+                    pCur = toGuard.load(atomics::memory_order_acquire);
+                } while ( pRet != pCur );
+                return pCur;
+            }
+
+            /// Protects a converted pointer of type <tt> atomic<T*> </tt>
+            /**
+                Returns the value of \p toGuard.
+
+                The function tries to load \p toGuard and to store the result of the functor \p f
+                to the HP slot repeatedly until the guard's value equals \p toGuard.
+
+                The function is useful for intrusive containers when \p toGuard is a node pointer
+                that should be converted to a pointer to the value type before guarding.
+                The parameter \p f of type Func is a functor that makes this conversion:
+                \code
+                    struct functor {
+                        value_type * operator()( T * p );
+                    };
+                \endcode
+                Actually, the result of <tt>f( toGuard.load())</tt> is assigned to the hazard pointer.
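+
+                For example (illustrative sketch; \p node with a \p value member and the atomic \p head_ are hypothetical names):
+                \code
+                Guard g;
+                node* n = g.protect( head_,
+                    []( node* p ) { return p ? &p->value : nullptr; } );    // the hazard pointer stores &p->value
+                \endcode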
+            */
+            template <typename T, class Func>
+            T protect( atomics::atomic<T> const& toGuard, Func f )
+            {
+                assert( guard_ != nullptr );
+
+                T pCur = toGuard.load(atomics::memory_order_acquire);
+                T pRet;
+                do {
+                    pRet = pCur;
+                    assign( f( pCur ));
+                    pCur = toGuard.load(atomics::memory_order_acquire);
+                } while ( pRet != pCur );
+                return pCur;
+            }
+
+            /// Store \p p to the guard
+            /**
+                The function is just an assignment, no loop is performed.
+                Can be used for a pointer that cannot be changed concurrently
+                or for an already guarded pointer.
+            */
+            template <typename T>
+            T* assign( T* p )
+            {
+                assert( guard_ != nullptr );
+
+                guard_->set( p );
+                dhp::smr::tls()->sync();
+                return p;
+            }
+
+            //@cond
+            std::nullptr_t assign( std::nullptr_t )
+            {
+                assert( guard_ != nullptr );
+
+                clear();
+                return nullptr;
+            }
+            //@endcond
+
+            /// Store marked pointer \p p to the guard
+            /**
+                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
+                Can be used for a marked pointer that cannot be changed concurrently
+                or for an already guarded pointer.
+            */
+            template <typename T, int BITMASK>
+            T* assign( cds::details::marked_ptr<T, BITMASK> p )
+            {
+                return assign( p.ptr());
+            }
+
+            /// Copy from \p src guard to \p this guard
+            void copy( Guard const& src )
+            {
+                assign( src.get_native());
+            }
+
+            /// Clears value of the guard
+            void clear()
+            {
+                assert( guard_ != nullptr );
+
+                guard_->clear();
+            }
+
+            /// Gets the value currently protected (relaxed read)
+            template <typename T>
+            T * get() const
+            {
+                assert( guard_ != nullptr );
+                return guard_->get_as<T>();
+            }
+
+            /// Gets native guarded pointer stored
+            void* get_native() const
+            {
+                assert( guard_ != nullptr );
+                return guard_->get();
+            }
+
+            //@cond
+            dhp::guard* release()
+            {
+                dhp::guard* g = guard_;
+                guard_ = nullptr;
+                return g;
+            }
+
+            dhp::guard*& guard_ref()
+            {
+                return guard_;
+            }
+            //@endcond
+
+        private:
+            //@cond
+            dhp::guard* guard_;
+            //@endcond
+        };
+
+        /// Array of Dynamic Hazard Pointer guards
+        /**
+            The class is intended for allocating an array of hazard pointer guards.
+            Template parameter \p Count defines the size of the array.
+
+            A \p %GuardArray object is not copy- and move-constructible
+            and not copy- and move-assignable.
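+
+            A minimal usage sketch (illustrative only; \p node and the atomics \p prev_, \p next_ are hypothetical names):
+            \code
+            cds::gc::DHP::GuardArray<2> guards;
+            node* pPrev = guards.protect( 0, prev_ );   // slot 0 protects the predecessor
+            node* pNext = guards.protect( 1, next_ );   // slot 1 protects the successor
+            // both nodes stay protected until the slots are cleared or the array is destroyed
+            \endcode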
+        */
+        template <size_t Count>
+        class GuardArray
+        {
+        public:
+            /// Rebind array for other size \p OtherCount
+            template <size_t OtherCount>
+            struct rebind {
+                typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
+            };
+
+            /// Array capacity
+            static CDS_CONSTEXPR const size_t c_nCapacity = Count;
+
+        public:
+            /// Default ctor allocates \p Count hazard pointers
+            GuardArray()
+            {
+                dhp::smr::tls()->hazards_.alloc( guards_ );
+            }
+
+            /// Move ctor is prohibited
+            GuardArray( GuardArray&& ) = delete;
+
+            /// Move assignment is prohibited
+            GuardArray& operator=( GuardArray&& ) = delete;
+
+            /// Copy ctor is prohibited
+            GuardArray( GuardArray const& ) = delete;
+
+            /// Copy assignment is prohibited
+            GuardArray& operator=( GuardArray const& ) = delete;
+
+            /// Frees allocated hazard pointers
+            ~GuardArray()
+            {
+                dhp::smr::tls()->hazards_.free( guards_ );
+            }
+
+            /// Protects a pointer of type \p atomic<T*>
+            /**
+                Returns the value of \p toGuard.
+
+                The function tries to load \p toGuard and to store it
+                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard.
+            */
+            template <typename T>
+            T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
+            {
+                assert( nIndex < capacity() );
+
+                T pRet;
+                do {
+                    pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
+                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
+
+                return pRet;
+            }
+
+            /// Protects a pointer of type \p atomic<T*>
+            /**
+                Returns the value of \p toGuard.
+
+                The function tries to load \p toGuard and to store the result of the functor \p f
+                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard.
+
+                The function is useful for intrusive containers when \p toGuard is a node pointer
+                that should be converted to a pointer to the value type before guarding.
+                The parameter \p f of type Func is a functor to make that conversion:
+                \code
+                    struct functor {
+                        value_type * operator()( T * p );
+                    };
+                \endcode
+                Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
+            */
+            template <typename T, class Func>
+            T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
+            {
+                assert( nIndex < capacity() );
+
+                T pRet;
+                do {
+                    assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
+                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
+
+                return pRet;
+            }
+
+            /// Store \p p to the slot \p nIndex
+            /**
+                The function is just an assignment, no loop is performed.
+            */
+            template <typename T>
+            T * assign( size_t nIndex, T * p )
+            {
+                assert( nIndex < capacity() );
+
+                guards_.set( nIndex, p );
+                dhp::smr::tls()->sync();
+                return p;
+            }
+
+            /// Store marked pointer \p p to the guard
+            /**
+                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
+                Can be used for a marked pointer that cannot be changed concurrently
+                or for an already guarded pointer.
+            */
+            template <typename T, int Bitmask>
+            T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
+            {
+                return assign( nIndex, p.ptr());
+            }
+
+            /// Copy guarded value from \p src guard to slot at index \p nIndex
+            void copy( size_t nIndex, Guard const& src )
+            {
+                assign( nIndex, src.get_native());
+            }
+
+            /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
+            void copy( size_t nDestIndex, size_t nSrcIndex )
+            {
+                assign( nDestIndex, get_native( nSrcIndex ));
+            }
+
+            /// Clear value of the slot \p nIndex
+            void clear( size_t nIndex )
+            {
+                guards_.clear( nIndex );
+            }
+
+            /// Get current value of slot \p nIndex
+            template <typename T>
+            T * get( size_t nIndex ) const
+            {
+                assert( nIndex < capacity() );
+                return guards_[nIndex]->template get_as<T>();
+            }
+
+            /// Get native guarded pointer stored
+            guarded_pointer get_native( size_t nIndex ) const
+            {
+                assert( nIndex < capacity() );
+                return guards_[nIndex]->get();
+            }
+
+            //@cond
+            dhp::guard* release( size_t nIndex ) CDS_NOEXCEPT
+            {
+                return guards_.release( nIndex );
+            }
+            //@endcond
+
+            /// Capacity of the guard array
+            static CDS_CONSTEXPR size_t capacity()
+            {
+                return Count;
+            }
+
+        private:
+            //@cond
+            dhp::guard_array<c_nCapacity> guards_;
+            //@endcond
+        };
+
+        /// Guarded pointer
+        /**
+            A guarded pointer is a pair of a pointer and GC's guard.
+            Usually, it is used for returning a pointer to an item from a lock-free container.
+            The guard prevents the pointer from being disposed (freed) prematurely by the GC.
+            After the \p %guarded_ptr object is destroyed, the pointer can be disposed (freed) automatically at any time.
+
+            Template arguments:
+            - \p GuardedType - a type which the guard stores
+            - \p ValueType - a value type
+            - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
+
+            For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
+            In such case the \p %guarded_ptr is:
+            @code
+            typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr;
+            @endcode
+
+            For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
+            For example:
+            @code
+            struct foo {
+                int const   key;
+                std::string value;
+            };
+
+            struct value_accessor {
+                std::string* operator()( foo* pFoo ) const
+                {
+                    return &(pFoo->value);
+                }
+            };
+
+            // Guarded ptr
+            typedef cds::gc::DHP::guarded_ptr< foo, std::string, value_accessor > nonintrusive_guarded_ptr;
+            @endcode
+
+            You don't need to use this class directly.
+            All set/map container classes from \p libcds declare a typedef for \p %guarded_ptr with the appropriate casting functor.
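+
+            Usage sketch (illustrative only; \p map_type is a hypothetical \p libcds map based on \p cds::gc::DHP
+            whose mapped type is \p std::string, and \p my_map is an object of that type):
+            @code
+            map_type::guarded_ptr gp( my_map.get( 5 ));
+            if ( gp ) {
+                // the item is protected while gp is alive
+                std::cout << *gp << std::endl;
+            }
+            // after gp is destroyed the item may be disposed at any time
+            @endcode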
+        */
+        template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
+        class guarded_ptr
+        {
+            //@cond
+            struct trivial_cast {
+                ValueType * operator()( GuardedType * p ) const
+                {
+                    return p;
+                }
+            };
+
+            template <typename GT, typename VT, typename C> friend class guarded_ptr;
+            //@endcond
+
+        public:
+            typedef GuardedType guarded_type; ///< Guarded type
+            typedef ValueType   value_type;   ///< Value type
+
+            /// Functor for casting \p guarded_type to \p value_type
+            typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
+
+        public:
+            /// Creates empty guarded pointer
+            guarded_ptr() CDS_NOEXCEPT
+                : guard_( nullptr )
+            {}
+
+            //@cond
+            explicit guarded_ptr( dhp::guard* g ) CDS_NOEXCEPT
+                : guard_( g )
+            {}
+
+            /// Initializes guarded pointer with \p p
+            explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
+                : guard_( nullptr )
+            {
+                reset( p );
+            }
+            explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
+                : guard_( nullptr )
+            {}
+            //@endcond
+
+            /// Move ctor
+            guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
+                : guard_( gp.guard_ )
+            {
+                gp.guard_ = nullptr;
+            }
+
+            /// Move ctor
+            template <typename GT, typename VT, typename C>
+            guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
+                : guard_( gp.guard_ )
+            {
+                gp.guard_ = nullptr;
+            }
+
+            /// Ctor from \p Guard
+            explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
+                : guard_( g.release())
+            {}
+
+            /// The guarded pointer is not copy-constructible
+            guarded_ptr( guarded_ptr const& gp ) = delete;
+
+            /// Clears the guarded pointer
+            /**
+                \ref release is called if the guarded pointer is not \ref empty
+            */
+            ~guarded_ptr() CDS_NOEXCEPT
+            {
+                release();
+            }
+
+            /// Move-assignment operator
+            guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
+            {
+                std::swap( guard_, gp.guard_ );
+                return *this;
+            }
+
+            /// Move-assignment from \p Guard
+            guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
+            {
+                std::swap( guard_, g.guard_ref());
+                return *this;
+            }
+
+            /// The guarded pointer is not copy-assignable
+            guarded_ptr& operator=(guarded_ptr const& gp) = delete;
+
+            /// Returns a pointer to guarded value
+            value_type * operator ->() const CDS_NOEXCEPT
+            {
+                assert( !empty());
+                return value_cast()( guard_->get_as<guarded_type>() );
+            }
+
+            /// Returns a reference to guarded value
+            value_type& operator *() CDS_NOEXCEPT
+            {
+                assert( !empty());
+                return *value_cast()( guard_->get_as<guarded_type>() );
+            }
+
+            /// Returns const reference to guarded value
+            value_type const& operator *() const CDS_NOEXCEPT
+            {
+                assert( !empty());
+                return *value_cast()(reinterpret_cast<guarded_type *>(guard_->get()));
+            }
+
+            /// Checks if the guarded pointer is \p nullptr
+            bool empty() const CDS_NOEXCEPT
+            {
+                return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr;
+            }
+
+            /// \p bool operator returns <tt>!empty()</tt>
+            explicit operator bool() const CDS_NOEXCEPT
+            {
+                return !empty();
+            }
+
+            /// Clears guarded pointer
+            /**
+                If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
+                Dereferencing the guarded pointer after \p release() is dangerous.
+            */
+            void release() CDS_NOEXCEPT
+            {
+                free_guard();
+            }
+
+            //@cond
+            // For internal use only!!!
+            void reset(guarded_type * p) CDS_NOEXCEPT
+            {
+                alloc_guard();
+                assert( guard_ );
+                guard_->set( p );
+            }
+
+            //@endcond
+
+        private:
+            //@cond
+            void alloc_guard()
+            {
+                if ( !guard_ )
+                    guard_ = dhp::smr::tls()->hazards_.alloc();
+            }
+
+            void free_guard()
+            {
+                if ( guard_ ) {
+                    dhp::smr::tls()->hazards_.free( guard_ );
+                    guard_ = nullptr;
+                }
+            }
+            //@endcond
+
+        private:
+            //@cond
+            dhp::guard* guard_;
+            //@endcond
+        };
+
+    public:
+        /// Initializes %DHP memory manager singleton
+        /**
+            The constructor creates and initializes the %DHP global object.
+            The %DHP object should be created before using CDS data structures based on \p %cds::gc::DHP. Usually,
+            it is created at the beginning of the \p main() function.
+            After creating the global object you may use CDS data structures based on \p %cds::gc::DHP.
+
+            \p nInitialHazardPtrCount - initial count of hazard pointers (guards) allocated for each thread.
+                When a thread is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
+                If needed, the thread's local guard pool grows automatically from the common pool.
+                When the thread terminates, its guard pool is returned to the GC's common pool.
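+
+            A typical initialization sketch (see \ref cds_how_to_use "How to use" for the complete pattern):
+            \code
+            int main()
+            {
+                // Initialize libcds infrastructure
+                cds::Initialize();
+                {
+                    // Initialize DHP SMR singleton
+                    cds::gc::DHP dhpGC;
+
+                    // Attach the current (main) thread to libcds
+                    cds::threading::Manager::attachThread();
+
+                    // Use DHP-based containers here
+                }
+                // Terminate libcds infrastructure
+                cds::Terminate();
+            }
+            \endcode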
+        */
+        DHP(
+            size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
+        )
+        {
+            dhp::smr::construct( nInitialHazardPtrCount );
+        }
+
+        /// Destroys %DHP memory manager
+        /**
+            The destructor destroys the %DHP global object. After calling this function you may \b NOT
+            use CDS data structures based on \p %cds::gc::DHP.
+            Usually, the %DHP object is destroyed at the end of your \p main().
+        */
+        ~DHP()
+        {
+            dhp::GarbageCollector::destruct( true );
+        }
+
+        /// Checks if the count of hazard pointers is no less than \p nCountNeeded
+        /**
+            The function always returns \p true since the guard count is unlimited for
+            \p %gc::DHP garbage collector.
+        */
+        static CDS_CONSTEXPR bool check_available_guards(
+#ifdef CDS_DOXYGEN_INVOKED
+            size_t nCountNeeded,
+#else
+            size_t
+#endif
+        )
+        {
+            return true;
+        }
+
+        /// Set memory management functions
+        /**
+            @note This function may be called <b>BEFORE</b> creating an instance
+            of Dynamic Hazard Pointer SMR
+
+            The SMR object allocates some memory for thread-specific data and for
+            the SMR object itself.
+            By default, the standard \p new and \p delete operators are used for this.
+        */
+        static void set_memory_allocator(
+            void* ( *alloc_func )( size_t size ),   ///< \p malloc() function
+            void( *free_func )( void * p )          ///< \p free() function
+        )
+        {
+            dhp::smr::set_memory_allocator( alloc_func, free_func );
+        }
+
+        /// Retire pointer \p p with function \p func
+        /**
+            The function places pointer \p p into the array of pointers ready for removal
+            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
+            \p func is a disposer: when \p p can be safely removed, \p func is called.
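+
+            For example (illustrative sketch; \p foo and \p dispose_foo are hypothetical names):
+            \code
+            struct foo { int value; };
+            void dispose_foo( foo* p ) { delete p; }
+
+            foo* p = new foo;
+            // ... use p in a lock-free manner, then unlink it from the container ...
+            cds::gc::DHP::retire( p, dispose_foo );  // dispose_foo( p ) is called when no hazard pointer protects p
+            \endcode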
+        */
+        template <typename T>
+        static void retire( T * p, void (* func)(T *))
+        {
+            dhp::thread_data* rec = dhp::smr::tls();
+            if ( !rec->retired_.push( dhp::retired_ptr( p, func ) ) )
+                dhp::smr::instance().scan( rec );
+        }
+
+        /// Retire pointer \p p with functor of type \p Disposer
+        /**
+            The function places pointer \p p into the array of pointers ready for removal
+            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
+
+            Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
+            \code
+            template <typename T>
+            struct disposer {
+                void operator()( T * p )    ;   // disposing operator
+            };
+            \endcode
+            Since the functor call can happen at any time after the \p retire() call, additional restrictions are imposed on the \p Disposer type:
+            - it should be a stateless functor
+            - it should be default-constructible
+            - the result of the functor call with argument \p p should not depend on where the functor is called.
+
+            \par Examples:
+            Operator \p delete functor:
+            \code
+            template <typename T>
+            struct disposer {
+                void operator ()( T * p ) {
+                    delete p;
+                }
+            };
+
+            // How to call DHP::retire method
+            int * p = new int;
+
+            // ... use p in lock-free manner
+
+            cds::gc::DHP::retire<disposer>( p ) ;   // place p to retired pointer array of DHP SMR
+            \endcode
+
+            Functor based on \p std::allocator :
+            \code
+            template <typename Alloc = std::allocator<int> >
+            struct disposer {
+                template <typename T>
+                void operator()( T * p ) {
+                    typedef typename Alloc::template rebind<T>::other alloc_t;
+                    alloc_t a;
+                    a.destroy( p );
+                    a.deallocate( p, 1 );
+                }
+            };
+            \endcode
+        */
+        template <class Disposer, typename T>
+        static void retire( T * p )
+        {
+            if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
+                scan();
+        }
+
+        /// Checks if Dynamic Hazard Pointer GC is constructed and may be used
+        static bool isUsed()
+        {
+            return dhp::smr::isUsed();
+        }
+
+        /// Forced GC cycle call for current thread
+        /**
+            Usually, this function should not be called directly.
+        */
+        static void scan()
+        {
+            dhp::smr::instance().scan( dhp::smr::tls() );
+        }
+
+        /// Synonym for \p scan()
+        static void force_dispose()
+        {
+            scan();
+        }
+    };
+
+}} // namespace cds::gc
+
+#endif // #ifndef CDSLIB_GC_DHP_SMR_H
+
+
index 61ec83a..e42fedb 100644 (file)
 #ifndef CDSLIB_GC_HP_H
 #define CDSLIB_GC_HP_H
 
-#include <cds/gc/impl/hp_decl.h>
-#include <cds/gc/impl/hp_impl.h>
+#include <cds/gc/hp_smr.h>
 #include <cds/details/lib.h>
+#include <cds/threading/model.h>
 
 /**
-    @page cds_garbage_collectors_comparison GC comparison
+    @page cds_garbage_collectors_comparison SMR (Safe Memory Reclamation schema) comparison
     @ingroup cds_garbage_collector
 
     <table>
@@ -47,7 +47,7 @@
         </tr>
         <tr>
             <td>Max number of guarded (hazard) pointers per thread</td>
-            <td>limited (specifies in GC object ctor)</td>
+            <td>limited (specified in SMR object ctor)</td>
             <td>unlimited (dynamically allocated when needed)</td>
         </tr>
         <tr>
diff --git a/cds/gc/hp_smr.h b/cds/gc/hp_smr.h
new file mode 100644 (file)
index 0000000..9d7aa48
--- /dev/null
@@ -0,0 +1,1461 @@
+/*
+    This file is a part of libcds - Concurrent Data Structures library
+
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
+
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_GC_HP_SMR_H
+#define CDSLIB_GC_HP_SMR_H
+
+#include <exception>
+#include <cds/gc/details/hp_common.h>
+#include <cds/details/throw_exception.h>
+#include <cds/details/static_functor.h>
+#include <cds/details/marked_ptr.h>
+#include <cds/user_setup/cache_line.h>
+
+namespace cds { namespace gc {
+    namespace hp {
+        using namespace cds::gc::hp::common;
+
+        /// Exception "Not enough Hazard Pointer"
+        class not_enought_hazard_ptr: public std::length_error
+        {
+        public:
+            //@cond
+            not_enought_hazard_ptr()
+                : std::length_error( "Not enough Hazard Pointer" )
+            {}
+            //@endcond
+        };
+
+        /// Exception "Hazard Pointer SMR is not initialized"
+        class not_initialized: public std::runtime_error
+        {
+        public:
+            not_initialized()
+                : std::runtime_error( "Global Hazard Pointer SMR object is not initialized" )
+            {}
+        };
+
+        /// Per-thread hazard pointer storage
+        class thread_hp_storage {
+        public:
+            thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
+                : free_head_( arr )
+                , array_( arr )
+                , capacity_( nSize )
+#       ifdef CDS_ENABLE_HPSTAT
+                , alloc_guard_count_(0)
+                , free_guard_count_(0)
+#       endif
+            {
+                for ( guard* pEnd = arr + nSize - 1; arr < pEnd; ++arr )
+                    arr->next_ = arr + 1;
+                arr->next_ = nullptr;
+            }
+
+            thread_hp_storage() = delete;
+            thread_hp_storage( thread_hp_storage const& ) = delete;
+            thread_hp_storage( thread_hp_storage&& ) = delete;
+
+            size_t capacity() const CDS_NOEXCEPT
+            {
+                return capacity_;
+            }
+
+            bool full() const CDS_NOEXCEPT
+            {
+                return free_head_ == nullptr;
+            }
+
+            guard* alloc()
+            {
+#       ifdef CDS_DISABLE_SMR_EXCEPTION
+                assert( !full());
+#       else
+                if ( full() )
+                    CDS_THROW_EXCEPTION( not_enought_hazard_ptr());
+#       endif
+                guard* g = free_head_;
+                free_head_ = g->next_;
+                CDS_HPSTAT( ++alloc_guard_count_ );
+                return g;
+            }
+
+            void free( guard* g ) CDS_NOEXCEPT
+            {
+                assert( g >= array_ && g < array_ + capacity() );
+
+                if ( g ) {
+                    g->clear();
+                    g->next_ = free_head_;
+                    free_head_ = g;
+                    CDS_HPSTAT( ++free_guard_count_ );
+                }
+            }
+
+            template< size_t Capacity>
+            size_t alloc( guard_array<Capacity>& arr )
+            {
+                size_t i;
+                guard* g = free_head_;
+                for ( i = 0; i < Capacity && g; ++i ) {
+                    arr.reset( i, g );
+                    g = g->next_;
+                }
+
+#       ifdef CDS_DISABLE_SMR_EXCEPTION
+                assert( i == Capacity );
+#       else
+                if ( i != Capacity )
+                    CDS_THROW_EXCEPTION( not_enought_hazard_ptr());
+#       endif
+                free_head_ = g;
+                CDS_HPSTAT( alloc_guard_count_ += Capacity );
+                return i;
+            }
+
+            template <size_t Capacity>
+            void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
+            {
+                guard* gList = free_head_;
+                for ( size_t i = 0; i < Capacity; ++i ) {
+                    guard* g = arr[i];
+                    if ( g ) {
+                        g->clear();
+                        g->next_ = gList;
+                        gList = g;
+                        CDS_HPSTAT( ++free_guard_count_ );
+                    }
+                }
+                free_head_ = gList;
+            }
+
+            void clear()
+            {
+                for ( guard* cur = array_, *last = array_ + capacity(); cur < last; ++cur )
+                    cur->clear();
+            }
+
+            guard& operator[]( size_t idx )
+            {
+                assert( idx < capacity() );
+
+                return array_[idx];
+            }
+
+            static size_t calc_array_size( size_t capacity )
+            {
+                return sizeof( guard ) * capacity;
+            }
+
+        private:
+            guard*          free_head_; ///< Head of free guard list
+            guard* const    array_;     ///< HP array
+            size_t const    capacity_;  ///< HP array capacity
+#       ifdef CDS_ENABLE_HPSTAT
+        public:
+            size_t          alloc_guard_count_;
+            size_t          free_guard_count_;
+#       endif
+        };
+
+        /// Per-thread retired array
+        class retired_array
+        {
+        public:
+            retired_array( retired_ptr* arr, size_t capacity ) CDS_NOEXCEPT
+                : current_( arr )
+                , last_( arr + capacity )
+                , retired_( arr )
+#       ifdef CDS_ENABLE_HPSTAT
+                , retire_call_count_(0)
+#       endif
+            {}
+
+            retired_array() = delete;
+            retired_array( retired_array const& ) = delete;
+            retired_array( retired_array&& ) = delete;
+
+            size_t capacity() const CDS_NOEXCEPT
+            {
+                return last_ - retired_;
+            }
+
+            size_t size() const CDS_NOEXCEPT
+            {
+                return current_ - retired_;
+            }
+
+            bool push( retired_ptr&& p ) CDS_NOEXCEPT
+            {
+                *current_ = p;
+                CDS_HPSTAT( ++retire_call_count_ );
+                return ++current_ < last_;
+            }
+
+            retired_ptr* first() const CDS_NOEXCEPT
+            {
+                return retired_;
+            }
+
+            retired_ptr* last() const CDS_NOEXCEPT
+            {
+                return current_;
+            }
+
+            void reset( size_t nSize ) CDS_NOEXCEPT
+            {
+                current_ = first() + nSize;
+            }
+
+            bool full() const CDS_NOEXCEPT
+            {
+                return current_ == last_;
+            }
+
+            static size_t calc_array_size( size_t capacity )
+            {
+                return sizeof( retired_ptr ) * capacity;
+            }
+
+        private:
+            retired_ptr*            current_;
+            retired_ptr* const      last_;
+            retired_ptr* const      retired_;
+#       ifdef CDS_ENABLE_HPSTAT
+        public:
+            size_t  retire_call_count_;
+#       endif
+
+        };
+
+        /// Internal statistics
+        struct stat {
+            size_t  guard_allocated;    ///< Count of allocated HP guards
+            size_t  guard_freed;        ///< Count of freed HP guards
+            size_t  retired_count;      ///< Count of retired pointers
+            size_t  free_count;         ///< Count of free pointers
+            size_t  scan_count;         ///< Count of \p scan() call
+            size_t  help_scan_count;    ///< Count of \p help_scan() call
+
+            size_t  thread_rec_count;   ///< Count of thread records
+
+            stat()
+            {
+                clear();
+            }
+
+            void clear()
+            {
+                guard_allocated =
+                    guard_freed =
+                    retired_count =
+                    free_count =
+                    scan_count =
+                    help_scan_count =
+                    thread_rec_count = 0;
+            }
+        };
+
+        /// Per-thread data
+        struct thread_data {
+            thread_hp_storage   hazards_;   ///< Hazard pointers private to the thread
+            retired_array       retired_;   ///< Retired data private to the thread
+
+            stat                stat_;      ///< Internal statistics for the thread
+
+            char pad1_[cds::c_nCacheLineSize];
+            atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
+            char pad2_[cds::c_nCacheLineSize];
+
+            thread_data( guard* guards, size_t guard_count, retired_ptr* retired_arr, size_t retired_capacity )
+                : hazards_( guards, guard_count )
+                , retired_( retired_arr, retired_capacity )
+                , sync_(0)
+            {}
+
+            thread_data() = delete;
+            thread_data( thread_data const& ) = delete;
+            thread_data( thread_data&& ) = delete;
+
+            void sync()
+            {
+                sync_.fetch_add( 1, atomics::memory_order_acq_rel );
+            }
+        };
+
+        /// smr::scan() strategy
+        enum scan_type {
+            classic,    ///< classic scan as described in Michael's works (see smr::classic_scan() )
+            inplace     ///< inplace scan without allocation (see smr::inplace_scan() )
+        };
+
+        // Hazard Pointer SMR (Safe Memory Reclamation)
+        class smr
+        {
+            struct thread_record;
+
+        public:
+            /// Returns the instance of Hazard Pointer \ref smr
+            static smr& instance()
+            {
+#       ifdef CDS_DISABLE_SMR_EXCEPTION
+                assert( instance_ != nullptr );
+#       else
+                if ( !instance_ )
+                    CDS_THROW_EXCEPTION( not_initialized());
+#       endif
+                return *instance_;
+            }
+
+            /// Creates Hazard Pointer SMR singleton
+            /**
+                Hazard Pointer SMR is a singleton. If the HP instance is not yet initialized, the function creates the instance.
+                Otherwise it does nothing.
+
+                Michael's HP reclamation schema depends on three parameters:
+                - \p nHazardPtrCount - HP count per thread. Usually it is a small number (2-4) depending on
+                    the data structure algorithms. By default, if \p nHazardPtrCount = 0,
+                    the function uses the maximum HP count for the CDS library
+                - \p nMaxThreadCount - max count of threads using HP GC in your application. Default is 100.
+                - \p nMaxRetiredPtrCount - capacity of the array of retired pointers for each thread. Must be greater than
+                    <tt>nHazardPtrCount * nMaxThreadCount</tt>.
+                    Default is <tt>2 * nHazardPtrCount * nMaxThreadCount</tt>
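+
+                For example (illustrative sketch; usually the singleton is created indirectly by constructing
+                a \p cds::gc::HP object in \p main()):
+                \code
+                cds::gc::hp::smr::construct(
+                    4,                      // 4 hazard pointers per thread
+                    100,                    // up to 100 threads
+                    0,                      // retired array capacity: default 2 * 4 * 100
+                    cds::gc::hp::inplace    // scan without additional allocations
+                );
+                \endcode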
+            */
+            static CDS_EXPORT_API void construct(
+                size_t nHazardPtrCount = 0,     ///< Hazard pointer count per thread
+                size_t nMaxThreadCount = 0,     ///< Max count of simultaneously working threads in your application
+                size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
+                scan_type nScanType = inplace   ///< Scan type (see \ref scan_type enum)
+            );
+
+            //@cond
+            // for back-compatibility
+            static void Construct(
+                size_t nHazardPtrCount = 0,     ///< Hazard pointer count per thread
+                size_t nMaxThreadCount = 0,     ///< Max count of simultaneously working threads in your application
+                size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
+                scan_type nScanType = inplace   ///< Scan type (see \ref scan_type enum)
+            )
+            {
+                construct( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
+            }
+
+            //@endcond
+
+            /// Destroys global instance of \ref smr
+            /**
+                The parameter \p bDetachAll should be used carefully: if its value is \p true,
+                then the object being destroyed automatically detaches all attached threads. This feature
+                can be useful when you have no control over thread termination, for example,
+                when \p libcds is injected into an existing external thread.
+            */
+            static CDS_EXPORT_API void destruct(
+                bool bDetachAll = false     ///< Detach all threads
+            );
+
+            //@cond
+            // for back-compatibility
+            static void Destruct(
+                bool bDetachAll = false     ///< Detach all threads
+            )
+            {
+                destruct( bDetachAll );
+            }
+            //@endcond
+
+            /// Checks if global SMR object is constructed and may be used
+            static bool isUsed() CDS_NOEXCEPT
+            {
+                return instance_ != nullptr;
+            }
+
+            /// Set memory management functions
+            /**
+                @note This function may be called <b>BEFORE</b> creating an instance
+                of Hazard Pointer SMR
+
+                The SMR object allocates some memory for thread-specific data and for
+                the SMR object itself.
+                By default, the standard \p new and \p delete operators are used for this.
+            */
+            static CDS_EXPORT_API void set_memory_allocator(
+                void* ( *alloc_func )( size_t size ),
+                void (*free_func )( void * p )
+            );
+
+            /// Returns max Hazard Pointer count per thread
+            size_t get_hazard_ptr_count() const CDS_NOEXCEPT
+            {
+                return hazard_ptr_count_;
+            }
+
+            /// Returns max thread count
+            size_t get_max_thread_count() const CDS_NOEXCEPT
+            {
+                return max_thread_count_;
+            }
+
+            /// Returns max size of retired objects array
+            size_t get_max_retired_ptr_count() const CDS_NOEXCEPT
+            {
+                return max_retired_ptr_count_;
+            }
+
+            /// Get current scan strategy
+            scan_type get_scan_type() const
+            {
+                return scan_type_;
+            }
+
+            /// Checks that the required hazard pointer count \p nRequiredCount is less than or equal to the max hazard pointer count
+            /**
+                If <tt> nRequiredCount > get_hazard_ptr_count()</tt> then the exception \p not_enought_hazard_ptr is thrown
+            */
+            static void check_hazard_ptr_count( size_t nRequiredCount )
+            {
+                if ( instance().get_hazard_ptr_count() < nRequiredCount ) {
+#       ifdef CDS_DISABLE_SMR_EXCEPTION
+                    assert( false );    // not enough hazard ptr
+#       else
+                    CDS_THROW_EXCEPTION( not_enought_hazard_ptr() );
+#       endif
+                }
+            }
+
+            /// Returns thread-local data for the current thread
+            static CDS_EXPORT_API thread_data* tls();
+
+            static CDS_EXPORT_API void attach_thread();
+            static CDS_EXPORT_API void detach_thread();
+
+            /// Get internal statistics
+            void statistics( stat& st );
+
+        public: // for internal use only
+            /// The main garbage collecting function
+            /**
+                This function is called internally when the upper bound of the thread's list of reclaimed pointers
+                is reached.
+
+                The following scan algorithms are available:
+                - \ref hzp_gc_classic_scan "classic_scan" allocates memory for internal use
+                - \ref hzp_gc_inplace_scan "inplace_scan" does not allocate any memory
+
+                The algorithm is selected by the \p nScanType parameter of \p construct() (see \ref scan_type).
+            */
+            void scan( thread_data* pRec )
+            {
+                ( this->*scan_func_ )( pRec );
+            }
+
+            /// Helper scan routine
+            /**
+                The function guarantees that every node that is eligible for reuse is eventually freed, barring
+                thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
+                where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
+                to thread's list of reclaimed pointers.
+
+                The function is called internally by \p scan().
+            */
+            CDS_EXPORT_API void help_scan( thread_data* pThis );
+
+        private:
+            CDS_EXPORT_API smr(
+                size_t nHazardPtrCount,     ///< Hazard pointer count per thread
+                size_t nMaxThreadCount,     ///< Max count of simultaneously working threads in your application
+                size_t nMaxRetiredPtrCount, ///< Capacity of the array of retired objects for the thread
+                scan_type nScanType         ///< Scan type (see \ref scan_type enum)
+            );
+
+            CDS_EXPORT_API ~smr();
+
+            CDS_EXPORT_API void detach_all_thread();
+
+            /// Classic scan algorithm
+            /** @anchor hzp_gc_classic_scan
+                Classical scan algorithm as described in Michael's paper.
+
+                A scan includes four stages. The first stage involves scanning the array HP for non-null values.
+                Whenever a non-null value is encountered, it is inserted in a local list of currently protected pointer.
+                Only stage 1 accesses shared variables. The following stages operate only on private variables.
+
+                The second stage of a scan involves sorting the local list of protected pointers to allow
+                binary search in the third stage.
+
+                The third stage of a scan involves checking each reclaimed node
+                against the pointers in the local list of protected pointers. If the binary search yields
+                no match, the node is freed. Otherwise, it cannot be deleted now and must be kept in the thread's list
+                of reclaimed pointers.
+
+                The fourth stage prepares the thread's new private list of reclaimed pointers
+                that could not be freed during the current scan, where they remain until the next scan.
+
+                This algorithm allocates memory for an internal HP array.
+
+                This function is called internally when the upper bound of the thread's list of reclaimed pointers
+                is reached.
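+
+                A simplified sketch of the four stages (illustrative pseudo-implementation only; the container,
+                field and type names below are assumptions, not the actual code):
+                \code
+                    std::vector<void*> plist;                       // stage 1: collect non-null hazard pointers
+                    for ( thread_record* rec = thread_list; rec; rec = rec->next )
+                        for ( size_t i = 0; i < hazard_ptr_count; ++i )
+                            if ( void* hp = rec->hazard[i] )
+                                plist.push_back( hp );
+
+                    std::sort( plist.begin(), plist.end());         // stage 2: sort for binary search
+
+                    retired_array survivors;                        // stage 3: free unprotected nodes
+                    for ( retired_ptr& r : pRec->retired )
+                        if ( std::binary_search( plist.begin(), plist.end(), r.ptr ))
+                            survivors.push( r );
+                        else
+                            r.free();
+
+                    pRec->retired = std::move( survivors );         // stage 4: keep survivors until the next scan
+                \endcode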
+            */
+            CDS_EXPORT_API void classic_scan( thread_data* pRec );
+
+            /// In-place scan algorithm
+            /** @anchor hzp_gc_inplace_scan
+                Unlike the \p classic_scan() algorithm, \p %inplace_scan() does not allocate any memory.
+                All operations are performed in-place.
+            */
+            CDS_EXPORT_API void inplace_scan( thread_data* pRec );
+
+        private:
+            //@cond
+            CDS_EXPORT_API thread_record* create_thread_data();
+            CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
+
+            /// Allocates Hazard Pointer SMR thread private data
+            CDS_EXPORT_API thread_record* alloc_thread_data();
+
+            /// Free HP SMR thread-private data
+            CDS_EXPORT_API void free_thread_data( thread_record* pRec );
+
+            //@endcond
+
+        private:
+            static CDS_EXPORT_API smr* instance_;
+
+            atomics::atomic< thread_record*>    thread_list_;   ///< Head of thread list
+
+            size_t const    hazard_ptr_count_;      ///< max count of thread's hazard pointer
+            size_t const    max_thread_count_;      ///< max count of thread
+            size_t const    max_retired_ptr_count_; ///< max count of retired ptr per thread
+            scan_type const scan_type_;             ///< scan type (see \ref scan_type enum)
+            void ( smr::*scan_func_ )( thread_data* pRec );
+        };
+
+        // for backward compatibility
+        typedef smr GarbageCollector;
+
+    } // namespace hp
+
+
+    /// @defgroup cds_garbage_collector Garbage collectors
+
+    /// Hazard Pointer SMR (Safe Memory Reclamation)
+    /**  @ingroup cds_garbage_collector
+
+        Implementation of classic Hazard Pointer garbage collector.
+
+        Sources:
+            - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
+            - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
+            - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
+
+        The Hazard Pointer garbage collector is a singleton. The main user-level part of the Hazard Pointer schema is
+        the \p %cds::gc::HP class and its nested classes. Before using any HP-related class you must initialize HP
+        by constructing a \p %cds::gc::HP object at the beginning of your \p main().
+        See the \ref cds_how_to_use "How to use" section for details on how to apply the SMR schema.
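+
+        A minimal initialization sketch (the header names and the thread-attachment call follow the usual libcds
+        conventions; thread attachment is shown for the main thread only):
+        \code
+        #include <cds/init.h>
+        #include <cds/gc/hp.h>
+
+        int main()
+        {
+            cds::Initialize();      // initialize libcds infrastructure
+            {
+                cds::gc::HP hpGC;   // initialize Hazard Pointer SMR with default parameters
+
+                // each thread that uses HP-based containers must be attached to libcds
+                cds::threading::Manager::attachThread();
+
+                // ... use HP-based containers here ...
+            }
+            cds::Terminate();       // terminate libcds
+        }
+        \endcode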
+    */
+    class HP
+    {
+    public:
+        /// Native guarded pointer type
+        typedef hp::hazard_ptr guarded_pointer;
+
+        /// Atomic reference
+        template <typename T> using atomic_ref = atomics::atomic<T *>;
+
+        /// Atomic marked pointer
+        template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
+
+        /// Atomic type
+        template <typename T> using atomic_type = atomics::atomic<T>;
+
+        /// Exception "Not enough Hazard Pointer"
+        typedef hp::not_enought_hazard_ptr not_enought_hazard_ptr_exception;
+
+        /// Internal statistics
+        typedef hp::stat stat;
+
+        /// Hazard Pointer guard
+        /**
+            A guard is a hazard pointer.
+            Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer.
+
+            \p %Guard object is movable but not copyable.
+
+            The guard object can be in two states:
+            - unlinked - the guard is not linked with any internal hazard pointer.
+              In this state no operation except \p link() and move assignment is supported.
+            - linked (default) - the guard allocates an internal hazard pointer and is fully operable.
+
+            For performance reasons the implementation does not check the state of the guard at runtime.
+
+            @warning Move assignment transfers the guard to the unlinked state, use with care.
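+
+            A minimal usage sketch (the \p Foo type and the shared atomic pointer are illustrative only):
+            \code
+                atomics::atomic<Foo*> pShared;           // a pointer shared between threads
+
+                {
+                    cds::gc::HP::Guard guard;            // allocates a hazard pointer (linked state)
+                    Foo* p = guard.protect( pShared );   // p cannot be reclaimed while the guard holds it
+                    // ... safely dereference p ...
+                }   // the guard is destroyed; p may be reclaimed after this point
+            \endcode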
+        */
+        class Guard
+        {
+        public:
+            /// Default ctor allocates a guard (hazard pointer) from thread-private storage
+            /**
+                @warning Can throw \p not_enought_hazard_ptr_exception if internal hazard pointer objects are exhausted.
+            */
+            Guard()
+                : guard_( hp::smr::tls()->hazards_.alloc() )
+            {}
+
+            /// Initializes an unlinked guard, i.e. the guard contains no hazard pointer. Used for move semantics support
+            explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
+                : guard_( nullptr )
+            {}
+
+            /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
+            Guard( Guard&& src ) CDS_NOEXCEPT
+                : guard_( src.guard_ )
+            {
+                src.guard_ = nullptr;
+            }
+
+            /// Move assignment: the internal guards are swapped between \p src and \p this
+            /**
+                @warning \p src will be left in the unlinked state if \p this was unlinked on entry.
+            */
+            Guard& operator=( Guard&& src ) CDS_NOEXCEPT
+            {
+                std::swap( guard_, src.guard_ );
+                return *this;
+            }
+
+            /// Copy ctor is prohibited - the guard is not copyable
+            Guard( Guard const& ) = delete;
+
+            /// Copy assignment is prohibited
+            Guard& operator=( Guard const& ) = delete;
+
+            /// Frees the internal hazard pointer if the guard is in linked state
+            ~Guard()
+            {
+                unlink();
+            }
+
+            /// Checks if the guard object is linked with an internal hazard pointer
+            bool is_linked() const
+            {
+                return guard_ != nullptr;
+            }
+
+            /// Links the guard with an internal hazard pointer if the guard is in the unlinked state
+            /**
+                @warning Can throw \p not_enought_hazard_ptr_exception if internal hazard pointer array is exhausted.
+            */
+            void link()
+            {
+                if ( !guard_ )
+                    guard_ = hp::smr::tls()->hazards_.alloc();
+            }
+
+            /// Unlinks the guard from its internal hazard pointer; the guard transitions to the unlinked state
+            void unlink()
+            {
+                if ( guard_ ) {
+                    hp::smr::tls()->hazards_.free( guard_ );
+                    guard_ = nullptr;
+                }
+            }
+
+            /// Protects a pointer of type \p atomic<T*>
+            /**
+                Return the value of \p toGuard
+
+                The function tries to load \p toGuard and to store it
+                to the HP slot repeatedly until the guard's value equals \p toGuard
+
+                @warning The guard object should be in the linked state, otherwise the result is undefined
+            */
+            template <typename T>
+            T protect( atomics::atomic<T> const& toGuard )
+            {
+                assert( guard_ != nullptr );
+
+                T pCur = toGuard.load(atomics::memory_order_acquire);
+                T pRet;
+                do {
+                    pRet = assign( pCur );
+                    pCur = toGuard.load(atomics::memory_order_acquire);
+                } while ( pRet != pCur );
+                return pCur;
+            }
+
+            /// Protects a converted pointer of type \p atomic<T*>
+            /**
+                Return the value of \p toGuard
+
+                The function tries to load \p toGuard and to store result of \p f functor
+                to the HP slot repeatedly until the guard's value equals \p toGuard.
+
+                The function is useful for intrusive containers when \p toGuard is a node pointer
+                that should be converted to a pointer to the value before protecting.
+                The parameter \p f of type Func is a functor that makes this conversion:
+                \code
+                    struct functor {
+                        value_type * operator()( T * p );
+                    };
+                \endcode
+                Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
+
+                @warning The guard object should be in the linked state, otherwise the result is undefined
+            */
+            template <typename T, class Func>
+            T protect( atomics::atomic<T> const& toGuard, Func f )
+            {
+                assert( guard_ != nullptr );
+
+                T pCur = toGuard.load(atomics::memory_order_acquire);
+                T pRet;
+                do {
+                    pRet = pCur;
+                    assign( f( pCur ));
+                    pCur = toGuard.load(atomics::memory_order_acquire);
+                } while ( pRet != pCur );
+                return pCur;
+            }
+
+            /// Store \p p to the guard
+            /**
+                The function is equivalent to a simple assignment of \p p to the guard; no loop is performed.
+                Can be used for a pointer that cannot be changed concurrently or if the pointer is already
+                guarded by another guard.
+
+                @warning The guard object should be in the linked state, otherwise the result is undefined
+            */
+            template <typename T>
+            T * assign( T* p )
+            {
+                assert( guard_ != nullptr );
+
+                guard_->set( p );
+                hp::smr::tls()->sync();
+                return p;
+            }
+
+            //@cond
+            std::nullptr_t assign( std::nullptr_t )
+            {
+                assert( guard_ != nullptr );
+
+                guard_->clear();
+                return nullptr;
+            }
+            //@endcond
+
+            /// Copy a value guarded from \p src guard to \p this guard (valid only in linked state)
+            void copy( Guard const& src )
+            {
+                assign( src.get_native());
+            }
+
+            /// Store marked pointer \p p to the guard
+            /**
+                The function is equivalent to a simple assignment of <tt>p.ptr()</tt>; no loop is performed.
+                Can be used for a marked pointer that cannot be changed concurrently or if the marked pointer
+                is already guarded by another guard.
+
+                @warning The guard object should be in linked state, otherwise the result is undefined
+            */
+            template <typename T, int BITMASK>
+            T * assign( cds::details::marked_ptr<T, BITMASK> p )
+            {
+                return assign( p.ptr());
+            }
+
+            /// Clear value of the guard (valid only in linked state)
+            void clear()
+            {
+                assign( nullptr );
+            }
+
+            /// Get the value currently protected (valid only in linked state)
+            template <typename T>
+            T * get() const
+            {
+                assert( guard_ != nullptr );
+                return guard_->get_as<T>();
+            }
+
+            /// Get native hazard pointer stored (valid only in linked state)
+            guarded_pointer get_native() const
+            {
+                assert( guard_ != nullptr );
+                return guard_->get();
+            }
+
+            //@cond
+            hp::guard* release()
+            {
+                hp::guard* g = guard_;
+                guard_ = nullptr;
+                return g;
+            }
+
+            hp::guard*& guard_ref()
+            {
+                return guard_;
+            }
+            //@endcond
+
+        private:
+            //@cond
+            hp::guard* guard_;
+            //@endcond
+        };
+
+        /// Array of Hazard Pointer guards
+        /**
+            The class is intended for allocating an array of hazard pointer guards.
+            Template parameter \p Count defines the size of the array.
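+
+            A minimal usage sketch (the \p Foo type and the shared atomic pointers are illustrative only):
+            \code
+                atomics::atomic<Foo*> pFirst, pSecond;   // pointers shared between threads
+
+                cds::gc::HP::GuardArray<2> guards;       // allocates two hazard pointers
+                Foo* p1 = guards.protect( 0, pFirst );   // slot 0 protects pFirst
+                Foo* p2 = guards.protect( 1, pSecond );  // slot 1 protects pSecond
+                // ... p1 and p2 cannot be reclaimed while the array is alive ...
+            \endcode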
+        */
+        template <size_t Count>
+        class GuardArray
+        {
+        public:
+            /// Rebind array for other size \p Count2
+            template <size_t Count2>
+            struct rebind {
+                typedef GuardArray<Count2>  other;   ///< rebinding result
+            };
+
+            /// Array capacity
+            static CDS_CONSTEXPR const size_t c_nCapacity = Count;
+
+        public:
+            /// Default ctor allocates \p Count hazard pointers
+            GuardArray()
+            {
+                hp::smr::tls()->hazards_.alloc( guards_ );
+            }
+
+            /// Move ctor is prohibited
+            GuardArray( GuardArray&& ) = delete;
+
+            /// Move assignment is prohibited
+            GuardArray& operator=( GuardArray&& ) = delete;
+
+            /// Copy ctor is prohibited
+            GuardArray( GuardArray const& ) = delete;
+
+            /// Copy assignment is prohibited
+            GuardArray& operator=( GuardArray const& ) = delete;
+
+            /// Frees allocated hazard pointers
+            ~GuardArray()
+            {
+                hp::smr::tls()->hazards_.free( guards_ );
+            }
+
+            /// Protects a pointer of type \p atomic<T*>
+            /**
+                Return the value of \p toGuard
+
+                The function tries to load \p toGuard and to store it
+                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
+            */
+            template <typename T>
+            T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
+            {
+                assert( nIndex < capacity());
+
+                T pRet;
+                do {
+                    pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
+                } while ( pRet != toGuard.load(atomics::memory_order_acquire));
+
+                return pRet;
+            }
+
+            /// Protects a pointer of type \p atomic<T*>
+            /**
+                Return the value of \p toGuard
+
+                The function tries to load \p toGuard and to store it
+                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
+
+                The function is useful for intrusive containers when \p toGuard is a node pointer
+                that should be converted to a pointer to the value type before guarding.
+                The parameter \p f of type Func is a functor that makes this conversion:
+                \code
+                    struct functor {
+                        value_type * operator()( T * p );
+                    };
+                \endcode
+                Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
+            */
+            template <typename T, class Func>
+            T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
+            {
+                assert( nIndex < capacity());
+
+                T pRet;
+                do {
+                    assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
+                } while ( pRet != toGuard.load(atomics::memory_order_acquire));
+
+                return pRet;
+            }
+
+            /// Store \p p to the slot \p nIndex
+            /**
+                The function is equivalent to a simple assignment; no loop is performed.
+            */
+            template <typename T>
+            T * assign( size_t nIndex, T * p )
+            {
+                assert( nIndex < capacity() );
+
+                guards_.set( nIndex, p );
+                hp::smr::tls()->sync();
+                return p;
+            }
+
+            /// Store marked pointer \p p to the guard
+            /**
+                The function is equivalent to a simple assignment of <tt>p.ptr()</tt>; no loop is performed.
+                Can be used for a marked pointer that cannot be changed concurrently.
+            */
+            template <typename T, int BITMASK>
+            T * assign( size_t nIndex, cds::details::marked_ptr<T, BITMASK> p )
+            {
+                return assign( nIndex, p.ptr());
+            }
+
+            /// Copy guarded value from \p src guard to slot at index \p nIndex
+            void copy( size_t nIndex, Guard const& src )
+            {
+                assign( nIndex, src.get_native());
+            }
+
+            /// Copy guarded value from slot \p nSrcIndex to the slot \p nDestIndex
+            void copy( size_t nDestIndex, size_t nSrcIndex )
+            {
+                assign( nDestIndex, get_native( nSrcIndex ));
+            }
+
+            /// Clear value of the slot \p nIndex
+            void clear( size_t nIndex )
+            {
+                guards_.clear( nIndex );
+            }
+
+            /// Get current value of slot \p nIndex
+            template <typename T>
+            T * get( size_t nIndex ) const
+            {
+                assert( nIndex < capacity() );
+                return guards_[nIndex]->template get_as<T>();
+            }
+
+            /// Get native hazard pointer stored
+            guarded_pointer get_native( size_t nIndex ) const
+            {
+                assert( nIndex < capacity());
+                return guards_[nIndex]->get();
+            }
+
+            //@cond
+            hp::guard* release( size_t nIndex ) CDS_NOEXCEPT
+            {
+                return guards_.release( nIndex );
+            }
+            //@endcond
+
+            /// Capacity of the guard array
+            static CDS_CONSTEXPR size_t capacity()
+            {
+                return c_nCapacity;
+            }
+
+        private:
+            //@cond
+            hp::guard_array<c_nCapacity> guards_;
+            //@endcond
+        };
+
+        /// Guarded pointer
+        /**
+            A guarded pointer is a pair of a pointer and a GC guard.
+            Usually, it is used for returning a pointer to an element of a lock-free container.
+            The guard prevents the pointer from being disposed (freed) prematurely by the SMR.
+            After the \p %guarded_ptr object is destroyed, the pointer can be disposed (freed) automatically at any time.
+
+            Template arguments:
+            - \p GuardedType - a type which the guard stores
+            - \p ValueType - a value type
+            - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
+
+            For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
+            In such case the \p %guarded_ptr is:
+            @code
+            typedef cds::gc::HP::guarded_ptr< foo > intrusive_guarded_ptr;
+            @endcode
+
+            For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
+            For example:
+            @code
+            struct foo {
+                int const   key;
+                std::string value;
+            };
+
+            struct value_accessor {
+                std::string* operator()( foo* pFoo ) const
+                {
+                    return &(pFoo->value);
+                }
+            };
+
+            // Guarded ptr
+            typedef cds::gc::HP::guarded_ptr< foo, std::string, value_accessor > nonintrusive_guarded_ptr;
+            @endcode
+
+            You don't need to use this class directly.
+            All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with an appropriate casting functor.
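+
+            A typical usage sketch (assuming a hypothetical HP-based map \p theMap whose \p get() returns a \p %guarded_ptr):
+            \code
+                // theMap.get( key ) returns a guarded_ptr to the found element (empty if not found)
+                auto gp = theMap.get( 5 );
+                if ( gp ) {
+                    // the element is protected from reclamation while gp is alive
+                    process( *gp );   // process() is a hypothetical user function
+                }
+                // gp is destroyed here; the element may be disposed by HP SMR at any time
+            \endcode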
+        */
+        template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
+        class guarded_ptr
+        {
+            //@cond
+            struct trivial_cast {
+                ValueType * operator()( GuardedType * p ) const
+                {
+                    return p;
+                }
+            };
+
+            template <typename GT, typename VT, typename C> friend class guarded_ptr;
+            //@endcond
+
+        public:
+            typedef GuardedType guarded_type; ///< Guarded type
+            typedef ValueType   value_type;   ///< Value type
+
+            /// Functor for casting \p guarded_type to \p value_type
+            typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
+
+        public:
+            /// Creates empty guarded pointer
+            guarded_ptr() CDS_NOEXCEPT
+                : guard_(nullptr)
+            {}
+
+            //@cond
+            explicit guarded_ptr( hp::guard* g ) CDS_NOEXCEPT
+                : guard_( g )
+            {}
+
+            /// Initializes guarded pointer with \p p
+            explicit guarded_ptr( guarded_type* p ) CDS_NOEXCEPT
+                : guard_( nullptr )
+            {
+                reset(p);
+            }
+            explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
+                : guard_( nullptr )
+            {}
+            //@endcond
+
+            /// Move ctor
+            guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
+                : guard_( gp.guard_ )
+            {
+                gp.guard_ = nullptr;
+            }
+
+            /// Move ctor
+            template <typename GT, typename VT, typename C>
+            guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
+                : guard_( gp.guard_ )
+            {
+                gp.guard_ = nullptr;
+            }
+
+            /// Ctor from \p Guard
+            explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
+                : guard_( g.release())
+            {}
+
+            /// The guarded pointer is not copy-constructible
+            guarded_ptr( guarded_ptr const& gp ) = delete;
+
+            /// Clears the guarded pointer
+            /**
+                \ref release() is called if the guarded pointer is not \ref empty()
+            */
+            ~guarded_ptr() CDS_NOEXCEPT
+            {
+                release();
+            }
+
+            /// Move-assignment operator
+            guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
+            {
+                std::swap( guard_, gp.guard_ );
+                return *this;
+            }
+
+            /// Move-assignment from \p Guard
+            guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
+            {
+                std::swap( guard_, g.guard_ref());
+                return *this;
+            }
+
+            /// The guarded pointer is not copy-assignable
+            guarded_ptr& operator=(guarded_ptr const& gp) = delete;
+
+            /// Returns a pointer to guarded value
+            value_type * operator ->() const CDS_NOEXCEPT
+            {
+                assert( !empty());
+                return value_cast()( guard_->get_as<guarded_type>());
+            }
+
+            /// Returns a reference to guarded value
+            value_type& operator *() CDS_NOEXCEPT
+            {
+                assert( !empty());
+                return *value_cast()( guard_->get_as<guarded_type>());
+            }
+
+            /// Returns const reference to guarded value
+            value_type const& operator *() const CDS_NOEXCEPT
+            {
+                assert( !empty());
+                return *value_cast()( guard_->get_as<guarded_type>());
+            }
+
+            /// Checks if the guarded pointer is \p nullptr
+            bool empty() const CDS_NOEXCEPT
+            {
+                return !guard_ || guard_->get( atomics::memory_order_relaxed ) == nullptr;
+            }
+
+            /// \p bool operator returns <tt>!empty()</tt>
+            explicit operator bool() const CDS_NOEXCEPT
+            {
+                return !empty();
+            }
+
+            /// Clears guarded pointer
+            /**
+                If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
+                Dereferencing the guarded pointer after \p release() is dangerous.
+            */
+            void release() CDS_NOEXCEPT
+            {
+                free_guard();
+            }
+
+            //@cond
+            // For internal use only!!!
+            void reset(guarded_type * p) CDS_NOEXCEPT
+            {
+                alloc_guard();
+                assert( guard_ );
+                guard_->set(p);
+            }
+            //@endcond
+
+        private:
+            //@cond
+            void alloc_guard()
+            {
+                if ( !guard_ )
+                    guard_ = hp::smr::tls()->hazards_.alloc();
+            }
+
+            void free_guard()
+            {
+                if ( guard_ ) {
+                    hp::smr::tls()->hazards_.free( guard_ );
+                    guard_ = nullptr;
+                }
+            }
+            //@endcond
+
+        private:
+            //@cond
+            hp::guard* guard_;
+            //@endcond
+        };
+
+    public:
+        /// \p scan() type
+        enum class scan_type {
+            classic = hp::classic,    ///< classic scan as described in Michael's papers
+            inplace = hp::inplace     ///< inplace scan without allocation
+        };
+
+        /// Initializes %HP singleton
+        /**
+            The constructor initializes the Hazard Pointer SMR singleton with the passed parameters.
+            If the instance does not yet exist then the function creates the instance.
+            Otherwise it does nothing.
+
+            Michael's %HP reclamation schema depends on three parameters:
+            - \p nHazardPtrCount - hazard pointer count per thread. Usually it is a small number (up to 10) depending on
+                the data structure algorithms. If \p nHazardPtrCount = 0, the default value 8 is used
+            - \p nMaxThreadCount - max count of threads using the Hazard Pointer GC in your application. Default is 100.
+            - \p nMaxRetiredPtrCount - capacity of the array of retired pointers for each thread. Must be greater than
+                <tt> nHazardPtrCount * nMaxThreadCount </tt>. Default is <tt>2 * nHazardPtrCount * nMaxThreadCount </tt>.
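+
+            For example (a sketch; the concrete values are illustrative only):
+            \code
+                // up to 16 hazard pointers per thread, at most 32 threads,
+                // default retired-array capacity, default (in-place) scan type
+                cds::gc::HP hpGC( 16, 32 );
+            \endcode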
+        */
+        HP(
+            size_t nHazardPtrCount = 0,     ///< Hazard pointer count per thread
+            size_t nMaxThreadCount = 0,     ///< Max count of simultaneously working threads in your application
+            size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
+            scan_type nScanType = scan_type::inplace   ///< Scan type (see \p scan_type enum)
+        )
+        {
+            hp::smr::construct(
+                nHazardPtrCount,
+                nMaxThreadCount,
+                nMaxRetiredPtrCount,
+                static_cast<hp::scan_type>(nScanType)
+            );
+        }
+
+        /// Terminates GC singleton
+        /**
+            The destructor destroys the global %HP object. After calling this function you may \b NOT
+            use CDS data structures based on \p %cds::gc::HP.
+            Usually, the %HP object is destroyed at the end of your \p main().
+        */
+        ~HP()
+        {
+            hp::smr::destruct( true );
+        }
+
+        /// Checks that the required hazard pointer count \p nCountNeeded is less than or equal to the max hazard pointer count
+        /**
+            If <tt> nCountNeeded > get_hazard_ptr_count()</tt> then a \p not_enought_hazard_ptr exception is thrown
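+
+            For example, a data structure that needs 4 hazard pointers per operation can verify
+            the HP configuration at startup (the required count is illustrative only):
+            \code
+                cds::gc::HP::check_available_guards( 4 );
+            \endcode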
+        */
+        static void check_available_guards( size_t nCountNeeded )
+        {
+            hp::smr::check_hazard_ptr_count( nCountNeeded );
+        }
+
+        /// Set memory management functions
+        /**
+            @note This function may be called <b>BEFORE</b> creating an instance
+            of Hazard Pointer SMR
+
+            SMR object allocates some memory for thread-specific data and for
+            creating SMR object.
+            By default, a standard \p new and \p delete operators are used for this.
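+
+            A usage sketch (the wrapper functions are illustrative only):
+            \code
+                static void* hp_alloc( size_t size ) { return std::malloc( size ); }
+                static void  hp_free( void* p )      { std::free( p ); }
+
+                // must be called before the cds::gc::HP singleton is constructed
+                cds::gc::HP::set_memory_allocator( hp_alloc, hp_free );
+                cds::gc::HP hpGC;
+            \endcode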
+        */
+        static void set_memory_allocator(
+            void* ( *alloc_func )( size_t size ),   ///< \p malloc() function
+            void( *free_func )( void * p )          ///< \p free() function
+        )
+        {
+            hp::smr::set_memory_allocator( alloc_func, free_func );
+        }
+
+        /// Returns max Hazard Pointer count
+        static size_t max_hazard_count()
+        {
+            return hp::smr::instance().get_hazard_ptr_count();
+        }
+
+        /// Returns max count of thread
+        static size_t max_thread_count()
+        {
+            return hp::smr::instance().get_max_thread_count();
+        }
+
+        /// Returns capacity of retired pointer array
+        static size_t retired_array_capacity()
+        {
+            return hp::smr::instance().get_max_retired_ptr_count();
+        }
+
+        /// Retire pointer \p p with function \p func
+        /**
+            The function places the pointer \p p into the array of pointers ready for removing
+            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
+            \p func is a disposer: when \p p can be safely removed, \p func is called.
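+
+            A usage sketch (the \p Foo type and its disposing function are illustrative only):
+            \code
+                struct Foo { /* ... */ };
+                void dispose_foo( Foo* p ) { delete p; }
+
+                Foo* p = new Foo;
+                // ... use p in a lock-free manner, then logically remove it from the container ...
+                cds::gc::HP::retire( p, dispose_foo );  // p is deleted when no hazard pointer guards it
+            \endcode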
+        */
+        template <typename T>
+        static void retire( T * p, void( *func )( T * ))
+        {
+            hp::thread_data* rec = hp::smr::tls();
+            if ( !rec->retired_.push( hp::retired_ptr( p, func )))
+                hp::smr::instance().scan( rec );
+        }
+
+        /// Retire pointer \p p with functor of type \p Disposer
+        /**
+            The function places the pointer \p p into the array of pointers ready for removing
+            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
+
+            Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
+            \code
+            template <typename T>
+            struct disposer {
+                void operator()( T * p )    ;   // disposing operator
+            };
+            \endcode
+            Since the functor call can happen at any time after the \p retire() call, additional restrictions are imposed on the \p Disposer type:
+            - it should be a stateless functor
+            - it should be default-constructible
+            - the result of the functor call with argument \p p should not depend on where the functor is called.
+
+            \par Examples:
+            Operator \p delete functor:
+            \code
+            template <typename T>
+            struct disposer {
+                void operator ()( T * p ) {
+                    delete p;
+                }
+            };
+
+            // How to call HP::retire method
+            int * p = new int;
+
+            // ... use p in lock-free manner
+
+            cds::gc::HP::retire<disposer>( p ) ;   // place p to retired pointer array of HP GC
+            \endcode
+
+            Functor based on \p std::allocator :
+            \code
+            template <typename Alloc = std::allocator<int> >
+            struct disposer {
+                template <typename T>
+                void operator()( T * p ) {
+                    typedef typename Alloc::template rebind<T>::other alloc_t;
+                    alloc_t a;
+                    a.destroy( p );
+                    a.deallocate( p, 1 );
+                }
+            };
+            \endcode
+        */
+        template <class Disposer, typename T>
+        static void retire( T * p )
+        {
+            if ( !hp::smr::tls()->retired_.push( hp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
+                scan();
+        }
+
+        /// Get current scan strategy
+        static scan_type getScanType()
+        {
+            return static_cast<scan_type>( hp::smr::instance().get_scan_type());
+        }
+
+        /// Checks if Hazard Pointer GC is constructed and may be used
+        static bool isUsed()
+        {
+            return hp::smr::isUsed();
+        }
+
+        /// Forces SMR call for current thread
+        /**
+            Usually, this function should not be called directly.
+        */
+        static void scan()
+        {
+            hp::smr::instance().scan( hp::smr::tls());
+        }
+
+        /// Synonym for \p scan()
+        static void force_dispose()
+        {
+            scan();
+        }
+
+        /// Returns internal statistics
+        /**
+            The function clears \p st before gathering statistics.
+
+            @note Internal statistics is available only if you compile
+            \p libcds and your program with the \p -DCDS_ENABLE_HPSTAT key.
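+
+            For example (a sketch; the \p scan_count field is one of the \p hp::stat members used in
+            \p postmortem_statistics() below):
+            \code
+                cds::gc::HP::stat st;
+                cds::gc::HP::statistics( st );
+                printf( "scan() call count = %llu\n", st.scan_count );
+            \endcode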
+        */
+        static void statistics( stat& st )
+        {
+            hp::smr::instance().statistics( st );
+        }
+
+        /// Returns post-mortem statistics
+        /**
+            Post-mortem statistics is gathered in the \p %HP object destructor
+            and is accessible after the global \p %HP object has been destroyed.
+
+            @note Internal statistics is available only if you compile
+            \p libcds and your program with the \p -DCDS_ENABLE_HPSTAT key.
+
+            Usage:
+            \code
+            int main()
+            {
+                cds::Initialize();
+                {
+                    // Initialize HP SMR
+                    cds::gc::HP hp;
+
+                    // deal with HP-based data structures
+                    // ...
+                }
+
+                // HP object destroyed
+                // Get total post-mortem statistics
+                cds::gc::HP::stat const& st = cds::gc::HP::postmortem_statistics();
+
+                printf( "HP statistics:\n"
+                    "\tthread count           = %llu\n"
+                    "\tguard allocated        = %llu\n"
+                    "\tguard freed            = %llu\n"
+                    "\tretired data count     = %llu\n"
+                    "\tfree data count        = %llu\n"
+                    "\tscan() call count      = %llu\n"
+                    "\thelp_scan() call count = %llu\n",
+                    st.thread_rec_count,
+                    st.guard_allocated, st.guard_freed,
+                    st.retired_count, st.free_count,
+                    st.scan_count, st.help_scan_count
+                );
+
+                cds::Terminate();
+            }
+            \endcode
+        */
+        static stat const& postmortem_statistics();
+    };
+
+}} // namespace cds::gc
+
+#endif // #ifndef CDSLIB_GC_HP_SMR_H
+
diff --git a/cds/gc/impl/dhp_decl.h b/cds/gc/impl/dhp_decl.h
deleted file mode 100644 (file)
index c87a4e0..0000000
+++ /dev/null
@@ -1,823 +0,0 @@
-/*
-    This file is a part of libcds - Concurrent Data Structures library
-
-    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
-
-    Source code repo: http://github.com/khizmax/libcds/
-    Download: http://sourceforge.net/projects/libcds/files/
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are met:
-
-    * Redistributions of source code must retain the above copyright notice, this
-      list of conditions and the following disclaimer.
-
-    * Redistributions in binary form must reproduce the above copyright notice,
-      this list of conditions and the following disclaimer in the documentation
-      and/or other materials provided with the distribution.
-
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef CDSLIB_GC_IMPL_DHP_DECL_H
-#define CDSLIB_GC_IMPL_DHP_DECL_H
-
-#include <cds/gc/details/dhp.h>
-#include <cds/details/marked_ptr.h>
-#include <cds/details/static_functor.h>
-
-namespace cds { namespace gc {
-
-    /// Dynamic Hazard Pointer garbage collector
-    /**  @ingroup cds_garbage_collector
-        @headerfile cds/gc/dhp.h
-
-        Implementation of Dynamic Hazard Pointer garbage collector.
-
-        Sources:
-            - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
-            - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
-            - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
-
-        Dynamic Hazard Pointers SMR (safe memory reclamation) provides an unbounded number of hazard pointers per thread,
-        unlike classic Hazard Pointer SMR in which the count of hazard pointers per thread is limited.
-
-        See \ref cds_how_to_use "How to use" section for details how to apply garbage collector.
-    */
-    class DHP
-    {
-    public:
-        /// Native guarded pointer type
-        /**
-            @headerfile cds/gc/dhp.h
-        */
-        typedef void * guarded_pointer;
-
-        /// Atomic reference
-        /**
-            @headerfile cds/gc/dhp.h
-        */
-        template <typename T> using atomic_ref = atomics::atomic<T *>;
-
-        /// Atomic type
-        /**
-            @headerfile cds/gc/dhp.h
-        */
-        template <typename T> using atomic_type = atomics::atomic<T>;
-
-        /// Atomic marked pointer
-        /**
-            @headerfile cds/gc/dhp.h
-        */
-        template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
-
-        /// Thread GC implementation for internal usage
-        /**
-            @headerfile cds/gc/dhp.h
-        */
-        typedef dhp::ThreadGC   thread_gc_impl;
-
-        /// Thread-level garbage collector
-        /**
-            @headerfile cds/gc/dhp.h
-            This class performs automatically attaching/detaching Dynamic Hazard Pointer GC
-            for the current thread.
-        */
-        class thread_gc: public thread_gc_impl
-        {
-            //@cond
-            bool    m_bPersistent;
-            //@endcond
-        public:
-            /// Constructor
-            /**
-                The constructor attaches the current thread to the Dynamic Hazard Pointer GC
-                if it is not yet attached.
-                The \p bPersistent parameter specifies attachment persistence:
-                - \p true - the class destructor will not detach the thread from Dynamic Hazard Pointer GC.
-                - \p false (default) - the class destructor will detach the thread from Dynamic Hazard Pointer GC.
-            */
-            thread_gc(
-                bool    bPersistent = false
-            )   ;   // inline in dhp_impl.h
-
-            /// Destructor
-            /**
-                If the object has been created in persistent mode, the destructor does nothing.
-                Otherwise it detaches the current thread from Dynamic Hazard Pointer GC.
-            */
-            ~thread_gc()    ;   // inline in dhp_impl.h
-
-        public: // for internal use only!!!
-            //@cond
-            static dhp::details::guard_data* alloc_guard(); // inline in dhp_impl.h
-            static void free_guard( dhp::details::guard_data* g ); // inline in dhp_impl.h
-            //@endcond
-        };
-
-
-        /// Dynamic Hazard Pointer guard
-        /**
-            @headerfile cds/gc/dhp.h
-
-            A guard is the hazard pointer.
-            Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer
-
-            \p %Guard object is movable but not copyable.
-
-            The guard object can be in two states:
-            - unlinked - the guard is not linked with any internal hazard pointer.
-              In this state no operation except \p link() and move assignment is supported.
-            - linked (default) - the guard allocates an internal hazard pointer and fully operable.
-
-            Due to performance reason the implementation does not check state of the guard in runtime.
-
-            @warning Move assignment can transfer the guard in unlinked state, use with care.
-        */
-        class Guard
-        {
-        public:
-            /// Default ctor allocates a guard (hazard pointer) from thread-private storage
-            Guard() CDS_NOEXCEPT
-                : m_guard( thread_gc::alloc_guard())
-            {}
-
-            /// Initializes an unlinked guard, i.e. the guard contains no hazard pointer. Used for move semantics support
-            explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
-                : m_guard( nullptr )
-            {}
-
-            /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
-            Guard( Guard&& src ) CDS_NOEXCEPT
-                : m_guard( src.m_guard )
-            {
-                src.m_guard = nullptr;
-            }
-
-            /// Move assignment: the internal guards are swapped between \p src and \p this
-            /**
-                @warning \p src will become in unlinked state if \p this was unlinked on entry.
-            */
-            Guard& operator=( Guard&& src ) CDS_NOEXCEPT
-            {
-                std::swap( m_guard, src.m_guard );
-                return *this;
-            }
-
-            /// Copy ctor is prohibited - the guard is not copyable
-            Guard( Guard const& ) = delete;
-
-            /// Copy assignment is prohibited
-            Guard& operator=( Guard const& ) = delete;
-
-            ~Guard()
-            {
-                if ( m_guard )
-                    thread_gc::free_guard( m_guard );
-            }
-
-            /// Checks if the guard object linked with any internal hazard pointer
-            bool is_linked() const
-            {
-                return m_guard != nullptr;
-            }
-
-            /// Links the guard with internal hazard pointer if the guard is in unlinked state
-            void link()
-            {
-                if ( !m_guard )
-                    m_guard = thread_gc::alloc_guard();
-            }
-
-            /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state
-            void unlink()
-            {
-                if ( m_guard ) {
-                    thread_gc::free_guard( m_guard );
-                    m_guard = nullptr;
-                }
-            }
-
-            /// Protects a pointer of type <tt> atomic<T*> </tt>
-            /**
-                Return the value of \p toGuard
-
-                The function tries to load \p toGuard and to store it
-                to the HP slot repeatedly until the guard's value equals \p toGuard
-            */
-            template <typename T>
-            T protect( atomics::atomic<T> const& toGuard )
-            {
-                T pCur = toGuard.load(atomics::memory_order_acquire);
-                T pRet;
-                do {
-                    pRet = assign( pCur );
-                    pCur = toGuard.load(atomics::memory_order_acquire);
-                } while ( pRet != pCur );
-                return pCur;
-            }
-
-            /// Protects a converted pointer of type <tt> atomic<T*> </tt>
-            /**
-                Return the value of \p toGuard
-
-                The function tries to load \p toGuard and to store result of \p f functor
-                to the HP slot repeatedly until the guard's value equals \p toGuard.
-
-                The function is useful for intrusive containers when \p toGuard is a node pointer
-                that should be converted to a pointer to the value type before guarding.
-                The parameter \p f of type Func is a functor that makes this conversion:
-                \code
-                    struct functor {
-                        value_type * operator()( T * p );
-                    };
-                \endcode
-                Really, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
-            */
-            template <typename T, class Func>
-            T protect( atomics::atomic<T> const& toGuard, Func f )
-            {
-                T pCur = toGuard.load(atomics::memory_order_acquire);
-                T pRet;
-                do {
-                    pRet = pCur;
-                    assign( f( pCur ));
-                    pCur = toGuard.load(atomics::memory_order_acquire);
-                } while ( pRet != pCur );
-                return pCur;
-            }
-
-            /// Store \p p to the guard
-            /**
-                The function is just an assignment, no loop is performed.
-                Can be used for a pointer that cannot be changed concurrently
-                or for already guarded pointer.
-            */
-            template <typename T>
-            T* assign( T* p )
-            {
-                assert( m_guard != nullptr );
-                m_guard->pPost.store( p, atomics::memory_order_release );
-                return p;
-            }
-
-            //@cond
-            std::nullptr_t assign( std::nullptr_t )
-            {
-                clear();
-                return nullptr;
-            }
-            //@endcond
-
-            /// Store marked pointer \p p to the guard
-            /**
-                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
-                Can be used for a marked pointer that cannot be changed concurrently
-                or for already guarded pointer.
-            */
-            template <typename T, int BITMASK>
-            T* assign( cds::details::marked_ptr<T, BITMASK> p )
-            {
-                return assign( p.ptr());
-            }
-
-            /// Copy from \p src guard to \p this guard
-            void copy( Guard const& src )
-            {
-                assign( src.get_native());
-            }
-
-            /// Clears value of the guard
-            void clear()
-            {
-                assert( m_guard != nullptr );
-                m_guard->pPost.store( nullptr, atomics::memory_order_release );
-            }
-
-            /// Gets the value currently protected (relaxed read)
-            template <typename T>
-            T * get() const
-            {
-                return reinterpret_cast<T *>( get_native());
-            }
-
-            /// Gets native guarded pointer stored
-            void* get_native() const
-            {
-                assert( m_guard != nullptr );
-                return m_guard->pPost.load( atomics::memory_order_acquire );
-            }
-
-            //@cond
-            dhp::details::guard_data* release()
-            {
-                dhp::details::guard_data* g = m_guard;
-                m_guard = nullptr;
-                return g;
-            }
-
-            dhp::details::guard_data*& guard_ref()
-            {
-                return m_guard;
-            }
-            //@endcond
-
-        private:
-            //@cond
-            dhp::details::guard_data* m_guard;
-            //@endcond
-        };
-
-        /// Array of Dynamic Hazard Pointer guards
-        /**
-            @headerfile cds/gc/dhp.h
-            The class is intended for allocating an array of hazard pointer guards.
-            Template parameter \p Count defines the size of the array.
-
-            A \p %GuardArray object is not copy- and move-constructible
-            and not copy- and move-assignable.
-        */
-        template <size_t Count>
-        class GuardArray
-        {
-        public:
-            /// Rebind array for other size \p OtherCount
-            template <size_t OtherCount>
-            struct rebind {
-                typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
-            };
-
-            /// Array capacity
-            static CDS_CONSTEXPR const size_t c_nCapacity = Count;
-
-        public:
-            /// Default ctor allocates \p Count hazard pointers
-            GuardArray(); // inline in dhp_impl.h
-
-            /// Move ctor is prohibited
-            GuardArray( GuardArray&& ) = delete;
-
-            /// Move assignment is prohibited
-            GuardArray& operator=( GuardArray&& ) = delete;
-
-            /// Copy ctor is prohibited
-            GuardArray( GuardArray const& ) = delete;
-
-            /// Copy assignment is prohibited
-            GuardArray& operator=( GuardArray const& ) = delete;
-
-            /// Frees allocated hazard pointers
-            ~GuardArray(); // inline in dhp_impl.h
-
-            /// Protects a pointer of type \p atomic<T*>
-            /**
-                Return the value of \p toGuard
-
-                The function tries to load \p toGuard and to store it
-                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
-            */
-            template <typename T>
-            T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
-            {
-                T pRet;
-                do {
-                    pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
-                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
-
-                return pRet;
-            }
-
-            /// Protects a pointer of type \p atomic<T*>
-            /**
-                Return the value of \p toGuard
-
-                The function tries to load \p toGuard and to store it
-                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
-
-                The function is useful for intrusive containers when \p toGuard is a node pointer
-                that should be converted to a pointer to the value type before guarding.
-                The parameter \p f of type Func is a functor to make that conversion:
-                \code
-                    struct functor {
-                        value_type * operator()( T * p );
-                    };
-                \endcode
-                Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
-            */
-            template <typename T, class Func>
-            T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
-            {
-                T pRet;
-                do {
-                    assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
-                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
-
-                return pRet;
-            }
-
-            /// Store \p p to the slot \p nIndex
-            /**
-                The function is just an assignment, no loop is performed.
-            */
-            template <typename T>
-            T * assign( size_t nIndex, T * p )
-            {
-                assert( nIndex < capacity());
-                assert( m_arr[nIndex] != nullptr );
-
-                m_arr[nIndex]->pPost.store( p, atomics::memory_order_release );
-                return p;
-            }
-
-            /// Store marked pointer \p p to the guard
-            /**
-                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
-                Can be used for a marked pointer that cannot be changed concurrently
-                or for already guarded pointer.
-            */
-            template <typename T, int Bitmask>
-            T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
-            {
-                return assign( nIndex, p.ptr());
-            }
-
-            /// Copy guarded value from \p src guard to slot at index \p nIndex
-            void copy( size_t nIndex, Guard const& src )
-            {
-                assign( nIndex, src.get_native());
-            }
-
-            /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
-            void copy( size_t nDestIndex, size_t nSrcIndex )
-            {
-                assign( nDestIndex, get_native( nSrcIndex ));
-            }
-
-            /// Clear value of the slot \p nIndex
-            void clear( size_t nIndex )
-            {
-                assert( nIndex < capacity());
-                assert( m_arr[nIndex] != nullptr );
-
-                m_arr[nIndex]->pPost.store( nullptr, atomics::memory_order_release );
-            }
-
-            /// Get current value of slot \p nIndex
-            template <typename T>
-            T * get( size_t nIndex ) const
-            {
-                return reinterpret_cast<T *>( get_native( nIndex ));
-            }
-
-            /// Get native guarded pointer stored
-            guarded_pointer get_native( size_t nIndex ) const
-            {
-                assert( nIndex < capacity());
-                assert( m_arr[nIndex] != nullptr );
-
-                return m_arr[nIndex]->pPost.load( atomics::memory_order_acquire );
-            }
-
-            //@cond
-            dhp::details::guard_data* release( size_t nIndex ) CDS_NOEXCEPT
-            {
-                assert( nIndex < capacity());
-
-                dhp::details::guard_data* ret = m_arr[ nIndex ];
-                m_arr[nIndex] = nullptr;
-                return ret;
-            }
-            //@endcond
-
-            /// Capacity of the guard array
-            static CDS_CONSTEXPR size_t capacity()
-            {
-                return Count;
-            }
-
-        private:
-            //@cond
-            dhp::details::guard_data* m_arr[c_nCapacity];
-            //@endcond
-        };
-
-        /// Guarded pointer
-        /**
-            A guarded pointer is a pair of a pointer and GC's guard.
-            Usually, it is used for returning a pointer to an item of a lock-free container.
-            The guard prevents the pointer from being disposed (freed) prematurely by the GC.
-            After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time.
-
-            Template arguments:
-            - \p GuardedType - a type which the guard stores
-            - \p ValueType - a value type
-            - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
-
-            For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
-            In such case the \p %guarded_ptr is:
-            @code
-            typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr;
-            @endcode
-
-            For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
-            For example:
-            @code
-            struct foo {
-                int const   key;
-                std::string value;
-            };
-
-            struct value_accessor {
-                std::string* operator()( foo* pFoo ) const
-                {
-                    return &(pFoo->value);
-                }
-            };
-
-            // Guarded ptr
-            typedef cds::gc::DHP::guarded_ptr< Foo, std::string, value_accessor > nonintrusive_guarded_ptr;
-            @endcode
-
-            You don't need use this class directly.
-            All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor.
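-
-            A hypothetical usage sketch (the container \p theSet, its \p get() member, \p key and \p process() are assumptions for illustration only):
-            @code
-            // suppose some_set::get() returns some_set::guarded_ptr
-            some_set::guarded_ptr gp = theSet.get( key );
-            if ( gp ) {
-                // the item pointed to by gp cannot be disposed while gp is alive
-                process( *gp );
-            }
-            // after gp is destroyed (or released) the item may be disposed by the GC at any time
-            @endcode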
-        */
-        template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
-        class guarded_ptr
-        {
-            //@cond
-            struct trivial_cast {
-                ValueType * operator()( GuardedType * p ) const
-                {
-                    return p;
-                }
-            };
-
-            template <typename GT, typename VT, typename C> friend class guarded_ptr;
-            //@endcond
-
-        public:
-            typedef GuardedType guarded_type; ///< Guarded type
-            typedef ValueType   value_type;   ///< Value type
-
-            /// Functor for casting \p guarded_type to \p value_type
-            typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
-
-        public:
-            /// Creates empty guarded pointer
-            guarded_ptr() CDS_NOEXCEPT
-                : m_guard( nullptr )
-            {}
-
-            //@cond
-            explicit guarded_ptr( dhp::details::guard_data* g ) CDS_NOEXCEPT
-                : m_guard( g )
-            {}
-
-            /// Initializes guarded pointer with \p p
-            explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
-            {
-                reset( p );
-            }
-            explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
-                : m_guard( nullptr )
-            {}
-            //@endcond
-
-            /// Move ctor
-            guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
-                : m_guard( gp.m_guard )
-            {
-                gp.m_guard = nullptr;
-            }
-
-            /// Move ctor
-            template <typename GT, typename VT, typename C>
-            guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
-                : m_guard( gp.m_guard )
-            {
-                gp.m_guard = nullptr;
-            }
-
-            /// Ctor from \p Guard
-            explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
-                : m_guard( g.release())
-            {}
-
-            /// The guarded pointer is not copy-constructible
-            guarded_ptr( guarded_ptr const& gp ) = delete;
-
-            /// Clears the guarded pointer
-            /**
-                \ref release() is called if the guarded pointer is not \ref empty()
-            */
-            ~guarded_ptr() CDS_NOEXCEPT
-            {
-                release();
-            }
-
-            /// Move-assignment operator
-            guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
-            {
-                std::swap( m_guard, gp.m_guard );
-                return *this;
-            }
-
-            /// Move-assignment from \p Guard
-            guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
-            {
-                std::swap( m_guard, g.guard_ref());
-                return *this;
-            }
-
-            /// The guarded pointer is not copy-assignable
-            guarded_ptr& operator=(guarded_ptr const& gp) = delete;
-
-            /// Returns a pointer to guarded value
-            value_type * operator ->() const CDS_NOEXCEPT
-            {
-                assert( !empty());
-                return value_cast()( reinterpret_cast<guarded_type *>(m_guard->get()));
-            }
-
-            /// Returns a reference to guarded value
-            value_type& operator *() CDS_NOEXCEPT
-            {
-                assert( !empty());
-                return *value_cast()(reinterpret_cast<guarded_type *>(m_guard->get()));
-            }
-
-            /// Returns const reference to guarded value
-            value_type const& operator *() const CDS_NOEXCEPT
-            {
-                assert( !empty());
-                return *value_cast()(reinterpret_cast<guarded_type *>(m_guard->get()));
-            }
-
-            /// Checks if the guarded pointer is \p nullptr
-            bool empty() const CDS_NOEXCEPT
-            {
-                return m_guard == nullptr || m_guard->get( atomics::memory_order_relaxed ) == nullptr;
-            }
-
-            /// \p bool operator returns <tt>!empty()</tt>
-            explicit operator bool() const CDS_NOEXCEPT
-            {
-                return !empty();
-            }
-
-            /// Clears guarded pointer
-            /**
-                If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
-                Dereferencing the guarded pointer after \p release() is dangerous.
-            */
-            void release() CDS_NOEXCEPT
-            {
-                free_guard();
-            }
-
-            //@cond
-            // For internal use only!!!
-            void reset(guarded_type * p) CDS_NOEXCEPT
-            {
-                alloc_guard();
-                assert( m_guard );
-                m_guard->set( p );
-            }
-
-            //@endcond
-
-        private:
-            //@cond
-            void alloc_guard()
-            {
-                if ( !m_guard )
-                    m_guard = thread_gc::alloc_guard();
-            }
-
-            void free_guard()
-            {
-                if ( m_guard ) {
-                    thread_gc::free_guard( m_guard );
-                    m_guard = nullptr;
-                }
-            }
-            //@endcond
-
-        private:
-            //@cond
-            dhp::details::guard_data* m_guard;
-            //@endcond
-        };
-
-    public:
-        /// Initializes %DHP memory manager singleton
-        /**
-            The constructor creates and initializes the %DHP global object.
-            The %DHP object should be created before using any CDS data structure based on \p %cds::gc::DHP GC. Usually,
-            it is created in the \p main() function.
-            After creating the global object you may use CDS data structures based on \p %cds::gc::DHP.
-
-            \par Parameters
-            - \p nLiberateThreshold - \p scan() threshold. When the count of retired pointers reaches this value,
-                the \p scan() member function is called to free retired pointers.
-            - \p nInitialThreadGuardCount - initial count of guards allocated for each thread.
-                When a thread is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
-                When needed, the thread-local guard pool grows automatically from the common pool.
-                When the thread terminates, its guard pool is returned to the GC's common pool.
-            - \p nEpochCount - internally, the DHP memory manager uses an epoch-based scheme to solve
-                the ABA problem for its internal data. \p nEpochCount specifies the epoch count,
-                i.e. the count of simultaneously working threads that remove elements
-                of DHP-based concurrent data structures. Default value is 16.
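-
-            A construction sketch (illustrative only; the values shown just repeat the defaults):
-            @code
-            int main()
-            {
-                // Create the DHP singleton before any DHP-based container is used
-                cds::gc::DHP dhpGC( 1024, 8, 16 );
-
-                // Attach the main thread to libcds infrastructure
-                cds::threading::Manager::attachThread();
-
-                // ... use cds containers based on cds::gc::DHP ...
-
-                return 0;   // the dhpGC destructor destroys the DHP singleton
-            }
-            @endcode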
-        */
-        DHP(
-            size_t nLiberateThreshold = 1024
-            , size_t nInitialThreadGuardCount = 8
-            , size_t nEpochCount = 16
-        )
-        {
-            dhp::GarbageCollector::Construct( nLiberateThreshold, nInitialThreadGuardCount, nEpochCount );
-        }
-
-        /// Destroys %DHP memory manager
-        /**
-            The destructor destroys the %DHP global object. After calling this function you may \b NOT
-            use CDS data structures based on \p %cds::gc::DHP.
-            Usually, the %DHP object is destroyed at the end of your \p main().
-        */
-        ~DHP()
-        {
-            dhp::GarbageCollector::Destruct();
-        }
-
-        /// Checks if the count of hazard pointers is no less than \p nCountNeeded
-        /**
-            The function always returns \p true since the guard count is unlimited for
-            \p %gc::DHP garbage collector.
-        */
-        static CDS_CONSTEXPR bool check_available_guards(
-#ifdef CDS_DOXYGEN_INVOKED
-            size_t nCountNeeded,
-#else
-            size_t,
-#endif
-            bool /*bRaiseException*/ = true )
-        {
-            return true;
-        }
-
-        /// Retire pointer \p p with function \p pFunc
-        /**
-            The function places pointer \p p into the array of pointers ready for removing
-            (the so-called retired pointer array). The pointer can be safely removed when no guarded pointer points to it.
-            The pointer is deleted by calling \p pFunc.
-        */
-        template <typename T>
-        static void retire( T * p, void (* pFunc)(T *))
-        {
-            dhp::GarbageCollector::instance().retirePtr( p, pFunc );
-        }
-
-        /// Retire pointer \p p with functor of type \p Disposer
-        /**
-            The function places pointer \p p into the array of pointers ready for removing
-            (the so-called retired pointer array). The pointer can be safely removed when no guarded pointer points to it.
-
-            See \p gc::HP::retire for \p Disposer requirements.
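-
-            A minimal sketch of a disposer (the type \p foo and the pointer \p pFoo are assumptions for illustration only):
-            @code
-            struct foo_disposer {
-                void operator()( foo* p ) const
-                {
-                    delete p;
-                }
-            };
-
-            // pFoo has been logically removed from a container; physical deletion
-            // is deferred until no guard protects it
-            cds::gc::DHP::retire<foo_disposer>( pFoo );
-            @endcode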
-        */
-        template <class Disposer, typename T>
-        static void retire( T * p )
-        {
-            retire( p, cds::details::static_functor<Disposer, T>::call );
-        }
-
-        /// Checks if Dynamic Hazard Pointer GC is constructed and may be used
-        static bool isUsed()
-        {
-            return dhp::GarbageCollector::isUsed();
-        }
-
-        /// Forced GC cycle call for current thread
-        /**
-            Usually, this function should not be called directly.
-        */
-        static void scan()  ;   // inline in dhp_impl.h
-
-        /// Synonym for \ref scan()
-        static void force_dispose()
-        {
-            scan();
-        }
-    };
-
-}} // namespace cds::gc
-
-#endif // #ifndef CDSLIB_GC_IMPL_DHP_DECL_H
diff --git a/cds/gc/impl/dhp_impl.h b/cds/gc/impl/dhp_impl.h
deleted file mode 100644 (file)
index 843276e..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
-    This file is a part of libcds - Concurrent Data Structures library
-
-    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
-
-    Source code repo: http://github.com/khizmax/libcds/
-    Download: http://sourceforge.net/projects/libcds/files/
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are met:
-
-    * Redistributions of source code must retain the above copyright notice, this
-      list of conditions and the following disclaimer.
-
-    * Redistributions in binary form must reproduce the above copyright notice,
-      this list of conditions and the following disclaimer in the documentation
-      and/or other materials provided with the distribution.
-
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef CDSLIB_GC_IMPL_DHP_IMPL_H
-#define CDSLIB_GC_IMPL_DHP_IMPL_H
-
-#include <cds/threading/model.h>
-
-//@cond
-namespace cds { namespace gc {
-
-    namespace dhp {
-
-        static inline ThreadGC& get_thread_gc()
-        {
-            return cds::threading::getGC<DHP>();
-        }
-
-        //inline Guard::Guard()
-        //{
-        //    cds::threading::getGC<DHP>().allocGuard( *this );
-        //}
-
-        //inline Guard::~Guard()
-        //{
-        //    cds::threading::getGC<DHP>().freeGuard( *this );
-        //}
-
-        //template <size_t Count>
-        //inline GuardArray<Count>::GuardArray()
-        //{
-        //    cds::threading::getGC<DHP>().allocGuard( *this );
-        //}
-
-        //template <size_t Count>
-        //inline GuardArray<Count>::~GuardArray()
-        //{
-        //    cds::threading::getGC<DHP>().freeGuard( *this );
-        //}
-    } // namespace dhp
-
-
-    inline DHP::thread_gc::thread_gc(
-        bool    bPersistent
-        )
-        : m_bPersistent( bPersistent )
-    {
-        if ( !cds::threading::Manager::isThreadAttached())
-            cds::threading::Manager::attachThread();
-    }
-
-    inline DHP::thread_gc::~thread_gc()
-    {
-        if ( !m_bPersistent )
-            cds::threading::Manager::detachThread();
-    }
-
-    inline /*static*/ dhp::details::guard_data* DHP::thread_gc::alloc_guard()
-    {
-        return dhp::get_thread_gc().allocGuard();
-    }
-    inline /*static*/ void DHP::thread_gc::free_guard( dhp::details::guard_data* g )
-    {
-        if ( g )
-            dhp::get_thread_gc().freeGuard( g );
-    }
-
-    template <size_t Count>
-    inline DHP::GuardArray<Count>::GuardArray()
-    {
-        dhp::get_thread_gc().allocGuard( m_arr );
-    }
-
-    template <size_t Count>
-    inline DHP::GuardArray<Count>::~GuardArray()
-    {
-        dhp::get_thread_gc().freeGuard( m_arr );
-    }
-
-    inline void DHP::scan()
-    {
-        cds::threading::getGC<DHP>().scan();
-    }
-
-}} // namespace cds::gc
-//@endcond
-
-#endif // #ifndef CDSLIB_GC_IMPL_DHP_IMPL_H
diff --git a/cds/gc/impl/hp_decl.h b/cds/gc/impl/hp_decl.h
deleted file mode 100644 (file)
index 82d21e3..0000000
+++ /dev/null
@@ -1,883 +0,0 @@
-/*
-    This file is a part of libcds - Concurrent Data Structures library
-
-    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
-
-    Source code repo: http://github.com/khizmax/libcds/
-    Download: http://sourceforge.net/projects/libcds/files/
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are met:
-
-    * Redistributions of source code must retain the above copyright notice, this
-      list of conditions and the following disclaimer.
-
-    * Redistributions in binary form must reproduce the above copyright notice,
-      this list of conditions and the following disclaimer in the documentation
-      and/or other materials provided with the distribution.
-
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef CDSLIB_GC_IMPL_HP_DECL_H
-#define CDSLIB_GC_IMPL_HP_DECL_H
-
-#include <stdexcept>    // overflow_error
-#include <cds/gc/details/hp.h>
-#include <cds/details/marked_ptr.h>
-
-namespace cds { namespace gc {
-    /// @defgroup cds_garbage_collector Garbage collectors
-
-    /// Hazard Pointer garbage collector
-    /**  @ingroup cds_garbage_collector
-        @headerfile cds/gc/hp.h
-
-        Implementation of classic Hazard Pointer garbage collector.
-
-        Sources:
-            - [2002] Maged M. Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
-            - [2003] Maged M. Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
-            - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
-
-        The Hazard Pointer garbage collector is a singleton. The main user-level part of the Hazard Pointer schema is
-        the GC class \p %cds::gc::HP and its nested classes. Before using any HP-related class you must initialize the HP garbage collector
-        by constructing a \p %cds::gc::HP object at the beginning of your \p main().
-        See the \ref cds_how_to_use "How to use" section for details on how to apply the garbage collector.
-    */
-    class HP
-    {
-    public:
-        /// Native guarded pointer type
-        /**
-            @headerfile cds/gc/hp.h
-        */
-        typedef gc::hp::hazard_pointer guarded_pointer;
-
-        /// Atomic reference
-        /**
-            @headerfile cds/gc/hp.h
-        */
-        template <typename T> using atomic_ref = atomics::atomic<T *>;
-
-        /// Atomic marked pointer
-        /**
-            @headerfile cds/gc/hp.h
-        */
-        template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
-
-        /// Atomic type
-        /**
-            @headerfile cds/gc/hp.h
-        */
-        template <typename T> using atomic_type = atomics::atomic<T>;
-
-        /// Thread GC implementation for internal usage
-        /**
-            @headerfile cds/gc/hp.h
-        */
-        typedef hp::ThreadGC   thread_gc_impl;
-
-        /// Exception "Too many Hazard Pointer"
-        typedef hp::GarbageCollector::too_many_hazard_ptr too_many_hazard_ptr_exception;
-
-        /// Wrapper for hp::ThreadGC class
-        /**
-            @headerfile cds/gc/hp.h
-            This class performs automatic attaching/detaching of the current thread
-            to/from the Hazard Pointer GC.
-        */
-        class thread_gc: public thread_gc_impl
-        {
-            //@cond
-            bool    m_bPersistent;
-            //@endcond
-        public:
-
-            /// Constructor
-            /**
-                The constructor attaches the current thread to the Hazard Pointer GC
-                if it is not yet attached.
-                The \p bPersistent parameter specifies attachment persistence:
-                - \p true - the class destructor will not detach the thread from Hazard Pointer GC.
-                - \p false (default) - the class destructor will detach the thread from Hazard Pointer GC.
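-
-                A usage sketch (the worker function name is an assumption for illustration only):
-                @code
-                void worker_thread()
-                {
-                    // attaches this thread to the HP GC; the destructor detaches it
-                    cds::gc::HP::thread_gc threadGC;
-
-                    // ... use HP-based containers from this thread ...
-                }
-                @endcode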
-            */
-            thread_gc(
-                bool    bPersistent = false
-            ) ;     //inline in hp_impl.h
-
-            /// Destructor
-            /**
-                If the object has been created in persistent mode, the destructor does nothing.
-                Otherwise it detaches the current thread from Hazard Pointer GC.
-            */
-            ~thread_gc() ;  // inline in hp_impl.h
-
-        public: // for internal use only!!!
-            //@cond
-            static cds::gc::hp::details::hp_guard* alloc_guard(); // inline in hp_impl.h
-            static void free_guard( cds::gc::hp::details::hp_guard* g ); // inline in hp_impl.h
-            //@endcond
-        };
-
-        /// Hazard Pointer guard
-        /**
-            @headerfile cds/gc/hp.h
-
-            A guard is a hazard pointer.
-            Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer.
-
-            \p %Guard object is movable but not copyable.
-
-            The guard object can be in two states:
-            - unlinked - the guard is not linked with any internal hazard pointer.
-              In this state no operation except \p link() and move assignment is supported.
-            - linked (default) - the guard allocates an internal hazard pointer and is fully operable.
-
-            For performance reasons the implementation does not check the state of the guard at runtime.
-
-            @warning Move assignment can transfer the guard to the unlinked state; use with care.
-        */
-        class Guard
-        {
-        public:
-            /// Default ctor allocates a guard (hazard pointer) from thread-private storage
-            /**
-                @warning Can throw \p too_many_hazard_ptr_exception if internal hazard pointer objects are exhausted.
-            */
-            Guard();  // inline in hp_impl.h
-
-            /// Initializes an unlinked guard, i.e. the guard contains no hazard pointer. Used for move semantics support
-            explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
-                : m_guard( nullptr )
-            {}
-
-            /// Move ctor - the \p src guard becomes unlinked (transfers internal guard ownership)
-            Guard( Guard&& src ) CDS_NOEXCEPT
-                : m_guard( src.m_guard )
-            {
-                src.m_guard = nullptr;
-            }
-
-            /// Move assignment: the internal guards are swapped between \p src and \p this
-            /**
-                @warning \p src will become unlinked if \p this was unlinked on entry.
-            */
-            Guard& operator=( Guard&& src ) CDS_NOEXCEPT
-            {
-                std::swap( m_guard, src.m_guard );
-                return *this;
-            }
-
-            /// Copy ctor is prohibited - the guard is not copyable
-            Guard( Guard const& ) = delete;
-
-            /// Copy assignment is prohibited
-            Guard& operator=( Guard const& ) = delete;
-
-            /// Frees the internal hazard pointer if the guard is in linked state
-            ~Guard()
-            {
-                unlink();
-            }
-
-            /// Checks if the guard object is linked with an internal hazard pointer
-            bool is_linked() const
-            {
-                return m_guard != nullptr;
-            }
-
-            /// Links the guard with an internal hazard pointer if the guard is in the unlinked state
-            /**
-                @warning Can throw \p too_many_hazard_ptr_exception if internal hazard pointer objects are exhausted.
-            */
-            void link(); // inline in hp_impl.h
-
-            /// Unlinks the guard from the internal hazard pointer; the guard becomes unlinked
-            void unlink(); // inline in hp_impl.h
-
-            /// Protects a pointer of type \p atomic<T*>
-            /**
-                Returns the value of \p toGuard
-
-                The function tries to load \p toGuard and to store it
-                to the HP slot repeatedly until the guard's value equals \p toGuard.
-
-                @warning The guard object must be in the linked state, otherwise the result is undefined
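-
-                A minimal sketch (the node type and the atomic head pointer are assumptions for illustration only):
-                @code
-                atomics::atomic<node*> atomicHead;
-                cds::gc::HP::Guard g;
-                node* p = g.protect( atomicHead );  // p is protected until g is cleared, reassigned or destroyed
-                @endcode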
-            */
-            template <typename T>
-            T protect( atomics::atomic<T> const& toGuard )
-            {
-                assert( m_guard != nullptr );
-
-                T pCur = toGuard.load(atomics::memory_order_acquire);
-                T pRet;
-                do {
-                    pRet = assign( pCur );
-                    pCur = toGuard.load(atomics::memory_order_acquire);
-                } while ( pRet != pCur );
-                return pCur;
-            }
-
-            /// Protects a converted pointer of type \p atomic<T*>
-            /**
-                Returns the value of \p toGuard
-
-                The function tries to load \p toGuard and to store the result of functor \p f
-                to the HP slot repeatedly until the guard's value equals \p toGuard.
-
-                The function is useful for intrusive containers when \p toGuard is a node pointer
-                that should be converted to a pointer to the value before protecting.
-                The parameter \p f of type Func is a functor that makes this conversion:
-                \code
-                    struct functor {
-                        value_type * operator()( T * p );
-                    };
-                \endcode
-                Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
-
-                @warning The guard object must be in the linked state, otherwise the result is undefined
-            */
-            template <typename T, class Func>
-            T protect( atomics::atomic<T> const& toGuard, Func f )
-            {
-                assert( m_guard != nullptr );
-
-                T pCur = toGuard.load(atomics::memory_order_acquire);
-                T pRet;
-                do {
-                    pRet = pCur;
-                    assign( f( pCur ));
-                    pCur = toGuard.load(atomics::memory_order_acquire);
-                } while ( pRet != pCur );
-                return pCur;
-            }
-
-            /// Store \p p to the guard
-            /**
-                The function is equivalent to a simple assignment of \p p to the guard; no loop is performed.
-                Can be used for a pointer that cannot be changed concurrently.
-
-                @warning The guard object must be in the linked state, otherwise the result is undefined
-            */
-            template <typename T>
-            T * assign( T* p );    // inline in hp_impl.h
-
-            //@cond
-            std::nullptr_t assign( std::nullptr_t )
-            {
-                assert(m_guard != nullptr );
-                return *m_guard = nullptr;
-            }
-            //@endcond
-
-            /// Copy a value guarded from \p src guard to \p this guard (valid only in linked state)
-            void copy( Guard const& src )
-            {
-                assign( src.get_native());
-            }
-
-            /// Store marked pointer \p p to the guard
-            /**
-                The function is equivalent to a simple assignment of <tt>p.ptr()</tt>; no loop is performed.
-                Can be used for a marked pointer that cannot be changed concurrently.
-
-                @warning The guard object must be in the linked state, otherwise the result is undefined
-            */
-            template <typename T, int BITMASK>
-            T * assign( cds::details::marked_ptr<T, BITMASK> p )
-            {
-                return assign( p.ptr());
-            }
-
-            /// Clear value of the guard (valid only in linked state)
-            void clear()
-            {
-                assign( nullptr );
-            }
-
-            /// Get the value currently protected (valid only in linked state)
-            template <typename T>
-            T * get() const
-            {
-                return reinterpret_cast<T *>( get_native());
-            }
-
-            /// Get native hazard pointer stored (valid only in linked state)
-            guarded_pointer get_native() const
-            {
-                assert( m_guard != nullptr );
-                return m_guard->get();
-            }
-
-            //@cond
-            hp::details::hp_guard* release()
-            {
-                hp::details::hp_guard* g = m_guard;
-                m_guard = nullptr;
-                return g;
-            }
-
-            hp::details::hp_guard*& guard_ref()
-            {
-                return m_guard;
-            }
-            //@endcond
-
-        private:
-            //@cond
-            hp::details::hp_guard* m_guard;
-            //@endcond
-        };
-
-        /// Array of Hazard Pointer guards
-        /**
-            @headerfile cds/gc/hp.h
-            The class is intended for allocating an array of hazard pointer guards.
-            Template parameter \p Count defines the size of the array.
-
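-            A minimal sketch (the node type and the atomic pointers are assumptions for illustration only):
-            @code
-            atomics::atomic<node*> atomicA, atomicB;
-            cds::gc::HP::GuardArray<2> ga;
-            node* pA = ga.protect( 0, atomicA );
-            node* pB = ga.protect( 1, atomicB );
-            // both pA and pB stay protected while ga is alive
-            @endcode
-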
-        */
-        template <size_t Count>
-        class GuardArray
-        {
-        public:
-            /// Rebind array for other size \p Count2
-            template <size_t Count2>
-            struct rebind {
-                typedef GuardArray<Count2>  other;   ///< rebinding result
-            };
-
-            /// Array capacity
-            static CDS_CONSTEXPR const size_t c_nCapacity = Count;
-
-        public:
-            /// Default ctor allocates \p Count hazard pointers
-            GuardArray(); // inline in hp_impl.h
-
-            /// Move ctor is prohibited
-            GuardArray( GuardArray&& ) = delete;
-
-            /// Move assignment is prohibited
-            GuardArray& operator=( GuardArray&& ) = delete;
-
-            /// Copy ctor is prohibited
-            GuardArray( GuardArray const& ) = delete;
-
-            /// Copy assignment is prohibited
-            GuardArray& operator=( GuardArray const& ) = delete;
-
-            /// Frees allocated hazard pointers
-            ~GuardArray(); // inline in hp_impl.h
-
-            /// Protects a pointer of type \p atomic<T*>
-            /**
-                Returns the value of \p toGuard
-
-                The function tries to load \p toGuard and to store it
-                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard.
-            */
-            template <typename T>
-            T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
-            {
-                assert( nIndex < capacity());
-
-                T pRet;
-                do {
-                    pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
-                } while ( pRet != toGuard.load(atomics::memory_order_acquire));
-
-                return pRet;
-            }
-
-            /// Protects a pointer of type \p atomic<T*>
-            /**
-                Returns the value of \p toGuard
-
-                The function tries to load \p toGuard and to store it
-                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard.
-
-                The function is useful for intrusive containers when \p toGuard is a node pointer
-                that should be converted to a pointer to the value type before guarding.
-                The parameter \p f of type Func is a functor that makes this conversion:
-                \code
-                    struct functor {
-                        value_type * operator()( T * p );
-                    };
-                \endcode
-                Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
-            */
-            template <typename T, class Func>
-            T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
-            {
-                assert( nIndex < capacity());
-
-                T pRet;
-                do {
-                    assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
-                } while ( pRet != toGuard.load(atomics::memory_order_acquire));
-
-                return pRet;
-            }
-
-            /// Store \p p to the slot \p nIndex
-            /**
-                The function is equivalent to a simple assignment; no loop is performed.
-            */
-            template <typename T>
-            T * assign( size_t nIndex, T * p ); // inline in hp_impl.h
-
-            /// Store marked pointer \p p to the slot \p nIndex
-            /**
-                The function is equivalent to a simple assignment of <tt>p.ptr()</tt>; no loop is performed.
-                Can be used for a marked pointer that cannot be changed concurrently.
-            */
-            template <typename T, int BITMASK>
-            T * assign( size_t nIndex, cds::details::marked_ptr<T, BITMASK> p )
-            {
-                return assign( nIndex, p.ptr());
-            }
-
-            /// Copy guarded value from \p src guard to slot at index \p nIndex
-            void copy( size_t nIndex, Guard const& src )
-            {
-                assign( nIndex, src.get_native());
-            }
-
-            /// Copy guarded value from slot \p nSrcIndex to the slot \p nDestIndex
-            void copy( size_t nDestIndex, size_t nSrcIndex )
-            {
-                assign( nDestIndex, get_native( nSrcIndex ));
-            }
-
-            /// Clear value of the slot \p nIndex
-            void clear( size_t nIndex )
-            {
-                m_arr.clear( nIndex );
-            }
-
-            /// Get current value of slot \p nIndex
-            template <typename T>
-            T * get( size_t nIndex ) const
-            {
-                return reinterpret_cast<T *>( get_native( nIndex ));
-            }
-
-            /// Get the native hazard pointer stored in the slot \p nIndex
-            guarded_pointer get_native( size_t nIndex ) const
-            {
-                assert( nIndex < capacity());
-                return m_arr[nIndex]->get();
-            }
-
-            //@cond
-            hp::details::hp_guard* release( size_t nIndex ) CDS_NOEXCEPT
-            {
-                return m_arr.release( nIndex );
-            }
-            //@endcond
-
-            /// Capacity of the guard array
-            static CDS_CONSTEXPR size_t capacity()
-            {
-                return c_nCapacity;
-            }
-
-        private:
-            //@cond
-            hp::details::hp_array<Count> m_arr;
-            //@endcond
-        };
-
-        /// Guarded pointer
-        /**
-            A guarded pointer is a pair of a pointer and a GC guard.
-            Usually, it is used for returning a pointer to an item from a lock-free container.
-            The guard prevents the pointer from being disposed (freed) early by the GC.
-            After the \p %guarded_ptr object is destroyed, the pointer can be disposed (freed) automatically at any time.
-
-            Template arguments:
-            - \p GuardedType - a type which the guard stores
-            - \p ValueType - a value type
-            - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
-
-            For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
-            In such case the \p %guarded_ptr is:
-            @code
-            typedef cds::gc::HP::guarded_ptr< foo > intrusive_guarded_ptr;
-            @endcode
-
-            For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
-            For example:
-            @code
-            struct foo {
-                int const   key;
-                std::string value;
-            };
-
-            struct value_accessor {
-                std::string* operator()( foo* pFoo ) const
-                {
-                    return &(pFoo->value);
-                }
-            };
-
-            // Guarded ptr
-            typedef cds::gc::HP::guarded_ptr< foo, std::string, value_accessor > nonintrusive_guarded_ptr;
-            @endcode
-
-            You don't need to use this class directly.
-            All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor.
-        */
-        template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
-        class guarded_ptr
-        {
-            //@cond
-            struct trivial_cast {
-                ValueType * operator()( GuardedType * p ) const
-                {
-                    return p;
-                }
-            };
-
-            template <typename GT, typename VT, typename C> friend class guarded_ptr;
-            //@endcond
-
-        public:
-            typedef GuardedType guarded_type; ///< Guarded type
-            typedef ValueType   value_type;   ///< Value type
-
-            /// Functor for casting \p guarded_type to \p value_type
-            typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
-
-        public:
-            /// Creates empty guarded pointer
-            guarded_ptr() CDS_NOEXCEPT
-                : m_pGuard(nullptr)
-            {}
-
-            //@cond
-            explicit guarded_ptr( hp::details::hp_guard* g ) CDS_NOEXCEPT
-                : m_pGuard( g )
-            {}
-
-            /// Initializes guarded pointer with \p p
-            explicit guarded_ptr( guarded_type* p ) CDS_NOEXCEPT
-                : m_pGuard( nullptr )
-            {
-                reset(p);
-            }
-            explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
-                : m_pGuard( nullptr )
-            {}
-            //@endcond
-
-            /// Move ctor
-            guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
-                : m_pGuard( gp.m_pGuard )
-            {
-                gp.m_pGuard = nullptr;
-            }
-
-            /// Move ctor
-            template <typename GT, typename VT, typename C>
-            guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
-                : m_pGuard( gp.m_pGuard )
-            {
-                gp.m_pGuard = nullptr;
-            }
-
-            /// Ctor from \p Guard
-            explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
-                : m_pGuard( g.release())
-            {}
-
-            /// The guarded pointer is not copy-constructible
-            guarded_ptr( guarded_ptr const& gp ) = delete;
-
-            /// Clears the guarded pointer
-            /**
-                \ref release() is called if the guarded pointer is not \ref empty()
-            */
-            ~guarded_ptr() CDS_NOEXCEPT
-            {
-                release();
-            }
-
-            /// Move-assignment operator
-            guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
-            {
-                std::swap( m_pGuard, gp.m_pGuard );
-                return *this;
-            }
-
-            /// Move-assignment from \p Guard
-            guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
-            {
-                std::swap( m_pGuard, g.guard_ref());
-                return *this;
-            }
-
-            /// The guarded pointer is not copy-assignable
-            guarded_ptr& operator=(guarded_ptr const& gp) = delete;
-
-            /// Returns a pointer to guarded value
-            value_type * operator ->() const CDS_NOEXCEPT
-            {
-                assert( !empty());
-                return value_cast()( reinterpret_cast<guarded_type *>(m_pGuard->get()));
-            }
-
-            /// Returns a reference to guarded value
-            value_type& operator *() CDS_NOEXCEPT
-            {
-                assert( !empty());
-                return *value_cast()(reinterpret_cast<guarded_type *>(m_pGuard->get()));
-            }
-
-            /// Returns const reference to guarded value
-            value_type const& operator *() const CDS_NOEXCEPT
-            {
-                assert( !empty());
-                return *value_cast()(reinterpret_cast<guarded_type *>(m_pGuard->get()));
-            }
-
-            /// Checks if the guarded pointer is \p nullptr
-            bool empty() const CDS_NOEXCEPT
-            {
-                return !m_pGuard || m_pGuard->get( atomics::memory_order_relaxed ) == nullptr;
-            }
-
-            /// \p bool operator returns <tt>!empty()</tt>
-            explicit operator bool() const CDS_NOEXCEPT
-            {
-                return !empty();
-            }
-
-            /// Clears guarded pointer
-            /**
-                If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
-                Dereferencing the guarded pointer after \p release() is dangerous.
-            */
-            void release() CDS_NOEXCEPT
-            {
-                free_guard();
-            }
-
-            //@cond
-            // For internal use only!!!
-            void reset(guarded_type * p) CDS_NOEXCEPT
-            {
-                alloc_guard();
-                assert( m_pGuard );
-                m_pGuard->set(p);
-            }
-            //@endcond
-
-        private:
-            //@cond
-            void alloc_guard()
-            {
-                if ( !m_pGuard )
-                    m_pGuard = thread_gc::alloc_guard();
-            }
-
-            void free_guard()
-            {
-                if ( m_pGuard ) {
-                    thread_gc::free_guard( m_pGuard );
-                    m_pGuard = nullptr;
-                }
-            }
-            //@endcond
-
-        private:
-            //@cond
-            hp::details::hp_guard* m_pGuard;
-            //@endcond
-        };
-
-    public:
-        /// \p scan() type
-        enum class scan_type {
-            classic = hp::classic,    ///< classic scan as described in Michael's papers
-            inplace = hp::inplace     ///< inplace scan without allocation
-        };
-        /// Initializes %HP singleton
-        /**
-            The constructor initializes the GC singleton with the passed parameters.
-            If the GC instance does not exist, the function creates it.
-            Otherwise it does nothing.
-
-            Michael's %HP reclamation schema depends on three parameters:
-            - \p nHazardPtrCount - hazard pointer count per thread. Usually it is a small number (up to 10) depending on
-                the data structure algorithms. By default, if \p nHazardPtrCount = 0, the function
-                uses the maximum hazard pointer count for the CDS library.
-            - \p nMaxThreadCount - max count of threads using the Hazard Pointer GC in your application. Default is 100.
-            - \p nMaxRetiredPtrCount - capacity of the retired pointer array for each thread. Must be greater than
-                <tt> nHazardPtrCount * nMaxThreadCount </tt>. Default is <tt>2 * nHazardPtrCount * nMaxThreadCount </tt>.
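-
-            A construction sketch (illustrative only; default parameter values are used):
-            @code
-            int main()
-            {
-                // Create the HP singleton before any HP-based container is used
-                cds::gc::HP hpGC;
-
-                // Attach the main thread to libcds infrastructure
-                cds::threading::Manager::attachThread();
-
-                // ... use cds containers based on cds::gc::HP ...
-
-                return 0;   // the hpGC destructor destroys the HP singleton
-            }
-            @endcode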
-        */
-        HP(
-            size_t nHazardPtrCount = 0,     ///< Hazard pointer count per thread
-            size_t nMaxThreadCount = 0,     ///< Max count of simultaneously working threads in your application