2cf4645e87f39bf9d2e7f9da65c8ab0bbca6d39d
[libcds.git] / cds / gc / dhp.h
1 /*
2     This file is a part of libcds - Concurrent Data Structures library
3
4     (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
5
6     Source code repo: http://github.com/khizmax/libcds/
7     Download: http://sourceforge.net/projects/libcds/files/
8
9     Redistribution and use in source and binary forms, with or without
10     modification, are permitted provided that the following conditions are met:
11
12     * Redistributions of source code must retain the above copyright notice, this
13       list of conditions and the following disclaimer.
14
15     * Redistributions in binary form must reproduce the above copyright notice,
16       this list of conditions and the following disclaimer in the documentation
17       and/or other materials provided with the distribution.
18
19     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20     AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21     IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23     FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24     DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25     SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27     OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #ifndef CDSLIB_GC_DHP_SMR_H
32 #define CDSLIB_GC_DHP_SMR_H
33
#include <exception>
#include <stdexcept>

#include <cds/gc/details/hp_common.h>
#include <cds/details/lib.h>
#include <cds/threading/model.h>
#include <cds/intrusive/free_list_selector.h>
#include <cds/details/throw_exception.h>
#include <cds/details/static_functor.h>
#include <cds/details/marked_ptr.h>
#include <cds/user_setup/cache_line.h>
43
44 namespace cds { namespace gc {
45
46     /// Dynamic (adaptive) Hazard Pointer implementation details
47     namespace dhp {
48         using namespace cds::gc::hp::common;
49
        /// Exception "Dynamic Hazard Pointer SMR is not initialized"
        /**
            Thrown by \p smr::instance() when the global DHP singleton has not been
            created via \p smr::construct() (unless CDS_DISABLE_SMR_EXCEPTION is defined).
        */
        class not_initialized: public std::runtime_error
        {
        public:
            //@cond
            /// Constructs the exception with a fixed explanatory message
            not_initialized()
                : std::runtime_error( "Global DHP SMR object is not initialized" )
            {}
            //@endcond
        };
60
61         //@cond
62         struct guard_block: public cds::intrusive::FreeListImpl::node
63         {
64             guard_block*    next_;  // next block in the thread list
65
66             guard_block()
67                 : next_( nullptr )
68             {}
69
70             guard* first()
71             {
72                 return reinterpret_cast<guard*>( this + 1 );
73             }
74         };
75         //@endcond
76
77         //@cond
        /// \p guard_block allocator (global object)
        /**
            Recycles \p guard_block chunks through a global lock-free free list
            instead of returning them to the heap. The single instance is owned
            by \p smr (see \p smr::get_hp_allocator()).
        */
        class hp_allocator
        {
            friend class smr;
        public:
            /// Returns the global allocator instance (requires constructed \p smr)
            static hp_allocator& instance();

            /// Gets a guard block; implemented out of line (dhp.cpp)
            CDS_EXPORT_API guard_block* alloc();

            /// Returns \p block to the free list; the block is recycled, not deallocated
            /**
                NOTE(review): unlike \p retired_allocator::free(), \p next_ is not reset
                here - presumably \p alloc() re-links it; confirm in dhp.cpp.
            */
            void free( guard_block* block )
            {
                free_list_.put( block );
            }

        private:
            // Private ctor/dtor: only smr may create/destroy the allocator
            hp_allocator()
#ifdef CDS_ENABLE_HPSTAT
                : block_allocated_(0)
#endif
            {}
            CDS_EXPORT_API ~hp_allocator();

        private:
            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p guard_block
#ifdef CDS_ENABLE_HPSTAT
        public:
            atomics::atomic<size_t>         block_allocated_;   ///< count of allocated blocks
#endif
        };
106         //@endcond
107
108         //@cond
        /// Per-thread hazard pointer storage
        /**
            Manages the thread-private free list of guards. The storage starts with
            a fixed initial array of guards and grows on demand by linking
            \p guard_block chunks obtained from the global \p hp_allocator.
            Not thread-safe by design: each instance belongs to exactly one thread.
        */
        class thread_hp_storage
        {
            friend class smr;
        public:
            /// \p arr - raw memory for the initial guard array, \p nSize - its capacity
            thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
                : free_head_( arr )
                , extended_list_( nullptr )
                , array_( arr )
                , initial_capacity_( nSize )
#       ifdef CDS_ENABLE_HPSTAT
                , alloc_guard_count_( 0 )
                , free_guard_count_( 0 )
                , extend_call_count_( 0 )
#       endif
            {
                // Initialize guards: placement-new constructs guard objects in the raw array
                new( arr ) guard[nSize];
            }

            thread_hp_storage() = delete;
            thread_hp_storage( thread_hp_storage const& ) = delete;
            thread_hp_storage( thread_hp_storage&& ) = delete;

            ~thread_hp_storage()
            {
                clear();
            }

            /// Takes one guard from the free list, extending the storage if it is exhausted
            guard* alloc()
            {
                if ( cds_unlikely( free_head_ == nullptr )) {
                    extend();
                    assert( free_head_ != nullptr );
                }

                guard* g = free_head_;
                free_head_ = g->next_;
                CDS_HPSTAT( ++alloc_guard_count_ );
                return g;
            }

            /// Returns guard \p g to the free list; a \p nullptr argument is ignored
            void free( guard* g ) CDS_NOEXCEPT
            {
                if ( g ) {
                    g->clear();
                    g->next_ = free_head_;
                    free_head_ = g;
                    CDS_HPSTAT( ++free_guard_count_ );
                }
            }

            /// Fills \p arr with \p Capacity guards taken from the free list; returns \p Capacity
            template< size_t Capacity>
            size_t alloc( guard_array<Capacity>& arr )
            {
                for ( size_t i = 0; i < Capacity; ++i ) {
                    if ( cds_unlikely( free_head_ == nullptr ))
                        extend();
                    arr.reset( i, free_head_ );
                    free_head_ = free_head_->next_;
                }
                CDS_HPSTAT( alloc_guard_count_ += Capacity );
                return Capacity;
            }

            /// Returns every non-null guard of \p arr to the free list
            template <size_t Capacity>
            void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
            {
                // Build the freed chain locally, then splice it in front of the free list
                guard* gList = free_head_;
                for ( size_t i = 0; i < Capacity; ++i ) {
                    guard* g = arr[i];
                    if ( g ) {
                        g->clear();
                        g->next_ = gList;
                        gList = g;
                        CDS_HPSTAT( ++free_guard_count_ );
                    }
                }
                free_head_ = gList;
            }

            /// Clears all initial guards and returns every extended block to the global allocator
            void clear()
            {
                // clear array_
                for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur )
                    cur->clear();

                // free all extended blocks
                hp_allocator& alloc = hp_allocator::instance();
                for ( guard_block* p = extended_list_; p; ) {
                    guard_block* next = p->next_;
                    alloc.free( p );
                    p = next;
                }

                extended_list_ = nullptr;
            }

            /// Re-links the initial array into a fresh free list
            /**
                NOTE(review): assumes initial_capacity_ >= 1 - with capacity 0 the loop
                below would run past the array; confirm the invariant at the call site.
            */
            void init()
            {
                assert( extended_list_ == nullptr );

                guard* p = array_;
                for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
                    p->next_ = p + 1;
                p->next_ = nullptr;
                free_head_ = array_;
            }

        private:
            /// Links a fresh guard_block in front of the thread's storage
            void extend()
            {
                assert( free_head_ == nullptr );

                // hp_allocator::alloc() is defined in dhp.cpp; presumably it returns
                // a block whose guards are already chained - confirm there
                guard_block* block = hp_allocator::instance().alloc();
                block->next_ = extended_list_;
                extended_list_ = block;
                free_head_ = block->first();
                CDS_HPSTAT( ++extend_call_count_ );
            }

        private:
            guard*          free_head_;        ///< Head of free guard list
            guard_block*    extended_list_;    ///< Head of extended guard blocks allocated for the thread
            guard* const    array_;            ///< initial HP array
            size_t const    initial_capacity_; ///< Capacity of \p array_
#       ifdef CDS_ENABLE_HPSTAT
        public:
            size_t          alloc_guard_count_;  ///< Number of guard allocations
            size_t          free_guard_count_;   ///< Number of guard releases
            size_t          extend_call_count_;  ///< Number of extend() calls
#       endif
        };
242         //@endcond
243
244         //@cond
245         struct retired_block: public cds::intrusive::FreeListImpl::node
246         {
247             retired_block*  next_;  ///< Next block in thread-private retired array
248
249             static size_t const c_capacity = 256;
250
251             retired_block()
252                 : next_( nullptr )
253             {}
254
255             retired_ptr* first() const
256             {
257                 return reinterpret_cast<retired_ptr*>( const_cast<retired_block*>( this ) + 1 );
258             }
259
260             retired_ptr* last() const
261             {
262                 return first() + c_capacity;
263             }
264         };
265         //@endcond
266
267         //@cond
        /// \p retired_block allocator (global object owned by \p smr)
        /**
            Recycles \p retired_block chunks through a global lock-free free list.
        */
        class retired_allocator
        {
            friend class smr;
        public:
            /// Returns the global allocator instance (requires constructed \p smr)
            static retired_allocator& instance();

            /// Gets a retired block; implemented out of line (dhp.cpp)
            CDS_EXPORT_API retired_block* alloc();

            /// Unlinks \p block and returns it to the free list for later reuse
            void free( retired_block* block )
            {
                block->next_ = nullptr;
                free_list_.put( block );
            }

        private:
            // Private ctor/dtor: only smr may create/destroy the allocator
            retired_allocator()
#ifdef CDS_ENABLE_HPSTAT
                : block_allocated_(0)
#endif
            {}
            CDS_EXPORT_API ~retired_allocator();

        private:
            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p retired_block
#ifdef CDS_ENABLE_HPSTAT
        public:
            atomics::atomic<size_t> block_allocated_; ///< Count of allocated blocks
#endif
        };
296         //@endcond
297
298         //@cond
299         /// Per-thread retired array
300         class retired_array
301         {
302             friend class smr;
303         public:
304             retired_array() CDS_NOEXCEPT
305                 : current_block_( nullptr )
306                 , current_cell_( nullptr )
307                 , list_head_( nullptr )
308                 , list_tail_( nullptr )
309                 , block_count_(0)
310 #       ifdef CDS_ENABLE_HPSTAT
311                 , retire_call_count_( 0 )
312                 , extend_call_count_( 0 )
313 #       endif
314             {}
315
316             retired_array( retired_array const& ) = delete;
317             retired_array( retired_array&& ) = delete;
318
319             ~retired_array()
320             {
321                 assert( empty());
322                 fini();
323             }
324
325             bool push( retired_ptr const& p ) CDS_NOEXCEPT
326             {
327                 assert( current_block_ != nullptr );
328                 assert( current_block_->first() <= current_cell_ );
329                 assert( current_cell_ < current_block_->last() );
330                 //assert( &p != current_cell_ );
331
332                 *current_cell_ = p;
333                 CDS_HPSTAT( ++retire_call_count_ );
334
335                 if ( ++current_cell_ == current_block_->last() ) {
336                     // goto next block if exists
337                     if ( current_block_->next_ ) {
338                         current_block_ = current_block_->next_;
339                         current_cell_ = current_block_->first();
340                         return true;
341                     }
342
343                     // no free block
344                     // smr::scan() extend retired_array if needed
345                     return false;
346                 }
347
348                 return true;
349             }
350
351             bool repush( retired_ptr* p ) CDS_NOEXCEPT
352             {                
353                 bool ret = push( *p );
354                 CDS_HPSTAT( --retire_call_count_ );
355                 assert( ret );
356                 return ret;
357             }
358
359         private: // called by smr
360             void init()
361             {
362                 if ( list_head_ == nullptr ) {
363                     retired_block* block = retired_allocator::instance().alloc();
364                     assert( block->next_ == nullptr );
365
366                     current_block_ =
367                         list_head_ =
368                         list_tail_ = block;
369                     current_cell_ = block->first();
370
371                     block_count_ = 1;
372                 }
373             }
374
375             void fini()
376             {
377                 retired_allocator& alloc = retired_allocator::instance();
378                 for ( retired_block* p = list_head_; p; ) {
379                     retired_block* next = p->next_;
380                     alloc.free( p );
381                     p = next;
382                 }
383
384                 current_block_ =
385                     list_head_ =
386                     list_tail_ = nullptr;
387                 current_cell_ = nullptr;
388
389                 block_count_ = 0;
390             }
391
392             void extend()
393             {
394                 assert( list_head_ != nullptr );
395                 assert( current_block_ == list_tail_ );
396                 assert( current_cell_ == current_block_->last() );
397
398                 retired_block* block = retired_allocator::instance().alloc();
399                 assert( block->next_ == nullptr );
400
401                 list_tail_ = list_tail_->next_ = block;
402                 current_cell_ = block->first();
403                 ++block_count_;
404                 CDS_HPSTAT( ++extend_call_count_ );
405             }
406
407             bool empty() const
408             {
409                 return current_block_ == nullptr
410                     || ( current_block_ == list_head_ && current_cell_ == current_block_->first());
411             }
412
413         private:
414             retired_block*          current_block_;
415             retired_ptr*            current_cell_;  // in current_block_
416
417             retired_block*          list_head_;
418             retired_block*          list_tail_;
419             size_t                  block_count_;
420 #       ifdef CDS_ENABLE_HPSTAT
421         public:
422             size_t  retire_call_count_;
423             size_t  extend_call_count_;
424 #       endif
425         };
426         //@endcond
427
428         /// Internal statistics
429         struct stat {
430             size_t  guard_allocated;    ///< Count of allocated HP guards
431             size_t  guard_freed;        ///< Count of freed HP guards
432             size_t  retired_count;      ///< Count of retired pointers
433             size_t  free_count;         ///< Count of free pointers
434             size_t  scan_count;         ///< Count of \p scan() call
435             size_t  help_scan_count;    ///< Count of \p help_scan() call
436
437             size_t  thread_rec_count;   ///< Count of thread records
438
439             size_t  hp_block_count;         ///< Count of extended HP blocks allocated
440             size_t  retired_block_count;    ///< Count of retired blocks allocated
441             size_t  hp_extend_count;        ///< Count of hp array \p extend() call
442             size_t  retired_extend_count;   ///< Count of retired array \p extend() call
443
444                                         /// Default ctor
445             stat()
446             {
447                 clear();
448             }
449
450             /// Clears all counters
451             void clear()
452             {
453                 guard_allocated =
454                     guard_freed =
455                     retired_count =
456                     free_count =
457                     scan_count =
458                     help_scan_count =
459                     thread_rec_count = 
460                     hp_block_count = 
461                     retired_block_count = 
462                     hp_extend_count = 
463                     retired_extend_count = 0;
464             }
465         };
466
467         //@cond
        /// Per-thread data
        struct thread_data {
            thread_hp_storage   hazards_;   ///< Hazard pointers private to the thread
            retired_array       retired_;   ///< Retired data private to the thread

            // Cache-line-sized padding around sync_: keeps the frequently-modified
            // counter off the cache lines of neighboring data (avoids false sharing)
            char pad1_[cds::c_nCacheLineSize];
            atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
            char pad2_[cds::c_nCacheLineSize];

#       ifdef CDS_ENABLE_HPSTAT
            // Per-thread statistic counters (only when CDS_ENABLE_HPSTAT is defined)
            size_t              free_call_count_;
            size_t              scan_call_count_;
            size_t              help_scan_call_count_;
#       endif

            // CppCheck warn: pad1_ and pad2_ is uninitialized in ctor
            // cppcheck-suppress uninitMemberVar
            /// \p guards - raw memory for the initial guard array, \p guard_count - its capacity
            thread_data( guard* guards, size_t guard_count )
                : hazards_( guards, guard_count )
                , sync_( 0 )
#       ifdef CDS_ENABLE_HPSTAT
                , free_call_count_(0)
                , scan_call_count_(0)
                , help_scan_call_count_(0)
#       endif
            {}

            thread_data() = delete;
            thread_data( thread_data const& ) = delete;
            thread_data( thread_data&& ) = delete;

            /// Acquire-release RMW on sync_ to establish a synchronizes-with edge with other threads
            void sync()
            {
                sync_.fetch_add( 1, atomics::memory_order_acq_rel );
            }
        };
504         //@endcond
505
506         //@cond
        // Dynamic (adaptive) Hazard Pointer SMR (Safe Memory Reclamation)
        class smr
        {
            struct thread_record;

        public:
            /// Returns the instance of Hazard Pointer \ref smr
            /**
                \p construct() must have been called before; otherwise the function
                throws \p not_initialized (or asserts if CDS_DISABLE_SMR_EXCEPTION is defined).
            */
            static smr& instance()
            {
#       ifdef CDS_DISABLE_SMR_EXCEPTION
                assert( instance_ != nullptr );
#       else
                if ( !instance_ )
                    CDS_THROW_EXCEPTION( not_initialized() );
#       endif
                return *instance_;
            }

            /// Creates Dynamic Hazard Pointer SMR singleton
            /**
                Dynamic Hazard Pointer SMR is a singleton. If DHP instance is not initialized then the function creates the instance.
                Otherwise it does nothing.

                \p nInitialHazardPtrCount is the initial number of hazard pointers allocated
                for each thread. Unlike classic HP, the per-thread guard storage is adaptive:
                it extends on demand (see \p thread_hp_storage::extend()), so no maximum
                thread count or retired-pointer capacity needs to be specified.
            */
            static CDS_EXPORT_API void construct(
                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointer per thread
            );

            // for back-compatibility
            static void Construct(
                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointer per thread
            )
            {
                construct( nInitialHazardPtrCount );
            }

            /// Destroys global instance of \ref smr
            /**
                The parameter \p bDetachAll should be used carefully: if its value is \p true,
                then the object destroyed automatically detaches all attached threads. This feature
                can be useful when you have no control over the thread termination, for example,
                when \p libcds is injected into existing external thread.
            */
            static CDS_EXPORT_API void destruct(
                bool bDetachAll = false     ///< Detach all threads
            );

            // for back-compatibility
            static void Destruct(
                bool bDetachAll = false     ///< Detach all threads
            )
            {
                destruct( bDetachAll );
            }

            /// Checks if global SMR object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return instance_ != nullptr;
            }

            /// Set memory management functions
            /**
                @note This function may be called <b>BEFORE</b> creating an instance
                of Dynamic Hazard Pointer SMR

                SMR object allocates some memory for thread-specific data and for
                creating SMR object.
                By default, a standard \p new and \p delete operators are used for this.
            */
            static CDS_EXPORT_API void set_memory_allocator(
                void* ( *alloc_func )( size_t size ),
                void( *free_func )( void * p )
            );

            /// Returns thread-local data for the current thread
            static CDS_EXPORT_API thread_data* tls();

            /// Attaches/detaches the current thread to/from the SMR thread registry
            static CDS_EXPORT_API void attach_thread();
            static CDS_EXPORT_API void detach_thread();

            /// Get internal statistics
            CDS_EXPORT_API void statistics( stat& st );

        public: // for internal use only
            /// The main garbage collecting function
            CDS_EXPORT_API void scan( thread_data* pRec );

            /// Helper scan routine
            /**
                The function guarantees that every node that is eligible for reuse is eventually freed, barring
                thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
                where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
                to thread's list of reclaimed pointers.

                The function is called internally by \p scan().
            */
            CDS_EXPORT_API void help_scan( thread_data* pThis );

            /// Returns the global \p guard_block allocator
            hp_allocator& get_hp_allocator()
            {
                return hp_allocator_;
            }

            /// Returns the global \p retired_block allocator
            retired_allocator& get_retired_allocator()
            {
                return retired_allocator_;
            }

        private:
            CDS_EXPORT_API explicit smr(
                size_t nInitialHazardPtrCount
            );

            CDS_EXPORT_API ~smr();

            CDS_EXPORT_API void detach_all_thread();

        private:
            CDS_EXPORT_API thread_record* create_thread_data();
            static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );

            /// Allocates Hazard Pointer SMR thread private data
            CDS_EXPORT_API thread_record* alloc_thread_data();

            /// Free HP SMR thread-private data
            CDS_EXPORT_API void free_thread_data( thread_record* pRec );

        private:
            static CDS_EXPORT_API smr* instance_;

            atomics::atomic< thread_record*>    thread_list_;   ///< Head of thread list
            size_t const        initial_hazard_count_;  ///< initial number of hazard pointers per thread
            hp_allocator        hp_allocator_;          ///< global \p guard_block allocator
            retired_allocator   retired_allocator_;     ///< global \p retired_block allocator

            // temporaries
            std::atomic<size_t> last_plist_size_;   ///< HP array size in last scan() call
        };
654         //@endcond
655
656         //@cond
657         // for backward compatibility
658         typedef smr GarbageCollector;
659
660
661         // inlines
        /// Out-of-line definition: needs the complete \p smr declaration above
        inline hp_allocator& hp_allocator::instance()
        {
            return smr::instance().get_hp_allocator();
        }
666
        /// Out-of-line definition: needs the complete \p smr declaration above
        inline retired_allocator& retired_allocator::instance()
        {
            return smr::instance().get_retired_allocator();
        }
671         //@endcond
672
673     } // namespace dhp
674
675
    /// Dynamic (adaptive) Hazard Pointer SMR
    /**  @ingroup cds_garbage_collector

        Implementation of Dynamic (adaptive) Hazard Pointer SMR

        Sources:
            - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
            - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
            - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
685
686         %DHP is an adaptive variant of classic \p cds::gc::HP, see @ref cds_garbage_collectors_comparison "Compare HP implementation"
687
688         See \ref cds_how_to_use "How to use" section for details how to apply SMR.
689     */
690     class DHP
691     {
692     public:
693         /// Native guarded pointer type
694         typedef void* guarded_pointer;
695
696         /// Atomic reference
697         template <typename T> using atomic_ref = atomics::atomic<T *>;
698
699         /// Atomic type
700         /**
701             @headerfile cds/gc/dhp.h
702         */
703         template <typename T> using atomic_type = atomics::atomic<T>;
704
705         /// Atomic marked pointer
706         template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
707
708         /// Internal statistics
709         typedef dhp::stat stat;
710
711         /// Dynamic Hazard Pointer guard
712         /**
713             A guard is a hazard pointer.
714             Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer
715
716             \p %Guard object is movable but not copyable.
717
718             The guard object can be in two states:
719             - unlinked - the guard is not linked with any internal hazard pointer.
720               In this state no operation except \p link() and move assignment is supported.
721             - linked (default) - the guard allocates an internal hazard pointer and fully operable.
722
723             Due to performance reason the implementation does not check state of the guard in runtime.
724
725             @warning Move assignment can transfer the guard in unlinked state, use with care.
726         */
727         class Guard
728         {
729         public:
            /// Default ctor allocates a guard (hazard pointer) from thread-private storage
            /**
                NOTE(review): hazards_.alloc() may extend the thread storage (which
                allocates); confirm it cannot throw, since this ctor is CDS_NOEXCEPT.
            */
            Guard() CDS_NOEXCEPT
                : guard_( dhp::smr::tls()->hazards_.alloc() )
            {}
734
            /// Initializes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
            explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
                : guard_( nullptr )
            {}
739
            /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
            Guard( Guard&& src ) CDS_NOEXCEPT
                : guard_( src.guard_ )
            {
                // Leave src unlinked so its destructor does not free the transferred guard
                src.guard_ = nullptr;
            }
746
747             /// Move assignment: the internal guards are swapped between \p src and \p this
748             /**
749                 @warning \p src will become in unlinked state if \p this was unlinked on entry.
750             */
751             Guard& operator=( Guard&& src ) CDS_NOEXCEPT
752             {
753                 std::swap( guard_, src.guard_ );
754                 return *this;
755             }
756
757             /// Copy ctor is prohibited - the guard is not copyable
758             Guard( Guard const& ) = delete;
759
760             /// Copy assignment is prohibited
761             Guard& operator=( Guard const& ) = delete;
762
            /// Frees the internal hazard pointer if the guard is in linked state
            ~Guard()
            {
                unlink();
            }
768
            /// Checks if the guard object linked with any internal hazard pointer
            bool is_linked() const
            {
                return guard_ != nullptr;
            }
774
775             /// Links the guard with internal hazard pointer if the guard is in unlinked state
776             void link()
777             {
778                 if ( !guard_ )
779                     guard_ = dhp::smr::tls()->hazards_.alloc();
780             }
781
782             /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state
783             void unlink()
784             {
785                 if ( guard_ ) {
786                     dhp::smr::tls()->hazards_.free( guard_ );
787                     guard_ = nullptr;
788                 }
789             }
790
791             /// Protects a pointer of type <tt> atomic<T*> </tt>
792             /**
793                 Return the value of \p toGuard
794
795                 The function tries to load \p toGuard and to store it
796                 to the HP slot repeatedly until the guard's value equals \p toGuard
797             */
798             template <typename T>
799             T protect( atomics::atomic<T> const& toGuard )
800             {
801                 assert( guard_ != nullptr );
802
803                 T pCur = toGuard.load(atomics::memory_order_acquire);
804                 T pRet;
805                 do {
806                     pRet = assign( pCur );
807                     pCur = toGuard.load(atomics::memory_order_acquire);
808                 } while ( pRet != pCur );
809                 return pCur;
810             }
811
812             /// Protects a converted pointer of type <tt> atomic<T*> </tt>
813             /**
814                 Return the value of \p toGuard
815
816                 The function tries to load \p toGuard and to store result of \p f functor
817                 to the HP slot repeatedly until the guard's value equals \p toGuard.
818
819                 The function is useful for intrusive containers when \p toGuard is a node pointer
820                 that should be converted to a pointer to the value type before guarding.
821                 The parameter \p f of type Func is a functor that makes this conversion:
822                 \code
823                     struct functor {
824                         value_type * operator()( T * p );
825                     };
826                 \endcode
827                 Really, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
828             */
829             template <typename T, class Func>
830             T protect( atomics::atomic<T> const& toGuard, Func f )
831             {
832                 assert( guard_ != nullptr );
833
834                 T pCur = toGuard.load(atomics::memory_order_acquire);
835                 T pRet;
836                 do {
837                     pRet = pCur;
838                     assign( f( pCur ));
839                     pCur = toGuard.load(atomics::memory_order_acquire);
840                 } while ( pRet != pCur );
841                 return pCur;
842             }
843
            /// Store \p p to the guard
            /**
                The function is just an assignment, no loop is performed.
                Can be used for a pointer that cannot be changed concurrently
                or for already guarded pointer.

                \return \p p
            */
            template <typename T>
            T* assign( T* p )
            {
                assert( guard_ != nullptr );

                guard_->set( p );
                // After publishing p to the slot, sync() is called — presumably it issues
                // the fence needed so scanning threads observe the new hazard pointer;
                // confirm against dhp::thread_data::sync()
                dhp::smr::tls()->sync();
                return p;
            }
859
            //@cond
            // Assigning nullptr degenerates to clearing the slot;
            // note that unlike assign(T*) no sync() call is made here
            std::nullptr_t assign( std::nullptr_t )
            {
                assert( guard_ != nullptr );

                clear();
                return nullptr;
            }
            //@endcond
869
            /// Store marked pointer \p p to the guard
            /**
                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
                Can be used for a marked pointer that cannot be changed concurrently
                or for already guarded pointer.

                \note only the plain pointer \p p.ptr() is stored; the mark bits are dropped.
            */
            template <typename T, int BITMASK>
            T* assign( cds::details::marked_ptr<T, BITMASK> p )
            {
                return assign( p.ptr());
            }
881
            /// Copy from \p src guard to \p this guard
            /**
                Stores the pointer currently held by \p src into this guard
                via \ref assign — a plain assignment, no protection loop.
            */
            void copy( Guard const& src )
            {
                assign( src.get_native());
            }
887
            /// Clears value of the guard
            /**
                Only the guarded value is reset; the hazard pointer slot itself
                stays allocated and linked to this \p Guard object.
            */
            void clear()
            {
                assert( guard_ != nullptr );

                guard_->clear();
            }
895
            /// Gets the value currently protected (relaxed read)
            /**
                \return the stored pointer cast to \p T*
            */
            template <typename T>
            T * get() const
            {
                assert( guard_ != nullptr );
                return guard_->get_as<T>();
            }
903
            /// Gets native guarded pointer stored
            /**
                \return the raw (untyped) pointer currently held by the hazard pointer slot
            */
            void* get_native() const
            {
                assert( guard_ != nullptr );
                return guard_->get();
            }
910
            //@cond
            // Detaches the internal hazard pointer slot and returns it without freeing it;
            // this Guard becomes unlinked and the caller takes ownership of the slot
            dhp::guard* release()
            {
                dhp::guard* g = guard_;
                guard_ = nullptr;
                return g;
            }

            // Direct mutable access to the internal slot pointer
            // (used by guarded_ptr move-assignment from Guard)
            dhp::guard*& guard_ref()
            {
                return guard_;
            }
            //@endcond
924
925         private:
926             //@cond
927             dhp::guard* guard_;
928             //@endcond
929         };
930
931         /// Array of Dynamic Hazard Pointer guards
932         /**
933             The class is intended for allocating an array of hazard pointer guards.
934             Template parameter \p Count defines the size of the array.
935
936             A \p %GuardArray object is not copy- and move-constructible
937             and not copy- and move-assignable.
938         */
939         template <size_t Count>
940         class GuardArray
941         {
942         public:
943             /// Rebind array for other size \p OtherCount
944             template <size_t OtherCount>
945             struct rebind {
946                 typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
947             };
948
949             /// Array capacity
950             static CDS_CONSTEXPR const size_t c_nCapacity = Count;
951
952         public:
953             /// Default ctor allocates \p Count hazard pointers
954             GuardArray()
955             {
956                 dhp::smr::tls()->hazards_.alloc( guards_ );
957             }
958
959             /// Move ctor is prohibited
960             GuardArray( GuardArray&& ) = delete;
961
962             /// Move assignment is prohibited
963             GuardArray& operator=( GuardArray&& ) = delete;
964
965             /// Copy ctor is prohibited
966             GuardArray( GuardArray const& ) = delete;
967
968             /// Copy assignment is prohibited
969             GuardArray& operator=( GuardArray const& ) = delete;
970
971             /// Frees allocated hazard pointers
972             ~GuardArray()
973             {
974                 dhp::smr::tls()->hazards_.free( guards_ );
975             }
976
977             /// Protects a pointer of type \p atomic<T*>
978             /**
979                 Return the value of \p toGuard
980
981                 The function tries to load \p toGuard and to store it
982                 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
983             */
984             template <typename T>
985             T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
986             {
987                 assert( nIndex < capacity() );
988
989                 T pRet;
990                 do {
991                     pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
992                 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
993
994                 return pRet;
995             }
996
997             /// Protects a pointer of type \p atomic<T*>
998             /**
999                 Return the value of \p toGuard
1000
1001                 The function tries to load \p toGuard and to store it
1002                 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
1003
1004                 The function is useful for intrusive containers when \p toGuard is a node pointer
1005                 that should be converted to a pointer to the value type before guarding.
1006                 The parameter \p f of type Func is a functor to make that conversion:
1007                 \code
1008                     struct functor {
1009                         value_type * operator()( T * p );
1010                     };
1011                 \endcode
1012                 Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
1013             */
1014             template <typename T, class Func>
1015             T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
1016             {
1017                 assert( nIndex < capacity() );
1018
1019                 T pRet;
1020                 do {
1021                     assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
1022                 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
1023
1024                 return pRet;
1025             }
1026
1027             /// Store \p p to the slot \p nIndex
1028             /**
1029                 The function is just an assignment, no loop is performed.
1030             */
1031             template <typename T>
1032             T * assign( size_t nIndex, T * p )
1033             {
1034                 assert( nIndex < capacity() );
1035
1036                 guards_.set( nIndex, p );
1037                 dhp::smr::tls()->sync();
1038                 return p;
1039             }
1040
1041             /// Store marked pointer \p p to the guard
1042             /**
1043                 The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
1044                 Can be used for a marked pointer that cannot be changed concurrently
1045                 or for already guarded pointer.
1046             */
1047             template <typename T, int Bitmask>
1048             T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
1049             {
1050                 return assign( nIndex, p.ptr());
1051             }
1052
1053             /// Copy guarded value from \p src guard to slot at index \p nIndex
1054             void copy( size_t nIndex, Guard const& src )
1055             {
1056                 assign( nIndex, src.get_native());
1057             }
1058
1059             /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
1060             void copy( size_t nDestIndex, size_t nSrcIndex )
1061             {
1062                 assign( nDestIndex, get_native( nSrcIndex ));
1063             }
1064
1065             /// Clear value of the slot \p nIndex
1066             void clear( size_t nIndex )
1067             {
1068                 guards_.clear( nIndex );
1069             }
1070
1071             /// Get current value of slot \p nIndex
1072             template <typename T>
1073             T * get( size_t nIndex ) const
1074             {
1075                 assert( nIndex < capacity() );
1076                 return guards_[nIndex]->template get_as<T>();
1077             }
1078
1079             /// Get native guarded pointer stored
1080             guarded_pointer get_native( size_t nIndex ) const
1081             {
1082                 assert( nIndex < capacity() );
1083                 return guards_[nIndex]->get();
1084             }
1085
1086             //@cond
1087             dhp::guard* release( size_t nIndex ) CDS_NOEXCEPT
1088             {
1089                 return guards_.release( nIndex );
1090             }
1091             //@endcond
1092
1093             /// Capacity of the guard array
1094             static CDS_CONSTEXPR size_t capacity()
1095             {
1096                 return Count;
1097             }
1098
1099         private:
1100             //@cond
1101             dhp::guard_array<c_nCapacity> guards_;
1102             //@endcond
1103         };
1104
1105         /// Guarded pointer
1106         /**
1107             A guarded pointer is a pair of a pointer and GC's guard.
1108             Usually, it is used for returning a pointer to the item from an lock-free container.
1109             The guard prevents the pointer to be early disposed (freed) by GC.
1110             After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time.
1111
1112             Template arguments:
1113             - \p GuardedType - a type which the guard stores
1114             - \p ValueType - a value type
1115             - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
1116
1117             For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
1118             In such case the \p %guarded_ptr is:
1119             @code
1120             typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr;
1121             @endcode
1122
1123             For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
1124             For example:
1125             @code
1126             struct foo {
1127                 int const   key;
1128                 std::string value;
1129             };
1130
1131             struct value_accessor {
1132                 std::string* operator()( foo* pFoo ) const
1133                 {
1134                     return &(pFoo->value);
1135                 }
1136             };
1137
1138             // Guarded ptr
1139             typedef cds::gc::DHP::guarded_ptr< Foo, std::string, value_accessor > nonintrusive_guarded_ptr;
1140             @endcode
1141
1142             You don't need use this class directly.
1143             All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor.
1144         */
1145         template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
1146         class guarded_ptr
1147         {
1148             //@cond
1149             struct trivial_cast {
1150                 ValueType * operator()( GuardedType * p ) const
1151                 {
1152                     return p;
1153                 }
1154             };
1155
1156             template <typename GT, typename VT, typename C> friend class guarded_ptr;
1157             //@endcond
1158
1159         public:
1160             typedef GuardedType guarded_type; ///< Guarded type
1161             typedef ValueType   value_type;   ///< Value type
1162
1163             /// Functor for casting \p guarded_type to \p value_type
1164             typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
1165
1166         public:
1167             /// Creates empty guarded pointer
1168             guarded_ptr() CDS_NOEXCEPT
1169                 : guard_( nullptr )
1170             {}
1171
1172             //@cond
1173             explicit guarded_ptr( dhp::guard* g ) CDS_NOEXCEPT
1174                 : guard_( g )
1175             {}
1176
1177             /// Initializes guarded pointer with \p p
1178             explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
1179                 : guard_( nullptr )
1180             {
1181                 reset( p );
1182             }
1183             explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
1184                 : guard_( nullptr )
1185             {}
1186             //@endcond
1187
1188             /// Move ctor
1189             guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
1190                 : guard_( gp.guard_ )
1191             {
1192                 gp.guard_ = nullptr;
1193             }
1194
1195             /// Move ctor
1196             template <typename GT, typename VT, typename C>
1197             guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
1198                 : guard_( gp.guard_ )
1199             {
1200                 gp.guard_ = nullptr;
1201             }
1202
1203             /// Ctor from \p Guard
1204             explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
1205                 : guard_( g.release())
1206             {}
1207
1208             /// The guarded pointer is not copy-constructible
1209             guarded_ptr( guarded_ptr const& gp ) = delete;
1210
1211             /// Clears the guarded pointer
1212             /**
1213                 \ref release is called if guarded pointer is not \ref empty
1214             */
1215             ~guarded_ptr() CDS_NOEXCEPT
1216             {
1217                 release();
1218             }
1219
1220             /// Move-assignment operator
1221             guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
1222             {
1223                 std::swap( guard_, gp.guard_ );
1224                 return *this;
1225             }
1226
1227             /// Move-assignment from \p Guard
1228             guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
1229             {
1230                 std::swap( guard_, g.guard_ref());
1231                 return *this;
1232             }
1233
1234             /// The guarded pointer is not copy-assignable
1235             guarded_ptr& operator=(guarded_ptr const& gp) = delete;
1236
1237             /// Returns a pointer to guarded value
1238             value_type * operator ->() const CDS_NOEXCEPT
1239             {
1240                 assert( !empty());
1241                 return value_cast()( guard_->get_as<guarded_type>() );
1242             }
1243
1244             /// Returns a reference to guarded value
1245             value_type& operator *() CDS_NOEXCEPT
1246             {
1247                 assert( !empty());
1248                 return *value_cast()( guard_->get_as<guarded_type>() );
1249             }
1250
1251             /// Returns const reference to guarded value
1252             value_type const& operator *() const CDS_NOEXCEPT
1253             {
1254                 assert( !empty());
1255                 return *value_cast()(reinterpret_cast<guarded_type *>(guard_->get()));
1256             }
1257
1258             /// Checks if the guarded pointer is \p nullptr
1259             bool empty() const CDS_NOEXCEPT
1260             {
1261                 return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr;
1262             }
1263
1264             /// \p bool operator returns <tt>!empty()</tt>
1265             explicit operator bool() const CDS_NOEXCEPT
1266             {
1267                 return !empty();
1268             }
1269
1270             /// Clears guarded pointer
1271             /**
1272                 If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
1273                 Dereferncing the guarded pointer after \p release() is dangerous.
1274             */
1275             void release() CDS_NOEXCEPT
1276             {
1277                 free_guard();
1278             }
1279
1280             //@cond
1281             // For internal use only!!!
1282             void reset(guarded_type * p) CDS_NOEXCEPT
1283             {
1284                 alloc_guard();
1285                 assert( guard_ );
1286                 guard_->set( p );
1287             }
1288
1289             //@endcond
1290
1291         private:
1292             //@cond
1293             void alloc_guard()
1294             {
1295                 if ( !guard_ )
1296                     guard_ = dhp::smr::tls()->hazards_.alloc();
1297             }
1298
1299             void free_guard()
1300             {
1301                 if ( guard_ ) {
1302                     dhp::smr::tls()->hazards_.free( guard_ );
1303                     guard_ = nullptr;
1304                 }
1305             }
1306             //@endcond
1307
1308         private:
1309             //@cond
1310             dhp::guard* guard_;
1311             //@endcond
1312         };
1313
1314     public:
        /// Initializes %DHP memory manager singleton
        /**
            Constructor creates and initializes %DHP global object.
            %DHP object should be created before using CDS data structure based on \p %cds::gc::DHP. Usually,
            it is created in the beginning of \p main() function.
            After creating of global object you may use CDS data structures based on \p %cds::gc::DHP.

            \p nInitialHazardPtrCount - initial count of hazard pointers allocated for each thread.
                When a thread is initialized the GC allocates local guard pool for the thread from a common guard pool.
                By perforce the local thread's guard pool is grown automatically from common pool.
                When the thread terminated its guard pool is backed to common GC's pool.
        */
        explicit DHP(
            size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointer per thread
        )
        {
            dhp::smr::construct( nInitialHazardPtrCount );
        }
1333
        /// Destroys %DHP memory manager
        /**
            The destructor destroys %DHP global object. After calling of this function you may \b NOT
            use CDS data structures based on \p %cds::gc::DHP.
            Usually, %DHP object is destroyed at the end of your \p main().
        */
        ~DHP()
        {
            // NOTE(review): the ctor calls dhp::smr::construct() while the dtor calls
            // dhp::GarbageCollector::destruct() — presumably GarbageCollector aliases
            // dhp::smr; confirm the symmetry against the dhp namespace declarations.
            // The 'true' argument requests detaching all attached threads.
            dhp::GarbageCollector::destruct( true );
        }
1344
        /// Checks if count of hazard pointer is no less than \p nCountNeeded
        /**
            The function always returns \p true since the guard count is unlimited for
            \p %gc::DHP garbage collector.
        */
        // The parameter is named only for the Doxygen documentation build;
        // at compile time it is anonymous because it is never used.
        static CDS_CONSTEXPR bool check_available_guards(
#ifdef CDS_DOXYGEN_INVOKED
            size_t nCountNeeded,
#else
            size_t
#endif
        )
        {
            return true;
        }
1360
        /// Set memory management functions
        /**
            @note This function may be called <b>BEFORE</b> creating an instance
            of Dynamic Hazard Pointer SMR

            SMR object allocates some memory for thread-specific data and for creating SMR object.
            By default, a standard \p new and \p delete operators are used for this.
        */
        static void set_memory_allocator(
            void* ( *alloc_func )( size_t size ),   ///< \p malloc() function
            void( *free_func )( void * p )          ///< \p free() function
        )
        {
            // Forwards directly to the SMR singleton's allocator hooks
            dhp::smr::set_memory_allocator( alloc_func, free_func );
        }
1376
1377         /// Retire pointer \p p with function \p pFunc
1378         /**
1379             The function places pointer \p p to array of pointers ready for removing.
1380             (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1381             \p func is a disposer: when \p p can be safely removed, \p func is called.
1382         */
1383         template <typename T>
1384         static void retire( T * p, void (* func)(T *))
1385         {
1386             dhp::thread_data* rec = dhp::smr::tls();
1387             if ( !rec->retired_.push( dhp::retired_ptr( p, func ) ) )
1388                 dhp::smr::instance().scan( rec );
1389         }
1390
        /// Retire pointer \p p with functor of type \p Disposer
        /**
            The function places pointer \p p to array of pointers ready for removing.
            (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.

            Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
            \code
            template <typename T>
            struct disposer {
                void operator()( T * p )    ;   // disposing operator
            };
            \endcode
            Since the functor call can happen at any time after \p retire() call, additional restrictions are imposed to \p Disposer type:
            - it should be stateless functor
            - it should be default-constructible
            - the result of functor call with argument \p p should not depend on where the functor will be called.

            \par Examples:
            Operator \p delete functor:
            \code
            template <typename T>
            struct disposer {
                void operator ()( T * p ) {
                    delete p;
                }
            };

            // How to call DHP::retire method
            int * p = new int;

            // ... use p in lock-free manner

            cds::gc::DHP::retire<disposer>( p ) ;   // place p to retired pointer array of DHP SMR
            \endcode

            Functor based on \p std::allocator :
            \code
            template <typename Alloc = std::allocator<int> >
            struct disposer {
                template <typename T>
                void operator()( T * p ) {
                    typedef typename Alloc::template rebind<T>::other alloc_t;
                    alloc_t a;
                    a.destroy( p );
                    a.deallocate( p, 1 );
                }
            };
            \endcode
        */
        template <class Disposer, typename T>
        static void retire( T * p )
        {
            // If the thread-local retired array is full, run a scan cycle to reclaim memory
            if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
                scan();
        }
1446
        /// Checks if Dynamic Hazard Pointer GC is constructed and may be used
        static bool isUsed()
        {
            return dhp::smr::isUsed();
        }
1452
1453         /// Forced GC cycle call for current thread
1454         /**
1455             Usually, this function should not be called directly.
1456         */
1457         static void scan()
1458         {
1459             dhp::smr::instance().scan( dhp::smr::tls() );
1460         }
1461
        /// Synonym for \p scan()
        static void force_dispose()
        {
            scan();
        }
1467
        /// Returns internal statistics
        /**
            The function clears \p st before gathering statistics.

            @note Internal statistics is available only if you compile
            \p libcds and your program with \p -DCDS_ENABLE_HPSTAT.
        */
        static void statistics( stat& st )
        {
            // Delegates to the SMR singleton which fills st with current counters
            dhp::smr::instance().statistics( st );
        }
1479
        /// Returns post-mortem statistics
        /**
            Post-mortem statistics is gathered in the \p %DHP object destructor
            and can be accessible after destructing the global \p %DHP object.

            @note Internal statistics is available only if you compile
            \p libcds and your program with \p -DCDS_ENABLE_HPSTAT.

            Usage:
            \code
            int main()
            {
                cds::Initialize();
                {
                    // Initialize DHP SMR
                    cds::gc::DHP dhp;

                    // deal with DHP-based data structures
                    // ...
                }

                // DHP object destroyed
                // Get total post-mortem statistics
                cds::gc::DHP::stat const& st = cds::gc::DHP::postmortem_statistics();

                printf( "DHP statistics:\n"
                    "  thread count           = %llu\n"
                    "  guard allocated        = %llu\n"
                    "  guard freed            = %llu\n"
                    "  retired data count     = %llu\n"
                    "  free data count        = %llu\n"
                    "  scan() call count      = %llu\n"
                    "  help_scan() call count = %llu\n",
                    st.thread_rec_count,
                    st.guard_allocated, st.guard_freed,
                    st.retired_count, st.free_count,
                    st.scan_count, st.help_scan_count
                );

                cds::Terminate();
            }
            \endcode
        */
        CDS_EXPORT_API static stat const& postmortem_statistics();
1524     };
1525
1526 }} // namespace cds::gc
1527
1528 #endif // #ifndef CDSLIB_GC_DHP_SMR_H
1529
1530