issue #82: added doc about DCAS support
[libcds.git] / cds / gc / dhp.h
1 /*
2     This file is a part of libcds - Concurrent Data Structures library
3
4     (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
5
6     Source code repo: http://github.com/khizmax/libcds/
7     Download: http://sourceforge.net/projects/libcds/files/
8
9     Redistribution and use in source and binary forms, with or without
10     modification, are permitted provided that the following conditions are met:
11
12     * Redistributions of source code must retain the above copyright notice, this
13       list of conditions and the following disclaimer.
14
15     * Redistributions in binary form must reproduce the above copyright notice,
16       this list of conditions and the following disclaimer in the documentation
17       and/or other materials provided with the distribution.
18
19     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20     AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21     IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23     FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24     DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25     SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27     OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #ifndef CDSLIB_GC_DHP_SMR_H
32 #define CDSLIB_GC_DHP_SMR_H
33
34 #include <exception>
35 #include <cds/gc/details/hp_common.h>
36 #include <cds/details/lib.h>
37 #include <cds/threading/model.h>
38 #include <cds/intrusive/free_list_selector.h>
39 #include <cds/details/throw_exception.h>
40 #include <cds/details/static_functor.h>
41 #include <cds/details/marked_ptr.h>
42 #include <cds/user_setup/cache_line.h>
43
44 namespace cds { namespace gc {
45
46     /// Dynamic (adaptive) Hazard Pointer implementation details
47     namespace dhp {
48         using namespace cds::gc::hp::common;
49
50         /// Exception "Dynamic Hazard Pointer SMR is not initialized"
        class not_initialized: public std::runtime_error
        {
        public:
            //@cond
            // The message is fixed: this exception is thrown by smr::instance()
            // when the global DHP SMR singleton has not been constructed yet
            // (unless CDS_DISABLE_SMR_EXCEPTION is defined).
            not_initialized()
                : std::runtime_error( "Global DHP SMR object is not initialized" )
            {}
            //@endcond
        };
60
61         //@cond
62         struct guard_block: public cds::intrusive::FreeListImpl::node
63         {
64             atomics::atomic<guard_block*>  next_block_;  // next block in the thread list
65
66             guard_block()
67                 : next_block_( nullptr )
68             {}
69
70             guard* first()
71             {
72                 return reinterpret_cast<guard*>( this + 1 );
73             }
74         };
75         //@endcond
76
77         //@cond
78         /// \p guard_block allocator (global object)
        class hp_allocator
        {
            friend class smr;
        public:
            /// Returns the global allocator object; requires the \p smr singleton to be constructed
            static hp_allocator& instance();

            /// Allocates a guard block (implemented in the library; presumably reuses a free-listed block first — see \p free_list_)
            CDS_EXPORT_API guard_block* alloc();

            /// Returns \p block to the free-list; the memory is kept for reuse, not released to the heap
            void free( guard_block* block )
            {
                free_list_.put( block );
            }

        private:
            // construction/destruction is reserved for \p smr
            hp_allocator()
#ifdef CDS_ENABLE_HPSTAT
                : block_allocated_(0)
#endif
            {}
            CDS_EXPORT_API ~hp_allocator();

        private:
            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p guard_block
#ifdef CDS_ENABLE_HPSTAT
        public:
            atomics::atomic<size_t>         block_allocated_;   ///< count of allocated blocks
#endif
        };
106         //@endcond
107
108         //@cond
109         /// Per-thread hazard pointer storage
        class thread_hp_storage
        {
            friend class smr;
        public:
            /// Initializes the storage over the pre-allocated array \p arr of \p nSize guards
            thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
                : free_head_( arr )
                , array_( arr )
                , initial_capacity_( nSize )
#       ifdef CDS_ENABLE_HPSTAT
                , alloc_guard_count_( 0 )
                , free_guard_count_( 0 )
                , extend_call_count_( 0 )
#       endif
            {
                // Initialize guards
                new( arr ) guard[nSize];
                extended_list_.store( nullptr, atomics::memory_order_release );
            }

            thread_hp_storage() = delete;
            thread_hp_storage( thread_hp_storage const& ) = delete;
            thread_hp_storage( thread_hp_storage&& ) = delete;

            /// Clears all guards and returns extended blocks to the global \p hp_allocator
            ~thread_hp_storage()
            {
                clear();
            }

            /// Pops a guard from the free-list; extends the storage when the list is empty
            guard* alloc()
            {
                if ( cds_unlikely( free_head_ == nullptr )) {
                    extend();
                    assert( free_head_ != nullptr );
                }

                guard* g = free_head_;
                free_head_ = g->next_;
                CDS_HPSTAT( ++alloc_guard_count_ );
                return g;
            }

            /// Clears guard \p g and pushes it back to the free-list; \p nullptr is a no-op
            void free( guard* g ) CDS_NOEXCEPT
            {
                if ( g ) {
                    g->clear();
                    g->next_ = free_head_;
                    free_head_ = g;
                    CDS_HPSTAT( ++free_guard_count_ );
                }
            }

            /// Fills \p arr with \p Capacity guards from the free-list; always returns \p Capacity
            template< size_t Capacity>
            size_t alloc( guard_array<Capacity>& arr )
            {
                for ( size_t i = 0; i < Capacity; ++i ) {
                    if ( cds_unlikely( free_head_ == nullptr ))
                        extend();
                    arr.reset( i, free_head_ );
                    free_head_ = free_head_->next_;
                }
                CDS_HPSTAT( alloc_guard_count_ += Capacity );
                return Capacity;
            }

            /// Clears and returns all non-null guards of \p arr to the free-list
            template <size_t Capacity>
            void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
            {
                // build the chain locally, then publish the new head once
                guard* gList = free_head_;
                for ( size_t i = 0; i < Capacity; ++i ) {
                    guard* g = arr[i];
                    if ( g ) {
                        g->clear();
                        g->next_ = gList;
                        gList = g;
                        CDS_HPSTAT( ++free_guard_count_ );
                    }
                }
                free_head_ = gList;
            }

            /// Clears every guard of the initial array and returns all extended blocks to \p hp_allocator
            /**
                NOTE(review): \p free_head_ is not reset here and may point into a
                returned block afterwards; the caller appears to be expected to run
                \p init() before the storage is reused — confirm against \p smr.
            */
            void clear()
            {
                // clear array_
                for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur )
                    cur->clear();

                // free all extended blocks
                hp_allocator& a = hp_allocator::instance();
                for ( guard_block* p = extended_list_.load( atomics::memory_order_relaxed ); p; ) {
                    guard_block* next = p->next_block_.load( atomics::memory_order_relaxed );
                    a.free( p );
                    p = next;
                }

                extended_list_.store( nullptr, atomics::memory_order_release );
            }

            /// Rebuilds the free-list over the initial array
            /**
                Links the initial guards into a singly-linked list headed by \p free_head_.
                NOTE(review): assumes \p initial_capacity_ >= 1; with a zero capacity the
                loop bound below (p + initial_capacity_ - 1) would be ill-formed.
            */
            void init()
            {
                assert( extended_list_.load(atomics::memory_order_relaxed) == nullptr );

                guard* p = array_;
                for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
                    p->next_ = p + 1;
                p->next_ = nullptr;
                free_head_ = array_;
            }

        private:
            /// Allocates a new guard block and prepends it to \p extended_list_; its guards become the free-list
            /**
                NOTE(review): presumably \p hp_allocator::alloc() returns the block with its
                guards already linked via \p next_ — the linking is not visible here; confirm
                in the allocator implementation.
            */
            void extend()
            {
                assert( free_head_ == nullptr );

                guard_block* block = hp_allocator::instance().alloc();
                block->next_block_.store( extended_list_.load( atomics::memory_order_relaxed ), atomics::memory_order_release );
                extended_list_.store( block, atomics::memory_order_release );
                free_head_ = block->first();
                CDS_HPSTAT( ++extend_call_count_ );
            }

        private:
            guard*          free_head_;        ///< Head of free guard list
            atomics::atomic<guard_block*> extended_list_;    ///< Head of extended guard blocks allocated for the thread
            guard* const    array_;            ///< initial HP array
            size_t const    initial_capacity_; ///< Capacity of \p array_
#       ifdef CDS_ENABLE_HPSTAT
        public:
            size_t          alloc_guard_count_;  ///< number of guard allocations (stat)
            size_t          free_guard_count_;   ///< number of guard deallocations (stat)
            size_t          extend_call_count_;  ///< number of \p extend() calls (stat)
#       endif
        };
242         //@endcond
243
244         //@cond
245         struct retired_block: public cds::intrusive::FreeListImpl::node
246         {
247             retired_block*  next_;  ///< Next block in thread-private retired array
248
249             static size_t const c_capacity = 256;
250
251             retired_block()
252                 : next_( nullptr )
253             {}
254
255             retired_ptr* first() const
256             {
257                 return reinterpret_cast<retired_ptr*>( const_cast<retired_block*>( this ) + 1 );
258             }
259
260             retired_ptr* last() const
261             {
262                 return first() + c_capacity;
263             }
264         };
265         //@endcond
266
267         //@cond
        class retired_allocator
        {
            friend class smr;
        public:
            /// Returns the global allocator object; requires the \p smr singleton to be constructed
            static retired_allocator& instance();

            /// Allocates a retired block (implemented in the library; presumably reuses a free-listed block first — see \p free_list_)
            CDS_EXPORT_API retired_block* alloc();

            /// Unlinks \p block and returns it to the free-list; the memory is kept for reuse, not released to the heap
            void free( retired_block* block )
            {
                block->next_ = nullptr;
                free_list_.put( block );
            }

        private:
            // construction/destruction is reserved for \p smr
            retired_allocator()
#ifdef CDS_ENABLE_HPSTAT
                : block_allocated_(0)
#endif
            {}
            CDS_EXPORT_API ~retired_allocator();

        private:
            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p retired_block
#ifdef CDS_ENABLE_HPSTAT
        public:
            atomics::atomic<size_t> block_allocated_; ///< Count of allocated blocks
#endif
        };
296         //@endcond
297
298         //@cond
299         /// Per-thread retired array
        class retired_array
        {
            friend class smr;
        public:
            /// Creates an empty array; \p init() must be called before use
            retired_array() CDS_NOEXCEPT
                : current_block_( nullptr )
                , current_cell_( nullptr )
                , list_head_( nullptr )
                , list_tail_( nullptr )
                , block_count_(0)
#       ifdef CDS_ENABLE_HPSTAT
                , retire_call_count_( 0 )
                , extend_call_count_( 0 )
#       endif
            {}

            retired_array( retired_array const& ) = delete;
            retired_array( retired_array&& ) = delete;

            /// Returns all blocks to the global allocator; the array must be empty
            ~retired_array()
            {
                assert( empty());
                fini();
            }

            /// Stores retired pointer \p p into the current cell
            /**
                The pointer is always stored. Returns \p false only when the cell just
                filled was the last one of the last block — the caller (\p smr) then
                runs \p scan() and/or extends the array.
            */
            bool push( retired_ptr const& p ) CDS_NOEXCEPT
            {
                assert( current_block_ != nullptr );
                assert( current_block_->first() <= current_cell_ );
                assert( current_cell_ < current_block_->last());
                //assert( &p != current_cell_ );

                *current_cell_ = p;
                CDS_HPSTAT( ++retire_call_count_ );

                if ( ++current_cell_ == current_block_->last()) {
                    // goto next block if exists
                    if ( current_block_->next_ ) {
                        current_block_ = current_block_->next_;
                        current_cell_ = current_block_->first();
                        return true;
                    }

                    // no free block
                    // smr::scan() extend retired_array if needed
                    return false;
                }

                return true;
            }

            /// Re-inserts a pointer that was already counted by a previous \p push()
            bool repush( retired_ptr* p ) CDS_NOEXCEPT
            {
                bool ret = push( *p );
                // undo the counter increment done by push(): this is not a new retirement
                CDS_HPSTAT( --retire_call_count_ );
                assert( ret );  // the caller must guarantee a free cell exists
                return ret;
            }

        private: // called by smr
            /// Lazily allocates the first block; no-op if already initialized
            void init()
            {
                if ( list_head_ == nullptr ) {
                    retired_block* block = retired_allocator::instance().alloc();
                    assert( block->next_ == nullptr );

                    current_block_ =
                        list_head_ =
                        list_tail_ = block;
                    current_cell_ = block->first();

                    block_count_ = 1;
                }
            }

            /// Returns all blocks to the global allocator and resets the array to the default-constructed state
            void fini()
            {
                retired_allocator& alloc = retired_allocator::instance();
                for ( retired_block* p = list_head_; p; ) {
                    retired_block* next = p->next_;
                    alloc.free( p );
                    p = next;
                }

                current_block_ =
                    list_head_ =
                    list_tail_ = nullptr;
                current_cell_ = nullptr;

                block_count_ = 0;
            }

            /// Appends a fresh block at the tail; called when the array is full
            void extend()
            {
                assert( list_head_ != nullptr );
                assert( current_block_ == list_tail_ );
                assert( current_cell_ == current_block_->last());

                retired_block* block = retired_allocator::instance().alloc();
                assert( block->next_ == nullptr );

                current_block_ = list_tail_ = list_tail_->next_ = block;
                current_cell_ = block->first();
                ++block_count_;
                CDS_HPSTAT( ++extend_call_count_ );
            }

            /// Checks whether the array holds no retired pointers (or is not yet initialized)
            bool empty() const
            {
                return current_block_ == nullptr
                    || ( current_block_ == list_head_ && current_cell_ == current_block_->first());
            }

        private:
            retired_block*          current_block_; ///< block accepting new retired pointers
            retired_ptr*            current_cell_;  ///< next free cell in current_block_

            retired_block*          list_head_;     ///< head of the block list
            retired_block*          list_tail_;     ///< tail of the block list
            size_t                  block_count_;   ///< number of blocks in the list
#       ifdef CDS_ENABLE_HPSTAT
        public:
            size_t  retire_call_count_;   ///< number of push() calls (stat)
            size_t  extend_call_count_;   ///< number of extend() calls (stat)
#       endif
        };
426         //@endcond
427
428         /// Internal statistics
429         struct stat {
430             size_t  guard_allocated;    ///< Count of allocated HP guards
431             size_t  guard_freed;        ///< Count of freed HP guards
432             size_t  retired_count;      ///< Count of retired pointers
433             size_t  free_count;         ///< Count of free pointers
434             size_t  scan_count;         ///< Count of \p scan() call
435             size_t  help_scan_count;    ///< Count of \p help_scan() call
436
437             size_t  thread_rec_count;   ///< Count of thread records
438
439             size_t  hp_block_count;         ///< Count of extended HP blocks allocated
440             size_t  retired_block_count;    ///< Count of retired blocks allocated
441             size_t  hp_extend_count;        ///< Count of hp array \p extend() call
442             size_t  retired_extend_count;   ///< Count of retired array \p extend() call
443
444                                         /// Default ctor
445             stat()
446             {
447                 clear();
448             }
449
450             /// Clears all counters
451             void clear()
452             {
453                 guard_allocated =
454                     guard_freed =
455                     retired_count =
456                     free_count =
457                     scan_count =
458                     help_scan_count =
459                     thread_rec_count =
460                     hp_block_count =
461                     retired_block_count =
462                     hp_extend_count =
463                     retired_extend_count = 0;
464             }
465         };
466
467         //@cond
468         /// Per-thread data
        struct thread_data {
            thread_hp_storage   hazards_;   ///< Hazard pointers private to the thread
            retired_array       retired_;   ///< Retired data private to the thread

            char pad1_[cds::c_nCacheLineSize];  ///< padding: isolates \p sync_ from the data above
            atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
            char pad2_[cds::c_nCacheLineSize];  ///< padding: isolates \p sync_ from the data below

#       ifdef CDS_ENABLE_HPSTAT
            // per-thread counters, collected only when statistics are enabled
            size_t              free_call_count_;
            size_t              scan_call_count_;
            size_t              help_scan_call_count_;
#       endif

            // CppCheck warn: pad1_ and pad2_ is uninitialized in ctor
            // cppcheck-suppress uninitMemberVar
            /// Builds the thread data over the externally allocated array \p guards of \p guard_count guards
            thread_data( guard* guards, size_t guard_count )
                : hazards_( guards, guard_count )
                , sync_( 0 )
#       ifdef CDS_ENABLE_HPSTAT
                , free_call_count_(0)
                , scan_call_count_(0)
                , help_scan_call_count_(0)
#       endif
            {}

            thread_data() = delete;
            thread_data( thread_data const& ) = delete;
            thread_data( thread_data&& ) = delete;

            /// Acq-rel increment of \p sync_: creates a synchronizes-with edge with other threads calling \p %sync()
            void sync()
            {
                sync_.fetch_add( 1, atomics::memory_order_acq_rel );
            }
        };
504         //@endcond
505
506         //@cond
507         // Dynamic (adaptive) Hazard Pointer SMR (Safe Memory Reclamation)
        class smr
        {
            struct thread_record;

        public:
            /// Returns the instance of Hazard Pointer \ref smr
            static smr& instance()
            {
#       ifdef CDS_DISABLE_SMR_EXCEPTION
                assert( instance_ != nullptr );
#       else
                if ( !instance_ )
                    CDS_THROW_EXCEPTION( not_initialized());
#       endif
                return *instance_;
            }

            /// Creates Dynamic Hazard Pointer SMR singleton
            /**
                Dynamic Hazard Pointer SMR is a singleton. If DHP instance is not initialized then the function creates the instance.
                Otherwise it does nothing.

                Unlike classic Hazard Pointer SMR, DHP has no hard limits on thread count,
                hazard pointer count per thread or retired pointer count: the internal
                storage is extended on demand. \p nInitialHazardPtrCount specifies only
                the initial capacity of the per-thread hazard pointer array.
            */
            static CDS_EXPORT_API void construct(
                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointer per thread
            );

            // for back-compatibility
            static void Construct(
                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointer per thread
            )
            {
                construct( nInitialHazardPtrCount );
            }

            /// Destroys global instance of \ref smr
            /**
                The parameter \p bDetachAll should be used carefully: if its value is \p true,
                then the object destroyed automatically detaches all attached threads. This feature
                can be useful when you have no control over the thread termination, for example,
                when \p libcds is injected into existing external thread.
            */
            static CDS_EXPORT_API void destruct(
                bool bDetachAll = false     ///< Detach all threads
            );

            // for back-compatibility
            static void Destruct(
                bool bDetachAll = false     ///< Detach all threads
            )
            {
                destruct( bDetachAll );
            }

            /// Checks if global SMR object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return instance_ != nullptr;
            }

            /// Set memory management functions
            /**
                @note This function may be called <b>BEFORE</b> creating an instance
                of Dynamic Hazard Pointer SMR

                SMR object allocates some memory for thread-specific data and for
                creating SMR object.
                By default, a standard \p new and \p delete operators are used for this.
            */
            static CDS_EXPORT_API void set_memory_allocator(
                void* ( *alloc_func )( size_t size ),
                void( *free_func )( void * p )
            );

            /// Returns thread-local data for the current thread
            static CDS_EXPORT_API thread_data* tls();

            /// Attaches the current thread to DHP SMR
            static CDS_EXPORT_API void attach_thread();

            /// Detaches the current thread from DHP SMR
            static CDS_EXPORT_API void detach_thread();

            /// Get internal statistics
            CDS_EXPORT_API void statistics( stat& st );

        public: // for internal use only
            /// The main garbage collecting function
            CDS_EXPORT_API void scan( thread_data* pRec );

            /// Helper scan routine
            /**
                The function guarantees that every node that is eligible for reuse is eventually freed, barring
                thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
                where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
                to thread's list of reclaimed pointers.

                The function is called internally by \p scan().
            */
            CDS_EXPORT_API void help_scan( thread_data* pThis );

            /// Returns the global \p guard_block allocator
            hp_allocator& get_hp_allocator()
            {
                return hp_allocator_;
            }

            /// Returns the global \p retired_block allocator
            retired_allocator& get_retired_allocator()
            {
                return retired_allocator_;
            }

        private:
            CDS_EXPORT_API explicit smr(
                size_t nInitialHazardPtrCount
            );

            CDS_EXPORT_API ~smr();

            CDS_EXPORT_API void detach_all_thread();

        private:
            CDS_EXPORT_API thread_record* create_thread_data();
            static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );

            /// Allocates Hazard Pointer SMR thread private data
            CDS_EXPORT_API thread_record* alloc_thread_data();

            /// Free HP SMR thread-private data
            CDS_EXPORT_API void free_thread_data( thread_record* pRec );

        private:
            static CDS_EXPORT_API smr* instance_;

            atomics::atomic< thread_record*>    thread_list_;   ///< Head of thread list
            size_t const        initial_hazard_count_;  ///< initial number of hazard pointers per thread
            hp_allocator        hp_allocator_;          ///< global \p guard_block allocator
            retired_allocator   retired_allocator_;     ///< global \p retired_block allocator

            // temporaries
            std::atomic<size_t> last_plist_size_;   ///< HP array size in last scan() call
        };
654         //@endcond
655
656         //@cond
657         // for backward compatibility
658         typedef smr GarbageCollector;
659
660
661         // inlines
662         inline hp_allocator& hp_allocator::instance()
663         {
664             return smr::instance().get_hp_allocator();
665         }
666
667         inline retired_allocator& retired_allocator::instance()
668         {
669             return smr::instance().get_retired_allocator();
670         }
671         //@endcond
672
673     } // namespace dhp
674
675
    /// Dynamic (adaptive) Hazard Pointer SMR
677     /**  @ingroup cds_garbage_collector
678
679         Implementation of Dynamic (adaptive) Hazard Pointer SMR
680
681         Sources:
            - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
            - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
            - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
685
686         %DHP is an adaptive variant of classic \p cds::gc::HP, see @ref cds_garbage_collectors_comparison "Compare HP implementation"
687
688         @note: Internally, %DHP depends on free-list implementation. There are
689         DCAS-based free-list \p cds::intrusive::TaggedFreeList and more complicated CAS-based free-list
690         \p cds::intrusive::FreeList. For x86 architecture and GCC/clang, libcds selects appropriate free-list 
691         based on \p -mcx16 compiler flag. You may manually disable DCAS support specifying
692         \p -DCDS_DISABLE_128BIT_ATOMIC for 64bit build or \p -DCDS_DISABLE_64BIT_ATOMIC for 32bit build
693         in compiler command line. All your projects and libcds MUST be compiled with the same flags -
694         either with DCAS support or without it.
695         For MS VC++ compiler DCAS is not supported.
696
697         See \ref cds_how_to_use "How to use" section for details how to apply SMR.
698     */
699     class DHP
700     {
701     public:
702         /// Native guarded pointer type
703         typedef void* guarded_pointer;
704
705         /// Atomic reference
706         template <typename T> using atomic_ref = atomics::atomic<T *>;
707
708         /// Atomic type
709         /**
710             @headerfile cds/gc/dhp.h
711         */
712         template <typename T> using atomic_type = atomics::atomic<T>;
713
714         /// Atomic marked pointer
715         template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
716
717         /// Internal statistics
718         typedef dhp::stat stat;
719
720         /// Dynamic Hazard Pointer guard
721         /**
722             A guard is a hazard pointer.
723             Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer
724
725             \p %Guard object is movable but not copyable.
726
727             The guard object can be in two states:
728             - unlinked - the guard is not linked with any internal hazard pointer.
729               In this state no operation except \p link() and move assignment is supported.
730             - linked (default) - the guard allocates an internal hazard pointer and fully operable.
731
732             Due to performance reason the implementation does not check state of the guard in runtime.
733
734             @warning Move assignment can transfer the guard in unlinked state, use with care.
735         */
736         class Guard
737         {
738         public:
739             /// Default ctor allocates a guard (hazard pointer) from thread-private storage
740             Guard() CDS_NOEXCEPT
741                 : guard_( dhp::smr::tls()->hazards_.alloc())
742             {}
743
744             /// Initilalizes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
745             explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
746                 : guard_( nullptr )
747             {}
748
749             /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
750             Guard( Guard&& src ) CDS_NOEXCEPT
751                 : guard_( src.guard_ )
752             {
753                 src.guard_ = nullptr;
754             }
755
756             /// Move assignment: the internal guards are swapped between \p src and \p this
757             /**
758                 @warning \p src will become in unlinked state if \p this was unlinked on entry.
759             */
760             Guard& operator=( Guard&& src ) CDS_NOEXCEPT
761             {
762                 std::swap( guard_, src.guard_ );
763                 return *this;
764             }
765
766             /// Copy ctor is prohibited - the guard is not copyable
767             Guard( Guard const& ) = delete;
768
769             /// Copy assignment is prohibited
770             Guard& operator=( Guard const& ) = delete;
771
772             /// Frees the internal hazard pointer if the guard is in linked state
773             ~Guard()
774             {
775                 unlink();
776             }
777
778             /// Checks if the guard object linked with any internal hazard pointer
779             bool is_linked() const
780             {
781                 return guard_ != nullptr;
782             }
783
784             /// Links the guard with internal hazard pointer if the guard is in unlinked state
785             void link()
786             {
787                 if ( !guard_ )
788                     guard_ = dhp::smr::tls()->hazards_.alloc();
789             }
790
791             /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state
792             void unlink()
793             {
794                 if ( guard_ ) {
795                     dhp::smr::tls()->hazards_.free( guard_ );
796                     guard_ = nullptr;
797                 }
798             }
799
800             /// Protects a pointer of type <tt> atomic<T*> </tt>
801             /**
802                 Return the value of \p toGuard
803
804                 The function tries to load \p toGuard and to store it
805                 to the HP slot repeatedly until the guard's value equals \p toGuard
806             */
807             template <typename T>
808             T protect( atomics::atomic<T> const& toGuard )
809             {
810                 assert( guard_ != nullptr );
811
812                 T pCur = toGuard.load(atomics::memory_order_acquire);
813                 T pRet;
814                 do {
815                     pRet = assign( pCur );
816                     pCur = toGuard.load(atomics::memory_order_acquire);
817                 } while ( pRet != pCur );
818                 return pCur;
819             }
820
821             /// Protects a converted pointer of type <tt> atomic<T*> </tt>
822             /**
823                 Return the value of \p toGuard
824
825                 The function tries to load \p toGuard and to store result of \p f functor
826                 to the HP slot repeatedly until the guard's value equals \p toGuard.
827
828                 The function is useful for intrusive containers when \p toGuard is a node pointer
829                 that should be converted to a pointer to the value type before guarding.
830                 The parameter \p f of type Func is a functor that makes this conversion:
831                 \code
832                     struct functor {
833                         value_type * operator()( T * p );
834                     };
835                 \endcode
836                 Really, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
837             */
838             template <typename T, class Func>
839             T protect( atomics::atomic<T> const& toGuard, Func f )
840             {
841                 assert( guard_ != nullptr );
842
843                 T pCur = toGuard.load(atomics::memory_order_acquire);
844                 T pRet;
845                 do {
846                     pRet = pCur;
847                     assign( f( pCur ));
848                     pCur = toGuard.load(atomics::memory_order_acquire);
849                 } while ( pRet != pCur );
850                 return pCur;
851             }
852
            /// Store \p p to the guard
            /**
                The function is just an assignment, no loop is performed.
                Can be used for a pointer that cannot be changed concurrently
                or for already guarded pointer.

                Returns \p p.
            */
            template <typename T>
            T* assign( T* p )
            {
                assert( guard_ != nullptr );

                // Order matters: the hazard slot must be set before the SMR
                // synchronization point so a concurrent scan can observe the new value.
                guard_->set( p );
                dhp::smr::tls()->sync();
                return p;
            }

            //@cond
            // nullptr overload: equivalent to clear()
            std::nullptr_t assign( std::nullptr_t )
            {
                assert( guard_ != nullptr );

                clear();
                return nullptr;
            }
            //@endcond

            /// Store marked pointer \p p to the guard
            /**
                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
                Can be used for a marked pointer that cannot be changed concurrently
                or for already guarded pointer.

                Only the pointer part is stored; the mark bits are stripped by <tt>p.ptr()</tt>.
            */
            template <typename T, int BITMASK>
            T* assign( cds::details::marked_ptr<T, BITMASK> p )
            {
                return assign( p.ptr());
            }

            /// Copy from \p src guard to \p this guard
            /**
                Both guards must be in linked state.
            */
            void copy( Guard const& src )
            {
                assign( src.get_native());
            }

            /// Clears value of the guard
            /**
                The guard stays in linked state; only the protected value is reset.
            */
            void clear()
            {
                assert( guard_ != nullptr );

                guard_->clear();
            }

            /// Gets the value currently protected (relaxed read)
            template <typename T>
            T * get() const
            {
                assert( guard_ != nullptr );
                return guard_->get_as<T>();
            }

            /// Gets native guarded pointer stored
            /**
                The guard must be in linked state.
            */
            void* get_native() const
            {
                assert( guard_ != nullptr );
                return guard_->get();
            }

            //@cond
            // Detaches the internal hazard pointer and hands it to the caller;
            // the guard becomes unlinked. Used by guarded_ptr's ctor from Guard.
            dhp::guard* release()
            {
                dhp::guard* g = guard_;
                guard_ = nullptr;
                return g;
            }

            // Direct access to the internal slot pointer;
            // used by guarded_ptr move-assignment from Guard.
            dhp::guard*& guard_ref()
            {
                return guard_;
            }
            //@endcond
933
        private:
            //@cond
            dhp::guard* guard_;     // owned slot from the thread-local hazard-pointer pool; nullptr when unlinked
            //@endcond
        };
939
940         /// Array of Dynamic Hazard Pointer guards
941         /**
942             The class is intended for allocating an array of hazard pointer guards.
943             Template parameter \p Count defines the size of the array.
944
945             A \p %GuardArray object is not copy- and move-constructible
946             and not copy- and move-assignable.
947         */
948         template <size_t Count>
949         class GuardArray
950         {
951         public:
952             /// Rebind array for other size \p OtherCount
953             template <size_t OtherCount>
954             struct rebind {
955                 typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
956             };
957
958             /// Array capacity
959             static CDS_CONSTEXPR const size_t c_nCapacity = Count;
960
961         public:
962             /// Default ctor allocates \p Count hazard pointers
963             GuardArray()
964             {
965                 dhp::smr::tls()->hazards_.alloc( guards_ );
966             }
967
968             /// Move ctor is prohibited
969             GuardArray( GuardArray&& ) = delete;
970
971             /// Move assignment is prohibited
972             GuardArray& operator=( GuardArray&& ) = delete;
973
974             /// Copy ctor is prohibited
975             GuardArray( GuardArray const& ) = delete;
976
977             /// Copy assignment is prohibited
978             GuardArray& operator=( GuardArray const& ) = delete;
979
980             /// Frees allocated hazard pointers
981             ~GuardArray()
982             {
983                 dhp::smr::tls()->hazards_.free( guards_ );
984             }
985
986             /// Protects a pointer of type \p atomic<T*>
987             /**
988                 Return the value of \p toGuard
989
990                 The function tries to load \p toGuard and to store it
991                 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
992             */
993             template <typename T>
994             T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
995             {
996                 assert( nIndex < capacity());
997
998                 T pRet;
999                 do {
1000                     pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
1001                 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
1002
1003                 return pRet;
1004             }
1005
1006             /// Protects a pointer of type \p atomic<T*>
1007             /**
1008                 Return the value of \p toGuard
1009
1010                 The function tries to load \p toGuard and to store it
1011                 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
1012
1013                 The function is useful for intrusive containers when \p toGuard is a node pointer
1014                 that should be converted to a pointer to the value type before guarding.
1015                 The parameter \p f of type Func is a functor to make that conversion:
1016                 \code
1017                     struct functor {
1018                         value_type * operator()( T * p );
1019                     };
1020                 \endcode
1021                 Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
1022             */
1023             template <typename T, class Func>
1024             T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
1025             {
1026                 assert( nIndex < capacity());
1027
1028                 T pRet;
1029                 do {
1030                     assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
1031                 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
1032
1033                 return pRet;
1034             }
1035
1036             /// Store \p p to the slot \p nIndex
1037             /**
1038                 The function is just an assignment, no loop is performed.
1039             */
1040             template <typename T>
1041             T * assign( size_t nIndex, T * p )
1042             {
1043                 assert( nIndex < capacity());
1044
1045                 guards_.set( nIndex, p );
1046                 dhp::smr::tls()->sync();
1047                 return p;
1048             }
1049
1050             /// Store marked pointer \p p to the guard
1051             /**
1052                 The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
1053                 Can be used for a marked pointer that cannot be changed concurrently
1054                 or for already guarded pointer.
1055             */
1056             template <typename T, int Bitmask>
1057             T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
1058             {
1059                 return assign( nIndex, p.ptr());
1060             }
1061
1062             /// Copy guarded value from \p src guard to slot at index \p nIndex
1063             void copy( size_t nIndex, Guard const& src )
1064             {
1065                 assign( nIndex, src.get_native());
1066             }
1067
1068             /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
1069             void copy( size_t nDestIndex, size_t nSrcIndex )
1070             {
1071                 assign( nDestIndex, get_native( nSrcIndex ));
1072             }
1073
1074             /// Clear value of the slot \p nIndex
1075             void clear( size_t nIndex )
1076             {
1077                 guards_.clear( nIndex );
1078             }
1079
1080             /// Get current value of slot \p nIndex
1081             template <typename T>
1082             T * get( size_t nIndex ) const
1083             {
1084                 assert( nIndex < capacity());
1085                 return guards_[nIndex]->template get_as<T>();
1086             }
1087
1088             /// Get native guarded pointer stored
1089             guarded_pointer get_native( size_t nIndex ) const
1090             {
1091                 assert( nIndex < capacity());
1092                 return guards_[nIndex]->get();
1093             }
1094
1095             //@cond
1096             dhp::guard* release( size_t nIndex ) CDS_NOEXCEPT
1097             {
1098                 return guards_.release( nIndex );
1099             }
1100             //@endcond
1101
1102             /// Capacity of the guard array
1103             static CDS_CONSTEXPR size_t capacity()
1104             {
1105                 return Count;
1106             }
1107
1108         private:
1109             //@cond
1110             dhp::guard_array<c_nCapacity> guards_;
1111             //@endcond
1112         };
1113
1114         /// Guarded pointer
1115         /**
1116             A guarded pointer is a pair of a pointer and GC's guard.
            Usually, it is used for returning a pointer to an item of a lock-free container.
1118             The guard prevents the pointer to be early disposed (freed) by GC.
1119             After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time.
1120
1121             Template arguments:
1122             - \p GuardedType - a type which the guard stores
1123             - \p ValueType - a value type
1124             - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
1125
1126             For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
1127             In such case the \p %guarded_ptr is:
1128             @code
1129             typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr;
1130             @endcode
1131
1132             For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
1133             For example:
1134             @code
1135             struct foo {
1136                 int const   key;
1137                 std::string value;
1138             };
1139
1140             struct value_accessor {
1141                 std::string* operator()( foo* pFoo ) const
1142                 {
1143                     return &(pFoo->value);
1144                 }
1145             };
1146
1147             // Guarded ptr
            typedef cds::gc::DHP::guarded_ptr< foo, std::string, value_accessor > nonintrusive_guarded_ptr;
1149             @endcode
1150
1151             You don't need use this class directly.
1152             All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor.
1153         */
1154         template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
1155         class guarded_ptr
1156         {
1157             //@cond
1158             struct trivial_cast {
1159                 ValueType * operator()( GuardedType * p ) const
1160                 {
1161                     return p;
1162                 }
1163             };
1164
1165             template <typename GT, typename VT, typename C> friend class guarded_ptr;
1166             //@endcond
1167
1168         public:
1169             typedef GuardedType guarded_type; ///< Guarded type
1170             typedef ValueType   value_type;   ///< Value type
1171
1172             /// Functor for casting \p guarded_type to \p value_type
1173             typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
1174
1175         public:
1176             /// Creates empty guarded pointer
1177             guarded_ptr() CDS_NOEXCEPT
1178                 : guard_( nullptr )
1179             {}
1180
1181             //@cond
1182             explicit guarded_ptr( dhp::guard* g ) CDS_NOEXCEPT
1183                 : guard_( g )
1184             {}
1185
1186             /// Initializes guarded pointer with \p p
1187             explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
1188                 : guard_( nullptr )
1189             {
1190                 reset( p );
1191             }
1192             explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
1193                 : guard_( nullptr )
1194             {}
1195             //@endcond
1196
1197             /// Move ctor
1198             guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
1199                 : guard_( gp.guard_ )
1200             {
1201                 gp.guard_ = nullptr;
1202             }
1203
1204             /// Move ctor
1205             template <typename GT, typename VT, typename C>
1206             guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
1207                 : guard_( gp.guard_ )
1208             {
1209                 gp.guard_ = nullptr;
1210             }
1211
1212             /// Ctor from \p Guard
1213             explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
1214                 : guard_( g.release())
1215             {}
1216
1217             /// The guarded pointer is not copy-constructible
1218             guarded_ptr( guarded_ptr const& gp ) = delete;
1219
1220             /// Clears the guarded pointer
1221             /**
1222                 \ref release is called if guarded pointer is not \ref empty
1223             */
1224             ~guarded_ptr() CDS_NOEXCEPT
1225             {
1226                 release();
1227             }
1228
1229             /// Move-assignment operator
1230             guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
1231             {
1232                 std::swap( guard_, gp.guard_ );
1233                 return *this;
1234             }
1235
1236             /// Move-assignment from \p Guard
1237             guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
1238             {
1239                 std::swap( guard_, g.guard_ref());
1240                 return *this;
1241             }
1242
1243             /// The guarded pointer is not copy-assignable
1244             guarded_ptr& operator=(guarded_ptr const& gp) = delete;
1245
1246             /// Returns a pointer to guarded value
1247             value_type * operator ->() const CDS_NOEXCEPT
1248             {
1249                 assert( !empty());
1250                 return value_cast()( guard_->get_as<guarded_type>());
1251             }
1252
1253             /// Returns a reference to guarded value
1254             value_type& operator *() CDS_NOEXCEPT
1255             {
1256                 assert( !empty());
1257                 return *value_cast()( guard_->get_as<guarded_type>());
1258             }
1259
1260             /// Returns const reference to guarded value
1261             value_type const& operator *() const CDS_NOEXCEPT
1262             {
1263                 assert( !empty());
1264                 return *value_cast()(reinterpret_cast<guarded_type *>(guard_->get()));
1265             }
1266
1267             /// Checks if the guarded pointer is \p nullptr
1268             bool empty() const CDS_NOEXCEPT
1269             {
1270                 return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr;
1271             }
1272
1273             /// \p bool operator returns <tt>!empty()</tt>
1274             explicit operator bool() const CDS_NOEXCEPT
1275             {
1276                 return !empty();
1277             }
1278
1279             /// Clears guarded pointer
1280             /**
1281                 If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
1282                 Dereferncing the guarded pointer after \p release() is dangerous.
1283             */
1284             void release() CDS_NOEXCEPT
1285             {
1286                 free_guard();
1287             }
1288
1289             //@cond
1290             // For internal use only!!!
1291             void reset(guarded_type * p) CDS_NOEXCEPT
1292             {
1293                 alloc_guard();
1294                 assert( guard_ );
1295                 guard_->set( p );
1296             }
1297
1298             //@endcond
1299
1300         private:
1301             //@cond
1302             void alloc_guard()
1303             {
1304                 if ( !guard_ )
1305                     guard_ = dhp::smr::tls()->hazards_.alloc();
1306             }
1307
1308             void free_guard()
1309             {
1310                 if ( guard_ ) {
1311                     dhp::smr::tls()->hazards_.free( guard_ );
1312                     guard_ = nullptr;
1313                 }
1314             }
1315             //@endcond
1316
1317         private:
1318             //@cond
1319             dhp::guard* guard_;
1320             //@endcond
1321         };
1322
1323     public:
        /// Initializes %DHP memory manager singleton
        /**
            Constructor creates and initializes %DHP global object.
            %DHP object should be created before using CDS data structure based on \p %cds::gc::DHP. Usually,
            it is created in the beginning of \p main() function.
            After creating of global object you may use CDS data structures based on \p %cds::gc::DHP.

            \p nInitialHazardPtrCount - initial count of hazard pointers allocated for each thread.
                When a thread is initialized the GC allocates local guard pool for the thread from a common guard pool.
                By perforce the local thread's guard pool is grown automatically from common pool.
                When the thread terminated its guard pool is backed to common GC's pool.
        */
        explicit DHP(
            size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointer per thread
        )
        {
            dhp::smr::construct( nInitialHazardPtrCount );
        }
1342
1343         /// Destroys %DHP memory manager
1344         /**
1345             The destructor destroys %DHP global object. After calling of this function you may \b NOT
1346             use CDS data structures based on \p %cds::gc::DHP.
1347             Usually, %DHP object is destroyed at the end of your \p main().
1348         */
1349         ~DHP()
1350         {
1351             dhp::GarbageCollector::destruct( true );
1352         }
1353
        /// Checks if count of hazard pointer is no less than \p nCountNeeded
        /**
            The function always returns \p true since the guard count is unlimited for
            \p %gc::DHP garbage collector: the thread-local guard pool grows on demand.
        */
        static CDS_CONSTEXPR bool check_available_guards(
#ifdef CDS_DOXYGEN_INVOKED
            size_t nCountNeeded,
#else
            size_t
#endif
        )
        {
            return true;
        }
1369
        /// Set memory management functions
        /**
            @note This function may be called <b>BEFORE</b> creating an instance
            of Dynamic Hazard Pointer SMR

            SMR object allocates some memory for thread-specific data and for creating SMR object.
            By default, a standard \p new and \p delete operators are used for this.

            The pair of functions is forwarded to \p dhp::smr unchanged.
        */
        static void set_memory_allocator(
            void* ( *alloc_func )( size_t size ),   ///< \p malloc() function
            void( *free_func )( void * p )          ///< \p free() function
        )
        {
            dhp::smr::set_memory_allocator( alloc_func, free_func );
        }
1385
1386         /// Retire pointer \p p with function \p pFunc
1387         /**
1388             The function places pointer \p p to array of pointers ready for removing.
1389             (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1390             \p func is a disposer: when \p p can be safely removed, \p func is called.
1391         */
1392         template <typename T>
1393         static void retire( T * p, void (* func)(void *))
1394         {
1395             dhp::thread_data* rec = dhp::smr::tls();
1396             if ( !rec->retired_.push( dhp::retired_ptr( p, func )) )
1397                 dhp::smr::instance().scan( rec );
1398         }
1399
1400         /// Retire pointer \p p with functor of type \p Disposer
1401         /**
1402             The function places pointer \p p to array of pointers ready for removing.
1403             (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1404
1405             Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
1406             \code
1407             template <typename T>
1408             struct disposer {
1409                 void operator()( T * p )    ;   // disposing operator
1410             };
1411             \endcode
1412             Since the functor call can happen at any time after \p retire() call, additional restrictions are imposed to \p Disposer type:
1413             - it should be stateless functor
1414             - it should be default-constructible
1415             - the result of functor call with argument \p p should not depend on where the functor will be called.
1416
1417             \par Examples:
1418             Operator \p delete functor:
1419             \code
1420             template <typename T>
1421             struct disposer {
1422                 void operator ()( T * p ) {
1423                     delete p;
1424                 }
1425             };
1426
1427             // How to call HP::retire method
1428             int * p = new int;
1429
1430             // ... use p in lock-free manner
1431
1432             cds::gc::DHP::retire<disposer>( p ) ;   // place p to retired pointer array of DHP SMR
1433             \endcode
1434
1435             Functor based on \p std::allocator :
1436             \code
1437             template <typename Alloc = std::allocator<int> >
1438             struct disposer {
1439                 template <typename T>
1440                 void operator()( T * p ) {
1441                     typedef typename Alloc::templare rebind<T>::other alloc_t;
1442                     alloc_t a;
1443                     a.destroy( p );
1444                     a.deallocate( p, 1 );
1445                 }
1446             };
1447             \endcode
1448         */
1449         template <class Disposer, typename T>
1450         static void retire( T* p )
1451         {
1452             if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
1453                 scan();
1454         }
1455
        /// Checks if Dynamic Hazard Pointer GC is constructed and may be used
        static bool isUsed()
        {
            return dhp::smr::isUsed();
        }

        /// Forced GC cycle call for current thread
        /**
            Usually, this function should not be called directly.
            It runs a reclamation pass (\p scan) over the current thread's
            retired-pointer array.
        */
        static void scan()
        {
            dhp::smr::instance().scan( dhp::smr::tls());
        }

        /// Synonym for \p scan()
        static void force_dispose()
        {
            scan();
        }
1476
        /// Returns internal statistics
        /**
            The function clears \p st before gathering statistics.

            @note Internal statistics is available only if you compile
            \p libcds and your program with \p -DCDS_ENABLE_HPSTAT.
        */
        static void statistics( stat& st )
        {
            dhp::smr::instance().statistics( st );
        }

        /// Returns post-mortem statistics
        /**
            Post-mortem statistics is gathered in the \p %DHP object destructor
            and can be accessible after destructing the global \p %DHP object.

            @note Internal statistics is available only if you compile
            \p libcds and your program with \p -DCDS_ENABLE_HPSTAT.

            Usage:
            \code
            int main()
            {
                cds::Initialize();
                {
                    // Initialize DHP SMR
                    cds::gc::DHP dhp;

                    // deal with DHP-based data structures
                    // ...
                }

                // DHP object destroyed
                // Get total post-mortem statistics
                cds::gc::DHP::stat const& st = cds::gc::DHP::postmortem_statistics();

                printf( "DHP statistics:\n"
                    "  thread count           = %llu\n"
                    "  guard allocated        = %llu\n"
                    "  guard freed            = %llu\n"
                    "  retired data count     = %llu\n"
                    "  free data count        = %llu\n"
                    "  scan() call count      = %llu\n"
                    "  help_scan() call count = %llu\n",
                    st.thread_rec_count,
                    st.guard_allocated, st.guard_freed,
                    st.retired_count, st.free_count,
                    st.scan_count, st.help_scan_count
                );

                cds::Terminate();
            }
            \endcode
        */
        CDS_EXPORT_API static stat const& postmortem_statistics();
1533     };
1534
1535 }} // namespace cds::gc
1536
1537 #endif // #ifndef CDSLIB_GC_DHP_SMR_H
1538
1539