/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CDSLIB_GC_DHP_SMR_H
#define CDSLIB_GC_DHP_SMR_H

#include <exception>
#include <cds/gc/details/hp_common.h>
#include <cds/intrusive/free_list_selector.h>
#include <cds/details/throw_exception.h>
#include <cds/details/static_functor.h>
#include <cds/details/marked_ptr.h>
#include <cds/user_setup/cache_line.h>

namespace cds { namespace gc {
    namespace dhp {
        using namespace cds::gc::hp::common;

        /// Exception "Dynamic Hazard Pointer SMR is not initialized"
        class not_initialized: public std::runtime_error
        {
        public:
            not_initialized()
                : std::runtime_error( "Global DHP SMR object is not initialized" )
            {}
        };

        struct guard_block: public cds::intrusive::FreeListImpl::node
        {
            guard_block*    next_;  // next block in the thread list

            guard_block()
                : next_( nullptr )
            {}

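            // Note (added): the allocator is expected to place the guard array
            // immediately after this block header in the same memory chunk;
            // first() relies on that layout.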
            guard* first()
            {
                return reinterpret_cast<guard*>( this + 1 );
            }
        };

        /// \p guard_block allocator (global object)
        class hp_allocator
        {
            friend class smr;
        public:
            static hp_allocator& instance();

            CDS_EXPORT_API guard_block*    alloc();
            void            free( guard_block* block )
            {
                free_list_.put( block );
            }

        private:
            hp_allocator()
            {}
            CDS_EXPORT_API ~hp_allocator();

        private:
            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p guard_block
        };

        /// Per-thread hazard pointer storage
        class thread_hp_storage
        {
            friend class smr;
        public:
            thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
                : free_head_( arr )
                , extended_list_( nullptr )
                , array_( arr )
                , initial_capacity_( nSize )
            {}

            thread_hp_storage() = delete;
            thread_hp_storage( thread_hp_storage const& ) = delete;
            thread_hp_storage( thread_hp_storage&& ) = delete;

            ~thread_hp_storage()
            {
                clear();
            }

            guard* alloc()
            {
                if ( cds_unlikely( free_head_ == nullptr )) {
                    extend();
                    assert( free_head_ != nullptr );
                }

                guard* g = free_head_;
                free_head_ = g->next_;
                return g;
            }

            void free( guard* g ) CDS_NOEXCEPT
            {
                if ( g ) {
                    g->clear();
                    g->next_ = free_head_;
                    free_head_ = g;
                }
            }

            template <size_t Capacity>
            size_t alloc( guard_array<Capacity>& arr )
            {
                for ( size_t i = 0; i < Capacity; ++i ) {
                    if ( cds_unlikely( free_head_ == nullptr ))
                        extend();
                    arr.reset( i, free_head_ );
                    free_head_ = free_head_->next_;
                }
                return Capacity;
            }

            template <size_t Capacity>
            void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
            {
                guard* gList = free_head_;
                for ( size_t i = 0; i < Capacity; ++i ) {
                    guard* g = arr[i];
                    if ( g ) {
                        g->clear();
                        g->next_ = gList;
                        gList = g;
                    }
                }
                free_head_ = gList;
            }

            void clear()
            {
                // clear array_
                for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur )
                    cur->clear();

                // free all extended blocks
                hp_allocator& alloc = hp_allocator::instance();
                for ( guard_block* p = extended_list_; p; ) {
                    guard_block* next = p->next_;
                    alloc.free( p );
                    p = next;
                }

                extended_list_ = nullptr;
            }

            void init()
            {
                assert( extended_list_ == nullptr );

                guard* p = array_;
                for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
                    p->next_ = p + 1;
                p->next_ = nullptr;
                free_head_ = array_;
            }

        private:
            void extend()
            {
                assert( free_head_ == nullptr );

                guard_block* block = hp_allocator::instance().alloc();
                block->next_ = extended_list_;
                extended_list_ = block;
                free_head_ = block->first();
            }

        private:
            guard*          free_head_;        ///< Head of free guard list
            guard_block*    extended_list_;    ///< Head of extended guard blocks allocated for the thread
            guard* const    array_;            ///< initial HP array
            size_t const    initial_capacity_; ///< Capacity of \p array_
        };

        struct retired_block: public cds::intrusive::FreeListImpl::node
        {
            retired_block*  next_;  ///< Next block in thread-private retired array

            static size_t const c_capacity = 256;

            retired_block()
                : next_( nullptr )
            {}

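            // Note (added): as with guard_block, the retired_ptr array is expected
            // to be allocated right after this header in the same chunk, so
            // first()/last() address it with pointer arithmetic.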
            retired_ptr*    first()
            {
                return reinterpret_cast<retired_ptr*>( this + 1 );
            }

            retired_ptr*    last()
            {
                return first() + c_capacity;
            }
        };

        class retired_allocator
        {
            friend class smr;
        public:
            static retired_allocator& instance();

            CDS_EXPORT_API retired_block* alloc();
            void free( retired_block* block )
            {
                block->next_ = nullptr;
                free_list_.put( block );
            }

        private:
            retired_allocator()
            {}
            CDS_EXPORT_API ~retired_allocator();

        private:
            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p retired_block
        };

        /// Per-thread retired array
        class retired_array
        {
            friend class smr;
        public:
            retired_array() CDS_NOEXCEPT
                : current_block_( nullptr )
                , current_cell_( nullptr )
                , list_head_( nullptr )
                , list_tail_( nullptr )
                , block_count_(0)
            {}

            retired_array( retired_array const& ) = delete;
            retired_array( retired_array&& ) = delete;

            ~retired_array()
            {
                assert( empty());
                fini();
            }

            bool push( retired_ptr const& p ) CDS_NOEXCEPT
            {
                assert( current_block_ != nullptr );
                assert( current_block_->first() <= current_cell_ );
                assert( current_cell_ < current_block_->last() );
                //assert( &p != current_cell_ );

                *current_cell_ = p;
                if ( ++current_cell_ == current_block_->last() ) {
                    // move to the next block if one exists
                    if ( current_block_->next_ ) {
                        current_block_ = current_block_->next_;
                        current_cell_ = current_block_->first();
                        return true;
                    }

                    // no free block:
                    // smr::scan() extends the retired array if needed
                    return false;
                }

                return true;
            }

            bool safe_push( retired_ptr* p ) CDS_NOEXCEPT
            {
                bool ret = push( *p );
                assert( ret );
                return ret;
            }

        private: // called by smr
            void init()
            {
                if ( list_head_ == nullptr ) {
                    retired_block* block = retired_allocator::instance().alloc();
                    assert( block->next_ == nullptr );

                    current_block_ =
                        list_head_ =
                        list_tail_ = block;
                    current_cell_ = block->first();

                    block_count_ = 1;
                }
            }

            void fini()
            {
                retired_allocator& alloc = retired_allocator::instance();
                for ( retired_block* p = list_head_; p; ) {
                    retired_block* next = p->next_;
                    alloc.free( p );
                    p = next;
                }

                current_block_ =
                    list_head_ =
                    list_tail_ = nullptr;
                current_cell_ = nullptr;

                block_count_ = 0;
            }

            void extend()
            {
                assert( list_head_ != nullptr );
                assert( current_block_ == list_tail_ );
                assert( current_cell_ == current_block_->last() );

                retired_block* block = retired_allocator::instance().alloc();
                assert( block->next_ == nullptr );

                list_tail_ = list_tail_->next_ = block;
                current_cell_ = block->first();
                ++block_count_;
            }

            bool empty() const
            {
                return current_block_ == nullptr
                    || ( current_block_ == list_head_ && current_cell_ == current_block_->first());
            }

        private:
            retired_block*          current_block_;
            retired_ptr*            current_cell_;  ///< next free cell in current_block_

            retired_block*          list_head_;
            retired_block*          list_tail_;
            size_t                  block_count_;
        };

        /// Per-thread data
        struct thread_data {
            thread_hp_storage   hazards_;   ///< Hazard pointers private to the thread
            retired_array       retired_;   ///< Retired data private to the thread

            char pad1_[cds::c_nCacheLineSize];
            atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
            char pad2_[cds::c_nCacheLineSize];

            thread_data( guard* guards, size_t guard_count )
                : hazards_( guards, guard_count )
                , sync_( 0 )
            {}

            thread_data() = delete;
            thread_data( thread_data const& ) = delete;
            thread_data( thread_data&& ) = delete;

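            // Note (added): the acq_rel RMW on sync_ below is used purely for its
            // ordering effect - it creates a synchronizes-with edge between threads
            // calling sync(); the counter value itself is a dummy and is never read.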
            void sync()
            {
                sync_.fetch_add( 1, atomics::memory_order_acq_rel );
            }
        };

        // Dynamic Hazard Pointer SMR (Safe Memory Reclamation)
        class smr
        {
            struct thread_record;

        public:
            /// Returns the instance of Dynamic Hazard Pointer \ref smr
            static smr& instance()
            {
#       ifdef CDS_DISABLE_SMR_EXCEPTION
                assert( instance_ != nullptr );
#       else
                if ( !instance_ )
                    CDS_THROW_EXCEPTION( not_initialized() );
#       endif
                return *instance_;
            }

            /// Creates Dynamic Hazard Pointer SMR singleton
            /**
                Dynamic Hazard Pointer SMR is a singleton. If the DHP instance is not yet initialized,
                the function creates it; otherwise it does nothing.

                Unlike classic Hazard Pointer SMR, DHP does not limit the number of hazard pointers
                per thread. \p nInitialHazardPtrCount specifies only the initial size of the
                per-thread guard pool; the pool grows automatically when more guards are needed.
            */
            static CDS_EXPORT_API void construct(
                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
            );

            //@cond
            // for backward compatibility
            static void Construct(
                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
            )
            {
                construct( nInitialHazardPtrCount );
            }
            //@endcond

            /// Destroys the global instance of \ref smr
            /**
                The parameter \p bDetachAll should be used carefully: if its value is \p true,
                then the destroyed object automatically detaches all attached threads. This feature
                can be useful when you have no control over thread termination, for example,
                when \p libcds is injected into an existing external thread.
            */
            static CDS_EXPORT_API void destruct(
                bool bDetachAll = false     ///< Detach all threads
            );

            //@cond
            // for backward compatibility
            static void Destruct(
                bool bDetachAll = false     ///< Detach all threads
            )
            {
                destruct( bDetachAll );
            }
            //@endcond

            /// Checks if the global SMR object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return instance_ != nullptr;
            }

            /// Set memory management functions
            /**
                @note This function may be called only <b>BEFORE</b> creating an instance
                of Dynamic Hazard Pointer SMR.

                The SMR object allocates some memory for thread-specific data and for
                the SMR object itself. By default, the standard \p new and \p delete
                operators are used for this.
            */
            static CDS_EXPORT_API void set_memory_allocator(
                void* ( *alloc_func )( size_t size ),
                void( *free_func )( void * p )
            );

            /// Returns thread-local data for the current thread
            static CDS_EXPORT_API thread_data* tls();

            static CDS_EXPORT_API void attach_thread();
            static CDS_EXPORT_API void detach_thread();

        public: // for internal use only
            /// The main garbage collecting function
            CDS_EXPORT_API void scan( thread_data* pRec );

            /// Helper scan routine
            /**
                The function guarantees that every node that is eligible for reuse is eventually freed, barring
                thread failures. To do so, after executing \p scan(), a thread executes \p %help_scan(),
                where it checks every HP record. If an HP record is inactive, the thread moves all "lost"
                retired pointers to its own list of retired pointers.

                The function is called internally by \p scan().
            */
            CDS_EXPORT_API void help_scan( thread_data* pThis );

            hp_allocator& get_hp_allocator()
            {
                return hp_allocator_;
            }

            retired_allocator& get_retired_allocator()
            {
                return retired_allocator_;
            }

        private:
            CDS_EXPORT_API smr(
                size_t nInitialHazardPtrCount
            );

            CDS_EXPORT_API ~smr();

            CDS_EXPORT_API void detach_all_thread();

        private:
            //@cond
            CDS_EXPORT_API thread_record* create_thread_data();
            CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );

            /// Allocates Hazard Pointer SMR thread private data
            CDS_EXPORT_API thread_record* alloc_thread_data();

            /// Free HP SMR thread-private data
            CDS_EXPORT_API void free_thread_data( thread_record* pRec );
            //@endcond

        private:
            static CDS_EXPORT_API smr* instance_;

            atomics::atomic< thread_record*>    thread_list_;   ///< Head of thread list
            size_t const        initial_hazard_count_;  ///< initial number of hazard pointers per thread
            hp_allocator        hp_allocator_;
            retired_allocator   retired_allocator_;

            // temporaries
            std::atomic<size_t> last_plist_size_;   ///< HP array size in last scan() call
        };

        // for backward compatibility
        typedef smr GarbageCollector;


        // inlines
        inline hp_allocator& hp_allocator::instance()
        {
            return smr::instance().get_hp_allocator();
        }

        inline retired_allocator& retired_allocator::instance()
        {
            return smr::instance().get_retired_allocator();
        }

    } // namespace dhp


    /// Dynamic Hazard Pointer garbage collector
    /**  @ingroup cds_garbage_collector
        @headerfile cds/gc/dhp.h

        Implementation of the Dynamic Hazard Pointer garbage collector.

        Sources:
            - [2002] Maged M. Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
            - [2003] Maged M. Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
            - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"

        Dynamic Hazard Pointer SMR (safe memory reclamation) provides an unbounded number of hazard pointers per thread,
        unlike classic Hazard Pointer SMR in which the count of hazard pointers per thread is limited.

        See the \ref cds_how_to_use "How to use" section for details on how to apply the garbage collector.
    */
    class DHP
    {
    public:
        /// Native guarded pointer type
        typedef void* guarded_pointer;

        /// Atomic reference
        template <typename T> using atomic_ref = atomics::atomic<T *>;

        /// Atomic type
        /**
            @headerfile cds/gc/dhp.h
        */
        template <typename T> using atomic_type = atomics::atomic<T>;

        /// Atomic marked pointer
        template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;

        /// Dynamic Hazard Pointer guard
        /**
            A guard is a hazard pointer.
            Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer.

            A \p %Guard object is movable but not copyable.

            The guard object can be in two states:
            - unlinked - the guard is not linked with any internal hazard pointer.
              In this state no operation except \p link() and move assignment is supported.
            - linked (default) - the guard allocates an internal hazard pointer and is fully operable.

            For performance reasons the implementation does not check the state of the guard at run time.

            @warning Move assignment can transfer the guard into the unlinked state; use with care.
        */
        class Guard
        {
        public:
            /// Default ctor allocates a guard (hazard pointer) from thread-private storage
            Guard() CDS_NOEXCEPT
                : guard_( dhp::smr::tls()->hazards_.alloc() )
            {}

            /// Initializes an unlinked guard, i.e. the guard contains no hazard pointer. Used for move semantics support
            explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
                : guard_( nullptr )
            {}

            /// Move ctor - \p src guard becomes unlinked (transfers internal guard ownership)
            Guard( Guard&& src ) CDS_NOEXCEPT
                : guard_( src.guard_ )
            {
                src.guard_ = nullptr;
            }

            /// Move assignment: the internal guards are swapped between \p src and \p this
            /**
                @warning \p src becomes unlinked if \p this was unlinked on entry.
            */
            Guard& operator=( Guard&& src ) CDS_NOEXCEPT
            {
                std::swap( guard_, src.guard_ );
                return *this;
            }

            /// Copy ctor is prohibited - the guard is not copyable
            Guard( Guard const& ) = delete;

            /// Copy assignment is prohibited
            Guard& operator=( Guard const& ) = delete;

            /// Frees the internal hazard pointer if the guard is in the linked state
            ~Guard()
            {
                unlink();
            }

            /// Checks if the guard object is linked with an internal hazard pointer
            bool is_linked() const
            {
                return guard_ != nullptr;
            }

            /// Links the guard with an internal hazard pointer if the guard is in the unlinked state
            void link()
            {
                if ( !guard_ )
                    guard_ = dhp::smr::tls()->hazards_.alloc();
            }

            /// Unlinks the guard from its internal hazard pointer; the guard becomes unlinked
            void unlink()
            {
                if ( guard_ ) {
                    dhp::smr::tls()->hazards_.free( guard_ );
                    guard_ = nullptr;
                }
            }

            /// Protects a pointer of type <tt> atomic<T*> </tt>
            /**
                Returns the value of \p toGuard.

                The function tries to load \p toGuard and store it
                to the HP slot repeatedly until the guard's value equals \p toGuard.
            */
            template <typename T>
            T protect( atomics::atomic<T> const& toGuard )
            {
                assert( guard_ != nullptr );

                T pCur = toGuard.load(atomics::memory_order_acquire);
                T pRet;
                do {
                    pRet = assign( pCur );
                    pCur = toGuard.load(atomics::memory_order_acquire);
                } while ( pRet != pCur );
                return pCur;
            }

            /// Protects a converted pointer of type <tt> atomic<T*> </tt>
            /**
                Returns the value of \p toGuard.

                The function tries to load \p toGuard and store the result of functor \p f
                to the HP slot repeatedly until the guard's value equals \p toGuard.

                The function is useful for intrusive containers when \p toGuard is a node pointer
                that should be converted to a pointer to the value type before guarding.
                The parameter \p f of type Func is a functor that makes this conversion:
                \code
                    struct functor {
                        value_type * operator()( T * p );
                    };
                \endcode
                Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
            */
            template <typename T, class Func>
            T protect( atomics::atomic<T> const& toGuard, Func f )
            {
                assert( guard_ != nullptr );

                T pCur = toGuard.load(atomics::memory_order_acquire);
                T pRet;
                do {
                    pRet = pCur;
                    assign( f( pCur ));
                    pCur = toGuard.load(atomics::memory_order_acquire);
                } while ( pRet != pCur );
                return pCur;
            }

            /// Store \p p to the guard
            /**
                The function is just an assignment, no loop is performed.
                Can be used for a pointer that cannot be changed concurrently
                or for an already guarded pointer.
            */
            template <typename T>
            T* assign( T* p )
            {
                assert( guard_ != nullptr );

                guard_->set( p );
                dhp::smr::tls()->sync();
                return p;
            }

            //@cond
            std::nullptr_t assign( std::nullptr_t )
            {
                assert( guard_ != nullptr );

                clear();
                return nullptr;
            }
            //@endcond

            /// Store marked pointer \p p to the guard
            /**
                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
                Can be used for a marked pointer that cannot be changed concurrently
                or for an already guarded pointer.
            */
            template <typename T, int BITMASK>
            T* assign( cds::details::marked_ptr<T, BITMASK> p )
            {
                return assign( p.ptr());
            }

            /// Copy from \p src guard to \p this guard
            void copy( Guard const& src )
            {
                assign( src.get_native());
            }

            /// Clears the value of the guard
            void clear()
            {
                assert( guard_ != nullptr );

                guard_->clear();
            }

            /// Gets the value currently protected (relaxed read)
            template <typename T>
            T * get() const
            {
                assert( guard_ != nullptr );
                return guard_->get_as<T>();
            }

            /// Gets the native guarded pointer stored
            void* get_native() const
            {
                assert( guard_ != nullptr );
                return guard_->get();
            }

            //@cond
            dhp::guard* release()
            {
                dhp::guard* g = guard_;
                guard_ = nullptr;
                return g;
            }

            dhp::guard*& guard_ref()
            {
                return guard_;
            }
            //@endcond

        private:
            //@cond
            dhp::guard* guard_;
            //@endcond
        };
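
        /*
            Usage sketch (illustrative, not part of the library): protecting the head
            of a Treiber-like stack before dereferencing it. `node` and `top_` are
            hypothetical user-side names.

            \code
            struct node {
                int                    value;
                atomics::atomic<node*> next;
            };
            atomics::atomic<node*> top_;

            int peek_value()
            {
                cds::gc::DHP::Guard guard;
                node* p = guard.protect( top_ );    // loops until the HP slot equals top_
                return p ? p->value : -1;           // p is safe to dereference while guard lives
            }
            \endcode
        */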

        /// Array of Dynamic Hazard Pointer guards
        /**
            The class is intended for allocating an array of hazard pointer guards.
            Template parameter \p Count defines the size of the array.

            A \p %GuardArray object is not copy- and move-constructible
            and not copy- and move-assignable.
        */
        template <size_t Count>
        class GuardArray
        {
        public:
            /// Rebind array for other size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount>  other;   ///< rebinding result
            };

            /// Array capacity
            static CDS_CONSTEXPR const size_t c_nCapacity = Count;

        public:
            /// Default ctor allocates \p Count hazard pointers
            GuardArray()
            {
                dhp::smr::tls()->hazards_.alloc( guards_ );
            }

            /// Move ctor is prohibited
            GuardArray( GuardArray&& ) = delete;

            /// Move assignment is prohibited
            GuardArray& operator=( GuardArray&& ) = delete;

            /// Copy ctor is prohibited
            GuardArray( GuardArray const& ) = delete;

            /// Copy assignment is prohibited
            GuardArray& operator=( GuardArray const& ) = delete;

            /// Frees allocated hazard pointers
            ~GuardArray()
            {
                dhp::smr::tls()->hazards_.free( guards_ );
            }

            /// Protects a pointer of type \p atomic<T*>
            /**
                Returns the value of \p toGuard.

                The function tries to load \p toGuard and store it
                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard.
            */
            template <typename T>
            T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
            {
                assert( nIndex < capacity() );

                T pRet;
                do {
                    pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));

                return pRet;
            }

            /// Protects a pointer of type \p atomic<T*>
            /**
                Returns the value of \p toGuard.

                The function tries to load \p toGuard and store it
                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard.

                The function is useful for intrusive containers when \p toGuard is a node pointer
                that should be converted to a pointer to the value type before guarding.
                The parameter \p f of type Func is a functor that makes this conversion:
                \code
                    struct functor {
                        value_type * operator()( T * p );
                    };
                \endcode
                Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
            */
            template <typename T, class Func>
            T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
            {
                assert( nIndex < capacity() );

                T pRet;
                do {
                    assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));

                return pRet;
            }

            /// Store \p p to the slot \p nIndex
            /**
                The function is just an assignment, no loop is performed.
            */
            template <typename T>
            T * assign( size_t nIndex, T * p )
            {
                assert( nIndex < capacity() );

                guards_.set( nIndex, p );
                dhp::smr::tls()->sync();
                return p;
            }

            /// Store marked pointer \p p to the guard
            /**
                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
                Can be used for a marked pointer that cannot be changed concurrently
                or for an already guarded pointer.
            */
            template <typename T, int Bitmask>
            T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
            {
                return assign( nIndex, p.ptr());
            }

            /// Copy guarded value from \p src guard to slot at index \p nIndex
            void copy( size_t nIndex, Guard const& src )
            {
                assign( nIndex, src.get_native());
            }

            /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
            void copy( size_t nDestIndex, size_t nSrcIndex )
            {
                assign( nDestIndex, get_native( nSrcIndex ));
            }

            /// Clears the value of the slot \p nIndex
            void clear( size_t nIndex )
            {
                guards_.clear( nIndex );
            }

            /// Gets current value of slot \p nIndex
            template <typename T>
            T * get( size_t nIndex ) const
            {
                assert( nIndex < capacity() );
                return guards_[nIndex]->template get_as<T>();
            }

            /// Gets native guarded pointer stored
            guarded_pointer get_native( size_t nIndex ) const
            {
                assert( nIndex < capacity() );
                return guards_[nIndex]->get();
            }

            //@cond
            dhp::guard* release( size_t nIndex ) CDS_NOEXCEPT
            {
                return guards_.release( nIndex );
            }
            //@endcond

            /// Capacity of the guard array
            static CDS_CONSTEXPR size_t capacity()
            {
                return Count;
            }

        private:
            //@cond
            dhp::guard_array<c_nCapacity> guards_;
            //@endcond
        };
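
        /*
            Usage sketch (illustrative): guarding two adjacent nodes during a list
            traversal. `node`, `head_` and the traversal logic are hypothetical and
            simplified; a real traversal must re-validate the link after protecting it.

            \code
            cds::gc::DHP::GuardArray<2> guards;
            node* pPrev = guards.protect( 0, head_ );
            node* pCur  = guards.protect( 1, pPrev->next );
            // both pPrev and pCur stay protected until guards is destroyed
            \endcode
        */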

        /// Guarded pointer
        /**
            A guarded pointer is a pair of a pointer and a GC guard.
            Usually, it is used for returning a pointer to an item of a lock-free container.
            The guard prevents the pointer from being disposed (freed) prematurely by the GC.
            After the \p %guarded_ptr object is destroyed, the pointer can be disposed (freed) automatically at any time.

            Template arguments:
            - \p GuardedType - a type which the guard stores
            - \p ValueType - a value type
            - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).

            For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
            In that case the \p %guarded_ptr is:
            @code
            typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr;
            @endcode

            For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
            For example:
            @code
            struct foo {
                int const   key;
                std::string value;
            };

            struct value_accessor {
                std::string* operator()( foo* pFoo ) const
                {
                    return &(pFoo->value);
                }
            };

            // Guarded ptr
            typedef cds::gc::DHP::guarded_ptr< foo, std::string, value_accessor > nonintrusive_guarded_ptr;
            @endcode

            You do not need to use this class directly.
            All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with an appropriate casting functor.
        */
        template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
        class guarded_ptr
        {
            //@cond
            struct trivial_cast {
                ValueType * operator()( GuardedType * p ) const
                {
                    return p;
                }
            };

            template <typename GT, typename VT, typename C> friend class guarded_ptr;
            //@endcond

        public:
            typedef GuardedType guarded_type; ///< Guarded type
            typedef ValueType   value_type;   ///< Value type

            /// Functor for casting \p guarded_type to \p value_type
            typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;

        public:
            /// Creates empty guarded pointer
            guarded_ptr() CDS_NOEXCEPT
                : guard_( nullptr )
            {}

            //@cond
            explicit guarded_ptr( dhp::guard* g ) CDS_NOEXCEPT
                : guard_( g )
            {}

            /// Initializes guarded pointer with \p p
            explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
                : guard_( nullptr )
            {
                reset( p );
            }
            explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
                : guard_( nullptr )
            {}
            //@endcond

            /// Move ctor
            guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
                : guard_( gp.guard_ )
            {
                gp.guard_ = nullptr;
            }

            /// Move ctor
            template <typename GT, typename VT, typename C>
            guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
                : guard_( gp.guard_ )
            {
                gp.guard_ = nullptr;
            }

            /// Ctor from \p Guard
            explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
                : guard_( g.release())
            {}

            /// The guarded pointer is not copy-constructible
            guarded_ptr( guarded_ptr const& gp ) = delete;

            /// Clears the guarded pointer
            /**
                \ref release is called if the guarded pointer is not \ref empty
            */
            ~guarded_ptr() CDS_NOEXCEPT
            {
                release();
            }

            /// Move-assignment operator
            guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
            {
                std::swap( guard_, gp.guard_ );
                return *this;
            }

            /// Move-assignment from \p Guard
            guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
            {
                std::swap( guard_, g.guard_ref());
                return *this;
            }

            /// The guarded pointer is not copy-assignable
            guarded_ptr& operator=(guarded_ptr const& gp) = delete;

            /// Returns a pointer to the guarded value
            value_type * operator ->() const CDS_NOEXCEPT
            {
                assert( !empty());
                return value_cast()( guard_->get_as<guarded_type>() );
            }

            /// Returns a reference to the guarded value
            value_type& operator *() CDS_NOEXCEPT
            {
                assert( !empty());
                return *value_cast()( guard_->get_as<guarded_type>() );
            }

            /// Returns a const reference to the guarded value
            value_type const& operator *() const CDS_NOEXCEPT
            {
                assert( !empty());
                return *value_cast()(reinterpret_cast<guarded_type *>(guard_->get()));
            }

            /// Checks if the guarded pointer is \p nullptr
            bool empty() const CDS_NOEXCEPT
            {
                return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr;
            }

            /// \p bool operator returns <tt>!empty()</tt>
            explicit operator bool() const CDS_NOEXCEPT
            {
                return !empty();
            }

            /// Clears the guarded pointer
            /**
                If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
                Dereferencing the guarded pointer after \p release() is dangerous.
            */
            void release() CDS_NOEXCEPT
            {
                free_guard();
            }

            //@cond
            // For internal use only!!!
            void reset(guarded_type * p) CDS_NOEXCEPT
            {
                alloc_guard();
                assert( guard_ );
                guard_->set( p );
            }
            //@endcond

        private:
            //@cond
            void alloc_guard()
            {
                if ( !guard_ )
                    guard_ = dhp::smr::tls()->hazards_.alloc();
            }

            void free_guard()
            {
                if ( guard_ ) {
                    dhp::smr::tls()->hazards_.free( guard_ );
                    guard_ = nullptr;
                }
            }
            //@endcond

        private:
            //@cond
            dhp::guard* guard_;
            //@endcond
        };
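
        /*
            Usage sketch (illustrative): containers return guarded_ptr from functions
            like extract(); `my_map`, `map`, `key` and `use()` are hypothetical names.

            \code
            my_map::guarded_ptr gp = map.extract( key );
            if ( gp ) {
                use( *gp );     // safe: the guard keeps the item alive
            }
            gp.release();       // after this the item may be disposed at any time
            \endcode
        */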

    public:
        /// Initializes %DHP memory manager singleton
        /**
            The constructor creates and initializes the %DHP global object.
            The %DHP object should be created before using CDS data structures based on \p %cds::gc::DHP. Usually,
            it is created at the beginning of \p main().
            After creating the global object you may use CDS data structures based on \p %cds::gc::DHP.

            \p nInitialHazardPtrCount - initial count of guards allocated for each thread.
                When a thread is initialized, the GC allocates a thread-local guard pool for it from a common guard pool.
                When necessary, the thread-local guard pool grows automatically from the common pool.
                When the thread terminates, its guard pool is returned to the common GC pool.
        */
        DHP(
            size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
        )
        {
            dhp::smr::construct( nInitialHazardPtrCount );
        }

        /// Destroys %DHP memory manager
        /**
            The destructor destroys the %DHP global object. After calling this function you may \b NOT
            use CDS data structures based on \p %cds::gc::DHP.
            Usually, the %DHP object is destroyed at the end of your \p main().
        */
        ~DHP()
        {
            dhp::GarbageCollector::destruct( true );
        }

        /// Checks if the count of hazard pointers is no less than \p nCountNeeded
        /**
            The function always returns \p true since the guard count is unlimited for
            the \p %gc::DHP garbage collector.
        */
        static CDS_CONSTEXPR bool check_available_guards(
#ifdef CDS_DOXYGEN_INVOKED
            size_t nCountNeeded,
#else
            size_t
#endif
        )
        {
            return true;
        }

        /// Set memory management functions
        /**
            @note This function may be called only <b>BEFORE</b> creating an instance
            of Dynamic Hazard Pointer SMR.

            The SMR object allocates some memory for thread-specific data and for
            the SMR object itself. By default, the standard \p new and \p delete
            operators are used for this.
        */
        static void set_memory_allocator(
            void* ( *alloc_func )( size_t size ),   ///< \p malloc() function
            void( *free_func )( void * p )          ///< \p free() function
        )
        {
            dhp::smr::set_memory_allocator( alloc_func, free_func );
        }

        /// Retire pointer \p p with function \p func
        /**
            The function places pointer \p p into the array of pointers ready for removal
            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
            \p func is a disposer: when \p p can be safely removed, \p func is called.
        */
        template <typename T>
        static void retire( T * p, void (* func)(T *))
        {
            dhp::thread_data* rec = dhp::smr::tls();
            if ( !rec->retired_.push( dhp::retired_ptr( p, func ) ) )
                dhp::smr::instance().scan( rec );
        }
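
        // Illustrative example (hypothetical type `foo` and variable `pFoo`):
        // \code
        // void dispose_foo( foo* p ) { delete p; }
        // ...
        // cds::gc::DHP::retire( pFoo, dispose_foo ); // freed once no HP guards it
        // \endcode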

        /// Retire pointer \p p with functor of type \p Disposer
        /**
            The function places pointer \p p into the array of pointers ready for removal
            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.

            Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
            \code
            template <typename T>
            struct disposer {
                void operator()( T * p )    ;   // disposing operator
            };
            \endcode
            Since the functor call can happen at any time after the \p retire() call, additional restrictions are imposed on the \p Disposer type:
            - it should be a stateless functor
            - it should be default-constructible
            - the result of the functor call with argument \p p should not depend on where the functor is called.

            \par Examples:
            Operator \p delete functor:
            \code
            template <typename T>
            struct disposer {
                void operator ()( T * p ) {
                    delete p;
                }
            };

            // How to call DHP::retire method
            int * p = new int;

            // ... use p in lock-free manner

            cds::gc::DHP::retire<disposer>( p ) ;   // place p to the retired pointer array of DHP SMR
            \endcode

            Functor based on \p std::allocator :
            \code
            template <typename Alloc = std::allocator<int> >
            struct disposer {
                template <typename T>
                void operator()( T * p ) {
                    typedef typename Alloc::template rebind<T>::other alloc_t;
                    alloc_t a;
                    a.destroy( p );
                    a.deallocate( p, 1 );
                }
            };
            \endcode
        */
        template <class Disposer, typename T>
        static void retire( T * p )
        {
            if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
                scan();
        }

        /// Checks if Dynamic Hazard Pointer GC is constructed and may be used
        static bool isUsed()
        {
            return dhp::smr::isUsed();
        }

        /// Forces a GC cycle for the current thread
        /**
            Usually, this function should not be called directly.
        */
        static void scan()
        {
            dhp::smr::instance().scan( dhp::smr::tls() );
        }

        /// Synonym for \p scan()
        static void force_dispose()
        {
            scan();
        }
    };
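
    /*
        Typical initialization sketch (illustrative; consult the libcds documentation
        for the authoritative sequence):

        \code
        #include <cds/init.h>
        #include <cds/gc/dhp.h>

        int main()
        {
            cds::Initialize();          // initialize libcds internals
            {
                cds::gc::DHP dhpGC;     // construct the DHP SMR singleton

                // attach the current thread to libcds infrastructure
                cds::threading::Manager::attachThread();

                // ... use cds::gc::DHP-based containers here ...
            }                           // ~DHP() calls dhp::smr::destruct( true )
            cds::Terminate();
        }
        \endcode
    */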

}} // namespace cds::gc

#endif // #ifndef CDSLIB_GC_DHP_SMR_H