/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CDSLIB_GC_DHP_SMR_H
#define CDSLIB_GC_DHP_SMR_H

#include <exception>
#include <cds/gc/details/hp_common.h>
#include <cds/intrusive/free_list_selector.h>
#include <cds/details/throw_exception.h>
#include <cds/details/static_functor.h>
#include <cds/details/marked_ptr.h>
#include <cds/user_setup/cache_line.h>

namespace cds { namespace gc {
    namespace dhp {
        using namespace cds::gc::hp::common;

        /// Exception "Dynamic Hazard Pointer SMR is not initialized"
        class not_initialized: public std::runtime_error
        {
        public:
            not_initialized()
                : std::runtime_error( "Global DHP SMR object is not initialized" )
            {}
        };

        struct guard_block: public cds::intrusive::FreeListImpl::node
        {
            guard_block*    next_;  ///< next block in the thread list

            guard_block()
                : next_( nullptr )
            {}

            /// Returns a pointer to the first guard of the block.
            /// The guards are laid out immediately after the block header:
            /// the allocator reserves room for the header plus a run of guards.
            guard* first()
            {
                return reinterpret_cast<guard*>( this + 1 );
            }
        };

        /// \p guard_block allocator (global object)
        class hp_allocator
        {
            friend class smr;
        public:
            static hp_allocator& instance();

            CDS_EXPORT_API guard_block* alloc();
            void free( guard_block* block )
            {
                free_list_.put( block );
            }

        private:
            hp_allocator()
            {}
            CDS_EXPORT_API ~hp_allocator();

        private:
            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p guard_block
        };

        /// Per-thread hazard pointer storage
        class thread_hp_storage
        {
            friend class smr;
        public:
            thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
                : free_head_( arr )
                , extended_list_( nullptr )
                , array_( arr )
                , initial_capacity_( nSize )
            {
                // Initialize guards
                new( arr ) guard[nSize];
            }

            thread_hp_storage() = delete;
            thread_hp_storage( thread_hp_storage const& ) = delete;
            thread_hp_storage( thread_hp_storage&& ) = delete;

            ~thread_hp_storage()
            {
                clear();
            }

            guard* alloc()
            {
                if ( cds_unlikely( free_head_ == nullptr )) {
                    extend();
                    assert( free_head_ != nullptr );
                }

                guard* g = free_head_;
                free_head_ = g->next_;
                return g;
            }

            void free( guard* g ) CDS_NOEXCEPT
            {
                if ( g ) {
                    g->clear();
                    g->next_ = free_head_;
                    free_head_ = g;
                }
            }

            template <size_t Capacity>
            size_t alloc( guard_array<Capacity>& arr )
            {
                for ( size_t i = 0; i < Capacity; ++i ) {
                    if ( cds_unlikely( free_head_ == nullptr ))
                        extend();
                    arr.reset( i, free_head_ );
                    free_head_ = free_head_->next_;
                }
                return Capacity;
            }

            template <size_t Capacity>
            void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
            {
                guard* gList = free_head_;
                for ( size_t i = 0; i < Capacity; ++i ) {
                    guard* g = arr[i];
                    if ( g ) {
                        g->clear();
                        g->next_ = gList;
                        gList = g;
                    }
                }
                free_head_ = gList;
            }

            void clear()
            {
                // clear array_
                for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur )
                    cur->clear();

                // free all extended blocks
                hp_allocator& alloc = hp_allocator::instance();
                for ( guard_block* p = extended_list_; p; ) {
                    guard_block* next = p->next_;
                    alloc.free( p );
                    p = next;
                }

                extended_list_ = nullptr;
            }

            void init()
            {
                assert( extended_list_ == nullptr );

                // link the initial guards into the free list
                guard* p = array_;
                for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
                    p->next_ = p + 1;
                p->next_ = nullptr;
                free_head_ = array_;
            }

        private:
            void extend()
            {
                assert( free_head_ == nullptr );

                guard_block* block = hp_allocator::instance().alloc();
                block->next_ = extended_list_;
                extended_list_ = block;
                free_head_ = block->first();
            }

        private:
            guard*          free_head_;        ///< Head of free guard list
            guard_block*    extended_list_;    ///< Head of extended guard blocks allocated for the thread
            guard* const    array_;            ///< initial HP array
            size_t const    initial_capacity_; ///< Capacity of \p array_
        };

        struct retired_block: public cds::intrusive::FreeListImpl::node
        {
            retired_block*  next_;  ///< Next block in thread-private retired array

            static size_t const c_capacity = 256; ///< Number of retired pointers per block

            retired_block()
                : next_( nullptr )
            {}

            /// First retired pointer of the block; the cells follow the block header
            retired_ptr* first() const
            {
                return reinterpret_cast<retired_ptr*>( const_cast<retired_block*>( this ) + 1 );
            }

            /// Past-the-end pointer of the block
            retired_ptr* last() const
            {
                return first() + c_capacity;
            }
        };

        /// \p retired_block allocator (global object)
        class retired_allocator
        {
            friend class smr;
        public:
            static retired_allocator& instance();

            CDS_EXPORT_API retired_block* alloc();
            void free( retired_block* block )
            {
                block->next_ = nullptr;
                free_list_.put( block );
            }

        private:
            retired_allocator()
            {}
            CDS_EXPORT_API ~retired_allocator();

        private:
            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p retired_block
        };

        /// Per-thread retired array
        class retired_array
        {
            friend class smr;
        public:
            retired_array() CDS_NOEXCEPT
                : current_block_( nullptr )
                , current_cell_( nullptr )
                , list_head_( nullptr )
                , list_tail_( nullptr )
                , block_count_(0)
            {}

            retired_array( retired_array const& ) = delete;
            retired_array( retired_array&& ) = delete;

            ~retired_array()
            {
                assert( empty());
                fini();
            }

            bool push( retired_ptr const& p ) CDS_NOEXCEPT
            {
                assert( current_block_ != nullptr );
                assert( current_block_->first() <= current_cell_ );
                assert( current_cell_ < current_block_->last() );
                //assert( &p != current_cell_ );

                *current_cell_ = p;
                if ( ++current_cell_ == current_block_->last() ) {
                    // go to the next block if one exists
                    if ( current_block_->next_ ) {
                        current_block_ = current_block_->next_;
                        current_cell_ = current_block_->first();
                        return true;
                    }

                    // no free block;
                    // smr::scan() extends the retired array if needed
                    return false;
                }

                return true;
            }

            bool safe_push( retired_ptr* p ) CDS_NOEXCEPT
            {
                bool ret = push( *p );
                assert( ret );
                return ret;
            }

        private: // called by smr
            void init()
            {
                if ( list_head_ == nullptr ) {
                    retired_block* block = retired_allocator::instance().alloc();
                    assert( block->next_ == nullptr );

                    current_block_ =
                        list_head_ =
                        list_tail_ = block;
                    current_cell_ = block->first();

                    block_count_ = 1;
                }
            }

            void fini()
            {
                retired_allocator& alloc = retired_allocator::instance();
                for ( retired_block* p = list_head_; p; ) {
                    retired_block* next = p->next_;
                    alloc.free( p );
                    p = next;
                }

                current_block_ =
                    list_head_ =
                    list_tail_ = nullptr;
                current_cell_ = nullptr;

                block_count_ = 0;
            }

            void extend()
            {
                assert( list_head_ != nullptr );
                assert( current_block_ == list_tail_ );
                assert( current_cell_ == current_block_->last() );

                retired_block* block = retired_allocator::instance().alloc();
                assert( block->next_ == nullptr );

                list_tail_ = list_tail_->next_ = block;
                current_cell_ = block->first();
                ++block_count_;
            }

            bool empty() const
            {
                return current_block_ == nullptr
                    || ( current_block_ == list_head_ && current_cell_ == current_block_->first());
            }

        private:
            retired_block*          current_block_;
            retired_ptr*            current_cell_;  // in current_block_

            retired_block*          list_head_;
            retired_block*          list_tail_;
            size_t                  block_count_;
        };

        /// Per-thread data
        struct thread_data {
            thread_hp_storage   hazards_;   ///< Hazard pointers private to the thread
            retired_array       retired_;   ///< Retired data private to the thread

            char pad1_[cds::c_nCacheLineSize];
            atomics::atomic<unsigned int> sync_; ///< dummy var to introduce a synchronizes-with relationship between threads
            char pad2_[cds::c_nCacheLineSize];

            // CppCheck warning: pad1_ and pad2_ are uninitialized in ctor
            // cppcheck-suppress uninitMemberVar
            thread_data( guard* guards, size_t guard_count )
                : hazards_( guards, guard_count )
                , sync_( 0 )
            {}

            thread_data() = delete;
            thread_data( thread_data const& ) = delete;
            thread_data( thread_data&& ) = delete;

            void sync()
            {
                sync_.fetch_add( 1, atomics::memory_order_acq_rel );
            }
        };

        // Dynamic Hazard Pointer SMR (Safe Memory Reclamation)
        class smr
        {
            struct thread_record;

        public:
            /// Returns the instance of Dynamic Hazard Pointer \ref smr
            static smr& instance()
            {
#       ifdef CDS_DISABLE_SMR_EXCEPTION
                assert( instance_ != nullptr );
#       else
                if ( !instance_ )
                    CDS_THROW_EXCEPTION( not_initialized() );
#       endif
                return *instance_;
            }

            /// Creates the Dynamic Hazard Pointer SMR singleton
            /**
                Dynamic Hazard Pointer SMR is a singleton. If the DHP instance is not yet initialized,
                the function creates the instance. Otherwise it does nothing.

                Michael's HP reclamation scheme depends on three parameters:
                - \p nHazardPtrCount - the hazard pointer count per thread. Usually it is a small number (2-4)
                depending on the data structure algorithms. By default, if \p nHazardPtrCount = 0,
                the function uses the maximum HP count required by the CDS library
                - \p nMaxThreadCount - the maximum number of threads using HP GC in your application. Default is 100.
                - \p nMaxRetiredPtrCount - the capacity of the retired pointer array for each thread. Must be greater than
                <tt> nHazardPtrCount * nMaxThreadCount </tt>.
                Default is <tt>2 * nHazardPtrCount * nMaxThreadCount</tt>
            */
            static CDS_EXPORT_API void construct(
                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
            );

            //@cond
            // for backward compatibility
            static void Construct(
                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
            )
            {
                construct( nInitialHazardPtrCount );
            }
            //@endcond

            /// Destroys the global instance of \ref smr
            /**
                The parameter \p bDetachAll should be used carefully: if its value is \p true,
                then the object being destroyed automatically detaches all attached threads. This feature
                can be useful when you have no control over thread termination, for example,
                when \p libcds is injected into an existing external thread.
            */
            static CDS_EXPORT_API void destruct(
                bool bDetachAll = false     ///< Detach all threads
            );

            //@cond
            // for backward compatibility
            static void Destruct(
                bool bDetachAll = false     ///< Detach all threads
            )
            {
                destruct( bDetachAll );
            }
            //@endcond

            /// Checks if the global SMR object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return instance_ != nullptr;
            }

            /// Sets memory management functions
            /**
                @note This function may be called <b>BEFORE</b> creating an instance
                of Dynamic Hazard Pointer SMR

                The SMR object allocates some memory for thread-specific data and for
                creating the SMR object itself.
                By default, the standard \p new and \p delete operators are used for this.
            */
            static CDS_EXPORT_API void set_memory_allocator(
                void* ( *alloc_func )( size_t size ),
                void( *free_func )( void * p )
            );

            /// Returns thread-local data for the current thread
            static CDS_EXPORT_API thread_data* tls();

            static CDS_EXPORT_API void attach_thread();
            static CDS_EXPORT_API void detach_thread();

        public: // for internal use only
            /// The main garbage collecting function
            CDS_EXPORT_API void scan( thread_data* pRec );

            /// Helper scan routine
            /**
                The function guarantees that every node that is eligible for reuse is eventually freed, barring
                thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
                where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
                to its own list of reclaimed pointers.

                The function is called internally by \p scan().
            */
            CDS_EXPORT_API void help_scan( thread_data* pThis );

            hp_allocator& get_hp_allocator()
            {
                return hp_allocator_;
            }

            retired_allocator& get_retired_allocator()
            {
                return retired_allocator_;
            }

        private:
            CDS_EXPORT_API explicit smr(
                size_t nInitialHazardPtrCount
            );

            CDS_EXPORT_API ~smr();

            CDS_EXPORT_API void detach_all_thread();

        private:
            //@cond
            CDS_EXPORT_API thread_record* create_thread_data();
            static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );

            /// Allocates Hazard Pointer SMR thread private data
            CDS_EXPORT_API thread_record* alloc_thread_data();

            /// Frees HP SMR thread-private data
            CDS_EXPORT_API void free_thread_data( thread_record* pRec );
            //@endcond

        private:
            static CDS_EXPORT_API smr* instance_;

            atomics::atomic< thread_record*>    thread_list_;   ///< Head of thread list
            size_t const        initial_hazard_count_;  ///< initial number of hazard pointers per thread
            hp_allocator        hp_allocator_;
            retired_allocator   retired_allocator_;

            // temporaries
            std::atomic<size_t> last_plist_size_;   ///< HP array size in the last scan() call
        };

        // for backward compatibility
        typedef smr GarbageCollector;


        // inlines
        inline hp_allocator& hp_allocator::instance()
        {
            return smr::instance().get_hp_allocator();
        }

        inline retired_allocator& retired_allocator::instance()
        {
            return smr::instance().get_retired_allocator();
        }

    } // namespace dhp


    /// Dynamic Hazard Pointer garbage collector
    /**  @ingroup cds_garbage_collector
        @headerfile cds/gc/dhp.h

        Implementation of the Dynamic Hazard Pointer garbage collector.

        Sources:
            - [2002] Maged M. Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
            - [2003] Maged M. Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
            - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"

        Dynamic Hazard Pointer SMR (safe memory reclamation) provides an unbounded number of hazard pointers per thread,
        unlike classic Hazard Pointer SMR in which the number of hazard pointers per thread is limited.

        See the \ref cds_how_to_use "How to use" section for details on how to apply the garbage collector.
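
        A minimal usage sketch (any \p %DHP-based container from \p libcds can be
        used the same way; the container usage itself is elided):
        \code
        #include <cds/init.h>
        #include <cds/gc/dhp.h>

        int main()
        {
            cds::Initialize();  // initialize the library infrastructure
            {
                cds::gc::DHP dhpGC;     // create the DHP singleton

                // Attach the current thread to libcds infrastructure
                cds::threading::Manager::attachThread();

                // ... use cds::gc::DHP-based containers here ...

            }   // dhpGC leaves the scope: the DHP singleton is destroyed
            cds::Terminate();   // terminate the library
            return 0;
        }
        \endcode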
    */
    class DHP
    {
    public:
        /// Native guarded pointer type
        typedef void* guarded_pointer;

        /// Atomic reference
        template <typename T> using atomic_ref = atomics::atomic<T *>;

        /// Atomic type
        /**
            @headerfile cds/gc/dhp.h
        */
        template <typename T> using atomic_type = atomics::atomic<T>;

        /// Atomic marked pointer
        template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;


        /// Dynamic Hazard Pointer guard
        /**
            A guard is a hazard pointer.
            Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer.

            A \p %Guard object is movable but not copyable.

            The guard object can be in two states:
            - unlinked - the guard is not linked with any internal hazard pointer.
              In this state no operation except \p link() and move assignment is supported.
            - linked (default) - the guard allocates an internal hazard pointer and is fully operable.

            For performance reasons the implementation does not check the state of the guard at runtime.

            @warning Move assignment can transfer the guard to the unlinked state, use with care.
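
            A minimal usage sketch (the atomic pointer \p atomicPtr and the type \p foo
            are assumptions for illustration):
            \code
            atomics::atomic<foo*> atomicPtr;

            {
                cds::gc::DHP::Guard g;

                // Protect the pointer: after protect() returns, the object
                // pointed to by pFoo cannot be reclaimed while g holds it
                foo* pFoo = g.protect( atomicPtr );

                if ( pFoo ) {
                    // ... safely use *pFoo ...
                }
            }   // g is destroyed: the hazard pointer returns to the thread-private pool
            \endcode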
        */
        class Guard
        {
        public:
            /// Default ctor allocates a guard (hazard pointer) from thread-private storage
            Guard() CDS_NOEXCEPT
                : guard_( dhp::smr::tls()->hazards_.alloc() )
            {}

            /// Initializes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
            explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
                : guard_( nullptr )
            {}

            /// Move ctor - \p src guard becomes unlinked (transfers internal guard ownership)
            Guard( Guard&& src ) CDS_NOEXCEPT
                : guard_( src.guard_ )
            {
                src.guard_ = nullptr;
            }

            /// Move assignment: the internal guards are swapped between \p src and \p this
            /**
                @warning \p src will be in the unlinked state if \p this was unlinked on entry.
            */
            Guard& operator=( Guard&& src ) CDS_NOEXCEPT
            {
                std::swap( guard_, src.guard_ );
                return *this;
            }

            /// Copy ctor is prohibited - the guard is not copyable
            Guard( Guard const& ) = delete;

            /// Copy assignment is prohibited
            Guard& operator=( Guard const& ) = delete;

            /// Frees the internal hazard pointer if the guard is in the linked state
            ~Guard()
            {
                unlink();
            }

            /// Checks if the guard object is linked with an internal hazard pointer
            bool is_linked() const
            {
                return guard_ != nullptr;
            }

            /// Links the guard with an internal hazard pointer if the guard is in the unlinked state
            void link()
            {
                if ( !guard_ )
                    guard_ = dhp::smr::tls()->hazards_.alloc();
            }

            /// Unlinks the guard from the internal hazard pointer; the guard enters the unlinked state
            void unlink()
            {
                if ( guard_ ) {
                    dhp::smr::tls()->hazards_.free( guard_ );
                    guard_ = nullptr;
                }
            }

            /// Protects a pointer of type <tt> atomic<T*> </tt>
            /**
                Returns the value of \p toGuard

                The function tries to load \p toGuard and to store it
                to the HP slot repeatedly until the guard's value equals \p toGuard
            */
            template <typename T>
            T protect( atomics::atomic<T> const& toGuard )
            {
                assert( guard_ != nullptr );

                T pCur = toGuard.load(atomics::memory_order_acquire);
                T pRet;
                do {
                    pRet = assign( pCur );
                    pCur = toGuard.load(atomics::memory_order_acquire);
                } while ( pRet != pCur );
                return pCur;
            }

            /// Protects a converted pointer of type <tt> atomic<T*> </tt>
            /**
                Returns the value of \p toGuard

                The function tries to load \p toGuard and to store the result of the \p f functor
                to the HP slot repeatedly until the guard's value equals \p toGuard.

                The function is useful for intrusive containers when \p toGuard is a node pointer
                that should be converted to a pointer to the value type before guarding.
                The parameter \p f of type Func is a functor that makes this conversion:
                \code
                    struct functor {
                        value_type * operator()( T * p );
                    };
                \endcode
                In fact, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
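
                A brief sketch (the names \p node_type, \p node_to_value and \p head_ are
                assumptions for illustration):
                \code
                cds::gc::DHP::Guard g;

                // Guard the node; the hazard pointer stores the converted value pointer
                node_type* pNode = g.protect( head_,
                    []( node_type* p ) { return node_to_value( p ); } );
                \endcode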
            */
            template <typename T, class Func>
            T protect( atomics::atomic<T> const& toGuard, Func f )
            {
                assert( guard_ != nullptr );

                T pCur = toGuard.load(atomics::memory_order_acquire);
                T pRet;
                do {
                    pRet = pCur;
                    assign( f( pCur ));
                    pCur = toGuard.load(atomics::memory_order_acquire);
                } while ( pRet != pCur );
                return pCur;
            }

            /// Stores \p p to the guard
            /**
                The function is just an assignment, no loop is performed.
                Can be used for a pointer that cannot be changed concurrently
                or for an already guarded pointer.
            */
            template <typename T>
            T* assign( T* p )
            {
                assert( guard_ != nullptr );

                guard_->set( p );
                dhp::smr::tls()->sync();
                return p;
            }

            //@cond
            std::nullptr_t assign( std::nullptr_t )
            {
                assert( guard_ != nullptr );

                clear();
                return nullptr;
            }
            //@endcond

            /// Stores marked pointer \p p to the guard
            /**
                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
                Can be used for a marked pointer that cannot be changed concurrently
                or for an already guarded pointer.
            */
            template <typename T, int BITMASK>
            T* assign( cds::details::marked_ptr<T, BITMASK> p )
            {
                return assign( p.ptr());
            }

            /// Copy from \p src guard to \p this guard
            void copy( Guard const& src )
            {
                assign( src.get_native());
            }

            /// Clears value of the guard
            void clear()
            {
                assert( guard_ != nullptr );

                guard_->clear();
            }

            /// Gets the value currently protected (relaxed read)
            template <typename T>
            T * get() const
            {
                assert( guard_ != nullptr );
                return guard_->get_as<T>();
            }

            /// Gets native guarded pointer stored
            void* get_native() const
            {
                assert( guard_ != nullptr );
                return guard_->get();
            }

            //@cond
            dhp::guard* release()
            {
                dhp::guard* g = guard_;
                guard_ = nullptr;
                return g;
            }

            dhp::guard*& guard_ref()
            {
                return guard_;
            }
            //@endcond

        private:
            //@cond
            dhp::guard* guard_;
            //@endcond
        };

        /// Array of Dynamic Hazard Pointer guards
        /**
            The class is intended for allocating an array of hazard pointer guards.
            Template parameter \p Count defines the size of the array.

            A \p %GuardArray object is not copy- and move-constructible
            and not copy- and move-assignable.
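
            A brief sketch (two guards protecting a pair of adjacent list nodes;
            \p node_type and the atomic fields are assumptions for illustration):
            \code
            cds::gc::DHP::GuardArray<2> guards;

            // Slot 0 guards the predecessor, slot 1 guards the current node
            node_type* pPrev = guards.protect( 0, list_head );
            node_type* pCur  = guards.protect( 1, pPrev->next );

            // Both nodes are now protected from reclamation
            \endcode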
        */
        template <size_t Count>
        class GuardArray
        {
        public:
            /// Rebind array for other size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
            };

            /// Array capacity
            static CDS_CONSTEXPR const size_t c_nCapacity = Count;

        public:
            /// Default ctor allocates \p Count hazard pointers
            GuardArray()
            {
                dhp::smr::tls()->hazards_.alloc( guards_ );
            }

            /// Move ctor is prohibited
            GuardArray( GuardArray&& ) = delete;

            /// Move assignment is prohibited
            GuardArray& operator=( GuardArray&& ) = delete;

            /// Copy ctor is prohibited
            GuardArray( GuardArray const& ) = delete;

            /// Copy assignment is prohibited
            GuardArray& operator=( GuardArray const& ) = delete;

            /// Frees allocated hazard pointers
            ~GuardArray()
            {
                dhp::smr::tls()->hazards_.free( guards_ );
            }

            /// Protects a pointer of type \p atomic<T*>
            /**
                Returns the value of \p toGuard

                The function tries to load \p toGuard and to store it
                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
            */
            template <typename T>
            T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
            {
                assert( nIndex < capacity() );

                T pRet;
                do {
                    pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));

                return pRet;
            }

            /// Protects a pointer of type \p atomic<T*>
            /**
                Returns the value of \p toGuard

                The function tries to load \p toGuard and to store it
                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard

                The function is useful for intrusive containers when \p toGuard is a node pointer
                that should be converted to a pointer to the value type before guarding.
                The parameter \p f of type Func is a functor that makes this conversion:
                \code
                    struct functor {
                        value_type * operator()( T * p );
                    };
                \endcode
                In fact, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
            */
            template <typename T, class Func>
            T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
            {
                assert( nIndex < capacity() );

                T pRet;
                do {
                    assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));

                return pRet;
            }

            /// Stores \p p to the slot \p nIndex
            /**
                The function is just an assignment, no loop is performed.
            */
            template <typename T>
            T * assign( size_t nIndex, T * p )
            {
                assert( nIndex < capacity() );

                guards_.set( nIndex, p );
                dhp::smr::tls()->sync();
                return p;
            }

            /// Stores marked pointer \p p to the guard
            /**
                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
                Can be used for a marked pointer that cannot be changed concurrently
                or for an already guarded pointer.
            */
            template <typename T, int Bitmask>
            T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
            {
                return assign( nIndex, p.ptr());
            }

            /// Copies the guarded value from \p src guard to slot at index \p nIndex
            void copy( size_t nIndex, Guard const& src )
            {
                assign( nIndex, src.get_native());
            }

            /// Copies the guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
            void copy( size_t nDestIndex, size_t nSrcIndex )
            {
                assign( nDestIndex, get_native( nSrcIndex ));
            }

            /// Clears the value of slot \p nIndex
            void clear( size_t nIndex )
            {
                guards_.clear( nIndex );
            }

            /// Gets the current value of slot \p nIndex
            template <typename T>
            T * get( size_t nIndex ) const
            {
                assert( nIndex < capacity() );
                return guards_[nIndex]->template get_as<T>();
            }

            /// Gets the native guarded pointer stored in slot \p nIndex
            guarded_pointer get_native( size_t nIndex ) const
            {
                assert( nIndex < capacity() );
                return guards_[nIndex]->get();
            }

            //@cond
            dhp::guard* release( size_t nIndex ) CDS_NOEXCEPT
            {
                return guards_.release( nIndex );
            }
            //@endcond

            /// Capacity of the guard array
            static CDS_CONSTEXPR size_t capacity()
            {
                return Count;
            }

        private:
            //@cond
            dhp::guard_array<c_nCapacity> guards_;
            //@endcond
        };

        /// Guarded pointer
        /**
            A guarded pointer is a pair of a pointer and a GC guard.
            Usually, it is used for returning a pointer to an item of a lock-free container.
            The guard prevents the pointer from being disposed (freed) prematurely by the GC.
            After the \p %guarded_ptr object is destroyed, the pointer can be disposed (freed) automatically at any time.

            Template arguments:
            - \p GuardedType - a type which the guard stores
            - \p ValueType - a value type
            - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).

            For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
            In that case the \p %guarded_ptr is:
            @code
            typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr;
            @endcode

            For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType, so casting is needed.
            For example:
            @code
            struct foo {
                int const   key;
                std::string value;
            };

            struct value_accessor {
                std::string* operator()( foo* pFoo ) const
                {
                    return &(pFoo->value);
                }
            };

            // Guarded ptr
            typedef cds::gc::DHP::guarded_ptr< foo, std::string, value_accessor > nonintrusive_guarded_ptr;
            @endcode

            You don't need to use this class directly.
            All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with an appropriate casting functor.
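
            A brief usage sketch (assuming \p my_map is a \p %DHP-based map from \p libcds
            that supports \p extract(); the exact container type is illustrative):
            @code
            my_map::guarded_ptr gp( theMap.extract( key ));
            if ( gp ) {
                // While gp is alive the extracted item cannot be freed by the GC
                process( *gp );
            }
            // gp is destroyed here: the item may be disposed at any time
            @endcode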
        */
        template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
        class guarded_ptr
        {
            //@cond
            struct trivial_cast {
                ValueType * operator()( GuardedType * p ) const
                {
                    return p;
                }
            };

            template <typename GT, typename VT, typename C> friend class guarded_ptr;
            //@endcond

        public:
            typedef GuardedType guarded_type; ///< Guarded type
            typedef ValueType   value_type;   ///< Value type

            /// Functor for casting \p guarded_type to \p value_type
            typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;

        public:
            /// Creates empty guarded pointer
            guarded_ptr() CDS_NOEXCEPT
                : guard_( nullptr )
            {}

            //@cond
            explicit guarded_ptr( dhp::guard* g ) CDS_NOEXCEPT
                : guard_( g )
            {}

            /// Initializes guarded pointer with \p p
            explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
                : guard_( nullptr )
            {
                reset( p );
            }
            explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
                : guard_( nullptr )
            {}
            //@endcond

            /// Move ctor
            guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
                : guard_( gp.guard_ )
            {
                gp.guard_ = nullptr;
            }

            /// Move ctor
            template <typename GT, typename VT, typename C>
            guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
                : guard_( gp.guard_ )
            {
                gp.guard_ = nullptr;
            }

            /// Ctor from \p Guard
            explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
                : guard_( g.release())
            {}

            /// The guarded pointer is not copy-constructible
            guarded_ptr( guarded_ptr const& gp ) = delete;

            /// Clears the guarded pointer
            /**
                \ref release() is called if the guarded pointer is not \ref empty()
            */
            ~guarded_ptr() CDS_NOEXCEPT
            {
                release();
            }

            /// Move-assignment operator
            guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
            {
                std::swap( guard_, gp.guard_ );
                return *this;
            }

            /// Move-assignment from \p Guard
            guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
            {
                std::swap( guard_, g.guard_ref());
                return *this;
            }

            /// The guarded pointer is not copy-assignable
            guarded_ptr& operator=(guarded_ptr const& gp) = delete;

            /// Returns a pointer to the guarded value
            value_type * operator ->() const CDS_NOEXCEPT
            {
                assert( !empty());
                return value_cast()( guard_->get_as<guarded_type>() );
            }

            /// Returns a reference to the guarded value
            value_type& operator *() CDS_NOEXCEPT
            {
                assert( !empty());
                return *value_cast()( guard_->get_as<guarded_type>() );
            }

            /// Returns a const reference to the guarded value
            value_type const& operator *() const CDS_NOEXCEPT
            {
                assert( !empty());
                return *value_cast()(reinterpret_cast<guarded_type *>(guard_->get()));
            }

            /// Checks if the guarded pointer is \p nullptr
            bool empty() const CDS_NOEXCEPT
            {
                return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr;
            }

            /// \p bool operator returns <tt>!empty()</tt>
            explicit operator bool() const CDS_NOEXCEPT
            {
                return !empty();
            }

            /// Clears the guarded pointer
            /**
                If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
                Dereferencing the guarded pointer after \p release() is dangerous.
            */
            void release() CDS_NOEXCEPT
            {
                free_guard();
            }

            //@cond
            // For internal use only!!!
            void reset(guarded_type * p) CDS_NOEXCEPT
            {
                alloc_guard();
                assert( guard_ );
                guard_->set( p );
            }

            //@endcond

        private:
            //@cond
            void alloc_guard()
            {
                if ( !guard_ )
                    guard_ = dhp::smr::tls()->hazards_.alloc();
            }

            void free_guard()
            {
                if ( guard_ ) {
                    dhp::smr::tls()->hazards_.free( guard_ );
                    guard_ = nullptr;
                }
            }
            //@endcond

        private:
            //@cond
            dhp::guard* guard_;
            //@endcond
        };

    public:
        /// Initializes the %DHP memory manager singleton
        /**
            The constructor creates and initializes the %DHP global object.
            The %DHP object should be created before using any CDS data structure based on \p %cds::gc::DHP. Usually,
            it is created at the beginning of the \p main() function.
            After creating the global object you may use CDS data structures based on \p %cds::gc::DHP.

            \p nInitialHazardPtrCount - the initial count of guards allocated for each thread.
                When a thread is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
                The local thread guard pool grows automatically from the common pool as needed.
                When the thread terminates, its guard pool is returned to the common GC pool.
        */
        explicit DHP(
            size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
        )
        {
            dhp::smr::construct( nInitialHazardPtrCount );
        }

        /// Destroys the %DHP memory manager
        /**
            The destructor destroys the %DHP global object. After calling this function you may \b NOT
            use CDS data structures based on \p %cds::gc::DHP.
            Usually, the %DHP object is destroyed at the end of your \p main().
        */
        ~DHP()
        {
            dhp::GarbageCollector::destruct( true );
        }

        /// Checks if the count of hazard pointers is no less than \p nCountNeeded
        /**
            The function always returns \p true since the guard count is unlimited for
            the \p %gc::DHP garbage collector.
        */
        static CDS_CONSTEXPR bool check_available_guards(
#ifdef CDS_DOXYGEN_INVOKED
            size_t nCountNeeded,
#else
            size_t
#endif
        )
        {
            return true;
        }

        /// Sets memory management functions
        /**
            @note This function may be called <b>BEFORE</b> creating an instance
            of Dynamic Hazard Pointer SMR

            The SMR object allocates some memory for thread-specific data and for
            creating the SMR object itself.
            By default, the standard \p new and \p delete operators are used for this.
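
            A minimal sketch: plug in \p std::malloc / \p std::free before the SMR singleton
            is created (any pair of functions with matching signatures can be used instead):
            \code
            cds::gc::DHP::set_memory_allocator( std::malloc, std::free );
            cds::gc::DHP dhpGC;     // the singleton now uses malloc()/free() internally
            \endcode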
        */
        static void set_memory_allocator(
            void* ( *alloc_func )( size_t size ),   ///< \p malloc() function
            void( *free_func )( void * p )          ///< \p free() function
        )
        {
            dhp::smr::set_memory_allocator( alloc_func, free_func );
        }

        /// Retires pointer \p p with function \p func
        /**
            The function places pointer \p p into the array of pointers ready for removing
            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
            \p func is a disposer: when \p p can be safely removed, \p func is called.
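
            A minimal sketch (the type \p foo and its disposer are assumptions for illustration):
            \code
            struct foo { int value; };
            void foo_disposer( foo* p ) { delete p; }

            foo* p = new foo;
            // ... p is used in a lock-free manner and finally unlinked from the container ...

            cds::gc::DHP::retire( p, foo_disposer );    // deferred "delete p"
            \endcode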
        */
        template <typename T>
        static void retire( T * p, void (* func)(T *))
        {
            dhp::thread_data* rec = dhp::smr::tls();
            if ( !rec->retired_.push( dhp::retired_ptr( p, func ) ) )
                dhp::smr::instance().scan( rec );
        }

        /// Retires pointer \p p with a functor of type \p Disposer
        /**
            The function places pointer \p p into the array of pointers ready for removing
            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.

            Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
            \code
            template <typename T>
            struct disposer {
                void operator()( T * p )    ;   // disposing operator
            };
            \endcode
            Since the functor call can happen at any time after the \p retire() call, additional restrictions are imposed on the \p Disposer type:
            - it should be a stateless functor
            - it should be default-constructible
            - the result of the functor call with argument \p p should not depend on where the functor is called.

            \par Examples:
            Operator \p delete functor:
            \code
            template <typename T>
            struct disposer {
                void operator ()( T * p ) {
                    delete p;
                }
            };

            // How to call the DHP::retire method
            int * p = new int;

            // ... use p in lock-free manner

            cds::gc::DHP::retire<disposer>( p ) ;   // place p to the retired pointer array of DHP SMR
            \endcode

            Functor based on \p std::allocator :
            \code
            template <typename Alloc = std::allocator<int> >
            struct disposer {
                template <typename T>
                void operator()( T * p ) {
                    typedef typename Alloc::template rebind<T>::other alloc_t;
                    alloc_t a;
                    a.destroy( p );
                    a.deallocate( p, 1 );
                }
            };
            \endcode
        */
        template <class Disposer, typename T>
        static void retire( T * p )
        {
            if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
                scan();
        }

        /// Checks if Dynamic Hazard Pointer GC is constructed and may be used
        static bool isUsed()
        {
            return dhp::smr::isUsed();
        }

        /// Forced GC cycle call for the current thread
        /**
            Usually, this function should not be called directly.
        */
        static void scan()
        {
            dhp::smr::instance().scan( dhp::smr::tls() );
        }

        /// Synonym for \p scan()
        static void force_dispose()
        {
            scan();
        }
    };

}} // namespace cds::gc

#endif // #ifndef CDSLIB_GC_DHP_SMR_H