/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CDSLIB_GC_DHP_SMR_H
#define CDSLIB_GC_DHP_SMR_H

#include <exception>
#include <cds/gc/details/hp_common.h>
#include <cds/intrusive/free_list_selector.h>
#include <cds/details/throw_exception.h>
#include <cds/details/static_functor.h>
#include <cds/details/marked_ptr.h>
#include <cds/user_setup/cache_line.h>

namespace cds { namespace gc {

    /// Dynamic (adaptive) Hazard Pointer implementation details
    namespace dhp {
        using namespace cds::gc::hp::common;

        /// Exception "Dynamic Hazard Pointer SMR is not initialized"
        class not_initialized: public std::runtime_error
        {
        public:
            //@cond
            not_initialized()
                : std::runtime_error( "Global DHP SMR object is not initialized" )
            {}
            //@endcond
        };

        //@cond
        struct guard_block: public cds::intrusive::FreeListImpl::node
        {
            guard_block*    next_;  // next block in the thread list

            guard_block()
                : next_( nullptr )
            {}

            guard* first()
            {
                // the guard array is laid out immediately after the block header
                return reinterpret_cast<guard*>( this + 1 );
            }
        };
        //@endcond

        //@cond
        /// \p guard_block allocator (global object)
        class hp_allocator
        {
            friend class smr;
        public:
            static hp_allocator& instance();

            CDS_EXPORT_API guard_block* alloc();
            void free( guard_block* block )
            {
                free_list_.put( block );
            }

        private:
            hp_allocator()
            {}
            CDS_EXPORT_API ~hp_allocator();

        private:
            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p guard_block
        };
        //@endcond

        //@cond
        /// Per-thread hazard pointer storage
        class thread_hp_storage
        {
            friend class smr;
        public:
            thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
                : free_head_( arr )
                , extended_list_( nullptr )
                , array_( arr )
                , initial_capacity_( nSize )
            {
                // Initialize guards
                new( arr ) guard[nSize];
            }

            thread_hp_storage() = delete;
            thread_hp_storage( thread_hp_storage const& ) = delete;
            thread_hp_storage( thread_hp_storage&& ) = delete;

            ~thread_hp_storage()
            {
                clear();
            }

            guard* alloc()
            {
                if ( cds_unlikely( free_head_ == nullptr )) {
                    extend();
                    assert( free_head_ != nullptr );
                }

                guard* g = free_head_;
                free_head_ = g->next_;
                return g;
            }

            void free( guard* g ) CDS_NOEXCEPT
            {
                if ( g ) {
                    g->clear();
                    g->next_ = free_head_;
                    free_head_ = g;
                }
            }

            template <size_t Capacity>
            size_t alloc( guard_array<Capacity>& arr )
            {
                for ( size_t i = 0; i < Capacity; ++i ) {
                    if ( cds_unlikely( free_head_ == nullptr ))
                        extend();
                    arr.reset( i, free_head_ );
                    free_head_ = free_head_->next_;
                }
                return Capacity;
            }

            template <size_t Capacity>
            void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
            {
                guard* gList = free_head_;
                for ( size_t i = 0; i < Capacity; ++i ) {
                    guard* g = arr[i];
                    if ( g ) {
                        g->clear();
                        g->next_ = gList;
                        gList = g;
                    }
                }
                free_head_ = gList;
            }

            void clear()
            {
                // clear array_
                for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur )
                    cur->clear();

                // free all extended blocks
                hp_allocator& alloc = hp_allocator::instance();
                for ( guard_block* p = extended_list_; p; ) {
                    guard_block* next = p->next_;
                    alloc.free( p );
                    p = next;
                }

                extended_list_ = nullptr;
            }

            void init()
            {
                assert( extended_list_ == nullptr );

                guard* p = array_;
                for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
                    p->next_ = p + 1;
                p->next_ = nullptr;
                free_head_ = array_;
            }

        private:
            void extend()
            {
                assert( free_head_ == nullptr );

                guard_block* block = hp_allocator::instance().alloc();
                block->next_ = extended_list_;
                extended_list_ = block;
                free_head_ = block->first();
            }

        private:
            guard*          free_head_;        ///< Head of free guard list
            guard_block*    extended_list_;    ///< Head of extended guard blocks allocated for the thread
            guard* const    array_;            ///< initial HP array
            size_t const    initial_capacity_; ///< Capacity of \p array_
        };
        //@endcond

        //@cond
        struct retired_block: public cds::intrusive::FreeListImpl::node
        {
            retired_block*  next_;  ///< Next block in thread-private retired array

            static size_t const c_capacity = 256;

            retired_block()
                : next_( nullptr )
            {}

            retired_ptr* first() const
            {
                // the retired_ptr array is laid out immediately after the block header
                return reinterpret_cast<retired_ptr*>( const_cast<retired_block*>( this ) + 1 );
            }

            retired_ptr* last() const
            {
                return first() + c_capacity;
            }
        };
        //@endcond

        //@cond
        class retired_allocator
        {
            friend class smr;
        public:
            static retired_allocator& instance();

            CDS_EXPORT_API retired_block* alloc();
            void free( retired_block* block )
            {
                block->next_ = nullptr;
                free_list_.put( block );
            }

        private:
            retired_allocator()
            {}
            CDS_EXPORT_API ~retired_allocator();

        private:
            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p retired_block
        };
        //@endcond

        //@cond
        /// Per-thread retired array
        class retired_array
        {
            friend class smr;
        public:
            retired_array() CDS_NOEXCEPT
                : current_block_( nullptr )
                , current_cell_( nullptr )
                , list_head_( nullptr )
                , list_tail_( nullptr )
                , block_count_(0)
            {}

            retired_array( retired_array const& ) = delete;
            retired_array( retired_array&& ) = delete;

            ~retired_array()
            {
                assert( empty());
                fini();
            }

            bool push( retired_ptr const& p ) CDS_NOEXCEPT
            {
                assert( current_block_ != nullptr );
                assert( current_block_->first() <= current_cell_ );
                assert( current_cell_ < current_block_->last() );
                //assert( &p != current_cell_ );

                *current_cell_ = p;
                if ( ++current_cell_ == current_block_->last() ) {
                    // goto next block if exists
                    if ( current_block_->next_ ) {
                        current_block_ = current_block_->next_;
                        current_cell_ = current_block_->first();
                        return true;
                    }

                    // no free block;
                    // smr::scan() extends retired_array if needed
                    return false;
                }

                return true;
            }

            bool safe_push( retired_ptr* p ) CDS_NOEXCEPT
            {
                bool ret = push( *p );
                assert( ret );
                return ret;
            }

        private: // called by smr
            void init()
            {
                if ( list_head_ == nullptr ) {
                    retired_block* block = retired_allocator::instance().alloc();
                    assert( block->next_ == nullptr );

                    current_block_ =
                        list_head_ =
                        list_tail_ = block;
                    current_cell_ = block->first();

                    block_count_ = 1;
                }
            }

            void fini()
            {
                retired_allocator& alloc = retired_allocator::instance();
                for ( retired_block* p = list_head_; p; ) {
                    retired_block* next = p->next_;
                    alloc.free( p );
                    p = next;
                }

                current_block_ =
                    list_head_ =
                    list_tail_ = nullptr;
                current_cell_ = nullptr;

                block_count_ = 0;
            }

            void extend()
            {
                assert( list_head_ != nullptr );
                assert( current_block_ == list_tail_ );
                assert( current_cell_ == current_block_->last() );

                retired_block* block = retired_allocator::instance().alloc();
                assert( block->next_ == nullptr );

                list_tail_ = list_tail_->next_ = block;
                current_cell_ = block->first();
                ++block_count_;
            }

            bool empty() const
            {
                return current_block_ == nullptr
                    || ( current_block_ == list_head_ && current_cell_ == current_block_->first());
            }

        private:
            retired_block*          current_block_;
            retired_ptr*            current_cell_;  // in current_block_

            retired_block*          list_head_;
            retired_block*          list_tail_;
            size_t                  block_count_;
        };
        //@endcond

        //@cond
        /// Per-thread data
        struct thread_data {
            thread_hp_storage   hazards_;   ///< Hazard pointers private to the thread
            retired_array       retired_;   ///< Retired data private to the thread

            char pad1_[cds::c_nCacheLineSize];
            atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
            char pad2_[cds::c_nCacheLineSize];

            // CppCheck warning: pad1_ and pad2_ are uninitialized in ctor
            // cppcheck-suppress uninitMemberVar
            thread_data( guard* guards, size_t guard_count )
                : hazards_( guards, guard_count )
                , sync_( 0 )
            {}

            thread_data() = delete;
            thread_data( thread_data const& ) = delete;
            thread_data( thread_data&& ) = delete;

            void sync()
            {
                sync_.fetch_add( 1, atomics::memory_order_acq_rel );
            }
        };
        //@endcond

        //@cond
        // Dynamic (adaptive) Hazard Pointer SMR (Safe Memory Reclamation)
        class smr
        {
            struct thread_record;

        public:
            /// Returns the instance of Hazard Pointer \ref smr
            static smr& instance()
            {
#       ifdef CDS_DISABLE_SMR_EXCEPTION
                assert( instance_ != nullptr );
#       else
                if ( !instance_ )
                    CDS_THROW_EXCEPTION( not_initialized() );
#       endif
                return *instance_;
            }

            /// Creates Dynamic Hazard Pointer SMR singleton
            /**
                Dynamic Hazard Pointer SMR is a singleton. If the DHP instance is not initialized, the function creates it;
                otherwise it does nothing.

                Michael's HP reclamation schema depends on three parameters:
                - \p nHazardPtrCount - HP count per thread. Usually it is a small number (2-4) depending on
                the data structure algorithms. By default, if \p nHazardPtrCount = 0,
                the function uses the maximum HP count for the CDS library
                - \p nMaxThreadCount - the maximum number of threads using HP GC in your application. Default is 100.
                - \p nMaxRetiredPtrCount - capacity of the array of retired pointers for each thread. Must be greater than
                <tt> nHazardPtrCount * nMaxThreadCount </tt>.
                Default is <tt>2 * nHazardPtrCount * nMaxThreadCount</tt>
            */
            static CDS_EXPORT_API void construct(
                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
            );

            // for backward compatibility
            static void Construct(
                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
            )
            {
                construct( nInitialHazardPtrCount );
            }

            /// Destroys global instance of \ref smr
            /**
                The parameter \p bDetachAll should be used carefully: if its value is \p true,
                then the destroyed object automatically detaches all attached threads. This feature
                can be useful when you have no control over thread termination, for example,
                when \p libcds is injected into an existing external thread.
            */
            static CDS_EXPORT_API void destruct(
                bool bDetachAll = false     ///< Detach all threads
            );

            // for backward compatibility
            static void Destruct(
                bool bDetachAll = false     ///< Detach all threads
            )
            {
                destruct( bDetachAll );
            }

            /// Checks if global SMR object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return instance_ != nullptr;
            }

            /// Set memory management functions
            /**
                @note This function may be called <b>BEFORE</b> creating an instance
                of Dynamic Hazard Pointer SMR

                The SMR object allocates some memory for thread-specific data and for
                creating the SMR object itself.
                By default, the standard \p new and \p delete operators are used for this.
            */
            static CDS_EXPORT_API void set_memory_allocator(
                void* ( *alloc_func )( size_t size ),
                void( *free_func )( void * p )
            );

            /// Returns thread-local data for the current thread
            static CDS_EXPORT_API thread_data* tls();

            static CDS_EXPORT_API void attach_thread();
            static CDS_EXPORT_API void detach_thread();

        public: // for internal use only
            /// The main garbage collecting function
            CDS_EXPORT_API void scan( thread_data* pRec );

            /// Helper scan routine
            /**
                The function guarantees that every node that is eligible for reuse is eventually freed, barring
                thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
                where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
                to its own list of reclaimed pointers.

                The function is called internally by \p scan().
            */
            CDS_EXPORT_API void help_scan( thread_data* pThis );

            hp_allocator& get_hp_allocator()
            {
                return hp_allocator_;
            }

            retired_allocator& get_retired_allocator()
            {
                return retired_allocator_;
            }

        private:
            CDS_EXPORT_API explicit smr(
                size_t nInitialHazardPtrCount
            );

            CDS_EXPORT_API ~smr();

            CDS_EXPORT_API void detach_all_thread();

        private:
            CDS_EXPORT_API thread_record* create_thread_data();
            static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );

            /// Allocates Hazard Pointer SMR thread private data
            CDS_EXPORT_API thread_record* alloc_thread_data();

            /// Free HP SMR thread-private data
            CDS_EXPORT_API void free_thread_data( thread_record* pRec );

        private:
            static CDS_EXPORT_API smr* instance_;

            atomics::atomic< thread_record*>    thread_list_;   ///< Head of thread list
            size_t const        initial_hazard_count_;  ///< initial number of hazard pointers per thread
            hp_allocator        hp_allocator_;
            retired_allocator   retired_allocator_;

            // temporaries
            std::atomic<size_t> last_plist_size_;   ///< HP array size in last scan() call
        };
        //@endcond

        //@cond
        // for backward compatibility
        typedef smr GarbageCollector;


        // inlines
        inline hp_allocator& hp_allocator::instance()
        {
            return smr::instance().get_hp_allocator();
        }

        inline retired_allocator& retired_allocator::instance()
        {
            return smr::instance().get_retired_allocator();
        }
        //@endcond

    } // namespace dhp


    /// Dynamic (adaptive) Hazard Pointer SMR
    /**  @ingroup cds_garbage_collector

        Implementation of Dynamic (adaptive) Hazard Pointer SMR

        Sources:
            - [2002] Maged M. Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
            - [2003] Maged M. Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
            - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"

        %DHP is an adaptive variant of the classic \p cds::gc::HP, see @ref cds_garbage_collectors_comparison "Compare HP implementation"

        See the \ref cds_how_to_use "How to use" section for details on how to apply SMR.
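
        A minimal usage sketch, assuming the standard \p libcds initialization API
        (\p cds::Initialize(), \p cds::Terminate(), the threading manager); the containers
        you use inside the scope are up to you:
        \code
        #include <cds/init.h>
        #include <cds/gc/dhp.h>

        int main()
        {
            // Initialize libcds infrastructure
            cds::Initialize();
            {
                // Create the DHP SMR singleton; it lives to the end of this scope
                cds::gc::DHP dhpGC;

                // Attach the current thread to libcds
                cds::threading::Manager::attachThread();

                // ... use DHP-based containers here ...
            }
            // Terminate libcds
            cds::Terminate();
        }
        \endcode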
    */
    class DHP
    {
    public:
        /// Native guarded pointer type
        typedef void* guarded_pointer;

        /// Atomic reference
        template <typename T> using atomic_ref = atomics::atomic<T *>;

        /// Atomic type
        /**
            @headerfile cds/gc/dhp.h
        */
        template <typename T> using atomic_type = atomics::atomic<T>;

        /// Atomic marked pointer
        template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;


        /// Dynamic Hazard Pointer guard
        /**
            A guard is a hazard pointer.
            Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer.

            A \p %Guard object is movable but not copyable.

            The guard object can be in two states:
            - unlinked - the guard is not linked with any internal hazard pointer.
              In this state no operation except \p link() and move assignment is supported.
            - linked (default) - the guard allocates an internal hazard pointer and is fully operable.

            For performance reasons the implementation does not check the state of the guard at runtime.

            @warning Move assignment can transfer the guard to the unlinked state, use with care.
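
            A brief illustrative sketch of the two states (not from the library docs):
            \code
            cds::gc::DHP::Guard g1;             // linked: owns a hazard pointer
            cds::gc::DHP::Guard g2( nullptr );  // unlinked: owns nothing

            g2 = std::move( g1 );               // ownership moves to g2; g1 becomes unlinked
            assert( !g1.is_linked());

            g1.link();                          // relink g1 before using it again
            \endcode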
        */
        class Guard
        {
        public:
            /// Default ctor allocates a guard (hazard pointer) from thread-private storage
            Guard() CDS_NOEXCEPT
                : guard_( dhp::smr::tls()->hazards_.alloc() )
            {}

            /// Initializes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
            explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
                : guard_( nullptr )
            {}

            /// Move ctor - \p src guard becomes unlinked (transfers internal guard ownership)
            Guard( Guard&& src ) CDS_NOEXCEPT
                : guard_( src.guard_ )
            {
                src.guard_ = nullptr;
            }

            /// Move assignment: the internal guards are swapped between \p src and \p this
            /**
                @warning \p src will become unlinked if \p this was unlinked on entry.
            */
            Guard& operator=( Guard&& src ) CDS_NOEXCEPT
            {
                std::swap( guard_, src.guard_ );
                return *this;
            }

            /// Copy ctor is prohibited - the guard is not copyable
            Guard( Guard const& ) = delete;

            /// Copy assignment is prohibited
            Guard& operator=( Guard const& ) = delete;

            /// Frees the internal hazard pointer if the guard is in linked state
            ~Guard()
            {
                unlink();
            }

            /// Checks if the guard object is linked with any internal hazard pointer
            bool is_linked() const
            {
                return guard_ != nullptr;
            }

            /// Links the guard with an internal hazard pointer if the guard is in unlinked state
            void link()
            {
                if ( !guard_ )
                    guard_ = dhp::smr::tls()->hazards_.alloc();
            }

            /// Unlinks the guard from its internal hazard pointer; the guard enters the unlinked state
            void unlink()
            {
                if ( guard_ ) {
                    dhp::smr::tls()->hazards_.free( guard_ );
                    guard_ = nullptr;
                }
            }

            /// Protects a pointer of type <tt> atomic<T*> </tt>
            /**
                Returns the value of \p toGuard

                The function tries to load \p toGuard and to store it
                to the HP slot repeatedly until the guard's value equals \p toGuard.
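
                A brief usage sketch; \p node_type and \p atomic_head are illustrative placeholders:
                \code
                atomics::atomic<node_type*> atomic_head;
                cds::gc::DHP::Guard g;

                // after protect() returns, the node cannot be reclaimed
                // as long as the guard g holds it
                node_type* h = g.protect( atomic_head );
                \endcode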
            */
            template <typename T>
            T protect( atomics::atomic<T> const& toGuard )
            {
                assert( guard_ != nullptr );

                T pCur = toGuard.load(atomics::memory_order_acquire);
                T pRet;
                do {
                    pRet = assign( pCur );
                    pCur = toGuard.load(atomics::memory_order_acquire);
                } while ( pRet != pCur );
                return pCur;
            }

            /// Protects a converted pointer of type <tt> atomic<T*> </tt>
            /**
                Returns the value of \p toGuard

                The function tries to load \p toGuard and to store the result of the \p f functor
                to the HP slot repeatedly until the guard's value equals \p toGuard.

                The function is useful for intrusive containers when \p toGuard is a node pointer
                that should be converted to a pointer to the value type before guarding.
                The parameter \p f of type Func is a functor that makes this conversion:
                \code
                    struct functor {
                        value_type * operator()( T * p );
                    };
                \endcode
                Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
            */
            template <typename T, class Func>
            T protect( atomics::atomic<T> const& toGuard, Func f )
            {
                assert( guard_ != nullptr );

                T pCur = toGuard.load(atomics::memory_order_acquire);
                T pRet;
                do {
                    pRet = pCur;
                    assign( f( pCur ));
                    pCur = toGuard.load(atomics::memory_order_acquire);
                } while ( pRet != pCur );
                return pCur;
            }

            /// Store \p p to the guard
            /**
                The function is just an assignment, no loop is performed.
                Can be used for a pointer that cannot be changed concurrently
                or for an already guarded pointer.
            */
            template <typename T>
            T* assign( T* p )
            {
                assert( guard_ != nullptr );

                guard_->set( p );
                dhp::smr::tls()->sync();
                return p;
            }

            //@cond
            std::nullptr_t assign( std::nullptr_t )
            {
                assert( guard_ != nullptr );

                clear();
                return nullptr;
            }
            //@endcond

            /// Store marked pointer \p p to the guard
            /**
                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
                Can be used for a marked pointer that cannot be changed concurrently
                or for an already guarded pointer.
            */
            template <typename T, int BITMASK>
            T* assign( cds::details::marked_ptr<T, BITMASK> p )
            {
                return assign( p.ptr());
            }

            /// Copy from \p src guard to \p this guard
            void copy( Guard const& src )
            {
                assign( src.get_native());
            }

            /// Clears value of the guard
            void clear()
            {
                assert( guard_ != nullptr );

                guard_->clear();
            }

            /// Gets the value currently protected (relaxed read)
            template <typename T>
            T * get() const
            {
                assert( guard_ != nullptr );
                return guard_->get_as<T>();
            }

            /// Gets native guarded pointer stored
            void* get_native() const
            {
                assert( guard_ != nullptr );
                return guard_->get();
            }

            //@cond
            dhp::guard* release()
            {
                dhp::guard* g = guard_;
                guard_ = nullptr;
                return g;
            }

            dhp::guard*& guard_ref()
            {
                return guard_;
            }
            //@endcond

        private:
            //@cond
            dhp::guard* guard_;
            //@endcond
        };

        /// Array of Dynamic Hazard Pointer guards
        /**
            The class is intended for allocating an array of hazard pointer guards.
            Template parameter \p Count defines the size of the array.

            A \p %GuardArray object is not copy- and move-constructible
            and not copy- and move-assignable.
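
            A brief usage sketch; the two slots and the atomic sources \p atomic_prev / \p atomic_cur
            are illustrative placeholders:
            \code
            cds::gc::DHP::GuardArray<2> guards;

            node_type* pPrev = guards.protect( 0, atomic_prev );
            node_type* pCur  = guards.protect( 1, atomic_cur );
            // both pointers are now protected from reclamation
            \endcode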
        */
        template <size_t Count>
        class GuardArray
        {
        public:
            /// Rebind array for other size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount>  other;   ///< rebinding result
            };

            /// Array capacity
            static CDS_CONSTEXPR const size_t c_nCapacity = Count;

        public:
            /// Default ctor allocates \p Count hazard pointers
            GuardArray()
            {
                dhp::smr::tls()->hazards_.alloc( guards_ );
            }

            /// Move ctor is prohibited
            GuardArray( GuardArray&& ) = delete;

            /// Move assignment is prohibited
            GuardArray& operator=( GuardArray&& ) = delete;

            /// Copy ctor is prohibited
            GuardArray( GuardArray const& ) = delete;

            /// Copy assignment is prohibited
            GuardArray& operator=( GuardArray const& ) = delete;

            /// Frees allocated hazard pointers
            ~GuardArray()
            {
                dhp::smr::tls()->hazards_.free( guards_ );
            }

            /// Protects a pointer of type \p atomic<T*>
            /**
                Returns the value of \p toGuard

                The function tries to load \p toGuard and to store it
                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
            */
            template <typename T>
            T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
            {
                assert( nIndex < capacity() );

                T pRet;
                do {
                    pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));

                return pRet;
            }

            /// Protects a pointer of type \p atomic<T*>
            /**
                Returns the value of \p toGuard

                The function tries to load \p toGuard and to store it
                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard

                The function is useful for intrusive containers when \p toGuard is a node pointer
                that should be converted to a pointer to the value type before guarding.
                The parameter \p f of type Func is a functor that makes this conversion:
                \code
                    struct functor {
                        value_type * operator()( T * p );
                    };
                \endcode
                Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
            */
            template <typename T, class Func>
            T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
            {
                assert( nIndex < capacity() );

                T pRet;
                do {
                    assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));

                return pRet;
            }

            /// Store \p p to the slot \p nIndex
            /**
                The function is just an assignment, no loop is performed.
            */
            template <typename T>
            T * assign( size_t nIndex, T * p )
            {
                assert( nIndex < capacity() );

                guards_.set( nIndex, p );
                dhp::smr::tls()->sync();
                return p;
            }

            /// Store marked pointer \p p to the guard
            /**
                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
                Can be used for a marked pointer that cannot be changed concurrently
                or for an already guarded pointer.
            */
            template <typename T, int Bitmask>
            T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
            {
                return assign( nIndex, p.ptr());
            }

            /// Copy guarded value from \p src guard to slot at index \p nIndex
            void copy( size_t nIndex, Guard const& src )
            {
                assign( nIndex, src.get_native());
            }

            /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
            void copy( size_t nDestIndex, size_t nSrcIndex )
            {
                assign( nDestIndex, get_native( nSrcIndex ));
            }

            /// Clear value of the slot \p nIndex
            void clear( size_t nIndex )
            {
                guards_.clear( nIndex );
            }

            /// Get current value of slot \p nIndex
            template <typename T>
            T * get( size_t nIndex ) const
            {
                assert( nIndex < capacity() );
                return guards_[nIndex]->template get_as<T>();
            }

            /// Get native guarded pointer stored
            guarded_pointer get_native( size_t nIndex ) const
            {
                assert( nIndex < capacity() );
                return guards_[nIndex]->get();
            }

            //@cond
            dhp::guard* release( size_t nIndex ) CDS_NOEXCEPT
            {
                return guards_.release( nIndex );
            }
            //@endcond

            /// Capacity of the guard array
            static CDS_CONSTEXPR size_t capacity()
            {
                return Count;
            }

        private:
            //@cond
            dhp::guard_array<c_nCapacity> guards_;
            //@endcond
        };

        /// Guarded pointer
        /**
            A guarded pointer is a pair of a pointer and a GC's guard.
            Usually, it is used for returning a pointer to an item from a lock-free container.
            The guard prevents the pointer from being disposed (freed) early by the GC.
            After the \p %guarded_ptr object is destroyed, the pointer can be disposed (freed) automatically at any time.

            Template arguments:
            - \p GuardedType - a type which the guard stores
            - \p ValueType - a value type
            - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).

            For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
            In such case the \p %guarded_ptr is:
            @code
            typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr;
            @endcode

            For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
            For example:
            @code
            struct foo {
                int const   key;
                std::string value;
            };

            struct value_accessor {
                std::string* operator()( foo* pFoo ) const
                {
                    return &(pFoo->value);
                }
            };

            // Guarded ptr
            typedef cds::gc::DHP::guarded_ptr< foo, std::string, value_accessor > nonintrusive_guarded_ptr;
            @endcode

            You don't need to use this class directly.
            All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with an appropriate casting functor.
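
            A hedged usage sketch: many \p libcds containers return a \p %guarded_ptr from lookup
            operations; here \p map_type, \p theMap, \p key and \p use() are placeholders:
            @code
            map_type::guarded_ptr gp = theMap.get( key );
            if ( gp ) {
                // the item gp points to cannot be reclaimed while gp is alive
                use( *gp );
            }
            // gp is destroyed here; the item may now be disposed by the GC
            @endcode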
        */
        template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
        class guarded_ptr
        {
            //@cond
            struct trivial_cast {
                ValueType * operator()( GuardedType * p ) const
                {
                    return p;
                }
            };

            template <typename GT, typename VT, typename C> friend class guarded_ptr;
            //@endcond

        public:
            typedef GuardedType guarded_type; ///< Guarded type
            typedef ValueType   value_type;   ///< Value type

            /// Functor for casting \p guarded_type to \p value_type
            typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;

        public:
            /// Creates empty guarded pointer
            guarded_ptr() CDS_NOEXCEPT
                : guard_( nullptr )
            {}

            //@cond
            explicit guarded_ptr( dhp::guard* g ) CDS_NOEXCEPT
                : guard_( g )
            {}

            /// Initializes guarded pointer with \p p
            explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
                : guard_( nullptr )
            {
                reset( p );
            }
            explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
                : guard_( nullptr )
            {}
            //@endcond

            /// Move ctor
            guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
                : guard_( gp.guard_ )
            {
                gp.guard_ = nullptr;
            }

            /// Move ctor
            template <typename GT, typename VT, typename C>
            guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
                : guard_( gp.guard_ )
            {
                gp.guard_ = nullptr;
            }

            /// Ctor from \p Guard
            explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
                : guard_( g.release())
            {}

            /// The guarded pointer is not copy-constructible
            guarded_ptr( guarded_ptr const& gp ) = delete;

            /// Clears the guarded pointer
            /**
                \ref release is called if the guarded pointer is not \ref empty
            */
            ~guarded_ptr() CDS_NOEXCEPT
            {
                release();
            }

            /// Move-assignment operator
            guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
            {
                std::swap( guard_, gp.guard_ );
                return *this;
            }

            /// Move-assignment from \p Guard
            guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
            {
                std::swap( guard_, g.guard_ref());
                return *this;
            }

            /// The guarded pointer is not copy-assignable
            guarded_ptr& operator=(guarded_ptr const& gp) = delete;

            /// Returns a pointer to guarded value
            value_type * operator ->() const CDS_NOEXCEPT
            {
                assert( !empty());
                return value_cast()( guard_->get_as<guarded_type>() );
            }

            /// Returns a reference to guarded value
            value_type& operator *() CDS_NOEXCEPT
            {
                assert( !empty());
                return *value_cast()( guard_->get_as<guarded_type>() );
            }

            /// Returns const reference to guarded value
            value_type const& operator *() const CDS_NOEXCEPT
            {
                assert( !empty());
                return *value_cast()(reinterpret_cast<guarded_type *>(guard_->get()));
            }

            /// Checks if the guarded pointer is \p nullptr
            bool empty() const CDS_NOEXCEPT
            {
                return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr;
            }

            /// \p bool operator returns <tt>!empty()</tt>
            explicit operator bool() const CDS_NOEXCEPT
            {
                return !empty();
            }

            /// Clears guarded pointer
            /**
                If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
                Dereferencing the guarded pointer after \p release() is dangerous.
            */
            void release() CDS_NOEXCEPT
            {
                free_guard();
            }

            //@cond
            // For internal use only!!!
            void reset(guarded_type * p) CDS_NOEXCEPT
            {
                alloc_guard();
                assert( guard_ );
                guard_->set( p );
            }

            //@endcond

        private:
            //@cond
            void alloc_guard()
            {
                if ( !guard_ )
                    guard_ = dhp::smr::tls()->hazards_.alloc();
            }

            void free_guard()
            {
                if ( guard_ ) {
                    dhp::smr::tls()->hazards_.free( guard_ );
                    guard_ = nullptr;
                }
            }
            //@endcond

        private:
            //@cond
            dhp::guard* guard_;
            //@endcond
        };

    public:
        /// Initializes %DHP memory manager singleton
        /**
            Constructor creates and initializes the %DHP global object.
            The %DHP object should be created before using CDS data structures based on \p %cds::gc::DHP. Usually,
            it is created at the beginning of the \p main() function.
            After creating the global object you may use CDS data structures based on \p %cds::gc::DHP.

            \p nInitialHazardPtrCount - initial count of guards allocated for each thread.
                When a thread is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
                If needed, the thread's local guard pool is grown automatically from the common pool.
                When the thread terminates, its guard pool is returned to the common GC pool.
        */
        explicit DHP(
            size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
        )
        {
            dhp::smr::construct( nInitialHazardPtrCount );
        }

        /// Destroys %DHP memory manager
        /**
            The destructor destroys the %DHP global object. After calling this function you may \b NOT
            use CDS data structures based on \p %cds::gc::DHP.
            Usually, the %DHP object is destroyed at the end of your \p main().
        */
        ~DHP()
        {
            dhp::GarbageCollector::destruct( true );
        }

        /// Checks if the count of hazard pointers is no less than \p nCountNeeded
        /**
            The function always returns \p true since the guard count is unlimited for
            the \p %gc::DHP garbage collector.
        */
        static CDS_CONSTEXPR bool check_available_guards(
#ifdef CDS_DOXYGEN_INVOKED
            size_t nCountNeeded,
#else
            size_t
#endif
        )
        {
            return true;
        }

        /// Set memory management functions
        /**
            @note This function may be called <b>BEFORE</b> creating an instance
            of Dynamic Hazard Pointer SMR

            The SMR object allocates some memory for thread-specific data and for creating the SMR object itself.
            By default, the standard \p new and \p delete operators are used for this.
        */
        static void set_memory_allocator(
            void* ( *alloc_func )( size_t size ),   ///< \p malloc() function
            void( *free_func )( void * p )          ///< \p free() function
        )
        {
            dhp::smr::set_memory_allocator( alloc_func, free_func );
        }

        /// Retire pointer \p p with function \p func
        /**
            The function places pointer \p p into the array of pointers ready for removing
            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
            \p func is a disposer: when \p p can be safely removed, \p func is called.
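
            A brief sketch; the type \p foo and its disposer are illustrative:
            \code
            struct foo { int value; };
            void foo_disposer( foo* p ) { delete p; }

            foo* p = new foo;
            // ... use p in a lock-free manner, then unlink it from the container ...
            cds::gc::DHP::retire( p, foo_disposer );  // p is deleted when no hazard pointer guards it
            \endcode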
        */
        template <typename T>
        static void retire( T * p, void (* func)(T *))
        {
            dhp::thread_data* rec = dhp::smr::tls();
            if ( !rec->retired_.push( dhp::retired_ptr( p, func )))
                dhp::smr::instance().scan( rec );
        }

        /// Retire pointer \p p with functor of type \p Disposer
        /**
            The function places pointer \p p into the array of pointers ready for removing
            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.

            Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
            \code
            template <typename T>
            struct disposer {
                void operator()( T * p );   // disposing operator
            };
            \endcode
            Since the functor call can happen at any time after the \p retire() call, additional restrictions are imposed on the \p Disposer type:
            - it should be a stateless functor
            - it should be default-constructible
            - the result of the functor call with argument \p p should not depend on where the functor is called.

            \par Examples:
            Operator \p delete functor:
            \code
            template <typename T>
            struct disposer {
                void operator ()( T * p ) {
                    delete p;
                }
            };

            // How to call DHP::retire method
            int * p = new int;

            // ... use p in lock-free manner

            cds::gc::DHP::retire<disposer>( p );   // place p to retired pointer array of DHP SMR
            \endcode

            Functor based on \p std::allocator :
            \code
            template <typename Alloc = std::allocator<int> >
            struct disposer {
                template <typename T>
                void operator()( T * p ) {
                    typedef typename Alloc::template rebind<T>::other alloc_t;
                    alloc_t a;
                    a.destroy( p );
                    a.deallocate( p, 1 );
                }
            };
            \endcode
        */
        template <class Disposer, typename T>
        static void retire( T * p )
        {
            if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
                scan();
        }

        /// Checks if Dynamic Hazard Pointer GC is constructed and may be used
        static bool isUsed()
        {
            return dhp::smr::isUsed();
        }

        /// Forced GC cycle call for the current thread
        /**
            Usually, this function should not be called directly.
        */
        static void scan()
        {
            dhp::smr::instance().scan( dhp::smr::tls() );
        }

        /// Synonym for \p scan()
        static void force_dispose()
        {
            scan();
        }
    };

}} // namespace cds::gc

#endif // #ifndef CDSLIB_GC_DHP_SMR_H