Fixed minor compiler warnings
[libcds.git] / cds / gc / dhp.h
1 /*
2     This file is a part of libcds - Concurrent Data Structures library
3
4     (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
5
6     Source code repo: http://github.com/khizmax/libcds/
7     Download: http://sourceforge.net/projects/libcds/files/
8
9     Redistribution and use in source and binary forms, with or without
10     modification, are permitted provided that the following conditions are met:
11
12     * Redistributions of source code must retain the above copyright notice, this
13       list of conditions and the following disclaimer.
14
15     * Redistributions in binary form must reproduce the above copyright notice,
16       this list of conditions and the following disclaimer in the documentation
17       and/or other materials provided with the distribution.
18
19     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20     AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21     IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23     FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24     DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25     SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27     OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #ifndef CDSLIB_GC_DHP_SMR_H
32 #define CDSLIB_GC_DHP_SMR_H
33
34 #include <exception>
35 #include <cds/gc/details/hp_common.h>
36 #include <cds/details/lib.h>
37 #include <cds/threading/model.h>
38 #include <cds/intrusive/free_list_selector.h>
39 #include <cds/details/throw_exception.h>
40 #include <cds/details/static_functor.h>
41 #include <cds/details/marked_ptr.h>
42 #include <cds/user_setup/cache_line.h>
43
44 namespace cds { namespace gc {
45
46     /// Dynamic (adaptive) Hazard Pointer implementation details
47     namespace dhp {
48         using namespace cds::gc::hp::common;
49
50         /// Exception "Dynamic Hazard Pointer SMR is not initialized"
51         class not_initialized: public std::runtime_error
52         {
53         public:
54             //@cond
55             not_initialized()
56                 : std::runtime_error( "Global DHP SMR object is not initialized" )
57             {}
58             //@endcond
59         };
60
61         //@cond
62         struct guard_block: public cds::intrusive::FreeListImpl::node
63         {
64             guard_block*    next_;  // next block in the thread list
65
66             guard_block()
67                 : next_( nullptr )
68             {}
69
70             guard* first()
71             {
72                 return reinterpret_cast<guard*>( this + 1 );
73             }
74         };
75         //@endcond
76
        //@cond
        /// \p guard_block allocator (global object)
        /**
            Shared pool of \p guard_block chunks: blocks released by threads are kept in a
            free-list and handed out again instead of being returned to the OS.
            Constructible/destructible only by \p smr, which owns the singleton.
        */
        class hp_allocator
        {
            friend class smr;
        public:
            /// Returns the allocator bound to the global \p smr singleton
            static hp_allocator& instance();

            /// Obtains a block; implemented in the .cpp (presumably reuses the free-list or allocates fresh memory)
            CDS_EXPORT_API guard_block* alloc();
            /// Returns \p block to the free-list for later reuse; no memory is released here
            void free( guard_block* block )
            {
                free_list_.put( block );
            }

        private:
            // only smr may create/destroy the allocator
            hp_allocator()
            {}
            CDS_EXPORT_API ~hp_allocator();

        private:
            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p guard_block
        };
        //@endcond
100
        //@cond
        /// Per-thread hazard pointer storage
        /**
            Manages the thread-private set of guards: a fixed initial array supplied by the
            caller plus, on demand, extra \p guard_block chunks taken from the global
            \p hp_allocator. Free guards form a singly-linked list threaded through
            \p guard::next_ with head \p free_head_.
        */
        class thread_hp_storage 
        {
            friend class smr;
        public:
            /// \p arr - raw storage for \p nSize guards; owned by the caller, constructed in place here
            thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
                : free_head_( arr )
                , extended_list_( nullptr )
                , array_( arr )
                , initial_capacity_( nSize )
            {
                // Initialize guards
                new( arr ) guard[nSize];
            }

            thread_hp_storage() = delete;
            thread_hp_storage( thread_hp_storage const& ) = delete;
            thread_hp_storage( thread_hp_storage&& ) = delete;

            ~thread_hp_storage()
            {
                clear();
            }

            /// Allocates one guard, extending the storage from the global pool when the free list is empty
            guard* alloc()
            {
                if ( cds_unlikely( free_head_ == nullptr )) {
                    extend();
                    assert( free_head_ != nullptr );
                }

                // pop the head of the free list
                guard* g = free_head_;
                free_head_ = g->next_;
                return g;
            }

            /// Returns guard \p g (may be \p nullptr) to the free list; the hazard pointer is cleared first
            void free( guard* g ) CDS_NOEXCEPT
            {
                if ( g ) {
                    g->clear();
                    g->next_ = free_head_;
                    free_head_ = g;
                }
            }

            /// Fills \p arr with \p Capacity guards; always succeeds (extends the storage as needed)
            template< size_t Capacity>
            size_t alloc( guard_array<Capacity>& arr )
            {
                for ( size_t i = 0; i < Capacity; ++i ) {
                    if ( cds_unlikely( free_head_ == nullptr ))
                        extend();
                    arr.reset( i, free_head_ );
                    free_head_ = free_head_->next_;
                }
                return Capacity;
            }

            /// Returns all guards of \p arr to the free list; null slots are skipped
            template <size_t Capacity>
            void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
            {
                // chain the returned guards locally, then publish the new list head once
                guard* gList = free_head_;
                for ( size_t i = 0; i < Capacity; ++i ) {
                    guard* g = arr[i];
                    if ( g ) {
                        g->clear();
                        g->next_ = gList;
                        gList = g;
                    }
                }
                free_head_ = gList;
            }

            /// Clears all initial hazard pointers and returns extended blocks to the global allocator
            /**
                NOTE(review): \p free_head_ is not reset here and may be left pointing into a
                returned block; callers appear to be expected to call \p init() before reusing
                the storage — confirm against smr.cpp.
            */
            void clear()
            {
                // clear array_
                for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur )
                    cur->clear();

                // free all extended blocks
                hp_allocator& alloc = hp_allocator::instance();
                for ( guard_block* p = extended_list_; p; ) {
                    guard_block* next = p->next_;
                    alloc.free( p );
                    p = next;
                }

                extended_list_ = nullptr;
            }

            /// Rebuilds the free list over the initial array; no extended blocks may be attached
            void init()
            {
                assert( extended_list_ == nullptr );

                // link array_[i] -> array_[i+1]; the last element terminates the list
                guard* p = array_;
                for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
                    p->next_ = p + 1;
                p->next_ = nullptr;
                free_head_ = array_;
            }

        private:
            /// Attaches a fresh \p guard_block from the global pool and exposes its guards as the free list
            void extend()
            {
                assert( free_head_ == nullptr );

                // The guards inside the returned block are presumably already chained
                // via guard::next_ by hp_allocator::alloc() -- TODO confirm in .cpp
                guard_block* block = hp_allocator::instance().alloc();
                block->next_ = extended_list_;
                extended_list_ = block;
                free_head_ = block->first();
            }

        private:
            guard*          free_head_;        ///< Head of free guard list
            guard_block*    extended_list_;    ///< Head of extended guard blocks allocated for the thread
            guard* const    array_;            ///< initial HP array
            size_t const    initial_capacity_; ///< Capacity of \p array_
        };
        //@endcond
220
221         //@cond
222         struct retired_block: public cds::intrusive::FreeListImpl::node
223         {
224             retired_block*  next_;  ///< Next block in thread-private retired array
225
226             static size_t const c_capacity = 256;
227
228             retired_block()
229                 : next_( nullptr )
230             {}
231
232             retired_ptr* first() const
233             {
234                 return reinterpret_cast<retired_ptr*>( const_cast<retired_block*>( this ) + 1 );
235             }
236
237             retired_ptr* last() const
238             {
239                 return first() + c_capacity;
240             }
241         };
242         //@endcond
243
        //@cond
        /// Global allocator of \p retired_block chunks (singleton owned by \p smr)
        class retired_allocator
        {
            friend class smr;
        public:
            /// Returns the allocator bound to the global \p smr singleton
            static retired_allocator& instance();

            /// Obtains a block; implemented in the .cpp (presumably reuses the free-list or allocates fresh memory)
            CDS_EXPORT_API retired_block* alloc();
            /// Detaches \p block from its chain and returns it to the free-list; no memory is released here
            void free( retired_block* block )
            {
                block->next_ = nullptr;
                free_list_.put( block );
            }

        private:
            // only smr may create/destroy the allocator
            retired_allocator()
            {}
            CDS_EXPORT_API ~retired_allocator();

        private:
            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p retired_block
        };
        //@endcond
267
268         //@cond
269         /// Per-thread retired array
270         class retired_array
271         {
272             friend class smr;
273         public:
274             retired_array() CDS_NOEXCEPT
275                 : current_block_( nullptr )
276                 , current_cell_( nullptr )
277                 , list_head_( nullptr )
278                 , list_tail_( nullptr )
279                 , block_count_(0)
280             {}
281
282             retired_array( retired_array const& ) = delete;
283             retired_array( retired_array&& ) = delete;
284
285             ~retired_array()
286             {
287                 assert( empty());
288                 fini();
289             }
290
291             bool push( retired_ptr const& p ) CDS_NOEXCEPT
292             {
293                 assert( current_block_ != nullptr );
294                 assert( current_block_->first() <= current_cell_ );
295                 assert( current_cell_ < current_block_->last() );
296                 //assert( &p != current_cell_ );
297
298                 *current_cell_ = p;
299                 if ( ++current_cell_ == current_block_->last() ) {
300                     // goto next block if exists
301                     if ( current_block_->next_ ) {
302                         current_block_ = current_block_->next_;
303                         current_cell_ = current_block_->first();
304                         return true;
305                     }
306
307                     // no free block
308                     // smr::scan() extend retired_array if needed
309                     return false;
310                 }
311
312                 return true;
313             }
314
315             bool safe_push( retired_ptr* p ) CDS_NOEXCEPT
316             {                
317                 bool ret = push( *p );
318                 assert( ret );
319                 return ret;
320             }
321
322         private: // called by smr
323             void init()
324             {
325                 if ( list_head_ == nullptr ) {
326                     retired_block* block = retired_allocator::instance().alloc();
327                     assert( block->next_ == nullptr );
328
329                     current_block_ =
330                         list_head_ =
331                         list_tail_ = block;
332                     current_cell_ = block->first();
333
334                     block_count_ = 1;
335                 }
336             }
337
338             void fini()
339             {
340                 retired_allocator& alloc = retired_allocator::instance();
341                 for ( retired_block* p = list_head_; p; ) {
342                     retired_block* next = p->next_;
343                     alloc.free( p );
344                     p = next;
345                 }
346
347                 current_block_ =
348                     list_head_ =
349                     list_tail_ = nullptr;
350                 current_cell_ = nullptr;
351
352                 block_count_ = 0;
353             }
354
355             void extend()
356             {
357                 assert( list_head_ != nullptr );
358                 assert( current_block_ == list_tail_ );
359                 assert( current_cell_ == current_block_->last() );
360
361                 retired_block* block = retired_allocator::instance().alloc();
362                 assert( block->next_ == nullptr );
363
364                 list_tail_ = list_tail_->next_ = block;
365                 current_cell_ = block->first();
366                 ++block_count_;
367             }
368
369             bool empty() const
370             {
371                 return current_block_ == nullptr
372                     || ( current_block_ == list_head_ && current_cell_ == current_block_->first());
373             }
374
375         private:
376             retired_block*          current_block_;
377             retired_ptr*            current_cell_;  // in current_block_
378
379             retired_block*          list_head_;
380             retired_block*          list_tail_;
381             size_t                  block_count_;
382         };
383         //@endcond
384
        //@cond
        /// Per-thread data
        struct thread_data {
            thread_hp_storage   hazards_;   ///< Hazard pointers private to the thread
            retired_array       retired_;   ///< Retired data private to the thread

            char pad1_[cds::c_nCacheLineSize];   ///< padding: keep sync_ on its own cache line
            atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
            char pad2_[cds::c_nCacheLineSize];   ///< padding: keep sync_ on its own cache line

            // CppCheck warn: pad1_ and pad2_ is uninitialized in ctor
            // cppcheck-suppress uninitMemberVar
            /// \p guards / \p guard_count - initial hazard pointer storage provided by \p smr
            thread_data( guard* guards, size_t guard_count )
                : hazards_( guards, guard_count )
                , sync_( 0 )
            {}

            thread_data() = delete;
            thread_data( thread_data const& ) = delete;
            thread_data( thread_data&& ) = delete;

            /// acq_rel RMW on \p sync_; used to establish the synchronizes-with
            /// relationship described on the \p sync_ member
            void sync()
            {
                sync_.fetch_add( 1, atomics::memory_order_acq_rel );
            }
        };
        //@endcond
412
        //@cond
        // Dynamic (adaptive) Hazard Pointer SMR (Safe Memory Reclamation)
        class smr
        {
            struct thread_record;

        public:
            /// Returns the instance of Hazard Pointer \ref smr
            static smr& instance()
            {
#       ifdef CDS_DISABLE_SMR_EXCEPTION
                assert( instance_ != nullptr );
#       else
                if ( !instance_ )
                    CDS_THROW_EXCEPTION( not_initialized() );
#       endif
                return *instance_;
            }

            /// Creates Dynamic Hazard Pointer SMR singleton
            /**
                Dynamic Hazard Pointer SMR is a singleton. If DHP instance is not initialized then the function creates the instance.
                Otherwise it does nothing.

                \p nInitialHazardPtrCount is the initial number of hazard pointers allocated
                for each thread; the per-thread storage is extended automatically on demand,
                which is the "dynamic (adaptive)" trait of this SMR implementation.
            */
            static CDS_EXPORT_API void construct(
                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointer per thread
            );

            // for back-compatibility
            static void Construct(
                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointer per thread
            )
            {
                construct( nInitialHazardPtrCount );
            }

            /// Destroys global instance of \ref smr
            /**
                The parameter \p bDetachAll should be used carefully: if its value is \p true,
                then the object destroyed automatically detaches all attached threads. This feature
                can be useful when you have no control over the thread termination, for example,
                when \p libcds is injected into existing external thread.
            */
            static CDS_EXPORT_API void destruct(
                bool bDetachAll = false     ///< Detach all threads
            );

            // for back-compatibility
            static void Destruct(
                bool bDetachAll = false     ///< Detach all threads
            )
            {
                destruct( bDetachAll );
            }

            /// Checks if global SMR object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return instance_ != nullptr;
            }

            /// Set memory management functions
            /**
                @note This function may be called <b>BEFORE</b> creating an instance
                of Dynamic Hazard Pointer SMR

                SMR object allocates some memory for thread-specific data and for
                creating SMR object.
                By default, a standard \p new and \p delete operators are used for this.
            */
            static CDS_EXPORT_API void set_memory_allocator(
                void* ( *alloc_func )( size_t size ),
                void( *free_func )( void * p )
            );

            /// Returns thread-local data for the current thread
            static CDS_EXPORT_API thread_data* tls();

            /// Registers/unregisters the current thread with the SMR
            static CDS_EXPORT_API void attach_thread();
            static CDS_EXPORT_API void detach_thread();

        public: // for internal use only
            /// The main garbage collecting function
            CDS_EXPORT_API void scan( thread_data* pRec );

            /// Helper scan routine
            /**
                The function guarantees that every node that is eligible for reuse is eventually freed, barring
                thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
                where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
                to thread's list of reclaimed pointers.

                The function is called internally by \p scan().
            */
            CDS_EXPORT_API void help_scan( thread_data* pThis );

            /// Returns the global \p guard_block pool
            hp_allocator& get_hp_allocator()
            {
                return hp_allocator_;
            }

            /// Returns the global \p retired_block pool
            retired_allocator& get_retired_allocator()
            {
                return retired_allocator_;
            }

        private:
            CDS_EXPORT_API explicit smr(
                size_t nInitialHazardPtrCount
            );

            CDS_EXPORT_API ~smr();

            CDS_EXPORT_API void detach_all_thread();

        private:
            CDS_EXPORT_API thread_record* create_thread_data();
            static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );

            /// Allocates Hazard Pointer SMR thread private data
            CDS_EXPORT_API thread_record* alloc_thread_data();

            /// Free HP SMR thread-private data
            CDS_EXPORT_API void free_thread_data( thread_record* pRec );

        private:
            static CDS_EXPORT_API smr* instance_;   ///< global singleton; \p nullptr until \p construct()

            atomics::atomic< thread_record*>    thread_list_;   ///< Head of thread list
            size_t const        initial_hazard_count_;  ///< initial number of hazard pointers per thread
            hp_allocator        hp_allocator_;          ///< global \p guard_block pool
            retired_allocator   retired_allocator_;     ///< global \p retired_block pool

            // temporaries
            std::atomic<size_t> last_plist_size_;   ///< HP array size in last scan() call
        };
        //@endcond
559
        //@cond
        // for backward compatibility
        typedef smr GarbageCollector;


        // inlines
        // (defined out of class because they need the complete \p smr type)
        inline hp_allocator& hp_allocator::instance()
        {
            return smr::instance().get_hp_allocator();
        }

        inline retired_allocator& retired_allocator::instance()
        {
            return smr::instance().get_retired_allocator();
        }
        //@endcond
576
577     } // namespace dhp
578
579
    /// Dynamic (adaptive) Hazard Pointer SMR
581     /**  @ingroup cds_garbage_collector
582
583         Implementation of Dynamic (adaptive) Hazard Pointer SMR
584
585         Sources:
            - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
            - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
            - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
589
590         %DHP is an adaptive variant of classic \p cds::gc::HP, see @ref cds_garbage_collectors_comparison "Compare HP implementation"
591
592         See \ref cds_how_to_use "How to use" section for details how to apply SMR.
593     */
594     class DHP
595     {
596     public:
597         /// Native guarded pointer type
598         typedef void* guarded_pointer;
599
600         /// Atomic reference
601         template <typename T> using atomic_ref = atomics::atomic<T *>;
602
603         /// Atomic type
604         /**
605             @headerfile cds/gc/dhp.h
606         */
607         template <typename T> using atomic_type = atomics::atomic<T>;
608
609         /// Atomic marked pointer
610         template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
611
612
613         /// Dynamic Hazard Pointer guard
614         /**
615             A guard is a hazard pointer.
616             Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer
617
618             \p %Guard object is movable but not copyable.
619
620             The guard object can be in two states:
621             - unlinked - the guard is not linked with any internal hazard pointer.
622               In this state no operation except \p link() and move assignment is supported.
623             - linked (default) - the guard allocates an internal hazard pointer and fully operable.
624
625             Due to performance reason the implementation does not check state of the guard in runtime.
626
627             @warning Move assignment can transfer the guard in unlinked state, use with care.
628         */
        class Guard
        {
        public:
            /// Default ctor allocates a guard (hazard pointer) from thread-private storage
            // NOTE(review): hazards_.alloc() may extend the storage from the global pool;
            // confirm that marking this ctor CDS_NOEXCEPT is intended.
            Guard() CDS_NOEXCEPT
                : guard_( dhp::smr::tls()->hazards_.alloc() )
            {}

            /// Initializes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
            explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
                : guard_( nullptr )
            {}

            /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
            Guard( Guard&& src ) CDS_NOEXCEPT
                : guard_( src.guard_ )
            {
                src.guard_ = nullptr;
            }

            /// Move assignment: the internal guards are swapped between \p src and \p this
            /**
                @warning \p src will become in unlinked state if \p this was unlinked on entry.
            */
            Guard& operator=( Guard&& src ) CDS_NOEXCEPT
            {
                std::swap( guard_, src.guard_ );
                return *this;
            }

            /// Copy ctor is prohibited - the guard is not copyable
            Guard( Guard const& ) = delete;

            /// Copy assignment is prohibited
            Guard& operator=( Guard const& ) = delete;

            /// Frees the internal hazard pointer if the guard is in linked state
            ~Guard()
            {
                unlink();
            }

            /// Checks if the guard object linked with any internal hazard pointer
            bool is_linked() const
            {
                return guard_ != nullptr;
            }

            /// Links the guard with internal hazard pointer if the guard is in unlinked state
            void link()
            {
                if ( !guard_ )
                    guard_ = dhp::smr::tls()->hazards_.alloc();
            }

            /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state
            void unlink()
            {
                if ( guard_ ) {
                    dhp::smr::tls()->hazards_.free( guard_ );
                    guard_ = nullptr;
                }
            }

            /// Protects a pointer of type <tt> atomic<T*> </tt>
            /**
                Return the value of \p toGuard

                The function tries to load \p toGuard and to store it
                to the HP slot repeatedly until the guard's value equals \p toGuard
            */
            template <typename T>
            T protect( atomics::atomic<T> const& toGuard )
            {
                assert( guard_ != nullptr );

                T pCur = toGuard.load(atomics::memory_order_acquire);
                T pRet;
                do {
                    // publish the hazard pointer, then re-read the source;
                    // the loop exits only when the published value is still current
                    pRet = assign( pCur );
                    pCur = toGuard.load(atomics::memory_order_acquire);
                } while ( pRet != pCur );
                return pCur;
            }

            /// Protects a converted pointer of type <tt> atomic<T*> </tt>
            /**
                Return the value of \p toGuard

                The function tries to load \p toGuard and to store result of \p f functor
                to the HP slot repeatedly until the guard's value equals \p toGuard.

                The function is useful for intrusive containers when \p toGuard is a node pointer
                that should be converted to a pointer to the value type before guarding.
                The parameter \p f of type Func is a functor that makes this conversion:
                \code
                    struct functor {
                        value_type * operator()( T * p );
                    };
                \endcode
                Really, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
            */
            template <typename T, class Func>
            T protect( atomics::atomic<T> const& toGuard, Func f )
            {
                assert( guard_ != nullptr );

                T pCur = toGuard.load(atomics::memory_order_acquire);
                T pRet;
                do {
                    pRet = pCur;
                    // guard the converted pointer, then validate the raw one
                    assign( f( pCur ));
                    pCur = toGuard.load(atomics::memory_order_acquire);
                } while ( pRet != pCur );
                return pCur;
            }

            /// Store \p p to the guard
            /**
                The function is just an assignment, no loop is performed.
                Can be used for a pointer that cannot be changed concurrently
                or for already guarded pointer.
            */
            template <typename T>
            T* assign( T* p )
            {
                assert( guard_ != nullptr );

                guard_->set( p );
                // acq_rel RMW on the per-thread sync_ variable; see thread_data::sync()
                dhp::smr::tls()->sync();
                return p;
            }

            //@cond
            std::nullptr_t assign( std::nullptr_t )
            {
                assert( guard_ != nullptr );

                clear();
                return nullptr;
            }
            //@endcond

            /// Store marked pointer \p p to the guard
            /**
                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
                Can be used for a marked pointer that cannot be changed concurrently
                or for already guarded pointer.
            */
            template <typename T, int BITMASK>
            T* assign( cds::details::marked_ptr<T, BITMASK> p )
            {
                return assign( p.ptr());
            }

            /// Copy from \p src guard to \p this guard
            void copy( Guard const& src )
            {
                assign( src.get_native());
            }

            /// Clears value of the guard
            void clear()
            {
                assert( guard_ != nullptr );

                guard_->clear();
            }

            /// Gets the value currently protected (relaxed read)
            template <typename T>
            T * get() const
            {
                assert( guard_ != nullptr );
                return guard_->get_as<T>();
            }

            /// Gets native guarded pointer stored
            void* get_native() const
            {
                assert( guard_ != nullptr );
                return guard_->get();
            }

            //@cond
            /// Gives up ownership of the internal guard without freeing it; the guard becomes unlinked
            dhp::guard* release()
            {
                dhp::guard* g = guard_;
                guard_ = nullptr;
                return g;
            }

            /// Direct access to the internal guard pointer (for internal library use)
            dhp::guard*& guard_ref()
            {
                return guard_;
            }
            //@endcond

        private:
            //@cond
            dhp::guard* guard_;     ///< thread-private hazard pointer slot; \p nullptr when unlinked
            //@endcond
        };
832
833         /// Array of Dynamic Hazard Pointer guards
834         /**
835             The class is intended for allocating an array of hazard pointer guards.
836             Template parameter \p Count defines the size of the array.
837
838             A \p %GuardArray object is not copy- and move-constructible
839             and not copy- and move-assignable.
840         */
841         template <size_t Count>
842         class GuardArray
843         {
844         public:
845             /// Rebind array for other size \p OtherCount
846             template <size_t OtherCount>
847             struct rebind {
848                 typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
849             };
850
851             /// Array capacity
852             static CDS_CONSTEXPR const size_t c_nCapacity = Count;
853
854         public:
855             /// Default ctor allocates \p Count hazard pointers
856             GuardArray()
857             {
858                 dhp::smr::tls()->hazards_.alloc( guards_ );
859             }
860
861             /// Move ctor is prohibited
862             GuardArray( GuardArray&& ) = delete;
863
864             /// Move assignment is prohibited
865             GuardArray& operator=( GuardArray&& ) = delete;
866
867             /// Copy ctor is prohibited
868             GuardArray( GuardArray const& ) = delete;
869
870             /// Copy assignment is prohibited
871             GuardArray& operator=( GuardArray const& ) = delete;
872
873             /// Frees allocated hazard pointers
874             ~GuardArray()
875             {
876                 dhp::smr::tls()->hazards_.free( guards_ );
877             }
878
879             /// Protects a pointer of type \p atomic<T*>
880             /**
881                 Return the value of \p toGuard
882
883                 The function tries to load \p toGuard and to store it
884                 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
885             */
886             template <typename T>
887             T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
888             {
889                 assert( nIndex < capacity() );
890
891                 T pRet;
892                 do {
893                     pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
894                 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
895
896                 return pRet;
897             }
898
899             /// Protects a pointer of type \p atomic<T*>
900             /**
901                 Return the value of \p toGuard
902
903                 The function tries to load \p toGuard and to store it
904                 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
905
906                 The function is useful for intrusive containers when \p toGuard is a node pointer
907                 that should be converted to a pointer to the value type before guarding.
908                 The parameter \p f of type Func is a functor to make that conversion:
909                 \code
910                     struct functor {
911                         value_type * operator()( T * p );
912                     };
913                 \endcode
914                 Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
915             */
916             template <typename T, class Func>
917             T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
918             {
919                 assert( nIndex < capacity() );
920
921                 T pRet;
922                 do {
923                     assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
924                 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
925
926                 return pRet;
927             }
928
929             /// Store \p p to the slot \p nIndex
930             /**
931                 The function is just an assignment, no loop is performed.
932             */
933             template <typename T>
934             T * assign( size_t nIndex, T * p )
935             {
936                 assert( nIndex < capacity() );
937
938                 guards_.set( nIndex, p );
939                 dhp::smr::tls()->sync();
940                 return p;
941             }
942
943             /// Store marked pointer \p p to the guard
944             /**
945                 The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
946                 Can be used for a marked pointer that cannot be changed concurrently
947                 or for already guarded pointer.
948             */
949             template <typename T, int Bitmask>
950             T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
951             {
952                 return assign( nIndex, p.ptr());
953             }
954
955             /// Copy guarded value from \p src guard to slot at index \p nIndex
956             void copy( size_t nIndex, Guard const& src )
957             {
958                 assign( nIndex, src.get_native());
959             }
960
961             /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
962             void copy( size_t nDestIndex, size_t nSrcIndex )
963             {
964                 assign( nDestIndex, get_native( nSrcIndex ));
965             }
966
967             /// Clear value of the slot \p nIndex
968             void clear( size_t nIndex )
969             {
970                 guards_.clear( nIndex );
971             }
972
973             /// Get current value of slot \p nIndex
974             template <typename T>
975             T * get( size_t nIndex ) const
976             {
977                 assert( nIndex < capacity() );
978                 return guards_[nIndex]->template get_as<T>();
979             }
980
981             /// Get native guarded pointer stored
982             guarded_pointer get_native( size_t nIndex ) const
983             {
984                 assert( nIndex < capacity() );
985                 return guards_[nIndex]->get();
986             }
987
988             //@cond
989             dhp::guard* release( size_t nIndex ) CDS_NOEXCEPT
990             {
991                 return guards_.release( nIndex );
992             }
993             //@endcond
994
995             /// Capacity of the guard array
996             static CDS_CONSTEXPR size_t capacity()
997             {
998                 return Count;
999             }
1000
1001         private:
1002             //@cond
1003             dhp::guard_array<c_nCapacity> guards_;
1004             //@endcond
1005         };
1006
1007         /// Guarded pointer
1008         /**
1009             A guarded pointer is a pair of a pointer and GC's guard.
1010             Usually, it is used for returning a pointer to the item from an lock-free container.
1011             The guard prevents the pointer to be early disposed (freed) by GC.
1012             After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time.
1013
1014             Template arguments:
1015             - \p GuardedType - a type which the guard stores
1016             - \p ValueType - a value type
1017             - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
1018
1019             For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
1020             In such case the \p %guarded_ptr is:
1021             @code
1022             typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr;
1023             @endcode
1024
1025             For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
1026             For example:
1027             @code
1028             struct foo {
1029                 int const   key;
1030                 std::string value;
1031             };
1032
1033             struct value_accessor {
1034                 std::string* operator()( foo* pFoo ) const
1035                 {
1036                     return &(pFoo->value);
1037                 }
1038             };
1039
1040             // Guarded ptr
1041             typedef cds::gc::DHP::guarded_ptr< Foo, std::string, value_accessor > nonintrusive_guarded_ptr;
1042             @endcode
1043
1044             You don't need use this class directly.
1045             All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor.
1046         */
1047         template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
1048         class guarded_ptr
1049         {
1050             //@cond
1051             struct trivial_cast {
1052                 ValueType * operator()( GuardedType * p ) const
1053                 {
1054                     return p;
1055                 }
1056             };
1057
1058             template <typename GT, typename VT, typename C> friend class guarded_ptr;
1059             //@endcond
1060
1061         public:
1062             typedef GuardedType guarded_type; ///< Guarded type
1063             typedef ValueType   value_type;   ///< Value type
1064
1065             /// Functor for casting \p guarded_type to \p value_type
1066             typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
1067
1068         public:
1069             /// Creates empty guarded pointer
1070             guarded_ptr() CDS_NOEXCEPT
1071                 : guard_( nullptr )
1072             {}
1073
1074             //@cond
1075             explicit guarded_ptr( dhp::guard* g ) CDS_NOEXCEPT
1076                 : guard_( g )
1077             {}
1078
1079             /// Initializes guarded pointer with \p p
1080             explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
1081                 : guard_( nullptr )
1082             {
1083                 reset( p );
1084             }
1085             explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
1086                 : guard_( nullptr )
1087             {}
1088             //@endcond
1089
1090             /// Move ctor
1091             guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
1092                 : guard_( gp.guard_ )
1093             {
1094                 gp.guard_ = nullptr;
1095             }
1096
1097             /// Move ctor
1098             template <typename GT, typename VT, typename C>
1099             guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
1100                 : guard_( gp.guard_ )
1101             {
1102                 gp.guard_ = nullptr;
1103             }
1104
1105             /// Ctor from \p Guard
1106             explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
1107                 : guard_( g.release())
1108             {}
1109
1110             /// The guarded pointer is not copy-constructible
1111             guarded_ptr( guarded_ptr const& gp ) = delete;
1112
1113             /// Clears the guarded pointer
1114             /**
1115                 \ref release is called if guarded pointer is not \ref empty
1116             */
1117             ~guarded_ptr() CDS_NOEXCEPT
1118             {
1119                 release();
1120             }
1121
1122             /// Move-assignment operator
1123             guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
1124             {
1125                 std::swap( guard_, gp.guard_ );
1126                 return *this;
1127             }
1128
1129             /// Move-assignment from \p Guard
1130             guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
1131             {
1132                 std::swap( guard_, g.guard_ref());
1133                 return *this;
1134             }
1135
1136             /// The guarded pointer is not copy-assignable
1137             guarded_ptr& operator=(guarded_ptr const& gp) = delete;
1138
1139             /// Returns a pointer to guarded value
1140             value_type * operator ->() const CDS_NOEXCEPT
1141             {
1142                 assert( !empty());
1143                 return value_cast()( guard_->get_as<guarded_type>() );
1144             }
1145
1146             /// Returns a reference to guarded value
1147             value_type& operator *() CDS_NOEXCEPT
1148             {
1149                 assert( !empty());
1150                 return *value_cast()( guard_->get_as<guarded_type>() );
1151             }
1152
1153             /// Returns const reference to guarded value
1154             value_type const& operator *() const CDS_NOEXCEPT
1155             {
1156                 assert( !empty());
1157                 return *value_cast()(reinterpret_cast<guarded_type *>(guard_->get()));
1158             }
1159
1160             /// Checks if the guarded pointer is \p nullptr
1161             bool empty() const CDS_NOEXCEPT
1162             {
1163                 return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr;
1164             }
1165
1166             /// \p bool operator returns <tt>!empty()</tt>
1167             explicit operator bool() const CDS_NOEXCEPT
1168             {
1169                 return !empty();
1170             }
1171
1172             /// Clears guarded pointer
1173             /**
1174                 If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
1175                 Dereferncing the guarded pointer after \p release() is dangerous.
1176             */
1177             void release() CDS_NOEXCEPT
1178             {
1179                 free_guard();
1180             }
1181
1182             //@cond
1183             // For internal use only!!!
1184             void reset(guarded_type * p) CDS_NOEXCEPT
1185             {
1186                 alloc_guard();
1187                 assert( guard_ );
1188                 guard_->set( p );
1189             }
1190
1191             //@endcond
1192
1193         private:
1194             //@cond
1195             void alloc_guard()
1196             {
1197                 if ( !guard_ )
1198                     guard_ = dhp::smr::tls()->hazards_.alloc();
1199             }
1200
1201             void free_guard()
1202             {
1203                 if ( guard_ ) {
1204                     dhp::smr::tls()->hazards_.free( guard_ );
1205                     guard_ = nullptr;
1206                 }
1207             }
1208             //@endcond
1209
1210         private:
1211             //@cond
1212             dhp::guard* guard_;
1213             //@endcond
1214         };
1215
1216     public:
1217         /// Initializes %DHP memory manager singleton
1218         /**
1219             Constructor creates and initializes %DHP global object.
1220             %DHP object should be created before using CDS data structure based on \p %cds::gc::DHP. Usually,
1221             it is created in the beginning of \p main() function.
1222             After creating of global object you may use CDS data structures based on \p %cds::gc::DHP.
1223
1224             \p nInitialThreadGuardCount - initial count of guard allocated for each thread.
1225                 When a thread is initialized the GC allocates local guard pool for the thread from a common guard pool.
1226                 By perforce the local thread's guard pool is grown automatically from common pool.
1227                 When the thread terminated its guard pool is backed to common GC's pool.
1228         */
        explicit DHP(
            size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointer per thread
        )
        {
            // Creates and initializes the global DHP SMR object
            dhp::smr::construct( nInitialHazardPtrCount );
        }
1235
1236         /// Destroys %DHP memory manager
1237         /**
1238             The destructor destroys %DHP global object. After calling of this function you may \b NOT
1239             use CDS data structures based on \p %cds::gc::DHP.
1240             Usually, %DHP object is destroyed at the end of your \p main().
1241         */
        ~DHP()
        {
            // NOTE(review): the \p true argument presumably requests disposal of the remaining
            // retired pointers during destruction - confirm against dhp::GarbageCollector::destruct()
            dhp::GarbageCollector::destruct( true );
        }
1246
1247         /// Checks if count of hazard pointer is no less than \p nCountNeeded
1248         /**
1249             The function always returns \p true since the guard count is unlimited for
1250             \p %gc::DHP garbage collector.
1251         */
        static CDS_CONSTEXPR bool check_available_guards(
#ifdef CDS_DOXYGEN_INVOKED
            size_t nCountNeeded,
#else
            size_t
#endif
        )
        {
            // The thread guard pool grows on demand, so any requested count is available
            return true;
        }
1262
1263         /// Set memory management functions
1264         /**
1265             @note This function may be called <b>BEFORE</b> creating an instance
1266             of Dynamic Hazard Pointer SMR
1267
1268             SMR object allocates some memory for thread-specific data and for creating SMR object.
1269             By default, a standard \p new and \p delete operators are used for this.
1270         */
        static void set_memory_allocator(
            void* ( *alloc_func )( size_t size ),   ///< \p malloc() function
            void( *free_func )( void * p )          ///< \p free() function
        )
        {
            // Forwards the allocation hooks to the SMR singleton
            dhp::smr::set_memory_allocator( alloc_func, free_func );
        }
1278
1279         /// Retire pointer \p p with function \p pFunc
1280         /**
1281             The function places pointer \p p to array of pointers ready for removing.
1282             (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1283             \p func is a disposer: when \p p can be safely removed, \p func is called.
1284         */
1285         template <typename T>
1286         static void retire( T * p, void (* func)(T *))
1287         {
1288             dhp::thread_data* rec = dhp::smr::tls();
1289             if ( !rec->retired_.push( dhp::retired_ptr( p, func ) ) )
1290                 dhp::smr::instance().scan( rec );
1291         }
1292
1293         /// Retire pointer \p p with functor of type \p Disposer
1294         /**
1295             The function places pointer \p p to array of pointers ready for removing.
1296             (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1297
1298             Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
1299             \code
1300             template <typename T>
1301             struct disposer {
1302                 void operator()( T * p )    ;   // disposing operator
1303             };
1304             \endcode
1305             Since the functor call can happen at any time after \p retire() call, additional restrictions are imposed to \p Disposer type:
1306             - it should be stateless functor
1307             - it should be default-constructible
1308             - the result of functor call with argument \p p should not depend on where the functor will be called.
1309
1310             \par Examples:
1311             Operator \p delete functor:
1312             \code
1313             template <typename T>
1314             struct disposer {
1315                 void operator ()( T * p ) {
1316                     delete p;
1317                 }
1318             };
1319
1320             // How to call HP::retire method
1321             int * p = new int;
1322
1323             // ... use p in lock-free manner
1324
1325             cds::gc::DHP::retire<disposer>( p ) ;   // place p to retired pointer array of DHP SMR
1326             \endcode
1327
1328             Functor based on \p std::allocator :
1329             \code
1330             template <typename Alloc = std::allocator<int> >
1331             struct disposer {
1332                 template <typename T>
1333                 void operator()( T * p ) {
1334                     typedef typename Alloc::templare rebind<T>::other alloc_t;
1335                     alloc_t a;
1336                     a.destroy( p );
1337                     a.deallocate( p, 1 );
1338                 }
1339             };
1340             \endcode
1341         */
1342         template <class Disposer, typename T>
1343         static void retire( T * p )
1344         {
1345             if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
1346                 scan();
1347         }
1348
        /// Checks if Dynamic Hazard Pointer GC is constructed and may be used
        static bool isUsed()
        {
            // Delegates to the SMR singleton
            return dhp::smr::isUsed();
        }
1354
1355         /// Forced GC cycle call for current thread
1356         /**
1357             Usually, this function should not be called directly.
1358         */
1359         static void scan()
1360         {
1361             dhp::smr::instance().scan( dhp::smr::tls() );
1362         }
1363
        /// Synonym for \p scan()
        static void force_dispose()
        {
            // Equivalent to scan() for the current thread
            scan();
        }
1369     };
1370
1371 }} // namespace cds::gc
1372
1373 #endif // #ifndef CDSLIB_GC_DHP_SMR_H
1374
1375