/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CDSLIB_GC_DHP_SMR_H
#define CDSLIB_GC_DHP_SMR_H

#include <exception>
#include <cds/gc/details/hp_common.h>
#include <cds/intrusive/free_list_selector.h>
#include <cds/details/throw_exception.h>
#include <cds/details/static_functor.h>
#include <cds/details/marked_ptr.h>
#include <cds/user_setup/cache_line.h>

namespace cds { namespace gc {
    namespace dhp {
        using namespace cds::gc::hp::common;

        /// Exception "Dynamic Hazard Pointer SMR is not initialized"
        class not_initialized: public std::runtime_error
        {
        public:
            not_initialized()
                : std::runtime_error( "Global DHP SMR object is not initialized" )
            {}
        };

        /// Block of guards allocated for a thread; the guard cells are laid out immediately after the block header
        struct guard_block: public cds::intrusive::FreeListImpl::node
        {
            guard_block*    next_;  ///< Next block in the thread list

            guard_block()
                : next_( nullptr )
            {}

            guard* first()
            {
                // the guard array starts right past the block header
                return reinterpret_cast<guard*>( this + 1 );
            }
        };

        /// \p guard_block allocator (global object)
        class hp_allocator
        {
            friend class smr;
        public:
            static hp_allocator& instance();

            CDS_EXPORT_API guard_block*    alloc();
            void            free( guard_block* block )
            {
                free_list_.put( block );
            }

        private:
            hp_allocator()
            {}
            CDS_EXPORT_API ~hp_allocator();

        private:
            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p guard_block
        };

        /// Per-thread hazard pointer storage
        class thread_hp_storage
        {
            friend class smr;
        public:
            thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
                : free_head_( arr )
                , extended_list_( nullptr )
                , array_( arr )
                , initial_capacity_( nSize )
            {}

            thread_hp_storage() = delete;
            thread_hp_storage( thread_hp_storage const& ) = delete;
            thread_hp_storage( thread_hp_storage&& ) = delete;

            ~thread_hp_storage()
            {
                clear();
            }

            guard* alloc()
            {
                if ( cds_unlikely( free_head_ == nullptr )) {
                    extend();
                    assert( free_head_ != nullptr );
                }

                guard* g = free_head_;
                free_head_ = g->next_;
                return g;
            }

            void free( guard* g ) CDS_NOEXCEPT
            {
                if ( g ) {
                    g->clear();
                    g->next_ = free_head_;
                    free_head_ = g;
                }
            }

            template <size_t Capacity>
            size_t alloc( guard_array<Capacity>& arr )
            {
                for ( size_t i = 0; i < Capacity; ++i ) {
                    if ( cds_unlikely( free_head_ == nullptr ))
                        extend();
                    arr.reset( i, free_head_ );
                    free_head_ = free_head_->next_;
                }
                return Capacity;
            }

            template <size_t Capacity>
            void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
            {
                guard* gList = free_head_;
                for ( size_t i = 0; i < Capacity; ++i ) {
                    guard* g = arr[i];
                    if ( g ) {
                        g->clear();
                        g->next_ = gList;
                        gList = g;
                    }
                }
                free_head_ = gList;
            }

            void clear()
            {
                // clear array_
                for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur )
                    cur->clear();

                // free all extended blocks
                hp_allocator& alloc = hp_allocator::instance();
                for ( guard_block* p = extended_list_; p; ) {
                    guard_block* next = p->next_;
                    alloc.free( p );
                    p = next;
                }

                extended_list_ = nullptr;
            }

            void init()
            {
                assert( extended_list_ == nullptr );

                guard* p = array_;
                for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
                    p->next_ = p + 1;
                p->next_ = nullptr;
                free_head_ = array_;
            }

        private:
            void extend()
            {
                assert( free_head_ == nullptr );

                guard_block* block = hp_allocator::instance().alloc();
                block->next_ = extended_list_;
                extended_list_ = block;
                free_head_ = block->first();
            }

        private:
            guard*          free_head_;        ///< Head of free guard list
            guard_block*    extended_list_;    ///< Head of extended guard blocks allocated for the thread
            guard* const    array_;            ///< initial HP array
            size_t const    initial_capacity_; ///< Capacity of \p array_
        };

        /// Block of retired pointers; the retired cells are laid out immediately after the block header
        struct retired_block: public cds::intrusive::FreeListImpl::node
        {
            retired_block*  next_;  ///< Next block in thread-private retired array

            static size_t const c_capacity = 256;   ///< Number of retired pointers per block

            retired_block()
                : next_( nullptr )
            {}

            retired_ptr* first() const
            {
                // the retired_ptr array starts right past the block header
                return reinterpret_cast<retired_ptr*>( const_cast<retired_block*>( this ) + 1 );
            }

            retired_ptr* last() const
            {
                return first() + c_capacity;
            }
        };

        /// \p retired_block allocator (global object)
        class retired_allocator
        {
            friend class smr;
        public:
            static retired_allocator& instance();

            CDS_EXPORT_API retired_block* alloc();
            void free( retired_block* block )
            {
                block->next_ = nullptr;
                free_list_.put( block );
            }

        private:
            retired_allocator()
            {}
            CDS_EXPORT_API ~retired_allocator();

        private:
            cds::intrusive::FreeListImpl    free_list_; ///< list of free \p retired_block
        };

        /// Per-thread retired array
        class retired_array
        {
            friend class smr;
        public:
            retired_array() CDS_NOEXCEPT
                : current_block_( nullptr )
                , current_cell_( nullptr )
                , list_head_( nullptr )
                , list_tail_( nullptr )
                , block_count_(0)
            {}

            retired_array( retired_array const& ) = delete;
            retired_array( retired_array&& ) = delete;

            ~retired_array()
            {
                assert( empty());
                fini();
            }

            bool push( retired_ptr const& p ) CDS_NOEXCEPT
            {
                assert( current_block_ != nullptr );
                assert( current_block_->first() <= current_cell_ );
                assert( current_cell_ < current_block_->last() );
                //assert( &p != current_cell_ );

                *current_cell_ = p;
                if ( ++current_cell_ == current_block_->last() ) {
                    // go to the next block if one exists
                    if ( current_block_->next_ ) {
                        current_block_ = current_block_->next_;
                        current_cell_ = current_block_->first();
                        return true;
                    }

                    // no free block: the caller should invoke smr::scan(),
                    // which extends the retired array if needed
                    return false;
                }

                return true;
            }

            bool safe_push( retired_ptr* p ) CDS_NOEXCEPT
            {
                bool ret = push( *p );
                assert( ret );
                return ret;
            }

        private: // called by smr
            void init()
            {
                if ( list_head_ == nullptr ) {
                    retired_block* block = retired_allocator::instance().alloc();
                    assert( block->next_ == nullptr );

                    current_block_ =
                        list_head_ =
                        list_tail_ = block;
                    current_cell_ = block->first();

                    block_count_ = 1;
                }
            }

            void fini()
            {
                retired_allocator& alloc = retired_allocator::instance();
                for ( retired_block* p = list_head_; p; ) {
                    retired_block* next = p->next_;
                    alloc.free( p );
                    p = next;
                }

                current_block_ =
                    list_head_ =
                    list_tail_ = nullptr;
                current_cell_ = nullptr;

                block_count_ = 0;
            }

            void extend()
            {
                assert( list_head_ != nullptr );
                assert( current_block_ == list_tail_ );
                assert( current_cell_ == current_block_->last() );

                retired_block* block = retired_allocator::instance().alloc();
                assert( block->next_ == nullptr );

                list_tail_ = list_tail_->next_ = block;
                current_cell_ = block->first();
                ++block_count_;
            }

            bool empty() const
            {
                return current_block_ == nullptr
                    || ( current_block_ == list_head_ && current_cell_ == current_block_->first());
            }

        private:
            retired_block*          current_block_;
            retired_ptr*            current_cell_;  ///< Next free cell in \p current_block_

            retired_block*          list_head_;
            retired_block*          list_tail_;
            size_t                  block_count_;
        };

        /// Per-thread data
        struct thread_data {
            thread_hp_storage   hazards_;   ///< Hazard pointers private to the thread
            retired_array       retired_;   ///< Retired data private to the thread

            char pad1_[cds::c_nCacheLineSize];
            atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
            char pad2_[cds::c_nCacheLineSize];

            // CppCheck warning: pad1_ and pad2_ are uninitialized in ctor
            // cppcheck-suppress uninitMemberVar
            thread_data( guard* guards, size_t guard_count )
                : hazards_( guards, guard_count )
                , sync_( 0 )
            {}

            thread_data() = delete;
            thread_data( thread_data const& ) = delete;
            thread_data( thread_data&& ) = delete;

            // a read-modify-write on the dummy \p sync_ variable establishes
            // a synchronizes-with edge between threads that call \p sync()
            void sync()
            {
                sync_.fetch_add( 1, atomics::memory_order_acq_rel );
            }
        };

        /// Dynamic Hazard Pointer SMR (Safe Memory Reclamation)
        class smr
        {
            struct thread_record;

        public:
            /// Returns the instance of Dynamic Hazard Pointer \ref smr
            static smr& instance()
            {
#       ifdef CDS_DISABLE_SMR_EXCEPTION
                assert( instance_ != nullptr );
#       else
                if ( !instance_ )
                    CDS_THROW_EXCEPTION( not_initialized() );
#       endif
                return *instance_;
            }

            /// Creates Dynamic Hazard Pointer SMR singleton
            /**
                Dynamic Hazard Pointer SMR is a singleton. If the DHP instance is not yet initialized,
                the function creates it; otherwise it does nothing.

                Unlike the classic Hazard Pointer schema, DHP does not require upper bounds
                on the thread count or on the number of hazard pointers per thread.
                The only parameter, \p nInitialHazardPtrCount, is the initial number of hazard
                pointers allocated for each thread; the per-thread pool grows on demand.
            */
            static CDS_EXPORT_API void construct(
                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
            );

            //@cond
            // for backward compatibility
            static void Construct(
                size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
            )
            {
                construct( nInitialHazardPtrCount );
            }
            //@endcond

            /// Destroys global instance of \ref smr
            /**
                The parameter \p bDetachAll should be used carefully: if its value is \p true,
                then the destroyed object automatically detaches all attached threads. This feature
                can be useful when you have no control over thread termination, for example,
                when \p libcds is injected into an existing external thread.
            */
            static CDS_EXPORT_API void destruct(
                bool bDetachAll = false     ///< Detach all threads
            );

            //@cond
            // for backward compatibility
            static void Destruct(
                bool bDetachAll = false     ///< Detach all threads
            )
            {
                destruct( bDetachAll );
            }
            //@endcond

            /// Checks if global SMR object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return instance_ != nullptr;
            }

            /// Set memory management functions
            /**
                @note This function may be called <b>BEFORE</b> creating an instance
                of Dynamic Hazard Pointer SMR

                The SMR object allocates memory for thread-specific data and for
                the SMR object itself.
                By default, the standard \p new and \p delete operators are used.
            */
            static CDS_EXPORT_API void set_memory_allocator(
                void* ( *alloc_func )( size_t size ),
                void( *free_func )( void * p )
            );

            /// Returns thread-local data for the current thread
            static CDS_EXPORT_API thread_data* tls();

            static CDS_EXPORT_API void attach_thread();
            static CDS_EXPORT_API void detach_thread();

        public: // for internal use only
            /// The main garbage collecting function
            CDS_EXPORT_API void scan( thread_data* pRec );

            /// Helper scan routine
            /**
                The function guarantees that every node that is eligible for reuse is eventually freed, barring
                thread failures. To do so, after executing \p scan(), a thread executes \p %help_scan(),
                where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
                to the current thread's list of reclaimed pointers.

                The function is called internally by \p scan().
            */
            CDS_EXPORT_API void help_scan( thread_data* pThis );

            hp_allocator& get_hp_allocator()
            {
                return hp_allocator_;
            }

            retired_allocator& get_retired_allocator()
            {
                return retired_allocator_;
            }

        private:
            CDS_EXPORT_API explicit smr(
                size_t nInitialHazardPtrCount
            );

            CDS_EXPORT_API ~smr();

            CDS_EXPORT_API void detach_all_thread();

        private:
            //@cond
            CDS_EXPORT_API thread_record* create_thread_data();
            static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );

            /// Allocates Dynamic Hazard Pointer SMR thread-private data
            CDS_EXPORT_API thread_record* alloc_thread_data();

            /// Frees DHP SMR thread-private data
            CDS_EXPORT_API void free_thread_data( thread_record* pRec );
            //@endcond

        private:
            static CDS_EXPORT_API smr* instance_;

            atomics::atomic< thread_record*>    thread_list_;   ///< Head of thread list
            size_t const        initial_hazard_count_;  ///< initial number of hazard pointers per thread
            hp_allocator        hp_allocator_;
            retired_allocator   retired_allocator_;

            // temporaries
            std::atomic<size_t> last_plist_size_;   ///< HP array size in last scan() call
        };

        // for backward compatibility
        typedef smr GarbageCollector;


        // inlines
        inline hp_allocator& hp_allocator::instance()
        {
            return smr::instance().get_hp_allocator();
        }

        inline retired_allocator& retired_allocator::instance()
        {
            return smr::instance().get_retired_allocator();
        }

    } // namespace dhp


    /// Dynamic Hazard Pointer garbage collector
    /**  @ingroup cds_garbage_collector
        @headerfile cds/gc/dhp.h

        Implementation of Dynamic Hazard Pointer garbage collector.

        Sources:
            - [2002] Maged M. Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
            - [2003] Maged M. Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
            - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"

        Dynamic Hazard Pointer SMR (safe memory reclamation) provides an unbounded number of hazard pointers per thread,
        unlike classic Hazard Pointer SMR where the number of hazard pointers per thread is fixed.

        See the \ref cds_how_to_use "How to use" section for details on how to apply the garbage collector.
    */
    class DHP
    {
    public:
        /// Native guarded pointer type
        typedef void* guarded_pointer;

        /// Atomic reference
        template <typename T> using atomic_ref = atomics::atomic<T *>;

        /// Atomic type
        /**
            @headerfile cds/gc/dhp.h
        */
        template <typename T> using atomic_type = atomics::atomic<T>;

        /// Atomic marked pointer
        template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;


        /// Dynamic Hazard Pointer guard
        /**
            A guard is a hazard pointer.
            Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer.

            A \p %Guard object is movable but not copyable.

            The guard object can be in two states:
            - unlinked - the guard is not linked with any internal hazard pointer.
              In this state no operation except \p link() and move assignment is supported.
            - linked (default) - the guard allocates an internal hazard pointer and is fully operable.

            For performance reasons the implementation does not check the state of the guard at runtime.

            @warning Move assignment can transfer the guard to the unlinked state, use with care.
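
            A minimal usage sketch (the \p node type and the \p head variable are illustrative only,
            not part of the library):
            \code
            struct node {
                int value;
                atomics::atomic<node*> next_;
            };

            atomics::atomic<node*> head;

            {
                cds::gc::DHP::Guard g;          // allocates a hazard pointer from thread-private storage
                node* p = g.protect( head );    // p is protected from reclamation while g holds it
                if ( p ) {
                    // safe to dereference p here: no thread can free it
                    int v = p->value;
                }
            }   // guard destroyed, hazard pointer returned to the thread-private pool
            \endcode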
        */
        class Guard
        {
        public:
            /// Default ctor allocates a guard (hazard pointer) from thread-private storage
            Guard() CDS_NOEXCEPT
                : guard_( dhp::smr::tls()->hazards_.alloc() )
            {}

            /// Initializes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
            explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
                : guard_( nullptr )
            {}

            /// Move ctor - \p src guard becomes unlinked (transfers internal guard ownership)
            Guard( Guard&& src ) CDS_NOEXCEPT
                : guard_( src.guard_ )
            {
                src.guard_ = nullptr;
            }

            /// Move assignment: the internal guards are swapped between \p src and \p this
            /**
                @warning \p src becomes unlinked if \p this was unlinked on entry.
            */
            Guard& operator=( Guard&& src ) CDS_NOEXCEPT
            {
                std::swap( guard_, src.guard_ );
                return *this;
            }

            /// Copy ctor is prohibited - the guard is not copyable
            Guard( Guard const& ) = delete;

            /// Copy assignment is prohibited
            Guard& operator=( Guard const& ) = delete;

            /// Frees the internal hazard pointer if the guard is in linked state
            ~Guard()
            {
                unlink();
            }

            /// Checks if the guard object is linked with an internal hazard pointer
            bool is_linked() const
            {
                return guard_ != nullptr;
            }

            /// Links the guard with an internal hazard pointer if the guard is in unlinked state
            void link()
            {
                if ( !guard_ )
                    guard_ = dhp::smr::tls()->hazards_.alloc();
            }

            /// Unlinks the guard from its internal hazard pointer; the guard enters the unlinked state
            void unlink()
            {
                if ( guard_ ) {
                    dhp::smr::tls()->hazards_.free( guard_ );
                    guard_ = nullptr;
                }
            }

            /// Protects a pointer of type <tt> atomic<T*> </tt>
            /**
                Returns the value of \p toGuard

                The function tries to load \p toGuard and store it
                to the HP slot repeatedly until the guard's value equals \p toGuard
            */
            template <typename T>
            T protect( atomics::atomic<T> const& toGuard )
            {
                assert( guard_ != nullptr );

                T pCur = toGuard.load(atomics::memory_order_acquire);
                T pRet;
                do {
                    pRet = assign( pCur );
                    pCur = toGuard.load(atomics::memory_order_acquire);
                } while ( pRet != pCur );
                return pCur;
            }

            /// Protects a converted pointer of type <tt> atomic<T*> </tt>
            /**
                Returns the value of \p toGuard

                The function tries to load \p toGuard and store the result of the functor \p f
                to the HP slot repeatedly until the guard's value equals \p toGuard.

                The function is useful for intrusive containers when \p toGuard is a node pointer
                that should be converted to a pointer to the value type before guarding.
                The parameter \p f of type Func is a functor that makes this conversion:
                \code
                    struct functor {
                        value_type * operator()( T * p );
                    };
                \endcode
                Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
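
                A sketch of typical usage with a hypothetical intrusive node type
                (the \p node and \p to_value names are illustrative only):
                \code
                    struct node {
                        std::string value;
                        atomics::atomic<node*> next_;
                    };

                    struct to_value {
                        std::string* operator()( node* p ) const
                        {
                            return &p->value;   // guard the value, not the node header
                        }
                    };

                    atomics::atomic<node*> head;

                    cds::gc::DHP::Guard g;
                    node* p = g.protect( head, to_value());
                \endcode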
            */
            template <typename T, class Func>
            T protect( atomics::atomic<T> const& toGuard, Func f )
            {
                assert( guard_ != nullptr );

                T pCur = toGuard.load(atomics::memory_order_acquire);
                T pRet;
                do {
                    pRet = pCur;
                    assign( f( pCur ));
                    pCur = toGuard.load(atomics::memory_order_acquire);
                } while ( pRet != pCur );
                return pCur;
            }

            /// Stores \p p to the guard
            /**
                The function is just an assignment, no loop is performed.
                Can be used for a pointer that cannot be changed concurrently
                or for an already guarded pointer.
            */
            template <typename T>
            T* assign( T* p )
            {
                assert( guard_ != nullptr );

                guard_->set( p );
                dhp::smr::tls()->sync();
                return p;
            }

            //@cond
            std::nullptr_t assign( std::nullptr_t )
            {
                assert( guard_ != nullptr );

                clear();
                return nullptr;
            }
            //@endcond

            /// Stores marked pointer \p p to the guard
            /**
                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
                Can be used for a marked pointer that cannot be changed concurrently
                or for an already guarded pointer.
            */
            template <typename T, int BITMASK>
            T* assign( cds::details::marked_ptr<T, BITMASK> p )
            {
                return assign( p.ptr());
            }

            /// Copies from \p src guard to \p this guard
            void copy( Guard const& src )
            {
                assign( src.get_native());
            }

            /// Clears value of the guard
            void clear()
            {
                assert( guard_ != nullptr );

                guard_->clear();
            }

            /// Gets the value currently protected (relaxed read)
            template <typename T>
            T * get() const
            {
                assert( guard_ != nullptr );
                return guard_->get_as<T>();
            }

            /// Gets native guarded pointer stored
            void* get_native() const
            {
                assert( guard_ != nullptr );
                return guard_->get();
            }

            //@cond
            dhp::guard* release()
            {
                dhp::guard* g = guard_;
                guard_ = nullptr;
                return g;
            }

            dhp::guard*& guard_ref()
            {
                return guard_;
            }
            //@endcond

        private:
            //@cond
            dhp::guard* guard_;
            //@endcond
        };

        /// Array of Dynamic Hazard Pointer guards
        /**
            The class is intended for allocating an array of hazard pointer guards.
            Template parameter \p Count defines the size of the array.

            A \p %GuardArray object is neither copy- nor move-constructible
            and neither copy- nor move-assignable.
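
            A brief usage sketch (the two-slot array and the \p head / \p tail variables are illustrative only):
            \code
            atomics::atomic<int*> head, tail;

            cds::gc::DHP::GuardArray<2> arr;    // allocates 2 hazard pointers
            int* h = arr.protect( 0, head );    // slot 0 guards head
            int* t = arr.protect( 1, tail );    // slot 1 guards tail
            \endcode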
        */
        template <size_t Count>
        class GuardArray
        {
        public:
            /// Rebind array for other size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount> other;   ///< rebinding result
            };

            /// Array capacity
            static CDS_CONSTEXPR const size_t c_nCapacity = Count;

        public:
            /// Default ctor allocates \p Count hazard pointers
            GuardArray()
            {
                dhp::smr::tls()->hazards_.alloc( guards_ );
            }

            /// Move ctor is prohibited
            GuardArray( GuardArray&& ) = delete;

            /// Move assignment is prohibited
            GuardArray& operator=( GuardArray&& ) = delete;

            /// Copy ctor is prohibited
            GuardArray( GuardArray const& ) = delete;

            /// Copy assignment is prohibited
            GuardArray& operator=( GuardArray const& ) = delete;

            /// Frees allocated hazard pointers
            ~GuardArray()
            {
                dhp::smr::tls()->hazards_.free( guards_ );
            }

            /// Protects a pointer of type \p atomic<T*>
            /**
                Returns the value of \p toGuard

                The function tries to load \p toGuard and store it
                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
            */
            template <typename T>
            T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
            {
                assert( nIndex < capacity() );

                T pRet;
                do {
                    pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));

                return pRet;
            }

            /// Protects a pointer of type \p atomic<T*>
            /**
                Returns the value of \p toGuard

                The function tries to load \p toGuard and store it
                to the slot \p nIndex repeatedly until the guard's value equals \p toGuard

                The function is useful for intrusive containers when \p toGuard is a node pointer
                that should be converted to a pointer to the value type before guarding.
                The parameter \p f of type Func is a functor that makes this conversion:
                \code
                    struct functor {
                        value_type * operator()( T * p );
                    };
                \endcode
                Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
            */
            template <typename T, class Func>
            T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
            {
                assert( nIndex < capacity() );

                T pRet;
                do {
                    assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
                } while ( pRet != toGuard.load(atomics::memory_order_relaxed));

                return pRet;
            }

            /// Stores \p p to the slot \p nIndex
            /**
                The function is just an assignment, no loop is performed.
            */
            template <typename T>
            T * assign( size_t nIndex, T * p )
            {
                assert( nIndex < capacity() );

                guards_.set( nIndex, p );
                dhp::smr::tls()->sync();
                return p;
            }

            /// Stores marked pointer \p p to the guard
            /**
                The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
                Can be used for a marked pointer that cannot be changed concurrently
                or for an already guarded pointer.
            */
            template <typename T, int Bitmask>
            T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
            {
                return assign( nIndex, p.ptr());
            }

            /// Copies the guarded value from \p src guard to slot at index \p nIndex
            void copy( size_t nIndex, Guard const& src )
            {
                assign( nIndex, src.get_native());
            }

            /// Copies the guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
            void copy( size_t nDestIndex, size_t nSrcIndex )
            {
                assign( nDestIndex, get_native( nSrcIndex ));
            }

            /// Clears value of the slot \p nIndex
            void clear( size_t nIndex )
            {
                guards_.clear( nIndex );
            }

            /// Gets current value of slot \p nIndex
            template <typename T>
            T * get( size_t nIndex ) const
            {
                assert( nIndex < capacity() );
                return guards_[nIndex]->template get_as<T>();
            }

            /// Gets native guarded pointer stored
            guarded_pointer get_native( size_t nIndex ) const
            {
                assert( nIndex < capacity() );
                return guards_[nIndex]->get();
            }

            //@cond
            dhp::guard* release( size_t nIndex ) CDS_NOEXCEPT
            {
                return guards_.release( nIndex );
            }
            //@endcond

            /// Capacity of the guard array
            static CDS_CONSTEXPR size_t capacity()
            {
                return Count;
            }

        private:
            //@cond
            dhp::guard_array<c_nCapacity> guards_;
            //@endcond
        };

        /// Guarded pointer
        /**
            A guarded pointer is a pair of a pointer and a GC guard.
            Usually, it is used for returning a pointer to an item of a lock-free container.
            The guard prevents the pointer from being disposed (freed) early by the GC.
            After the \p %guarded_ptr object is destroyed, the pointer can be disposed (freed) automatically at any time.

            Template arguments:
            - \p GuardedType - a type which the guard stores
            - \p ValueType - a value type
            - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).

            For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
            In that case the \p %guarded_ptr is:
            @code
            typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr;
            @endcode

            For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
            For example:
            @code
            struct foo {
                int const   key;
                std::string value;
            };

            struct value_accessor {
                std::string* operator()( foo* pFoo ) const
                {
                    return &(pFoo->value);
                }
            };

            // Guarded ptr
            typedef cds::gc::DHP::guarded_ptr< foo, std::string, value_accessor > nonintrusive_guarded_ptr;
            @endcode

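            A typical usage sketch (assuming a container whose \p extract() returns a \p %guarded_ptr;
            the \p my_map name is illustrative only):
            @code
            my_map::guarded_ptr gp = my_map.extract( key );
            if ( gp ) {
                // the item is protected while gp is alive
                std::cout << *gp << std::endl;
            }
            gp.release();   // after release() the item may be disposed at any time
            @endcode
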
            You don't need to use this class directly.
            All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with an appropriate casting functor.
        */
        template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
        class guarded_ptr
        {
            //@cond
            struct trivial_cast {
                ValueType * operator()( GuardedType * p ) const
                {
                    return p;
                }
            };

            template <typename GT, typename VT, typename C> friend class guarded_ptr;
            //@endcond

        public:
            typedef GuardedType guarded_type; ///< Guarded type
            typedef ValueType   value_type;   ///< Value type

            /// Functor for casting \p guarded_type to \p value_type
            typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;

        public:
            /// Creates empty guarded pointer
            guarded_ptr() CDS_NOEXCEPT
                : guard_( nullptr )
            {}

            //@cond
            explicit guarded_ptr( dhp::guard* g ) CDS_NOEXCEPT
                : guard_( g )
            {}

            /// Initializes guarded pointer with \p p
            explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
                : guard_( nullptr )
            {
                reset( p );
            }
            explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
                : guard_( nullptr )
            {}
            //@endcond

            /// Move ctor
            guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
                : guard_( gp.guard_ )
            {
                gp.guard_ = nullptr;
            }

            /// Move ctor
            template <typename GT, typename VT, typename C>
            guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
                : guard_( gp.guard_ )
            {
                gp.guard_ = nullptr;
            }

            /// Ctor from \p Guard
            explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
                : guard_( g.release())
            {}

            /// The guarded pointer is not copy-constructible
            guarded_ptr( guarded_ptr const& gp ) = delete;

            /// Clears the guarded pointer
            /**
                \ref release() is called if the guarded pointer is not \ref empty()
            */
            ~guarded_ptr() CDS_NOEXCEPT
            {
                release();
            }

            /// Move-assignment operator
            guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
            {
                std::swap( guard_, gp.guard_ );
                return *this;
            }

            /// Move-assignment from \p Guard
            guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
            {
                std::swap( guard_, g.guard_ref());
                return *this;
            }

            /// The guarded pointer is not copy-assignable
            guarded_ptr& operator=( guarded_ptr const& gp ) = delete;

            /// Returns a pointer to the guarded value
            value_type * operator ->() const CDS_NOEXCEPT
            {
                assert( !empty());
                return value_cast()( guard_->get_as<guarded_type>() );
            }

            /// Returns a reference to the guarded value
            value_type& operator *() CDS_NOEXCEPT
            {
                assert( !empty());
                return *value_cast()( guard_->get_as<guarded_type>() );
            }

            /// Returns const reference to the guarded value
            value_type const& operator *() const CDS_NOEXCEPT
            {
                assert( !empty());
                return *value_cast()(reinterpret_cast<guarded_type *>(guard_->get()));
            }

            /// Checks if the guarded pointer is \p nullptr
            bool empty() const CDS_NOEXCEPT
            {
                return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr;
            }

            /// \p bool operator returns <tt>!empty()</tt>
            explicit operator bool() const CDS_NOEXCEPT
            {
                return !empty();
            }

            /// Clears guarded pointer
            /**
                If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
                Dereferencing the guarded pointer after \p release() is dangerous.
            */
            void release() CDS_NOEXCEPT
            {
                free_guard();
            }

            //@cond
            // For internal use only!!!
            void reset(guarded_type * p) CDS_NOEXCEPT
            {
                alloc_guard();
                assert( guard_ );
                guard_->set( p );
            }

            //@endcond

        private:
            //@cond
            void alloc_guard()
            {
                if ( !guard_ )
                    guard_ = dhp::smr::tls()->hazards_.alloc();
            }

            void free_guard()
            {
                if ( guard_ ) {
                    dhp::smr::tls()->hazards_.free( guard_ );
                    guard_ = nullptr;
                }
            }
            //@endcond

        private:
            //@cond
            dhp::guard* guard_;
            //@endcond
        };

    public:
        /// Initializes %DHP memory manager singleton
        /**
            The constructor creates and initializes the %DHP global object.
            The %DHP object should be created before using CDS data structures based on \p %cds::gc::DHP. Usually,
            it is created at the beginning of \p main().
            After creating the global object you may use CDS data structures based on \p %cds::gc::DHP.

            \p nInitialHazardPtrCount - initial count of guards allocated for each thread.
                When a thread is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
                If necessary, the local thread guard pool is grown automatically from the common pool.
                When the thread terminates, its guard pool is returned to the common GC pool.
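
            A minimal usage sketch (the worker body is illustrative):
            @code
            int main()
            {
                cds::Initialize();                  // initialize libcds
                {
                    cds::gc::DHP dhpGC;             // create DHP SMR singleton

                    std::thread worker( [] {
                        cds::threading::Manager::attachThread();    // attach the thread to libcds
                        // ... use DHP-based containers here ...
                        cds::threading::Manager::detachThread();
                    } );
                    worker.join();
                }   // DHP singleton destroyed
                cds::Terminate();                   // terminate libcds
                return 0;
            }
            @endcode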
        */
        explicit DHP(
            size_t nInitialHazardPtrCount = 16  ///< Initial number of hazard pointers per thread
        )
        {
            dhp::smr::construct( nInitialHazardPtrCount );
        }

        /// Destroys %DHP memory manager
        /**
            The destructor destroys the %DHP global object. After calling this function you may \b NOT
            use CDS data structures based on \p %cds::gc::DHP.
            Usually, the %DHP object is destroyed at the end of your \p main().
        */
        ~DHP()
        {
            dhp::GarbageCollector::destruct( true );
        }

        /// Checks if the count of hazard pointers is no less than \p nCountNeeded
        /**
            The function always returns \p true since the guard count is unlimited for
            the \p %gc::DHP garbage collector.
        */
        static CDS_CONSTEXPR bool check_available_guards(
#ifdef CDS_DOXYGEN_INVOKED
            size_t nCountNeeded,
#else
            size_t
#endif
        )
        {
            return true;
        }

        /// Set memory management functions
        /**
            @note This function may be called <b>BEFORE</b> creating an instance
            of Dynamic Hazard Pointer SMR

            The SMR object allocates memory for thread-specific data and for
            the SMR object itself.
            By default, the standard \p new and \p delete operators are used.
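
            A sketch of plugging in a custom allocator (using \p std::malloc / \p std::free for illustration):
            @code
            // must be called before the DHP instance is constructed
            cds::gc::DHP::set_memory_allocator( std::malloc, std::free );
            @endcode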
        */
        static void set_memory_allocator(
            void* ( *alloc_func )( size_t size ),   ///< \p malloc() function
            void( *free_func )( void * p )          ///< \p free() function
        )
        {
            dhp::smr::set_memory_allocator( alloc_func, free_func );
        }

        /// Retire pointer \p p with function \p func
        /**
            The function places pointer \p p into the array of pointers ready for removal
            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
            \p func is a disposer: when \p p can be safely removed, \p func is called.
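
            A brief usage sketch (the \p foo type and \p foo_disposer function are illustrative only):
            \code
            struct foo { int n; };

            void foo_disposer( foo* p )
            {
                delete p;
            }

            // ...
            foo* p = new foo;
            // ... use p in a lock-free manner ...
            cds::gc::DHP::retire( p, foo_disposer );  // p will be deleted when it is safe to do so
            \endcode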
        */
        template <typename T>
        static void retire( T * p, void (* func)(T *))
        {
            dhp::thread_data* rec = dhp::smr::tls();
            if ( !rec->retired_.push( dhp::retired_ptr( p, func ) ) )
                dhp::smr::instance().scan( rec );
        }

        /// Retire pointer \p p with functor of type \p Disposer
        /**
            The function places pointer \p p into the array of pointers ready for removal
            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.

            Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
            \code
            template <typename T>
            struct disposer {
                void operator()( T * p );   // disposing operator
            };
            \endcode
            Since the functor call can happen at any time after the \p retire() call, additional restrictions are imposed on the \p Disposer type:
            - it should be a stateless functor
            - it should be default-constructible
            - the result of the functor call with argument \p p should not depend on where the functor is called.

            \par Examples:
            Operator \p delete functor:
            \code
            template <typename T>
            struct disposer {
                void operator ()( T * p ) {
                    delete p;
                }
            };

            // How to call the DHP::retire method
            int * p = new int;

            // ... use p in lock-free manner

            cds::gc::DHP::retire<disposer>( p );   // place p to retired pointer array of DHP SMR
            \endcode

            Functor based on \p std::allocator :
            \code
            template <typename Alloc = std::allocator<int> >
            struct disposer {
                template <typename T>
                void operator()( T * p ) {
                    typedef typename Alloc::template rebind<T>::other alloc_t;
                    alloc_t a;
                    a.destroy( p );
                    a.deallocate( p, 1 );
                }
            };
            \endcode
        */
        template <class Disposer, typename T>
        static void retire( T * p )
        {
            if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
                scan();
        }

        /// Checks if Dynamic Hazard Pointer GC is constructed and may be used
        static bool isUsed()
        {
            return dhp::smr::isUsed();
        }

        /// Forces a GC cycle for the current thread
        /**
            Usually, this function should not be called directly.
        */
        static void scan()
        {
            dhp::smr::instance().scan( dhp::smr::tls() );
        }

        /// Synonym for \p scan()
        static void force_dispose()
        {
            scan();
        }
    };

}} // namespace cds::gc

#endif // #ifndef CDSLIB_GC_DHP_SMR_H