/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CDSLIB_GC_DETAILS_DHP_H
#define CDSLIB_GC_DETAILS_DHP_H

#include <mutex>        // unique_lock
#include <cds/algo/atomic.h>
#include <cds/algo/int_algo.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
#include <cds/sync/spinlock.h>

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(push)
#   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
#endif

//@cond
namespace cds { namespace gc {

    /// Dynamic Hazard Pointer reclamation schema
    /**
        The cds::gc::dhp namespace and its members are the internal representation of the GC and should not be used directly.
        Use the cds::gc::DHP class in your code.

        The Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of the DHP schema is
        the GC class and its nested classes. Before using any DHP-related class you must initialize the DHP garbage collector
        by constructing a cds::gc::DHP object at the beginning of your main().
        See the cds::gc::DHP class for explanation.

        \par Implementation issues
            The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
            This solution should not introduce a significant performance bottleneck: each thread has its own set
            of guards allocated from the global list of free guards, and the global list is accessed only when
            all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
            Guards allocated for a thread are pushed back to the global list only when the thread terminates.
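
        \par Example
            A minimal usage sketch (using the public cds::gc::DHP wrapper rather than this internal namespace):
            \code
            #include <cds/init.h>
            #include <cds/gc/dhp.h>

            int main()
            {
                cds::Initialize();          // initialize the library
                {
                    cds::gc::DHP dhpGC;     // construct the DHP GC singleton

                    // ... attach threads and use DHP-based containers ...
                }
                cds::Terminate();           // the DHP singleton has been destroyed by now
                return 0;
            }
            \endcode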
    */
    namespace dhp {

        // Forward declarations
        class Guard;
        template <size_t Count> class GuardArray;
        class ThreadGC;
        class GarbageCollector;

        /// Retired pointer type
        typedef cds::gc::details::retired_ptr retired_ptr;

        using cds::gc::details::free_retired_ptr_func;

        /// Details of Dynamic Hazard Pointer algorithm
        namespace details {

            // Forward declaration
            class liberate_set;

            /// Retired pointer buffer node
            struct retired_ptr_node {
                retired_ptr m_ptr;                                 ///< retired pointer
                atomics::atomic<retired_ptr_node *> m_pNext;       ///< next retired pointer in buffer
                atomics::atomic<retired_ptr_node *> m_pNextFree;   ///< next item in free list of \p retired_ptr_node
            };

            /// Internal guard representation
            struct guard_data {
                typedef void * guarded_ptr;  ///< type of value guarded

                atomics::atomic<guarded_ptr>  pPost;       ///< pointer guarded
                atomics::atomic<guard_data *> pGlobalNext; ///< next item of global list of allocated guards
                atomics::atomic<guard_data *> pNextFree;   ///< pointer to the next item in global or thread-local free-list

                guard_data * pThreadNext; ///< next item of thread's local list of guards

                guard_data() CDS_NOEXCEPT
                    : pPost( nullptr )
                    , pGlobalNext( nullptr )
                    , pNextFree( nullptr )
                    , pThreadNext( nullptr )
                {}

                void init() CDS_NOEXCEPT
                {
                    pPost.store( nullptr, atomics::memory_order_relaxed );
                }

                /// Checks if the guard is free, i.e. it does not guard any pointer
                bool isFree() const CDS_NOEXCEPT
                {
                    return pPost.load( atomics::memory_order_acquire ) == nullptr;
                }
            };

            /// Guard allocator
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class guard_allocator
            {
                cds::details::Allocator<details::guard_data> m_GuardAllocator; ///< guard allocator

                atomics::atomic<guard_data *>  m_GuardList;     ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
                atomics::atomic<guard_data *>  m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
                cds::sync::spin                m_freeListLock;  ///< Access to m_FreeGuardList

                /*
                    Unfortunately, access to the list of free guards is lock-based.
                    Lock-free manipulation of the guard free-list is ABA-prone.
                    TODO: work with m_FreeGuardList in a lock-free manner.
                */

            private:
                /// Allocates a new guard from the heap
                guard_data * allocNew()
                {
                    //TODO: the allocator should allocate in blocks

                    details::guard_data * pGuard = m_GuardAllocator.New();

                    // Link the guard into the global list.
                    // m_GuardList is an accumulating list: it does not support concurrent deletion,
                    // so the ABA problem is impossible for it.
                    details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
                    do {
                        pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is updated by compare_exchange_weak on failure
                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    pGuard->init();
                    return pGuard;
                }

            public:
                // Default ctor
                guard_allocator() CDS_NOEXCEPT
                    : m_GuardList( nullptr )
                    , m_FreeGuardList( nullptr )
                {}

                // Destructor
                ~guard_allocator()
                {
                    guard_data * pNext;
                    for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
                        pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
                        m_GuardAllocator.Delete( pData );
                    }
                }

                /// Allocates a guard from the free list, or from the heap if the free list is empty
                guard_data * alloc()
                {
                    // Try to pop a guard from the free-list
                    details::guard_data * pGuard;

                    {
                        std::unique_lock<cds::sync::spin> al( m_freeListLock );
                        pGuard = m_FreeGuardList.load( atomics::memory_order_relaxed );
                        if ( pGuard )
                            m_FreeGuardList.store( pGuard->pNextFree.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
                    }
                    if ( !pGuard )
                        return allocNew();

                    pGuard->init();
                    return pGuard;
                }

                /// Frees guard \p pGuard
                /**
                    The function places the guard \p pGuard into the free-list
                */
                void free( guard_data * pGuard ) CDS_NOEXCEPT
                {
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );

                    std::unique_lock<cds::sync::spin> al( m_freeListLock );
                    pGuard->pNextFree.store( m_FreeGuardList.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
                }

                /// Allocates a list of guards
                /**
                    The list returned is linked by the guard's \p pThreadNext and \p pNextFree fields.

                    cds::gc::dhp::ThreadGC supporting method
                */
                guard_data * allocList( size_t nCount )
                {
                    assert( nCount != 0 );

                    guard_data * pHead;
                    guard_data * pLast;

                    pHead =
                        pLast = alloc();

                    // The allocated guard list is private to the thread,
                    // so we can use relaxed memory order
                    while ( --nCount ) {
                        guard_data * p = alloc();
                        pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );

                    return pHead;
                }

                /// Frees a list of guards
                /**
                    The list \p pList is linked by the guard's \p pThreadNext field.

                    cds::gc::dhp::ThreadGC supporting method
                */
                void freeList( guard_data * pList ) CDS_NOEXCEPT
                {
                    assert( pList != nullptr );

                    guard_data * pLast = pList;
                    while ( pLast->pThreadNext ) {
                        pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
                        guard_data * p;
                        pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    std::unique_lock<cds::sync::spin> al( m_freeListLock );
                    pLast->pNextFree.store( m_FreeGuardList.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
                }

                /// Returns the head of the list of allocated guards
                guard_data * begin() CDS_NOEXCEPT
                {
                    return m_GuardList.load( atomics::memory_order_acquire );
                }
            };

            /// Retired pointer buffer
            /**
                The buffer of retired nodes ready for liberating.
                When the buffer size exceeds a threshold, the GC calls the \p scan() procedure
                to free retired nodes.
            */
            class retired_ptr_buffer
            {
                atomics::atomic<retired_ptr_node *> m_pHead;        ///< head of buffer
                atomics::atomic<size_t>             m_nItemCount;   ///< buffer's item count

            public:
                retired_ptr_buffer() CDS_NOEXCEPT
                    : m_pHead( nullptr )
                    , m_nItemCount( 0 )
                {}

                ~retired_ptr_buffer() CDS_NOEXCEPT
                {
                    assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
                }

                /// Pushes a new node into the buffer. Returns the current buffer size
                size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
                {
                    retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
                    do {
                        node.m_pNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is updated by compare_exchange_weak on failure
                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
                }

                /// Pushes the [pFirst, pLast] list of \p nSize nodes linked by the pNext field
                size_t push_list( retired_ptr_node* pFirst, retired_ptr_node* pLast, size_t nSize )
                {
                    assert( pFirst );
                    assert( pLast );

                    retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
                    do {
                        pLast->m_pNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is updated by compare_exchange_weak on failure
                    } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + nSize;
                }

                /// Result of \ref dhp_gc_privatize "privatize" function.
                /**
                    The \p privatize function returns the retired node list as \p first and the size of that list as \p second.
                */
                typedef std::pair<retired_ptr_node *, size_t> privatize_result;

                /// Gets the current list of retired pointers and clears the list
                /**@anchor dhp_gc_privatize
                */
                privatize_result privatize() CDS_NOEXCEPT
                {
                    privatize_result res;

                    // The item counter is needed only as a threshold for the \p scan() function,
                    // so we may clear it without synchronization with m_pHead
                    res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );

                    res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );

                    return res;
                }

                /// Returns the current size of the buffer (approximate)
                size_t size() const CDS_NOEXCEPT
                {
                    return m_nItemCount.load( atomics::memory_order_relaxed );
                }
            };
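
            /*
                Illustration only (a sketch, not the actual scan() implementation):
                a consumer privatizes the buffer and walks the m_pNext chain. 'buf' is
                a hypothetical retired_ptr_buffer instance, the "is the pointer still
                guarded?" check is elided, and retired_ptr::free() is assumed to invoke
                the deleter stored in the retired pointer.

                    retired_ptr_buffer::privatize_result res = buf.privatize();
                    for ( retired_ptr_node * p = res.first; p; ) {
                        retired_ptr_node * pNext = p->m_pNext.load( atomics::memory_order_relaxed );
                        p->m_ptr.free();    // reclaim the retired object
                        p = pNext;
                    }
            */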

            /// Pool of retired pointers
            /**
                The class acts as an allocator of retired nodes.
                Retired pointers are linked in a lock-free list.
            */
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class retired_ptr_pool {
                /// Pool item
                typedef retired_ptr_node    item;

                /// Count of items in a block
                static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;

                /// Pool block
                struct block {
                    atomics::atomic<block *> pNext;     ///< next block
                    item        items[m_nItemPerBlock]; ///< item array
                };

                atomics::atomic<block *> m_pBlockListHead;   ///< head of allocated block list

                // To solve the ABA problem we use an epoch-based approach
                unsigned int const m_nEpochBitmask;             ///< Epoch bitmask (log2( m_nEpochCount ))
                atomics::atomic<unsigned int> m_nCurEpoch;      ///< Current epoch
                atomics::atomic<item *>* m_pEpochFree;          ///< List of free items per epoch
                atomics::atomic<item *>  m_pGlobalFreeHead;     ///< Head of unallocated item list

                typedef cds::details::Allocator< block, Alloc > block_allocator;
                typedef cds::details::Allocator< atomics::atomic<item *>, Alloc > epoch_array_alloc;

            private:
                void allocNewBlock()
                {
                    // allocate new block
                    block * pNew = block_allocator().New();

                    // link items within the block
                    item * pLastItem = pNew->items + m_nItemPerBlock - 1;
                    for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
                        pItem->m_pNextFree.store( pItem + 1, atomics::memory_order_release );
                        CDS_STRICT_DO( pItem->m_pNext.store( nullptr, atomics::memory_order_relaxed ));
                    }

                    // link the new block into the block list
                    {
                        block * pHead = m_pBlockListHead.load( atomics::memory_order_relaxed );
                        do {
                            pNew->pNext.store( pHead, atomics::memory_order_relaxed );
                            // pHead is updated by compare_exchange_weak on failure
                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_relaxed, atomics::memory_order_relaxed ));
                    }

                    // link the block's items into the global free list
                    {
                        item * pHead = m_pGlobalFreeHead.load( atomics::memory_order_relaxed );
                        do {
                            pLastItem->m_pNextFree.store( pHead, atomics::memory_order_release );
                            // pHead is updated by compare_exchange_weak on failure
                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }
                }

                unsigned int current_epoch() const CDS_NOEXCEPT
                {
                    return m_nCurEpoch.load( atomics::memory_order_acquire ) & m_nEpochBitmask;
                }

                unsigned int next_epoch() const CDS_NOEXCEPT
                {
                    return ( m_nCurEpoch.load( atomics::memory_order_acquire ) - 1 ) & m_nEpochBitmask;
                }

            public:
                retired_ptr_pool( unsigned int nEpochCount = 8 )
                    : m_pBlockListHead( nullptr )
                    , m_nEpochBitmask( static_cast<unsigned int>( beans::ceil2( nEpochCount )) - 1 )
                    , m_nCurEpoch( 0 )
                    , m_pEpochFree( epoch_array_alloc().NewArray( m_nEpochBitmask + 1 ))
                    , m_pGlobalFreeHead( nullptr )
                {
                    for ( unsigned int i = 0; i <= m_nEpochBitmask; ++i )
                        m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );

                    allocNewBlock();
                }

                ~retired_ptr_pool()
                {
                    block_allocator a;
                    block * p;
                    for ( block * pBlock = m_pBlockListHead.load( atomics::memory_order_relaxed ); pBlock; pBlock = p ) {
                        p = pBlock->pNext.load( atomics::memory_order_relaxed );
                        a.Delete( pBlock );
                    }

                    epoch_array_alloc().Delete( m_pEpochFree, m_nEpochBitmask + 1 );
                }

                /// Increments the current epoch
                void inc_epoch() CDS_NOEXCEPT
                {
                    m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
                }

                /// Allocates a new retired pointer
                retired_ptr_node& alloc()
                {
                    unsigned int nEpoch;
                    item * pItem;
                    for (;;) {
                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load( atomics::memory_order_acquire );
                        if ( !pItem )
                            goto retry;
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem,
                                                                         pItem->m_pNextFree.load( atomics::memory_order_acquire ),
                                                                         atomics::memory_order_acquire, atomics::memory_order_relaxed ))
                        {
                            goto success;
                        }
                    }

                    // The epoch free list is empty:
                    // allocate from the global free list
                retry:
                    pItem = m_pGlobalFreeHead.load( atomics::memory_order_relaxed );
                    do {
                        if ( !pItem ) {
                            allocNewBlock();
                            goto retry;
                        }
                        // pItem is updated by compare_exchange_weak on failure
                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem,
                                                                        pItem->m_pNextFree.load( atomics::memory_order_acquire ),
                                                                        atomics::memory_order_acquire, atomics::memory_order_relaxed ));

                success:
                    CDS_STRICT_DO( pItem->m_pNextFree.store( nullptr, atomics::memory_order_relaxed ));
                    return *pItem;
                }

                /// Allocates and initializes a new retired pointer
                retired_ptr_node& alloc( const retired_ptr& p )
                {
                    retired_ptr_node& node = alloc();
                    node.m_ptr = p;
                    return node;
                }

                /// Places the list [pHead, pTail] of retired pointers into the pool (frees the retired pointers)
                /**
                    The list is linked via the m_pNextFree field
                */
                void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
                {
                    assert( pHead != nullptr );
                    assert( pTail != nullptr );

                    unsigned int nEpoch;
                    item * pCurHead;
                    do {
                        pCurHead = m_pEpochFree[ nEpoch = next_epoch() ].load( atomics::memory_order_acquire );
                        pTail->m_pNextFree.store( pCurHead, atomics::memory_order_release );
                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
                }
            };
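
            /*
                A note on the epoch scheme (informal): alloc() pops items only from the
                current epoch's free list, while free_range() pushes items onto the
                previous epoch's list, (epoch - 1) & mask. A freed item therefore becomes
                allocatable again only after inc_epoch() has advanced the counter all the
                way around to that list (with the default of 8 epochs, 7 increments later),
                so a thread still spinning in an old CAS loop on the current epoch's list
                does not observe a node that was freed and immediately reused (ABA).
            */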

            /// Uninitialized guard
            class guard
            {
                friend class dhp::ThreadGC;
            protected:
                details::guard_data * m_pGuard; ///< Pointer to guard data

            public:
                /// Initializes an empty guard
                CDS_CONSTEXPR guard() CDS_NOEXCEPT
                    : m_pGuard( nullptr )
                {}

                /// Copy-ctor is disabled
                guard( guard const& ) = delete;

                /// Move-ctor is disabled
                guard( guard&& ) = delete;

                /// Object destructor, does nothing
                ~guard() CDS_NOEXCEPT
                {}

                /// Gets the current guarded pointer
                void * get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    return m_pGuard->pPost.load( order );
                }

                /// Guards pointer \p p
                void set( void * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( p, order );
                }

                /// Clears the guard
                void clear( atomics::memory_order order = atomics::memory_order_relaxed ) CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( nullptr, order );
                }

                /// Guards pointer \p p
                template <typename T>
                T * operator =( T * p ) CDS_NOEXCEPT
                {
                    set( reinterpret_cast<void *>( const_cast<T *>( p )));
                    return p;
                }

                std::nullptr_t operator =( std::nullptr_t ) CDS_NOEXCEPT
                {
                    clear();
                    return nullptr;
                }

            public: // for ThreadGC.
                /*
                    GCC cannot compile the template versions of ThreadGC::allocGuard/freeGuard:
                    the compiler produces the error "'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard' is protected"
                    despite the fact that ThreadGC is declared as a friend of the guard class.
                    Therefore, we have to add the public set_guard/get_guard functions.
                */
                /// Sets guard data
                void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
                {
                    assert( m_pGuard == nullptr );
                    m_pGuard = pGuard;
                }

                /// Gets current guard data
                details::guard_data * get_guard() CDS_NOEXCEPT
                {
                    return m_pGuard;
                }
                /// Gets current guard data
                details::guard_data * get_guard() const CDS_NOEXCEPT
                {
                    return m_pGuard;
                }

                details::guard_data * release_guard() CDS_NOEXCEPT
                {
                    details::guard_data * p = m_pGuard;
                    m_pGuard = nullptr;
                    return p;
                }

                bool is_initialized() const
                {
                    return m_pGuard != nullptr;
                }
            };

        } // namespace details

        /// Guard
        /**
            This class represents an auto guard: the ctor allocates a guard from the guard pool,
            the dtor returns the guard back to the pool of free guards.
        */
        class Guard: public details::guard
        {
            typedef details::guard    base_class;
            friend class ThreadGC;
        public:
            /// Allocates a guard from \p gc GC. \p gc must be the ThreadGC object of the current thread
            Guard(); // inline in dhp_impl.h

            /// Returns the allocated guard back to the pool of free guards
            ~Guard();    // inline in dhp_impl.h

            /// Guards pointer \p p
            template <typename T>
            T * operator =( T * p ) CDS_NOEXCEPT
            {
                return base_class::operator =<T>( p );
            }

            std::nullptr_t operator =( std::nullptr_t ) CDS_NOEXCEPT
            {
                return base_class::operator =( nullptr );
            }
        };
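
        /*
            Usage sketch (illustration only; application code should use the public
            cds::gc::DHP::Guard instead of this internal class). 'Node' and 'pHead'
            are hypothetical names, and the current thread is assumed to be attached
            to the DHP GC. Note the protect-then-recheck loop that the hazard pointer
            technique requires:

                dhp::Guard guard;
                Node * p;
                do {
                    p = pHead.load( atomics::memory_order_acquire );
                    guard = p;      // publish the hazard
                } while ( p != pHead.load( atomics::memory_order_acquire ));
                // p (if not null) is now safe to dereference until the guard is cleared
                guard = nullptr;
        */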

        /// Array of guards
        /**
            This class represents an array of auto guards: the ctor allocates \p Count guards from the guard pool,
            the dtor returns the allocated guards back to the pool.
        */
        template <size_t Count>
        class GuardArray
        {
            details::guard      m_arr[Count];           ///< array of guards
            const static size_t c_nCapacity = Count;    ///< Array capacity (equal to \p Count template parameter)

        public:
            /// Rebind array for another size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount>  other;  ///< rebinding result
            };

        public:
            /// Allocates an array of guards from \p gc which must be the ThreadGC object of the current thread
            GuardArray();    // inline in dhp_impl.h

            /// The object is not copy-constructible
            GuardArray( GuardArray const& ) = delete;

            /// The object is not move-constructible
            GuardArray( GuardArray&& ) = delete;

            /// Returns the guards allocated back to the pool
            ~GuardArray();    // inline in dhp_impl.h

            /// Returns the capacity of the array
            CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
            {
                return c_nCapacity;
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
            details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
            const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Sets the guard \p nIndex. 0 <= \p nIndex < \p Count
            template <typename T>
            void set( size_t nIndex, T * p ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].set( p );
            }

            /// Clears (sets to \p nullptr) the guard \p nIndex
            void clear( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].clear();
            }

            /// Clears all guards in the array
            void clearAll() CDS_NOEXCEPT
            {
                for ( size_t i = 0; i < capacity(); ++i )
                    clear( i );
            }
        };
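
        /*
            Usage sketch (illustration only): a GuardArray protects several pointers
            at once, for example the pred/cur pair of a list traversal. 'pPred' and
            'pCur' are hypothetical; the current thread is assumed to be attached to
            the DHP GC, and each protected pointer should be re-checked after guarding,
            as in the single-guard example above:

                dhp::GuardArray<2> guards;
                guards.set( 0, pPred );     // protect pPred
                guards.set( 1, pCur );      // protect pCur
                // ... both pointers are safe to dereference ...
                guards.clearAll();          // release both protections
        */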

        /// Memory manager (Garbage collector)
        class CDS_EXPORT_API GarbageCollector
        {
        private:
            friend class ThreadGC;

            /// Internal GC statistics
            struct internal_stat
            {
                atomics::atomic<size_t>  m_nGuardCount;     ///< Total guard count
                atomics::atomic<size_t>  m_nFreeGuardCount; ///< Count of free guards

                internal_stat()
                    : m_nGuardCount( 0 )
                    , m_nFreeGuardCount( 0 )
                {}
            };

        public:
            /// Exception "No GarbageCollector object is created"
            class not_initialized : public std::runtime_error
            {
            public:
                //@cond
                not_initialized()
                    : std::runtime_error( "Global DHP GarbageCollector is not initialized" )
                {}
                //@endcond
            };

            /// Internal GC statistics
            struct InternalState
            {
                size_t m_nGuardCount;       ///< Total guard count
                size_t m_nFreeGuardCount;   ///< Count of free guards

                //@cond
                InternalState()
                    : m_nGuardCount( 0 )
                    , m_nFreeGuardCount( 0 )
                {}

                InternalState& operator =( internal_stat const& s )
                {
                    m_nGuardCount = s.m_nGuardCount.load( atomics::memory_order_relaxed );
                    m_nFreeGuardCount = s.m_nFreeGuardCount.load( atomics::memory_order_relaxed );

                    return *this;
                }
                //@endcond
            };

        private:
            static GarbageCollector * m_pManager;   ///< GC global instance

            atomics::atomic<size_t>  m_nLiberateThreshold;       ///< Max size of retired pointer buffer before \p scan() is called
            const size_t             m_nInitialThreadGuardCount; ///< Initial count of guards allocated for a ThreadGC

            details::guard_allocator<>      m_GuardPool;        ///< Guard pool
            details::retired_ptr_pool<>     m_RetiredAllocator; ///< Pool of free retired pointers
            details::retired_ptr_buffer     m_RetiredBuffer;    ///< Retired pointer buffer for liberating

            internal_stat   m_stat;         ///< Internal statistics
            bool            m_bStatEnabled; ///< Internal statistics enabled

        public:
            /// Initializes the DHP memory manager singleton
            /**
                This member function creates and initializes the DHP global object.
                The function should be called before using CDS data structures based on the cds::gc::DHP GC. Usually,
                this member function is called in the \p main() function. See cds::gc::dhp for an example.
                After calling this function you may use CDS data structures based on cds::gc::DHP.

                \par Parameters
                - \p nLiberateThreshold - the \p scan() threshold. When the count of retired pointers reaches this value,
                    the \ref dhp_gc_liberate "scan()" member function is called to free retired pointers.
                    If \p nLiberateThreshold <= 1, \p scan() is called after each \ref dhp_gc_retirePtr "retirePtr" call.
                - \p nInitialThreadGuardCount - the initial count of guards allocated for a ThreadGC. When a thread
                    is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
                    If needed, the thread's local guard pool grows automatically from the common pool.
                    When the thread terminates, its guard pool is returned to the GC's common pool.
                - \p nEpochCount: internally, the DHP memory manager uses an epoch-based scheme to solve
                    the ABA problem for its internal data. \p nEpochCount specifies the epoch count,
                    i.e. the count of simultaneously working threads that remove elements
                    of DHP-based concurrent data structures. The default value is 16.
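
                \par Example
                A sketch of explicit initialization with non-default thresholds
                (usually you do not call \p Construct() directly: constructing a
                cds::gc::DHP object does it for you):
                \code
                // scan() after every 2048 retired pointers, 16 guards per new thread,
                // 32 epochs for the internal retired-pointer pool
                cds::gc::dhp::GarbageCollector::Construct( 2048, 16, 32 );
                // ... use DHP-based containers ...
                cds::gc::dhp::GarbageCollector::Destruct();
                \endcode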
            */
            static void CDS_STDCALL Construct(
                size_t nLiberateThreshold = 1024
                , size_t nInitialThreadGuardCount = 8
                , size_t nEpochCount = 16
            );

            /// Destroys the DHP memory manager
            /**
                The member function destroys the DHP global object. After calling this function you may \b NOT
                use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
                at the end of your \p main(). See cds::gc::dhp for an example.
            */
            static void CDS_STDCALL Destruct();

            /// Returns pointer to the GarbageCollector instance
            /**
                If the DHP GC is not initialized, a \p not_initialized exception is thrown
            */
            static GarbageCollector& instance()
            {
                if ( m_pManager == nullptr )
                    throw not_initialized();
                return *m_pManager;
            }

            /// Checks if the global GC object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return m_pManager != nullptr;
            }

        public:
            //@{
            /// Internal interface

            /// Allocates a guard
            details::guard_data * allocGuard()
            {
                return m_GuardPool.alloc();
            }

            /// Frees guard \p pGuard for reuse in the future
            void freeGuard( details::guard_data * pGuard )
            {
                m_GuardPool.free( pGuard );
            }

            /// Allocates a guard list for a thread
            details::guard_data * allocGuardList( size_t nCount )
            {
                return m_GuardPool.allocList( nCount );
            }

            /// Frees the thread's guard list pointed to by \p pList
            void freeGuardList( details::guard_data * pList )
            {
                m_GuardPool.freeList( pList );
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the thread's array of retired pointers for deferred reclamation
            /**@anchor dhp_gc_retirePtr
            */
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc )));
            }

            /// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
            void retirePtr( retired_ptr const& p )
            {
                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc( p )) >= m_nLiberateThreshold.load( atomics::memory_order_relaxed ))
                    scan();
            }
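
            /*
                Usage sketch (illustration only; 'Foo' and 'disposeFoo' are hypothetical):

                    static void disposeFoo( Foo * p ) { delete p; }
                    ...
                    GarbageCollector::instance().retirePtr( pFoo, disposeFoo );
                    // disposeFoo( pFoo ) is called later by scan(), once no guard protects pFoo
            */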

        protected:
            /// Liberate function
            /** @anchor dhp_gc_liberate
                The main function of the Dynamic Hazard Pointer algorithm. It tries to free retired pointers that are not
                trapped by any guard.
            */
            void scan();
            //@}

        public:
            /// Gets internal statistics
            InternalState& getInternalState( InternalState& stat ) const
            {
                return stat = m_stat;
            }

            /// Checks if internal statistics is enabled
            bool isStatisticsEnabled() const
            {
                return m_bStatEnabled;
            }

            /// Enables/disables internal statistics
            bool enableStatistics( bool bEnable )
            {
                bool bEnabled = m_bStatEnabled;
                m_bStatEnabled = bEnable;
                return bEnabled;
            }

        private:
            GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount, size_t nEpochCount );
            ~GarbageCollector();
        };

        /// Thread GC
        /**
            To use the Dynamic Hazard Pointer reclamation schema, each thread object must be linked with an object of the ThreadGC class
            that interacts with the GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
            at the start of each thread that uses the DHP GC. Before terminating a thread linked to the DHP GC, it is necessary to call
            \ref cds_threading "cds::threading::Manager::detachThread()".

            The ThreadGC object maintains two lists:
            \li Thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
            \li Free guard list: the list of thread-local free guards (linked by the \p pNextFree field)

            The free guard list is a subset of the thread guard list.
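
            \par Example
            A sketch of a thread function that uses DHP-based containers
            (cds::threading::Manager creates and initializes the per-thread ThreadGC):
            \code
            void thread_func()
            {
                cds::threading::Manager::attachThread();    // links this thread to the DHP GC

                // ... work with DHP-based containers ...

                cds::threading::Manager::detachThread();    // returns the thread's guards to the GC
            }
            \endcode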
        */
        class ThreadGC
        {
            GarbageCollector&        m_gc;    ///< reference to the GC singleton
            details::guard_data *    m_pList; ///< Local list of guards owned by the thread
            details::guard_data *    m_pFree; ///< The list of free guards from m_pList

        public:
            /// Default constructor
            ThreadGC()
                : m_gc( GarbageCollector::instance() )
                , m_pList( nullptr )
                , m_pFree( nullptr )
            {}

            /// The object is not copy-constructible
            ThreadGC( ThreadGC const& ) = delete;

            /// Dtor calls fini()
            ~ThreadGC()
            {
                fini();
            }

            /// Initialization. Repeated calls are allowed
            void init()
            {
                if ( !m_pList ) {
                    m_pList =
                        m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
                }
            }

            /// Finalization. Repeated calls are allowed
            void fini()
            {
                if ( m_pList ) {
                    m_gc.freeGuardList( m_pList );
                    m_pList =
                        m_pFree = nullptr;
                }
            }
        public:
            /// Initializes guard \p g
            void allocGuard( dhp::details::guard& g )
            {
                assert( m_pList != nullptr );
                if ( !g.m_pGuard ) {
                    if ( m_pFree ) {
                        g.m_pGuard = m_pFree;
                        m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
                    }
                    else {
                        g.m_pGuard = m_gc.allocGuard();
                        g.m_pGuard->pThreadNext = m_pList;
                        m_pList = g.m_pGuard;
                    }
                }
            }

            /// Frees guard \p g
            void freeGuard( dhp::details::guard& g )
            {
                assert( m_pList != nullptr );
                if ( g.m_pGuard ) {
                    g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                    m_pFree = g.m_pGuard;
                    g.m_pGuard = nullptr;
                }
            }

            /// Initializes guard array \p arr
            template <size_t Count>
            void allocGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );
                size_t nCount = 0;

                // First take guards from the thread-local free list
                while ( m_pFree && nCount < Count ) {
                    arr[nCount].set_guard( m_pFree );
                    m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
                    ++nCount;
                }

                // Then allocate the rest from the GC's common pool
                while ( nCount < Count ) {
                    details::guard& g = arr[nCount++];
                    g.set_guard( m_gc.allocGuard() );
                    g.get_guard()->pThreadNext = m_pList;
                    m_pList = g.get_guard();
                }
            }

            /// Frees guard array \p arr
            template <size_t Count>
            void freeGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );

                // Clear the guards, link them into a chain, then prepend the chain to the free list
                details::guard_data * pGuard;
                for ( size_t i = 0; i < Count - 1; ++i ) {
                    pGuard = arr[i].get_guard();
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
                }
                pGuard = arr[Count-1].get_guard();
                pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = arr[0].get_guard();
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                m_gc.retirePtr( p, pFunc );
            }

            /// Runs the retiring cycle
            void scan()
            {
                m_gc.scan();
            }
        };
    }   // namespace dhp
}}  // namespace cds::gc
//@endcond

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(pop)
#endif

#endif // #ifndef CDSLIB_GC_DETAILS_DHP_H