/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CDSLIB_GC_DETAILS_DHP_H
#define CDSLIB_GC_DETAILS_DHP_H

#include <mutex>        // unique_lock
#include <cds/algo/atomic.h>
#include <cds/algo/int_algo.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
#include <cds/sync/spinlock.h>

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(push)
#   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
#endif

//@cond
namespace cds { namespace gc {

    /// Dynamic Hazard Pointer reclamation schema
    /**
        The cds::gc::dhp namespace and its members are the internal representation of the GC and should not be used directly.
        Use the cds::gc::DHP class in your code.

        The Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of the DHP schema is
        the GC class and its nested classes. Before using any DHP-related class you must initialize the DHP garbage collector
        by constructing a cds::gc::DHP object at the beginning of your main();
        see the cds::gc::DHP class and the initialization sketch right after this comment.

        \par Implementation issues
            The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
            It seems this solution should not introduce a significant performance bottleneck, because each thread has its own set
            of guards allocated from the global list, and the global list is accessed only when all of the thread's guards
            are busy; in that case the thread allocates the next block of guards from the global list.
            Guards allocated for a thread are pushed back to the global list only when the thread terminates.
    */
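
    /*
        Initialization sketch (illustrative only): the DHP singleton is normally brought up
        through the public cds::gc::DHP wrapper together with cds::Initialize() and
        cds::threading::Manager, not by using the classes of this namespace directly.

        \code
        #include <cds/init.h>       // cds::Initialize, cds::Terminate
        #include <cds/gc/dhp.h>     // cds::gc::DHP

        int main()
        {
            cds::Initialize();                              // initialize libcds infrastructure
            {
                cds::gc::DHP dhpGC;                         // construct the DHP singleton
                cds::threading::Manager::attachThread();    // attach the main thread to the GC

                // ... use DHP-based containers here ...

                cds::threading::Manager::detachThread();
            }
            cds::Terminate();
            return 0;
        }
        \endcode
    */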
    namespace dhp {

        // Forward declarations
        class Guard;
        template <size_t Count> class GuardArray;
        class ThreadGC;
        class GarbageCollector;

        /// Retired pointer type
        typedef cds::gc::details::retired_ptr retired_ptr;

        using cds::gc::details::free_retired_ptr_func;

        /// Details of Dynamic Hazard Pointer algorithm
        namespace details {

            // Forward declaration
            class liberate_set;

            /// Retired pointer buffer node
            struct retired_ptr_node {
                retired_ptr         m_ptr   ;   ///< retired pointer
                atomics::atomic<retired_ptr_node *>  m_pNext     ;   ///< next retired pointer in buffer
                atomics::atomic<retired_ptr_node *>  m_pNextFree ;   ///< next item in free list of \p retired_ptr_node
            };

            /// Internal guard representation
            struct guard_data {
                typedef void * guarded_ptr;  ///< type of value guarded

                atomics::atomic<guarded_ptr>  pPost;       ///< pointer guarded
                atomics::atomic<guard_data *> pGlobalNext; ///< next item of global list of allocated guards
                atomics::atomic<guard_data *> pNextFree;   ///< pointer to the next item in global or thread-local free-list

                guard_data * pThreadNext; ///< next item of thread's local list of guards

                guard_data() CDS_NOEXCEPT
                    : pPost( nullptr )
                    , pGlobalNext( nullptr )
                    , pNextFree( nullptr )
                    , pThreadNext( nullptr )
                {}

                void init() CDS_NOEXCEPT
                {
                    pPost.store( nullptr, atomics::memory_order_relaxed );
                }

                /// Checks if the guard is free, that is, it does not contain any guarded pointer
                bool isFree() const CDS_NOEXCEPT
                {
                    return pPost.load( atomics::memory_order_acquire ) == nullptr;
                }

                guarded_ptr get( atomics::memory_order order = atomics::memory_order_acquire )
                {
                    return pPost.load( order );
                }

                void set( guarded_ptr p, atomics::memory_order order = atomics::memory_order_release )
                {
                    pPost.store( p, order );
                }
            };
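
            /*
                Illustrative sketch of how a guard_data slot protects a pointer
                ('node' and 'head' are hypothetical; 'g' is a guard owned by the current thread):

                \code
                atomics::atomic<node *> head;   // some shared location
                guard_data * g = ...;           // taken from the thread's guard pool

                node * p;
                do {
                    p = head.load( atomics::memory_order_acquire );
                    g->set( p );                // publish the hazard pointer
                    // re-read: if head has changed, p may already be retired, so retry
                } while ( p != head.load( atomics::memory_order_acquire ));

                // p is now protected from reclamation until the guard is cleared
                g->set( nullptr );
                \endcode
            */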

            /// Guard allocator
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class guard_allocator
            {
                cds::details::Allocator<details::guard_data>  m_GuardAllocator;   ///< guard allocator

                atomics::atomic<guard_data *>  m_GuardList;     ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
                atomics::atomic<guard_data *>  m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
                cds::sync::spin                m_freeListLock;  ///< Access to m_FreeGuardList

                /*
                    Unfortunately, access to the list of free guards is lock-based.
                    Lock-free manipulation of the guard free-list is ABA-prone.
                    TODO: work with m_FreeGuardList in a lock-free manner.
                */

            private:
                /// Allocates a new guard from the heap. The function uses the aligned allocator
                guard_data * allocNew()
                {
                    //TODO: the allocator should use block allocation

                    details::guard_data * pGuard = m_GuardAllocator.New();

                    // Link the guard into the list.
                    // m_GuardList is an accumulating list: it does not support concurrent deletion,
                    // so the ABA problem is impossible for it
                    details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
                    do {
                        pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_acq_rel, atomics::memory_order_acquire ));

                    pGuard->init();
                    return pGuard;
                }

            public:
                // Default ctor
                guard_allocator() CDS_NOEXCEPT
                    : m_GuardList( nullptr )
                    , m_FreeGuardList( nullptr )
                {}

                // Destructor
                ~guard_allocator()
                {
                    guard_data * pNext;
                    for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
                        pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
                        m_GuardAllocator.Delete( pData );
                    }
                }

                /// Allocates a guard from the free list, or from the heap if the free list is empty
                guard_data* alloc()
                {
                    // Try to pop a guard from the free-list
                    details::guard_data * pGuard;

                    {
                        std::unique_lock<cds::sync::spin> al( m_freeListLock );
                        pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
                        if ( pGuard )
                            m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    }
                    if ( !pGuard )
                        return allocNew();

                    pGuard->init();
                    return pGuard;
                }

                /// Frees guard \p pGuard
                /**
                    The function places the guard \p pGuard into the free-list
                */
                void free( guard_data* pGuard ) CDS_NOEXCEPT
                {
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );

                    std::unique_lock<cds::sync::spin> al( m_freeListLock );
                    pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
                }

                /// Allocates a list of guards
                /**
                    The returned list is linked by the guards' \p pThreadNext and \p pNextFree fields.

                    cds::gc::dhp::ThreadGC supporting method
                */
                guard_data * allocList( size_t nCount )
                {
                    assert( nCount != 0 );

                    guard_data * pHead;
                    guard_data * pLast;

                    pHead =
                        pLast = alloc();

                    // The allocated guard list is private to the calling thread,
                    // so we can use relaxed memory order
                    while ( --nCount ) {
                        guard_data * p = alloc();
                        pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );

                    return pHead;
                }

                /// Frees a list of guards
                /**
                    The list \p pList is linked by the guards' \p pThreadNext field.

                    cds::gc::dhp::ThreadGC supporting method
                */
                void freeList( guard_data * pList ) CDS_NOEXCEPT
                {
                    assert( pList != nullptr );

                    guard_data * pLast = pList;
                    while ( pLast->pThreadNext ) {
                        pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
                        guard_data * p;
                        pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    std::unique_lock<cds::sync::spin> al( m_freeListLock );
                    pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
                }

                /// Returns the head of the list of allocated guards
                guard_data * begin() CDS_NOEXCEPT
                {
                    return m_GuardList.load(atomics::memory_order_acquire);
                }
            };

            /// Retired pointer buffer
            /**
                The buffer of retired nodes ready for liberating.
                When the buffer size exceeds a threshold, the GC calls the \p scan() procedure to free
                retired nodes.
            */
            class retired_ptr_buffer
            {
                atomics::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
                atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count

            public:
                retired_ptr_buffer() CDS_NOEXCEPT
                    : m_pHead( nullptr )
                    , m_nItemCount(0)
                {}

                ~retired_ptr_buffer() CDS_NOEXCEPT
                {
                    assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
                }

                /// Pushes a new node into the buffer. Returns the current buffer size
                size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
                {
                    retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
                    do {
                        node.m_pNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_acquire ));

                    return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
                }

                /// Pushes the [pFirst, pLast] list linked by the \p m_pNext field.
                size_t push_list( retired_ptr_node* pFirst, retired_ptr_node* pLast, size_t nSize )
                {
                    assert( pFirst );
                    assert( pLast );

                    retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
                    do {
                        pLast->m_pNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_acquire ));

                    return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + 1;
                }

                /// Result of the \ref dhp_gc_privatize "privatize" function.
                /**
                    The \p privatize function returns the retired node list as \p first and the size of that list as \p second.
                */
                typedef std::pair<retired_ptr_node *, size_t> privatize_result;

                /// Gets the current list of retired pointers and clears the list
                /**@anchor dhp_gc_privatize
                */
                privatize_result privatize() CDS_NOEXCEPT
                {
                    privatize_result res;

                    // The item counter is needed only as a threshold for the \p scan() function,
                    // so we may clear it without synchronization with m_pHead
                    res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
                    res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
                    return res;
                }

                /// Returns the current size of the buffer (approximate)
                size_t size() const CDS_NOEXCEPT
                {
                    return m_nItemCount.load(atomics::memory_order_relaxed);
                }
            };
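
            /*
                Illustrative sketch of the buffer contract (not the actual scan() code;
                obtaining and disposing of retired_ptr_node objects is shown with the
                retired_ptr_pool below):

                \code
                retired_ptr_buffer buf;
                retired_ptr_node& node = ...;     // node filled with a retired pointer

                // retiring side: push the node; the caller compares the returned size
                // with the scan() threshold to decide when to start a reclamation pass
                size_t nSize = buf.push( node );

                // scanning side: take ownership of the whole list in one atomic step
                retired_ptr_buffer::privatize_result chain = buf.privatize();
                for ( retired_ptr_node * p = chain.first; p != nullptr; ) {
                    retired_ptr_node * pNext = p->m_pNext.load( atomics::memory_order_relaxed );
                    // ... free p->m_ptr here if no guard protects it ...
                    p = pNext;
                }
                \endcode
            */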

            /// Pool of retired pointers
            /**
                The class acts as an allocator of retired nodes.
                Retired pointers are linked in lock-free lists.
            */
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class retired_ptr_pool {
                /// Pool item
                typedef retired_ptr_node    item;

                /// Count of items in a block
                static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;

                /// Pool block
                struct block {
                    atomics::atomic<block *> pNext;     ///< next block
                    item        items[m_nItemPerBlock]; ///< item array
                };

                atomics::atomic<block *> m_pBlockListHead;   ///< head of allocated block list

                // To solve the ABA problem we use an epoch-based approach
                unsigned int const m_nEpochBitmask;             ///< Epoch bitmask (log2( m_nEpochCount ))
                atomics::atomic<unsigned int> m_nCurEpoch;      ///< Current epoch
                atomics::atomic<item *>* m_pEpochFree;          ///< List of free items per epoch
                atomics::atomic<item *>  m_pGlobalFreeHead;     ///< Head of unallocated item list

                typedef cds::details::Allocator< block, Alloc > block_allocator;
                typedef cds::details::Allocator< atomics::atomic<item *>, Alloc > epoch_array_alloc;

            private:
                void allocNewBlock()
                {
                    // allocate a new block
                    block * pNew = block_allocator().New();

                    // link items within the block
                    item * pLastItem = pNew->items + m_nItemPerBlock - 1;
                    for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
                        pItem->m_pNextFree.store( pItem + 1, atomics::memory_order_release );
                        CDS_STRICT_DO( pItem->m_pNext.store( nullptr, atomics::memory_order_relaxed ));
                    }

                    // link the new block into the block list
                    {
                        block * pHead = m_pBlockListHead.load(atomics::memory_order_relaxed);
                        do {
                            pNew->pNext.store( pHead, atomics::memory_order_relaxed );
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_acquire ));
                    }

                    // link the block's items into the free list
                    {
                        item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_relaxed);
                        do {
                            pLastItem->m_pNextFree.store( pHead, atomics::memory_order_release );
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_acquire ));
                    }
                }

                unsigned int current_epoch() const CDS_NOEXCEPT
                {
                    return m_nCurEpoch.load(atomics::memory_order_acquire) & m_nEpochBitmask;
                }

                unsigned int next_epoch() const CDS_NOEXCEPT
                {
                    return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & m_nEpochBitmask;
                }

            public:
                retired_ptr_pool( unsigned int nEpochCount = 8 )
                    : m_pBlockListHead( nullptr )
                    , m_nEpochBitmask( static_cast<unsigned int>(beans::ceil2(nEpochCount)) - 1 )
                    , m_nCurEpoch(0)
                    , m_pEpochFree( epoch_array_alloc().NewArray( m_nEpochBitmask + 1))
                    , m_pGlobalFreeHead( nullptr )
                {
                    for (unsigned int i = 0; i <= m_nEpochBitmask; ++i )
                        m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );

                    allocNewBlock();
                }

                ~retired_ptr_pool()
                {
                    block_allocator a;
                    block * p;
                    for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
                        p = pBlock->pNext.load( atomics::memory_order_relaxed );
                        a.Delete( pBlock );
                    }

                    epoch_array_alloc().Delete( m_pEpochFree, m_nEpochBitmask + 1 );
                }

                /// Increments the current epoch
                void inc_epoch() CDS_NOEXCEPT
                {
                    m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
                }

                /// Allocates a new retired pointer node
                retired_ptr_node&  alloc()
                {
                    unsigned int nEpoch;
                    item * pItem;
                    for (;;) {
                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
                        if ( !pItem )
                            goto retry;
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem,
                                                                         pItem->m_pNextFree.load(atomics::memory_order_acquire),
                                                                         atomics::memory_order_acquire, atomics::memory_order_relaxed ))
                        {
                            goto success;
                        }
                    }

                    // The epoch free list is empty;
                    // allocate from the global free list
                retry:
                    pItem = m_pGlobalFreeHead.load( atomics::memory_order_relaxed );
                    do {
                        if ( !pItem ) {
                            allocNewBlock();
                            goto retry;
                        }
                        // pItem is changed by compare_exchange_weak
                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem,
                                                                        pItem->m_pNextFree.load(atomics::memory_order_acquire),
                                                                        atomics::memory_order_acquire, atomics::memory_order_acquire ));

                success:
                    CDS_STRICT_DO( pItem->m_pNextFree.store( nullptr, atomics::memory_order_relaxed ));
                    return *pItem;
                }

                /// Allocates and initializes a new retired pointer node
                retired_ptr_node& alloc( const retired_ptr& p )
                {
                    retired_ptr_node& node = alloc();
                    node.m_ptr = p;
                    return node;
                }

                /// Places the list [pHead, pTail] of retired pointers into the pool (frees the retired pointers)
                /**
                    The list is linked by the m_pNextFree field
                */
                void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
                {
                    assert( pHead != nullptr );
                    assert( pTail != nullptr );

                    unsigned int nEpoch;
                    item * pCurHead;
                    do {
                        pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
                        pTail->m_pNextFree.store( pCurHead, atomics::memory_order_release );
                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
                }
            };
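
            /*
                Illustrative sketch of the pool's epoch contract (the real caller is the GC's
                scan() cycle; 'p', 'disposer', 'pHead' and 'pTail' are hypothetical):

                \code
                retired_ptr_pool<> pool;

                // retiring side: take a node from the pool and fill it with the retired pointer
                retired_ptr_node& node = pool.alloc( retired_ptr( p, disposer ));

                // reclamation side: after the nodes [pHead, pTail] have been processed,
                // return them to the *previous* epoch's free list...
                pool.free_range( pHead, pTail );

                // ...and advance the epoch; the returned nodes become allocatable again only
                // after the epoch counter wraps around to their slot, which keeps the
                // lock-free free lists safe from the ABA problem
                pool.inc_epoch();
                \endcode
            */
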
        } // namespace details

        /// Memory manager (Garbage collector)
        class CDS_EXPORT_API GarbageCollector
        {
        private:
            friend class ThreadGC;

            /// Internal GC statistics
            struct internal_stat
            {
                atomics::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
                atomics::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guards

                internal_stat()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}
            };

        public:
            /// Exception thrown when the global GarbageCollector object is not created
            class not_initialized : public std::runtime_error
            {
            public:
                //@cond
                not_initialized()
                    : std::runtime_error( "Global DHP GarbageCollector is not initialized" )
                {}
                //@endcond
            };

            /// Internal GC statistics
            struct InternalState
            {
                size_t m_nGuardCount       ;   ///< Total guard count
                size_t m_nFreeGuardCount   ;   ///< Count of free guards

                //@cond
                InternalState()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}

                InternalState& operator =( internal_stat const& s )
                {
                    m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
                    m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);

                    return *this;
                }
                //@endcond
            };

        private:
            static GarbageCollector * m_pManager    ;   ///< GC global instance

            atomics::atomic<size_t>  m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call \p scan()
            const size_t             m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC

            details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
            details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
            details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating

            internal_stat   m_stat  ;   ///< Internal statistics
            bool            m_bStatEnabled  ;   ///< Internal statistics enabled

        public:
            /// Initializes the DHP memory manager singleton
            /**
                This member function creates and initializes the DHP global object.
                The function should be called before using CDS data structures based on the cds::gc::DHP GC. Usually,
                this member function is called in the \p main() function. See cds::gc::dhp for an example.
                After calling this function you may use CDS data structures based on cds::gc::DHP.

                \par Parameters
                - \p nLiberateThreshold - \p scan() threshold. When the count of retired pointers reaches this value,
                    the \ref dhp_gc_liberate "scan()" member function is called to free retired pointers.
                    If \p nLiberateThreshold <= 1, \p scan() is called after each \ref dhp_gc_retirePtr "retirePtr" call.
                - \p nInitialThreadGuardCount - initial count of guards allocated for a ThreadGC. When a thread
                    is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
                    If needed, the thread-local guard pool grows automatically from the common pool.
                    When the thread terminates, its guard pool is returned to the common GC pool.
                - \p nEpochCount: internally, the DHP memory manager uses an epoch-based scheme to solve
                    the ABA problem for internal data. \p nEpochCount specifies the epoch count,
                    i.e. the count of simultaneously working threads that remove elements
                    of DHP-based concurrent data structures. The default value is 16.
                A call sketch follows the declaration below.
            */
            static void CDS_STDCALL Construct(
                size_t nLiberateThreshold = 1024
                , size_t nInitialThreadGuardCount = 8
                , size_t nEpochCount = 16
            );
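
            /*
                Illustrative call sketch: Construct()/Destruct() are normally invoked
                indirectly by the cds::gc::DHP wrapper rather than called by user code.

                \code
                // at the beginning of main(), after cds::Initialize():
                cds::gc::dhp::GarbageCollector::Construct(
                    1024,   // nLiberateThreshold: run scan() when 1024 pointers are retired
                    8,      // nInitialThreadGuardCount: guards preallocated per thread
                    16      // nEpochCount: epoch count of the internal retired-pointer pool
                );

                // ... threads attach, use DHP-based containers, detach ...

                // at the end of main(), before cds::Terminate():
                cds::gc::dhp::GarbageCollector::Destruct();
                \endcode
            */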

            /// Destroys the DHP memory manager
            /**
                The member function destroys the DHP global object. After calling this function you may \b NOT
                use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
                at the end of your \p main(). See cds::gc::dhp for an example.
            */
            static void CDS_STDCALL Destruct();

            /// Returns a reference to the GarbageCollector instance
            /**
                If the DHP GC is not initialized, a \p not_initialized exception is thrown
            */
            static GarbageCollector&   instance()
            {
                if ( m_pManager == nullptr )
                    throw not_initialized();
                return *m_pManager;
            }

            /// Checks if the global GC object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return m_pManager != nullptr;
            }

        public:
            //@{
            /// Internal interface

            /// Allocates a guard
            details::guard_data * allocGuard()
            {
                return m_GuardPool.alloc();
            }

            /// Frees guard \p pGuard for future reuse
            void freeGuard(details::guard_data * pGuard )
            {
                m_GuardPool.free( pGuard );
            }

            /// Allocates a guard list for a thread.
            details::guard_data* allocGuardList( size_t nCount )
            {
                return m_GuardPool.allocList( nCount );
            }

            /// Frees the thread's guard list pointed to by \p pList
            void freeGuardList( details::guard_data * pList )
            {
                m_GuardPool.freeList( pList );
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the thread's array of retired pointers for deferred reclamation
            /**@anchor dhp_gc_retirePtr
            */
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *))
            {
                retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc )));
            }

            /// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
            void retirePtr( retired_ptr const& p )
            {
                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed))
                    scan();
            }
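
            /*
                Illustrative retire sketch ('Foo' and 'disposeFoo' are hypothetical; user code
                normally retires pointers through the cds::gc::DHP wrapper or ThreadGC):

                \code
                struct Foo { int nData; };
                static void disposeFoo( Foo * p ) { delete p; }

                // after pFoo has been unlinked from a container, defer its deletion:
                cds::gc::dhp::GarbageCollector::instance().retirePtr( pFoo, disposeFoo );
                // disposeFoo( pFoo ) is called later by scan(), once no guard protects pFoo
                \endcode
            */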

        protected:
            /// Liberate function
            /** @anchor dhp_gc_liberate
                The main function of the Dynamic Hazard Pointer algorithm. It tries to free retired pointers that are not
                trapped by any guard.
            */
            void scan();
            //@}

        public:
            /// Gets internal statistics
            InternalState& getInternalState(InternalState& stat) const
            {
                return stat = m_stat;
            }

            /// Checks if internal statistics are enabled
            bool              isStatisticsEnabled() const
            {
                return m_bStatEnabled;
            }

            /// Enables/disables internal statistics
            bool  enableStatistics( bool bEnable )
            {
                bool bEnabled = m_bStatEnabled;
                m_bStatEnabled = bEnable;
                return bEnabled;
            }

        private:
            GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount, size_t nEpochCount );
            ~GarbageCollector();
        };

        /// Thread GC
        /**
            To use the Dynamic Hazard Pointer reclamation schema, each thread object must be linked with an object of the ThreadGC class
            that interacts with the GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
            at the start of each thread that uses the DHP GC. Before terminating a thread linked to the DHP GC it is necessary to call
            \ref cds_threading "cds::threading::Manager::detachThread()". A per-thread sketch is given after this comment.

            The ThreadGC object maintains two lists:
            \li Thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
            \li Free guard list: the list of thread-local free guards (linked by the \p pNextFree field)
            The free guard list is a subset of the thread guard list.
        */
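        /*
            Per-thread usage sketch (illustrative only): user code does not create ThreadGC
            objects directly; cds::threading::Manager creates one when the thread is attached.

            \code
            void worker_thread()
            {
                cds::threading::Manager::attachThread();   // creates and init()s the thread's ThreadGC

                // ... work with DHP-based containers ...

                cds::threading::Manager::detachThread();   // fini()s the ThreadGC; guards return to the GC
            }
            \endcode
        */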
        class ThreadGC
        {
            GarbageCollector&        m_gc;      ///< reference to the GC singleton
            details::guard_data *    m_pList;   ///< Local list of guards owned by the thread
            details::guard_data *    m_pFree;   ///< The list of free guards from m_pList

        public:
            /// Default constructor
            ThreadGC()
                : m_gc( GarbageCollector::instance())
                , m_pList( nullptr )
                , m_pFree( nullptr )
            {}

            /// The object is not copy-constructible
            ThreadGC( ThreadGC const& ) = delete;

            /// Dtor calls fini()
            ~ThreadGC()
            {
                fini();
            }

            /// Initialization. Repeated calls are allowed
            void init()
            {
                if ( !m_pList ) {
                    m_pList =
                        m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
                }
            }

            /// Finalization. Repeated calls are allowed
            void fini()
            {
                if ( m_pList ) {
                    m_gc.freeGuardList( m_pList );
                    m_pList =
                        m_pFree = nullptr;
                }
            }

        public:
            /// Allocates a new guard
            dhp::details::guard_data* allocGuard()
            {
                assert( m_pList != nullptr );

                dhp::details::guard_data* ret;
                if ( cds_likely( m_pFree )) {
                    ret = m_pFree;
                    m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
                }
                else {
                    ret = m_gc.allocGuard();
                    ret->pThreadNext = m_pList;
                    m_pList = ret;
                }
                return ret;
            }

            /// Frees guard \p g
            void freeGuard( dhp::details::guard_data* g )
            {
                assert( m_pList != nullptr );
                if ( cds_likely( g )) {
                    g->pPost.store( nullptr, atomics::memory_order_relaxed );
                    g->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                    m_pFree = g;
                }
            }

            /// Guard array
            template <size_t Count>
            using guard_array = dhp::details::guard_data* [Count];

            /// Initializes guard array \p arr
            template <size_t Count>
            void allocGuard( guard_array<Count>& arr )
            {
                assert( m_pList != nullptr );
                size_t nCount = 0;

                while ( m_pFree && nCount < Count ) {
                    arr[nCount] = m_pFree;
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                    ++nCount;
                }

                while ( nCount < Count ) {
                    dhp::details::guard_data*& g = arr[nCount];
                    g = m_gc.allocGuard();
                    g->pThreadNext = m_pList;
                    m_pList = g;
                    ++nCount;
                }
            }

            /// Frees guard array \p arr
            template <size_t Count>
            void freeGuard( guard_array<Count>& arr )
            {
                assert( m_pList != nullptr );

                details::guard_data* first = nullptr;
                details::guard_data* last;
                for ( size_t i = 0; i < Count; ++i ) {
                    details::guard_data* guard = arr[i];
                    if ( cds_likely( guard )) {
                        guard->pPost.store( nullptr, atomics::memory_order_relaxed );
                        if ( first )
                            last->pNextFree.store( guard, atomics::memory_order_relaxed );
                        else
                            first = guard;
                        last = guard;
                    }
                }
                if ( first ) {
                    last->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                    m_pFree = first;
                }
            }
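
            /*
                Illustrative multi-guard sketch ('tgc' stands for the current thread's ThreadGC;
                'pFirst' and 'pSecond' are hypothetical pointers to protect):

                \code
                ThreadGC::guard_array<2> guards;
                tgc.allocGuard( guards );      // fill both slots from the thread's free list
                guards[0]->set( pFirst );      // protect two pointers at once
                guards[1]->set( pSecond );
                // ... use pFirst / pSecond ...
                tgc.freeGuard( guards );       // clear and return both guards
                \endcode
            */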

            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *))
            {
                m_gc.retirePtr( p, pFunc );
            }

            /// Runs the retiring cycle
            void scan()
            {
                m_gc.scan();
            }
        };
    }   // namespace dhp
}}  // namespace cds::gc
//@endcond

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(pop)
#endif

#endif // #ifndef CDSLIB_GC_DETAILS_DHP_H