Fixed memor orderding (tsan)
[libcds.git] / cds / gc / details / dhp.h
1 //$$CDS-header$$
2
3 #ifndef CDSLIB_GC_DETAILS_DHP_H
4 #define CDSLIB_GC_DETAILS_DHP_H
5
6 #include <mutex>        // unique_lock
7 #include <cds/algo/atomic.h>
8 #include <cds/gc/details/retired_ptr.h>
9 #include <cds/details/aligned_allocator.h>
10 #include <cds/details/allocator.h>
11 #include <cds/sync/spinlock.h>
12
13 #if CDS_COMPILER == CDS_COMPILER_MSVC
14 #   pragma warning(push)
15 #   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
16 #endif
17
18 //@cond
19 namespace cds { namespace gc {
20
21     /// Dynamic Hazard Pointer reclamation schema
22     /**
23         The cds::gc::dhp namespace and its members are internal representation of the GC and should not be used directly.
24         Use cds::gc::DHP class in your code.
25
26         Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of DHP schema is
27         GC class and its nested classes. Before use any DHP-related class you must initialize DHP garbage collector
28         by contructing cds::gc::DHP object in beginning of your main().
29         See cds::gc::DHP class for explanation.
30
31         \par Implementation issues
32             The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
33             It seems that this solution should not introduce significant performance bottleneck, because each thread has its own set
34             of guards allocated from the global list of free guards and the access to the global list is occurred only when
35             all thread's guard is busy. In this case the thread allocates a next block of guards from the global list.
36             Guards allocated for the thread is push back to the global list only when the thread terminates.
37     */
38     namespace dhp {
39
40         // Forward declarations
41         class Guard;
42         template <size_t Count> class GuardArray;
43         class ThreadGC;
44         class GarbageCollector;
45
46         /// Retired pointer type
47         typedef cds::gc::details::retired_ptr retired_ptr;
48
49         using cds::gc::details::free_retired_ptr_func;
50
51         /// Details of Dynamic Hazard Pointer algorithm
52         namespace details {
53
54             // Forward declaration
55             class liberate_set;
56
57             /// Retired pointer buffer node
58             struct retired_ptr_node {
59                 retired_ptr         m_ptr   ;   ///< retired pointer
60                 atomics::atomic<retired_ptr_node *>  m_pNext     ;   ///< next retired pointer in buffer
61                 atomics::atomic<retired_ptr_node *>  m_pNextFree ;   ///< next item in free list of \p retired_ptr_node
62             };
63
64             /// Internal guard representation
65             struct guard_data {
66                 typedef void * guarded_ptr;  ///< type of value guarded
67
68                 atomics::atomic<guarded_ptr>  pPost;       ///< pointer guarded
69                 atomics::atomic<guard_data *> pGlobalNext; ///< next item of global list of allocated guards
70                 atomics::atomic<guard_data *> pNextFree;   ///< pointer to the next item in global or thread-local free-list
71
72                 guard_data * pThreadNext; ///< next item of thread's local list of guards
73
74                 guard_data() CDS_NOEXCEPT
75                     : pPost( nullptr )
76                     , pGlobalNext( nullptr )
77                     , pNextFree( nullptr )
78                     , pThreadNext( nullptr )
79                 {}
80
81                 void init() CDS_NOEXCEPT
82                 {
83                     pPost.store( nullptr, atomics::memory_order_relaxed );
84                 }
85
86                 /// Checks if the guard is free, that is, it does not contain any pointer guarded
87                 bool isFree() const CDS_NOEXCEPT
88                 {
89                     return pPost.load( atomics::memory_order_acquire ) == nullptr;
90                 }
91             };
92
93             /// Guard allocator
94             template <class Alloc = CDS_DEFAULT_ALLOCATOR>
95             class guard_allocator
96             {
97                 cds::details::Allocator<details::guard_data>  m_GuardAllocator    ;   ///< guard allocator
98
99                 atomics::atomic<guard_data *>  m_GuardList;     ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
100                 atomics::atomic<guard_data *>  m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
101                 cds::sync::spin                m_freeListLock;  ///< Access to m_FreeGuardList
102
103                 /*
104                     Unfortunately, access to the list of free guard is lock-based.
105                     Lock-free manipulations with guard free-list are ABA-prone.
106                     TODO: working with m_FreeGuardList in lock-free manner.
107                 */
108
109             private:
110                 /// Allocates new guard from the heap. The function uses aligned allocator
111                 guard_data * allocNew()
112                 {
113                     //TODO: the allocator should make block allocation
114
115                     details::guard_data * pGuard = m_GuardAllocator.New();
116
117                     // Link guard to the list
118                     // m_GuardList is an accumulating list and it cannot support concurrent deletion,
119                     // so, ABA problem is impossible for it
120                     details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
121                     do {
122                         pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
123                         // pHead is changed by compare_exchange_weak
124                     } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
125
126                     pGuard->init();
127                     return pGuard;
128                 }
129
130             public:
131                 // Default ctor
132                 guard_allocator() CDS_NOEXCEPT
133                     : m_GuardList( nullptr )
134                     , m_FreeGuardList( nullptr )
135                 {}
136
137                 // Destructor
138                 ~guard_allocator()
139                 {
140                     guard_data * pNext;
141                     for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
142                         pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
143                         m_GuardAllocator.Delete( pData );
144                     }
145                 }
146
147                 /// Allocates a guard from free list or from heap if free list is empty
148                 guard_data * alloc()
149                 {
150                     // Try to pop a guard from free-list
151                     details::guard_data * pGuard;
152
153                     {
154                         std::unique_lock<cds::sync::spin> al( m_freeListLock );
155                         pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
156                         if ( pGuard )
157                             m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
158                     }
159                     if ( !pGuard )
160                         return allocNew();
161
162                     pGuard->init();
163                     return pGuard;
164                 }
165
166                 /// Frees guard \p pGuard
167                 /**
168                     The function places the guard \p pGuard into free-list
169                 */
170                 void free( guard_data * pGuard ) CDS_NOEXCEPT
171                 {
172                     pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
173
174                     std::unique_lock<cds::sync::spin> al( m_freeListLock );
175                     pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
176                     m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
177                 }
178
179                 /// Allocates list of guard
180                 /**
181                     The list returned is linked by guard's \p pThreadNext and \p pNextFree fields.
182
183                     cds::gc::dhp::ThreadGC supporting method
184                 */
185                 guard_data * allocList( size_t nCount )
186                 {
187                     assert( nCount != 0 );
188
189                     guard_data * pHead;
190                     guard_data * pLast;
191
192                     pHead =
193                         pLast = alloc();
194
195                     // The guard list allocated is private for the thread,
196                     // so, we can use relaxed memory order
197                     while ( --nCount ) {
198                         guard_data * p = alloc();
199                         pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
200                         pLast = p;
201                     }
202
203                     pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
204
205                     return pHead;
206                 }
207
208                 /// Frees list of guards
209                 /**
210                     The list \p pList is linked by guard's \p pThreadNext field.
211
212                     cds::gc::dhp::ThreadGC supporting method
213                 */
214                 void freeList( guard_data * pList ) CDS_NOEXCEPT
215                 {
216                     assert( pList != nullptr );
217
218                     guard_data * pLast = pList;
219                     while ( pLast->pThreadNext ) {
220                         pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
221                         guard_data * p;
222                         pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
223                         pLast = p;
224                     }
225
226                     std::unique_lock<cds::sync::spin> al( m_freeListLock );
227                     pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
228                     m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
229                 }
230
231                 /// Returns the list's head of guards allocated
232                 guard_data * begin() CDS_NOEXCEPT
233                 {
234                     return m_GuardList.load(atomics::memory_order_acquire);
235                 }
236             };
237
238             /// Retired pointer buffer
239             /**
240                 The buffer of retired nodes ready for liberating.
241                 When size of buffer exceeds a threshold the GC calls \p scan() procedure to free
242                 retired nodes.
243             */
244             class retired_ptr_buffer
245             {
246                 atomics::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
247                 atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count
248
249             public:
250                 retired_ptr_buffer() CDS_NOEXCEPT
251                     : m_pHead( nullptr )
252                     , m_nItemCount(0)
253                 {}
254
255                 ~retired_ptr_buffer() CDS_NOEXCEPT
256                 {
257                     assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
258                 }
259
260                 /// Pushes new node into the buffer. Returns current buffer size
261                 size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
262                 {
263                     retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
264                     do {
265                         node.m_pNext.store( pHead, atomics::memory_order_relaxed );
266                         // pHead is changed by compare_exchange_weak
267                     } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
268
269                     return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
270                 }
271
272                 /// Pushes [pFirst, pLast] list linked by pNext field.
273                 size_t push_list( retired_ptr_node* pFirst, retired_ptr_node* pLast, size_t nSize )
274                 {
275                     assert( pFirst );
276                     assert( pLast );
277
278                     retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
279                     do {
280                         pLast->m_pNext.store( pHead, atomics::memory_order_relaxed );
281                         // pHead is changed by compare_exchange_weak
282                     } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ) );
283
284                     return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + 1;
285                 }
286
287                 /// Result of \ref dhp_gc_privatve "privatize" function.
288                 /**
289                     The \p privatize function returns retired node list as \p first and the size of that list as \p second.
290                 */
291                 typedef std::pair<retired_ptr_node *, size_t> privatize_result;
292
293                 /// Gets current list of retired pointer and clears the list
294                 /**@anchor dhp_gc_privatve
295                 */
296                 privatize_result privatize() CDS_NOEXCEPT
297                 {
298                     privatize_result res;
299                     res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
300
301                     // Item counter is needed only as a threshold for \p scan() function
302                     // So, we may clear the item counter without synchronization with m_pHead
303                     res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
304                     return res;
305                 }
306
307                 /// Returns current size of buffer (approximate)
308                 size_t size() const CDS_NOEXCEPT
309                 {
310                     return m_nItemCount.load(atomics::memory_order_relaxed);
311                 }
312             };
313
314             /// Pool of retired pointers
315             /**
316                 The class acts as an allocator of retired node.
317                 Retired pointers are linked in the lock-free list.
318             */
319             template <class Alloc = CDS_DEFAULT_ALLOCATOR>
320             class retired_ptr_pool {
321                 /// Pool item
322                 typedef retired_ptr_node    item;
323
324                 /// Count of items in block
325                 static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
326
327                 /// Pool block
328                 struct block {
329                     atomics::atomic<block *> pNext;     ///< next block
330                     item        items[m_nItemPerBlock]; ///< item array
331                 };
332
333                 atomics::atomic<block *> m_pBlockListHead;   ///< head of of allocated block list
334
335                 // To solve ABA problem we use epoch-based approach
336                 static const unsigned int c_nEpochCount = 4;    ///< Max epoch count
337                 atomics::atomic<unsigned int> m_nCurEpoch;      ///< Current epoch
338                 atomics::atomic<item *>  m_pEpochFree[c_nEpochCount];   ///< List of free item per epoch
339                 atomics::atomic<item *>  m_pGlobalFreeHead;     ///< Head of unallocated item list
340
341                 cds::details::Allocator< block, Alloc > m_BlockAllocator    ;   ///< block allocator
342
343             private:
344                 void allocNewBlock()
345                 {
346                     // allocate new block
347                     block * pNew = m_BlockAllocator.New();
348
349                     // link items within the block
350                     item * pLastItem = pNew->items + m_nItemPerBlock - 1;
351                     for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
352                         pItem->m_pNextFree.store( pItem + 1, atomics::memory_order_release );
353                         CDS_STRICT_DO( pItem->m_pNext.store( nullptr, atomics::memory_order_relaxed ));
354                     }
355
356                     // links new block to the block list
357                     {
358                         block * pHead = m_pBlockListHead.load(atomics::memory_order_relaxed);
359                         do {
360                             pNew->pNext.store( pHead, atomics::memory_order_relaxed );
361                             // pHead is changed by compare_exchange_weak
362                         } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_relaxed, atomics::memory_order_relaxed ));
363                     }
364
365                     // links block's items to the free list
366                     {
367                         item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_relaxed);
368                         do {
369                             pLastItem->m_pNextFree.store( pHead, atomics::memory_order_release );
370                             // pHead is changed by compare_exchange_weak
371                         } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
372                     }
373                 }
374
375                 unsigned int current_epoch() const CDS_NOEXCEPT
376                 {
377                     return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
378                 }
379
380                 unsigned int next_epoch() const CDS_NOEXCEPT
381                 {
382                     return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
383                 }
384
385             public:
386                 retired_ptr_pool()
387                     : m_pBlockListHead( nullptr )
388                     , m_nCurEpoch(0)
389                     , m_pGlobalFreeHead( nullptr )
390                 {
391                     for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
392                         m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );
393
394                     allocNewBlock();
395                 }
396
397                 ~retired_ptr_pool()
398                 {
399                     block * p;
400                     for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
401                         p = pBlock->pNext.load( atomics::memory_order_relaxed );
402                         m_BlockAllocator.Delete( pBlock );
403                     }
404                 }
405
406                 /// Increments current epoch
407                 void inc_epoch() CDS_NOEXCEPT
408                 {
409                     m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
410                 }
411
412                 /// Allocates the new retired pointer
413                 retired_ptr_node&  alloc()
414                 {
415                     unsigned int nEpoch;
416                     item * pItem;
417                     for (;;) {
418                         pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
419                         if ( !pItem )
420                             goto retry;
421                         if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, 
422                                                                          pItem->m_pNextFree.load(atomics::memory_order_acquire), 
423                                                                          atomics::memory_order_acquire, atomics::memory_order_relaxed ))
424                         { 
425                             goto success;
426                         }
427                     }
428
429                     // Epoch free list is empty
430                     // Alloc from global free list
431                 retry:
432                     pItem = m_pGlobalFreeHead.load( atomics::memory_order_relaxed );
433                     do {
434                         if ( !pItem ) {
435                             allocNewBlock();
436                             goto retry;
437                         }
438                         // pItem is changed by compare_exchange_weak
439                     } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, 
440                                                                         pItem->m_pNextFree.load(atomics::memory_order_acquire), 
441                                                                         atomics::memory_order_acquire, atomics::memory_order_relaxed ));
442
443                 success:
444                     CDS_STRICT_DO( pItem->m_pNextFree.store( nullptr, atomics::memory_order_relaxed ));
445                     return *pItem;
446                 }
447
448                 /// Allocates and initializes new retired pointer
449                 retired_ptr_node& alloc( const retired_ptr& p )
450                 {
451                     retired_ptr_node& node = alloc();
452                     node.m_ptr = p;
453                     return node;
454                 }
455
456                 /// Places the list [pHead, pTail] of retired pointers to pool (frees retired pointers)
457                 /**
458                     The list is linked on the m_pNextFree field
459                 */
460                 void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
461                 {
462                     assert( pHead != nullptr );
463                     assert( pTail != nullptr );
464
465                     unsigned int nEpoch;
466                     item * pCurHead;
467                     do {
468                         pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
469                         pTail->m_pNextFree.store( pCurHead, atomics::memory_order_release );
470                     } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
471                 }
472             };
473
474             /// Uninitialized guard
475             class guard
476             {
477                 friend class dhp::ThreadGC;
478             protected:
479                 details::guard_data * m_pGuard ;    ///< Pointer to guard data
480
481             public:
482                 /// Initialize empty guard.
483                 CDS_CONSTEXPR guard() CDS_NOEXCEPT
484                     : m_pGuard( nullptr )
485                 {}
486
487                 /// Ã‘opy-ctor is disabled
488                 guard( guard const& ) = delete;
489
490                 /// Move-ctor is disabled
491                 guard( guard&& ) = delete;
492
493                 /// Object destructor, does nothing
494                 ~guard() CDS_NOEXCEPT
495                 {}
496
497                 /// Get current guarded pointer
498                 void * get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
499                 {
500                     assert( m_pGuard != nullptr );
501                     return m_pGuard->pPost.load( order );
502                 }
503
504                 /// Guards pointer \p p
505                 void set( void * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
506                 {
507                     assert( m_pGuard != nullptr );
508                     m_pGuard->pPost.store( p, order );
509                 }
510
511                 /// Clears the guard
512                 void clear( atomics::memory_order order = atomics::memory_order_relaxed ) CDS_NOEXCEPT
513                 {
514                     assert( m_pGuard != nullptr );
515                     m_pGuard->pPost.store( nullptr, order );
516                 }
517
518                 /// Guards pointer \p p
519                 template <typename T>
520                 T * operator =(T * p) CDS_NOEXCEPT
521                 {
522                     set( reinterpret_cast<void *>( const_cast<T *>(p) ));
523                     return p;
524                 }
525
526                 std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
527                 {
528                     clear();
529                     return nullptr;
530                 }
531
532             public: // for ThreadGC.
533                 /*
534                     GCC cannot compile code for template versions of ThreasGC::allocGuard/freeGuard,
535                     the compiler produces error: \91cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard\92 is protected
536                     despite the fact that ThreadGC is declared as friend for guard class.
537                     Therefore, we have to add set_guard/get_guard public functions
538                 */
539                 /// Set guard data
540                 void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
541                 {
542                     assert( m_pGuard == nullptr );
543                     m_pGuard = pGuard;
544                 }
545
546                 /// Get current guard data
547                 details::guard_data * get_guard() CDS_NOEXCEPT
548                 {
549                     return m_pGuard;
550                 }
551                 /// Get current guard data
552                 details::guard_data * get_guard() const CDS_NOEXCEPT
553                 {
554                     return m_pGuard;
555                 }
556
557                 details::guard_data * release_guard() CDS_NOEXCEPT
558                 {
559                     details::guard_data * p = m_pGuard;
560                     m_pGuard = nullptr;
561                     return p;
562                 }
563
564                 bool is_initialized() const
565                 {
566                     return m_pGuard != nullptr;
567                 }
568             };
569
570         } // namespace details
571
572         /// Guard
573         /**
574             This class represents auto guard: ctor allocates a guard from guard pool,
575             dtor returns the guard back to the pool of free guard.
576         */
577         class Guard: public details::guard
578         {
579             typedef details::guard    base_class;
580             friend class ThreadGC;
581         public:
582             /// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
583             Guard(); // inline in dhp_impl.h
584
585             /// Returns guard allocated back to pool of free guards
586             ~Guard();    // inline in dhp_impl.h
587
588             /// Guards pointer \p p
589             template <typename T>
590             T * operator =(T * p) CDS_NOEXCEPT
591             {
592                 return base_class::operator =<T>( p );
593             }
594
595             std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
596             {
597                 return base_class::operator =(nullptr);
598             }
599         };
600
601         /// Array of guards
602         /**
603             This class represents array of auto guards: ctor allocates \p Count guards from guard pool,
604             dtor returns the guards allocated back to the pool.
605         */
606         template <size_t Count>
607         class GuardArray
608         {
609             details::guard      m_arr[Count]    ;    ///< array of guard
610             const static size_t c_nCapacity = Count ;   ///< Array capacity (equal to \p Count template parameter)
611
612         public:
613             /// Rebind array for other size \p OtherCount
614             template <size_t OtherCount>
615             struct rebind {
616                 typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
617             };
618
619         public:
620             /// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
621             GuardArray();    // inline in dhp_impl.h
622
623             /// The object is not copy-constructible
624             GuardArray( GuardArray const& ) = delete;
625
626             /// The object is not move-constructible
627             GuardArray( GuardArray&& ) = delete;
628
629             /// Returns guards allocated back to pool
630             ~GuardArray();    // inline in dh_impl.h
631
632             /// Returns the capacity of array
633             CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
634             {
635                 return c_nCapacity;
636             }
637
638             /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
639             details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
640             {
641                 assert( nIndex < capacity() );
642                 return m_arr[nIndex];
643             }
644
645             /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
646             const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
647             {
648                 assert( nIndex < capacity() );
649                 return m_arr[nIndex];
650             }
651
652             /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
653             template <typename T>
654             void set( size_t nIndex, T * p ) CDS_NOEXCEPT
655             {
656                 assert( nIndex < capacity() );
657                 m_arr[nIndex].set( p );
658             }
659
660             /// Clears (sets to \p nullptr) the guard \p nIndex
661             void clear( size_t nIndex ) CDS_NOEXCEPT
662             {
663                 assert( nIndex < capacity() );
664                 m_arr[nIndex].clear();
665             }
666
667             /// Clears all guards in the array
668             void clearAll() CDS_NOEXCEPT
669             {
670                 for ( size_t i = 0; i < capacity(); ++i )
671                     clear(i);
672             }
673         };
674
675         /// Memory manager (Garbage collector)
676         class CDS_EXPORT_API GarbageCollector
677         {
678         private:
679             friend class ThreadGC;
680
681             /// Internal GC statistics
682             struct internal_stat
683             {
684                 atomics::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
685                 atomics::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guard
686
687                 internal_stat()
688                     : m_nGuardCount(0)
689                     , m_nFreeGuardCount(0)
690                 {}
691             };
692
693         public:
694             /// Exception "No GarbageCollector object is created"
695             class not_initialized : public std::runtime_error
696             {
697             public:
698                 //@cond
699                 not_initialized()
700                     : std::runtime_error( "Global DHP GarbageCollector is not initialized" )
701                 {}
702                 //@endcond
703             };
704
705             /// Internal GC statistics
706             struct InternalState
707             {
708                 size_t m_nGuardCount       ;   ///< Total guard count
709                 size_t m_nFreeGuardCount   ;   ///< Count of free guard
710
711                 //@cond
712                 InternalState()
713                     : m_nGuardCount(0)
714                     , m_nFreeGuardCount(0)
715                 {}
716
717                 InternalState& operator =( internal_stat const& s )
718                 {
719                     m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
720                     m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);
721
722                     return *this;
723                 }
724                 //@endcond
725             };
726
727         private:
728             static GarbageCollector * m_pManager    ;   ///< GC global instance
729
730             details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
731             details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
732             details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating
733
734             atomics::atomic<size_t>      m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call \p scan()
735             const size_t    m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
736
737             internal_stat   m_stat  ;   ///< Internal statistics
738             bool            m_bStatEnabled  ;   ///< Internal Statistics enabled
739
740         public:
741             /// Initializes DHP memory manager singleton
742             /**
743                 This member function creates and initializes DHP global object.
744                 The function should be called before using CDS data structure based on cds::gc::DHP GC. Usually,
745                 this member function is called in the \p main() function. See cds::gc::dhp for example.
746                 After calling of this function you may use CDS data structures based on cds::gc::DHP.
747
748                 \par Parameters
749                 \li \p nLiberateThreshold - \p scan() threshold. When count of retired pointers reaches this value,
750                     the \ref dhp_gc_liberate "scan()" member function would be called for freeing retired pointers.
751                     If \p nLiberateThreshold <= 1, \p scan() would called after each \ref dhp_gc_retirePtr "retirePtr" call.
752                 \li \p nInitialThreadGuardCount - initial count of guard allocated for ThreadGC. When a thread
753                     is initialized the GC allocates local guard pool for the thread from common guard pool.
754                     By perforce the local thread's guard pool is grown automatically from common pool.
755                     When the thread terminated its guard pool is backed to common GC's pool.
756
757             */
758             static void CDS_STDCALL Construct(
759                 size_t nLiberateThreshold = 1024
760                 , size_t nInitialThreadGuardCount = 8
761             );
762
763             /// Destroys DHP memory manager
764             /**
765                 The member function destroys DHP global object. After calling of this function you may \b NOT
766                 use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
767                 at the end of your \p main(). See cds::gc::dhp for example.
768             */
769             static void CDS_STDCALL Destruct();
770
771             /// Returns pointer to GarbageCollector instance
772             /**
773                 If DHP GC is not initialized, \p not_initialized exception is thrown
774             */
775             static GarbageCollector&   instance()
776             {
777                 if ( m_pManager == nullptr )
778                     throw not_initialized();
779                 return *m_pManager;
780             }
781
782             /// Checks if global GC object is constructed and may be used
783             static bool isUsed() CDS_NOEXCEPT
784             {
785                 return m_pManager != nullptr;
786             }
787
788         public:
789             //@{
790             /// Internal interface
791
792             /// Allocates a guard
793             details::guard_data * allocGuard()
794             {
795                 return m_GuardPool.alloc();
796             }
797
798             /// Frees guard \p g for reusing in future
799             void freeGuard(details::guard_data * pGuard )
800             {
801                 m_GuardPool.free( pGuard );
802             }
803
804             /// Allocates guard list for a thread.
805             details::guard_data * allocGuardList( size_t nCount )
806             {
807                 return m_GuardPool.allocList( nCount );
808             }
809
810             /// Frees thread's guard list pointed by \p pList
811             void freeGuardList( details::guard_data * pList )
812             {
813                 m_GuardPool.freeList( pList );
814             }
815
816             /// Places retired pointer \p and its deleter \p pFunc into thread's array of retired pointer for deferred reclamation
817             /**@anchor dhp_gc_retirePtr
818             */
819             template <typename T>
820             void retirePtr( T * p, void (* pFunc)(T *) )
821             {
822                 retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
823             }
824
825             /// Places retired pointer \p into thread's array of retired pointer for deferred reclamation
826             void retirePtr( retired_ptr const& p )
827             {
828                 if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
829                     scan();
830             }
831
832         protected:
833             /// Liberate function
834             /** @anchor dhp_gc_liberate
835                 The main function of Dynamic Hazard Pointer algorithm. It tries to free retired pointers if they are not
836                 trapped by any guard.
837             */
838             void scan();
839             //@}
840
841         public:
842             /// Get internal statistics
843             InternalState& getInternalState(InternalState& stat) const
844             {
845                 return stat = m_stat;
846             }
847
848             /// Checks if internal statistics enabled
849             bool              isStatisticsEnabled() const
850             {
851                 return m_bStatEnabled;
852             }
853
854             /// Enables/disables internal statistics
855             bool  enableStatistics( bool bEnable )
856             {
857                 bool bEnabled = m_bStatEnabled;
858                 m_bStatEnabled = bEnable;
859                 return bEnabled;
860             }
861
862         private:
863             GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
864             ~GarbageCollector();
865         };
866
867         /// Thread GC
868         /**
869             To use Dynamic Hazard Pointer reclamation schema each thread object must be linked with the object of ThreadGC class
870             that interacts with GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
871             on the start of each thread that uses DHP GC. Before terminating the thread linked to DHP GC it is necessary to call
872             \ref cds_threading "cds::threading::Manager::detachThread()".
873
874             The ThreadGC object maintains two list:
875             \li Thread guard list: the list of thread-local guards (linked by \p pThreadNext field)
876             \li Free guard list: the list of thread-local free guards (linked by \p pNextFree field)
877             Free guard list is a subset of thread guard list.
878         */
879         class ThreadGC
880         {
881             GarbageCollector&   m_gc    ;   ///< reference to GC singleton
882             details::guard_data *    m_pList ;   ///< Local list of guards owned by the thread
883             details::guard_data *    m_pFree ;   ///< The list of free guard from m_pList
884
885         public:
886             /// Default constructor
887             ThreadGC()
888                 : m_gc( GarbageCollector::instance() )
889                 , m_pList( nullptr )
890                 , m_pFree( nullptr )
891             {}
892
893             /// The object is not copy-constructible
894             ThreadGC( ThreadGC const& ) = delete;
895
896             /// Dtor calls fini()
897             ~ThreadGC()
898             {
899                 fini();
900             }
901
902             /// Initialization. Repeat call is available
903             void init()
904             {
905                 if ( !m_pList ) {
906                     m_pList =
907                         m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
908                 }
909             }
910
911             /// Finalization. Repeat call is available
912             void fini()
913             {
914                 if ( m_pList ) {
915                     m_gc.freeGuardList( m_pList );
916                     m_pList =
917                         m_pFree = nullptr;
918                 }
919             }
920
921         public:
922             /// Initializes guard \p g
923             void allocGuard( dhp::details::guard& g )
924             {
925                 assert( m_pList != nullptr );
926                 if ( !g.m_pGuard ) {
927                     if ( m_pFree ) {
928                         g.m_pGuard = m_pFree;
929                         m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
930                     }
931                     else {
932                         g.m_pGuard = m_gc.allocGuard();
933                         g.m_pGuard->pThreadNext = m_pList;
934                         m_pList = g.m_pGuard;
935                     }
936                 }
937             }
938
939             /// Frees guard \p g
940             void freeGuard( dhp::details::guard& g )
941             {
942                 assert( m_pList != nullptr );
943                 if ( g.m_pGuard ) {
944                     g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
945                     g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
946                     m_pFree = g.m_pGuard;
947                     g.m_pGuard = nullptr;
948                 }
949             }
950
951             /// Initializes guard array \p arr
952             template <size_t Count>
953             void allocGuard( GuardArray<Count>& arr )
954             {
955                 assert( m_pList != nullptr );
956                 size_t nCount = 0;
957
958                 while ( m_pFree && nCount < Count ) {
959                     arr[nCount].set_guard( m_pFree );
960                     m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
961                     ++nCount;
962                 }
963
964                 while ( nCount < Count ) {
965                     details::guard& g = arr[nCount++];
966                     g.set_guard( m_gc.allocGuard() );
967                     g.get_guard()->pThreadNext = m_pList;
968                     m_pList = g.get_guard();
969                 }
970             }
971
972             /// Frees guard array \p arr
973             template <size_t Count>
974             void freeGuard( GuardArray<Count>& arr )
975             {
976                 assert( m_pList != nullptr );
977
978                 details::guard_data * pGuard;
979                 for ( size_t i = 0; i < Count - 1; ++i ) {
980                     pGuard = arr[i].get_guard();
981                     pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
982                     pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
983                 }
984                 pGuard = arr[Count-1].get_guard();
985                 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
986                 pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
987                 m_pFree = arr[0].get_guard();
988             }
989
990             /// Places retired pointer \p and its deleter \p pFunc into list of retired pointer for deferred reclamation
991             template <typename T>
992             void retirePtr( T * p, void (* pFunc)(T *) )
993             {
994                 m_gc.retirePtr( p, pFunc );
995             }
996
997             /// Run retiring cycle
998             void scan()
999             {
1000                 m_gc.scan();
1001             }
1002         };
1003     }   // namespace dhp
1004 }}  // namespace cds::gc
1005 //@endcond
1006
1007 #if CDS_COMPILER == CDS_COMPILER_MSVC
1008 #   pragma warning(pop)
1009 #endif
1010
1011 #endif // #ifndef CDSLIB_GC_DETAILS_DHP_H