/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

// Dynamic Hazard Pointer memory manager implementation
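//
// Outline of the scan() algorithm implemented below: threads push retired
// pointers into a shared buffer (m_RetiredBuffer). scan() privatizes that
// buffer, hashes every retired node into a liberate_set, removes the nodes
// whose pointers are still protected by an active guard, and frees the rest.
// Nodes that are still guarded are pushed back into the buffer to be retried
// on the next scan() pass.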

#include <algorithm>   // std::fill
#include <functional>  // std::hash

#include <cds/gc/details/dhp.h>
#include <cds/algo/int_algo.h>

namespace cds { namespace gc { namespace dhp {

    namespace details {

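        // Hash set of retired pointers used by one scan() pass.
        // Buckets are chained through retired_ptr_node::m_pNext; nodes that
        // hold the same raw pointer are chained through m_pNextFree, so each
        // bucket entry is the head of a duplicate list. The bucket count is a
        // power of two, which lets bucket() mask the hash instead of taking a
        // modulus.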
        class liberate_set {
            typedef retired_ptr_node *  item_type;
            typedef cds::details::Allocator<item_type, CDS_DEFAULT_ALLOCATOR>   allocator_type;

            size_t const m_nBucketCount;
            item_type *  m_Buckets;

            item_type&  bucket( retired_ptr_node& node ) const
            {
                return bucket( node.m_ptr.m_p );
            }
            item_type&  bucket( guard_data::guarded_ptr p ) const
            {
                return m_Buckets[ std::hash<guard_data::guarded_ptr>()( p ) & (m_nBucketCount - 1) ];
            }

        public:
            liberate_set( size_t nBucketCount )
                : m_nBucketCount( nBucketCount )
            {
                assert( nBucketCount > 0 );
                assert( (nBucketCount & (nBucketCount - 1)) == 0 );

                m_Buckets = allocator_type().NewArray( nBucketCount );
                std::fill( m_Buckets, m_Buckets + nBucketCount, nullptr );
            }

            ~liberate_set()
            {
                allocator_type().Delete( m_Buckets, m_nBucketCount );
            }

            void insert( retired_ptr_node& node )
            {
                node.m_pNext.store( nullptr, atomics::memory_order_relaxed );

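                // Search the bucket chain for a node holding the same pointer.
                // If one is found, push this node onto that node's duplicate
                // list (m_pNextFree) instead of extending the bucket chain.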
                item_type& refBucket = bucket( node );
                if ( refBucket ) {
                    item_type p = refBucket;
                    do {
                        if ( p->m_ptr.m_p == node.m_ptr.m_p ) {
                            assert( node.m_pNextFree.load( atomics::memory_order_relaxed ) == nullptr );

                            node.m_pNextFree.store( p->m_pNextFree.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
                            p->m_pNextFree.store( &node, atomics::memory_order_relaxed );
                            return;
                        }
                        p = p->m_pNext.load( atomics::memory_order_relaxed );
                    } while ( p );

                    node.m_pNext.store( refBucket, atomics::memory_order_relaxed );
                }
                refBucket = &node;
            }

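            // Removes the node holding ptr from its bucket and returns it; the
            // returned node is the head of the duplicate list (m_pNextFree) of
            // all retired nodes with that pointer. Returns nullptr if ptr is
            // not in the set.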
            item_type erase( guard_data::guarded_ptr ptr )
            {
                item_type& refBucket = bucket( ptr );
                item_type p = refBucket;
                item_type pPrev = nullptr;

                while ( p ) {
                    if ( p->m_ptr.m_p == ptr ) {
                        if ( pPrev )
                            pPrev->m_pNext.store( p->m_pNext.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
                        else
                            refBucket = p->m_pNext.load( atomics::memory_order_relaxed );
                        p->m_pNext.store( nullptr, atomics::memory_order_relaxed );
                        return p;
                    }
                    pPrev = p;
                    p = p->m_pNext.load( atomics::memory_order_relaxed );
                }

                return nullptr;
            }

            typedef std::pair<item_type, item_type>     list_range;

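            // Frees every pointer remaining in the set and links all of its
            // node descriptors into a single list chained on m_pNextFree.
            // Returns the [first, last] range of that list so the caller can
            // hand the descriptors back to the retired-node allocator in one
            // free_range() call.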
            list_range free_all()
            {
                item_type pTail = nullptr;
                list_range ret = std::make_pair( pTail, pTail );

                item_type const * pEndBucket = m_Buckets + m_nBucketCount;
                for ( item_type * ppBucket = m_Buckets; ppBucket < pEndBucket; ++ppBucket ) {
                    item_type pBucket = *ppBucket;
                    if ( pBucket ) {
                        if ( !ret.first )
                            ret.first = pBucket;
                        else
                            pTail->m_pNextFree.store( pBucket, atomics::memory_order_relaxed );

                        pTail = pBucket;
                        for (;;) {
                            item_type pNext = pTail->m_pNext.load( atomics::memory_order_relaxed );
                            pTail->m_ptr.free();
                            pTail->m_pNext.store( nullptr, atomics::memory_order_relaxed );

                            while ( pTail->m_pNextFree.load( atomics::memory_order_relaxed )) {
                                pTail = pTail->m_pNextFree.load( atomics::memory_order_relaxed );
                                pTail->m_ptr.free();
                                pTail->m_pNext.store( nullptr, atomics::memory_order_relaxed );
                            }

                            if ( pNext ) {
                                pTail->m_pNextFree.store( pNext, atomics::memory_order_relaxed );
                                pTail = pNext;
                            }
                            else
                                break;
                        }
                    }
                }

                if ( pTail )
                    pTail->m_pNextFree.store( nullptr, atomics::memory_order_relaxed );
                ret.second = pTail;
                return ret;
            }
        };
    } // namespace details

    GarbageCollector * GarbageCollector::m_pManager = nullptr;

    void CDS_STDCALL GarbageCollector::Construct(
        size_t nLiberateThreshold
        , size_t nInitialThreadGuardCount
        , size_t nEpochCount
    )
    {
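        // Note: the check below is unsynchronized, so Construct() is expected
        // to be called from a single thread before any DHP-based container is used.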
        if ( !m_pManager ) {
            m_pManager = new GarbageCollector( nLiberateThreshold, nInitialThreadGuardCount, nEpochCount );
        }
    }

    void CDS_STDCALL GarbageCollector::Destruct()
    {
        delete m_pManager;
        m_pManager = nullptr;
    }
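
    // Client code normally does not call Construct()/Destruct() directly; they
    // are invoked by the cds::gc::DHP wrapper. A minimal usage sketch, assuming
    // the standard libcds initialization sequence:
    //
    //      cds::Initialize();
    //      {
    //          cds::gc::DHP dhpGC;   // calls GarbageCollector::Construct()
    //          cds::threading::Manager::attachThread();
    //          // ... use DHP-based containers ...
    //      }                         // ~DHP() calls GarbageCollector::Destruct()
    //      cds::Terminate();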

    GarbageCollector::GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount, size_t nEpochCount )
        : m_nLiberateThreshold( nLiberateThreshold ? nLiberateThreshold : 1024 )
        , m_nInitialThreadGuardCount( nInitialThreadGuardCount ? nInitialThreadGuardCount : 8 )
        , m_RetiredAllocator( static_cast<unsigned int>( nEpochCount ? nEpochCount : 16 ))
        , m_bStatEnabled( false )
    {}

    GarbageCollector::~GarbageCollector()
    {
        scan();
    }

    void GarbageCollector::scan()
    {
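        // Privatize the shared retired-pointer buffer: after this call the list
        // headed by retiredList.first (retiredList.second is its length)
        // belongs to this thread alone.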
        details::retired_ptr_buffer::privatize_result retiredList = m_RetiredBuffer.privatize();
        if ( retiredList.first ) {

            size_t nLiberateThreshold = m_nLiberateThreshold.load( atomics::memory_order_relaxed );
            details::liberate_set set( beans::ceil2( retiredList.second > nLiberateThreshold ? retiredList.second : nLiberateThreshold ));

            // Insert all privatized retired pointers into the liberate set
            size_t nRetiredCount = 0;
            details::retired_ptr_node * pHead = retiredList.first;
            while ( pHead ) {
                details::retired_ptr_node * pNext = pHead->m_pNext.load( atomics::memory_order_relaxed );
                pHead->m_pNextFree.store( nullptr, atomics::memory_order_relaxed );
                set.insert( *pHead );
                pHead = pNext;
                ++nRetiredCount;
            }

            // Liberate cycle: remove from the set every pointer that is still
            // protected by an active guard

            details::retired_ptr_node * pBusyFirst = nullptr;
            details::retired_ptr_node * pBusyLast = nullptr;
            size_t nBusyCount = 0;

            for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load( atomics::memory_order_acquire ))
            {
                // get guarded pointer
                details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load( atomics::memory_order_acquire );

                if ( valGuarded ) {
                    details::retired_ptr_node * pRetired = set.erase( valGuarded );
                    if ( pRetired ) {
                        // The retired pointer is still guarded.
                        // pRetired is the head of the list of retired nodes whose
                        // m_ptr.m_p fields are equal; that list is linked through
                        // m_pNextFree. Append the whole list to [pBusyFirst, pBusyLast],
                        // relinking it on m_pNext.

                        if ( pBusyLast )
                            pBusyLast->m_pNext.store( pRetired, atomics::memory_order_relaxed );
                        else
                            pBusyFirst = pRetired;
                        pBusyLast = pRetired;
                        ++nBusyCount;
                        details::retired_ptr_node * p = pBusyLast->m_pNextFree.load( atomics::memory_order_relaxed );
                        while ( p != nullptr ) {
                            pBusyLast->m_pNext.store( p, atomics::memory_order_relaxed );
                            pBusyLast = p;
                            ++nBusyCount;
                            // advance along the m_pNextFree chain; without this
                            // step the loop would never terminate
                            p = p->m_pNextFree.load( atomics::memory_order_relaxed );
                        }
                    }
                }
            }

            // Put the still-guarded list [pBusyFirst, pBusyLast] back into m_RetiredBuffer
            if ( pBusyFirst )
                m_RetiredBuffer.push_list( pBusyFirst, pBusyLast, nBusyCount );

            // Free all remaining retired pointers
            details::liberate_set::list_range range = set.free_all();

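            // Advance the retired-node allocator's epoch before the freed node
            // descriptors are returned to it below.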
            m_RetiredAllocator.inc_epoch();

            if ( range.first ) {
                assert( range.second != nullptr );
                m_RetiredAllocator.free_range( range.first, range.second );
            }
            else if ( nRetiredCount >= nLiberateThreshold ) {
                // This scan() pass freed no retired pointers; double the scan() threshold
                m_nLiberateThreshold.compare_exchange_strong( nLiberateThreshold, nLiberateThreshold * 2, atomics::memory_order_release, atomics::memory_order_relaxed );
            }
        }
    }
}}} // namespace cds::gc::dhp