/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Dynamic Hazard Pointer memory manager implementation
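//
// A minimal usage sketch, assuming the public cds::gc::DHP front-end that
// wraps GarbageCollector::Construct()/Destruct() (illustrative only, not
// part of this file):
//
//      #include <cds/init.h>
//      #include <cds/gc/dhp.h>
//
//      int main()
//      {
//          cds::Initialize();              // initialize the library
//          {
//              cds::gc::DHP dhpGC;         // constructs the DHP GarbageCollector singleton
//              cds::threading::Manager::attachThread();
//              // ... use DHP-based containers; retired nodes are reclaimed by scan() ...
//          }                               // ~DHP() destructs the singleton
//          cds::Terminate();
//      }
//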
#include <algorithm>    // std::fill
#include <functional>   // std::hash

#include <cds/gc/details/dhp.h>
#include <cds/algo/int_algo.h>
namespace cds { namespace gc { namespace dhp {
    namespace details {

        // Hash set of retired pointers, used by the liberate (scan) phase
        class liberate_set {
            typedef retired_ptr_node *  item_type;
            typedef cds::details::Allocator<item_type, CDS_DEFAULT_ALLOCATOR>   allocator_type;

            size_t const    m_nBucketCount;
            item_type *     m_Buckets;

            item_type& bucket( retired_ptr_node& node ) const
            {
                return bucket( node.m_ptr.m_p );
            }
            item_type& bucket( guard_data::guarded_ptr p ) const
            {
                return m_Buckets[ std::hash<guard_data::guarded_ptr>()( p ) & (m_nBucketCount - 1) ];
            }

        public:
            liberate_set( size_t nBucketCount )
                : m_nBucketCount( nBucketCount )
            {
                assert( nBucketCount > 0 );
                assert( (nBucketCount & (nBucketCount - 1)) == 0 ); // power of 2

                m_Buckets = allocator_type().NewArray( nBucketCount );
                std::fill( m_Buckets, m_Buckets + nBucketCount, nullptr );
            }

            ~liberate_set()
            {
                allocator_type().Delete( m_Buckets, m_nBucketCount );
            }
            // Inserts the node. Distinct pointers in a bucket are chained via m_pNext;
            // duplicates of the same pointer are chained via m_pNextFree.
            void insert( retired_ptr_node& node )
            {
                node.m_pNext.store( nullptr, atomics::memory_order_relaxed );

                item_type& refBucket = bucket( node );
                if ( refBucket ) {
                    item_type p = refBucket;
                    do {
                        if ( p->m_ptr.m_p == node.m_ptr.m_p ) {
                            assert( node.m_pNextFree.load( atomics::memory_order_relaxed ) == nullptr );

                            node.m_pNextFree.store( p->m_pNextFree.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
                            p->m_pNextFree.store( &node, atomics::memory_order_relaxed );
                            return;
                        }
                        p = p->m_pNext.load( atomics::memory_order_relaxed );
                    } while ( p );

                    node.m_pNext.store( refBucket, atomics::memory_order_relaxed );
                }
                refBucket = &node;
            }
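            // Bucket layout sketch (illustrative only):
            //
            //   m_Buckets[i] --m_pNext--> A --m_pNext--> B --m_pNext--> C
            //                             |
            //                        m_pNextFree
            //                             v
            //                             A'  (another retired_ptr_node with the same m_ptr.m_p)
            //
            // erase(p) unlinks one m_pNext node together with its whole m_pNextFree
            // chain, so a single guard check covers every pending retirement of p.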
            // Removes the retired pointer equal to \p ptr from the set and returns
            // the head of its m_pNextFree chain, or nullptr if the pointer is not found
            item_type erase( guard_data::guarded_ptr ptr )
            {
                item_type& refBucket = bucket( ptr );
                item_type p = refBucket;
                item_type pPrev = nullptr;

                while ( p ) {
                    if ( p->m_ptr.m_p == ptr ) {
                        if ( pPrev )
                            pPrev->m_pNext.store( p->m_pNext.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
                        else
                            refBucket = p->m_pNext.load( atomics::memory_order_relaxed );
                        p->m_pNext.store( nullptr, atomics::memory_order_relaxed );
                        return p;
                    }
                    pPrev = p;
                    p = p->m_pNext.load( atomics::memory_order_relaxed );
                }

                return nullptr;
            }
            typedef std::pair<item_type, item_type> list_range;

            // Disposes every pointer remaining in the set and links all nodes into
            // a single list via m_pNextFree; returns [first, last] of that list
            list_range free_all()
            {
                item_type pTail = nullptr;
                list_range ret = std::make_pair( pTail, pTail );

                item_type const * pEndBucket = m_Buckets + m_nBucketCount;
                for ( item_type * ppBucket = m_Buckets; ppBucket < pEndBucket; ++ppBucket ) {
                    item_type pBucket = *ppBucket;
                    if ( pBucket ) {
                        if ( !ret.first )
                            ret.first = pBucket;
                        else
                            pTail->m_pNextFree.store( pBucket, atomics::memory_order_relaxed );

                        pTail = pBucket;
                        for (;;) {
                            item_type pNext = pTail->m_pNext.load( atomics::memory_order_relaxed );
                            pTail->m_ptr.free();
                            pTail->m_pNext.store( nullptr, atomics::memory_order_relaxed );

                            // Dispose the duplicate (m_pNextFree) chain of the current pointer
                            while ( pTail->m_pNextFree.load( atomics::memory_order_relaxed )) {
                                pTail = pTail->m_pNextFree.load( atomics::memory_order_relaxed );
                                pTail->m_ptr.free();
                                pTail->m_pNext.store( nullptr, atomics::memory_order_relaxed );
                            }

                            if ( pNext ) {
                                pTail->m_pNextFree.store( pNext, atomics::memory_order_relaxed );
                                pTail = pNext;
                            }
                            else
                                break;
                        }
                    }
                }

                if ( pTail )
                    pTail->m_pNextFree.store( nullptr, atomics::memory_order_relaxed );
                ret.second = pTail;
                return ret;
            }
        };

    } // namespace details
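    // A minimal sketch of the liberate_set contract as exercised by scan() below
    // (illustrative only; liberate_set is internal to this translation unit):
    //
    //      details::liberate_set set( beans::ceil2( nCapacity ));  // capacity rounded up to a power of 2
    //      set.insert( *pRetiredNode );            // stage a privatized retired node
    //      if ( details::retired_ptr_node * p = set.erase( guardedPtr ))
    //          ;  // p heads the m_pNextFree chain of still-guarded duplicates - re-buffer it
    //      details::liberate_set::list_range r = set.free_all();   // dispose the rest;
    //      // r.first .. r.second (linked via m_pNextFree) is handed to free_range()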
    GarbageCollector * GarbageCollector::m_pManager = nullptr;
    void CDS_STDCALL GarbageCollector::Construct(
        size_t nLiberateThreshold
        , size_t nInitialThreadGuardCount
        , size_t nEpochCount
    )
    {
        if ( !m_pManager )
            m_pManager = new GarbageCollector( nLiberateThreshold, nInitialThreadGuardCount, nEpochCount );
    }
    void CDS_STDCALL GarbageCollector::Destruct()
    {
        delete m_pManager;
        m_pManager = nullptr;
    }
    GarbageCollector::GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount, size_t nEpochCount )
        : m_nLiberateThreshold( nLiberateThreshold ? nLiberateThreshold : 1024 )
        , m_nInitialThreadGuardCount( nInitialThreadGuardCount ? nInitialThreadGuardCount : 8 )
        , m_RetiredAllocator( static_cast<unsigned int>( nEpochCount ? nEpochCount : 16 ))
        , m_bStatEnabled( false )
    {}

    GarbageCollector::~GarbageCollector()
    {}
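    // scan() is the "liberate" phase of the Dynamic Hazard Pointer algorithm:
    //   1. Privatize the global buffer of retired pointers (m_RetiredBuffer).
    //   2. Stage every retired pointer into a liberate_set hash set.
    //   3. Walk all guards (m_GuardPool); every guarded pointer found in the set
    //      is still in use ("busy") and is pushed back to m_RetiredBuffer for a
    //      later scan.
    //   4. Free everything left in the set and advance the allocator epoch.
    // If a full cycle frees nothing, the liberate threshold is doubled to reduce
    // the frequency of fruitless scans.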
    void GarbageCollector::scan()
    {
        details::retired_ptr_buffer::privatize_result retiredList = m_RetiredBuffer.privatize();
        if ( retiredList.first ) {

            size_t nLiberateThreshold = m_nLiberateThreshold.load( atomics::memory_order_relaxed );
            details::liberate_set set( beans::ceil2( retiredList.second > nLiberateThreshold ? retiredList.second : nLiberateThreshold ));

            // Stage the privatized list of retired pointers into the liberate set
            size_t nRetiredCount = 0;
            details::retired_ptr_node * pHead = retiredList.first;
            while ( pHead ) {
                details::retired_ptr_node * pNext = pHead->m_pNext.load( atomics::memory_order_relaxed );
                pHead->m_pNextFree.store( nullptr, atomics::memory_order_relaxed );
                set.insert( *pHead );
                pHead = pNext;
                ++nRetiredCount;
            }
            // Liberate cycle: any pointer still protected by a guard is "busy"
            // and must not be freed during this scan
            details::retired_ptr_node * pBusyFirst = nullptr;
            details::retired_ptr_node * pBusyLast = nullptr;
            size_t nBusyCount = 0;

            for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load( atomics::memory_order_acquire ))
            {
                // get guarded pointer
                details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load( atomics::memory_order_acquire );
                if ( valGuarded ) {
                    details::retired_ptr_node * pRetired = set.erase( valGuarded );
                    if ( pRetired ) {
                        // Retired pointer is being guarded.
                        // pRetired is the head of the retired pointer list in which every m_ptr.m_p field is equal;
                        // the list is linked via the m_pNextFree field
                        if ( pBusyLast )
                            pBusyLast->m_pNext.store( pRetired, atomics::memory_order_relaxed );
                        else
                            pBusyFirst = pRetired;
                        pBusyLast = pRetired;
                        ++nBusyCount;

                        // Flatten the duplicate (m_pNextFree) chain into the busy list
                        details::retired_ptr_node * p = pBusyLast->m_pNextFree.load( atomics::memory_order_relaxed );
                        while ( p != nullptr ) {
                            pBusyLast->m_pNext.store( p, atomics::memory_order_relaxed );
                            pBusyLast = p;
                            ++nBusyCount;
                            p = p->m_pNextFree.load( atomics::memory_order_relaxed );
                        }
                    }
                }
            }
            // Place [pBusyFirst, pBusyLast] back to m_RetiredBuffer
            if ( pBusyFirst )
                m_RetiredBuffer.push_list( pBusyFirst, pBusyLast, nBusyCount );
            // Free all retired pointers that are not guarded
            details::liberate_set::list_range range = set.free_all();

            m_RetiredAllocator.inc_epoch();

            if ( range.first ) {
                assert( range.second != nullptr );
                m_RetiredAllocator.free_range( range.first, range.second );
            }
            else if ( nRetiredCount >= nLiberateThreshold ) {
                // This scan() cycle did not free any retired pointer - double the scan() threshold
                m_nLiberateThreshold.compare_exchange_strong( nLiberateThreshold, nLiberateThreshold * 2,
                    atomics::memory_order_release, atomics::memory_order_relaxed );
            }
        }
    }
}}} // namespace cds::gc::dhp