/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CDSLIB_INTRUSIVE_STRIPED_SET_STRIPING_POLICY_H
#define CDSLIB_INTRUSIVE_STRIPED_SET_STRIPING_POLICY_H

#include <memory>
#include <mutex>
#include <cds/sync/lock_array.h>
#include <cds/os/thread.h>
#include <cds/sync/spinlock.h>

namespace cds { namespace intrusive { namespace striped_set {

    /// Lock striping concurrent access policy
    /**
        This is one of the available \p opt::mutex_policy option types for \p StripedSet.

        Lock striping is a very simple technique.
        The set consists of a bucket table and an array of locks.
        Initially, the capacity of the lock array and of the bucket table is the same.
        When the set is resized, the bucket table capacity is doubled but the lock array capacity stays unchanged.
        Lock \p i protects each bucket \p j for which <tt> i == j mod L </tt>,
        where \p L is the size of the lock array.

        The policy contains an internal array of \p Lock locks.

        Template arguments:
        - \p Lock - the type of mutex. The default is \p std::mutex. The mutex type should be default-constructible.
          Note that for performance reasons a spin-lock is poorly suited to lock striping.
        - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR.
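
        A minimal usage sketch (the item type, hook, and hash functor below are
        illustrative assumptions, not part of this header). With \p L = 8 locks,
        buckets 3, 11, and 19 all map to lock 3:
        \code
        #include <cds/intrusive/striped_set.h>
        #include <boost/intrusive/list.hpp>

        struct my_item: public boost::intrusive::list_base_hook<>
        {
            int key;
        };

        struct my_item_hash {
            size_t operator()( my_item const& v ) const { return size_t( v.key ); }
            size_t operator()( int k ) const { return size_t( k ); }
        };

        // Select lock striping via the opt::mutex_policy option
        typedef cds::intrusive::StripedSet<
            boost::intrusive::list< my_item >,
            cds::opt::hash< my_item_hash >,
            cds::opt::mutex_policy< cds::intrusive::striped_set::striping<> >
        > my_striped_set;
        \endcode
    */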
    template <class Lock = std::mutex, class Alloc = CDS_DEFAULT_ALLOCATOR >
    class striping
    {
    public:
        typedef Lock    lock_type;          ///< lock type
        typedef Alloc   allocator_type;     ///< allocator type

        typedef cds::sync::lock_array< lock_type, cds::sync::pow2_select_policy, allocator_type >    lock_array_type;    ///< lock array type

    protected:
        //@cond
        lock_array_type m_Locks;    ///< the lock array
        //@endcond

    public:
        //@cond
        class scoped_cell_lock {
            std::unique_lock< lock_array_type >   m_guard;

        public:
            // Locks the lock that guards the cell for hash value nHash
            scoped_cell_lock( striping& policy, size_t nHash )
                : m_guard( policy.m_Locks, nHash )
            {}
        };

        class scoped_full_lock {
            std::unique_lock< lock_array_type >   m_guard;

        public:
            // Locks every lock in the lock array
            scoped_full_lock( striping& policy )
                : m_guard( policy.m_Locks )
            {}
        };

        class scoped_resize_lock: public scoped_full_lock {
        public:
            // For the striping policy, resizing simply takes the full lock
            scoped_resize_lock( striping& policy )
                : scoped_full_lock( policy )
            {}
        };
        //@endcond

    public:
        /// Constructor
        striping(
            size_t nLockCount           ///< The size of lock array. Must be power of two.
        )
            : m_Locks( nLockCount, cds::sync::pow2_select_policy( nLockCount ))
        {}

        /// Returns lock array size
        /**
            Lock array size is unchanged during \p striping object lifetime
        */
        size_t lock_count() const
        {
            return m_Locks.size();
        }

        //@cond
        // The lock array capacity is fixed for this policy, so resizing is a no-op
        void resize( size_t /*nNewCapacity*/ )
        {}
        //@endcond
    };

    /// Refinable concurrent access policy
    /**
        This is one of the available \p opt::mutex_policy option types for \p StripedSet.

        Refining is similar to the striping technique (see striped_set::striping),
        but it allows the lock array to grow when the hash table is resized,
        so the sizes of the hash table and of the lock array always stay equal.

        Template arguments:
        - \p RecursiveLock - the type of mutex. A reentrant (recursive) mutex is required.
          The default is \p std::recursive_mutex. The mutex type should be default-constructible.
        - \p BackOff - back-off strategy. Default is \p cds::backoff::yield
        - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR.
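
        A minimal usage sketch, reusing the illustrative \p my_item and \p my_item_hash
        types from the \p striping example above:
        \code
        // Select the refinable mutex policy; the lock array grows together
        // with the bucket table on each resize
        typedef cds::intrusive::StripedSet<
            boost::intrusive::list< my_item >,
            cds::opt::hash< my_item_hash >,
            cds::opt::mutex_policy< cds::intrusive::striped_set::refinable<> >
        > my_refinable_set;
        \endcode
    */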
    template <
        class RecursiveLock = std::recursive_mutex,
        typename BackOff = cds::backoff::yield,
        class Alloc = CDS_DEFAULT_ALLOCATOR>
    class refinable
    {
    public:
        typedef RecursiveLock   lock_type;      ///< lock type
        typedef BackOff         back_off;       ///< back-off strategy used
        typedef Alloc           allocator_type; ///< allocator type

    protected:
        //@cond
        typedef cds::sync::trivial_select_policy  lock_selection_policy;

        class lock_array_type
            : public cds::sync::lock_array< lock_type, lock_selection_policy, allocator_type >
            , public std::enable_shared_from_this< lock_array_type >
        {
            typedef cds::sync::lock_array< lock_type, lock_selection_policy, allocator_type >    lock_array_base;
        public:
            lock_array_type( size_t nCapacity )
                : lock_array_base( nCapacity )
            {}
        };
        typedef std::shared_ptr< lock_array_type >  lock_array_ptr;
        typedef cds::details::Allocator< lock_array_type, allocator_type >  lock_array_allocator;

        typedef unsigned long long  owner_t;
        typedef cds::OS::ThreadId   threadId_t;

        typedef cds::sync::spin     spinlock_type;
        typedef std::unique_lock< spinlock_type > scoped_spinlock;
        //@endcond

    protected:
        //@cond
        static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1;
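
        // A note on the owner-word encoding (a reading of the code below, not an
        // original author comment): m_Owner packs the resizing thread's id and a
        // busy flag into one atomic word:
        //
        //      owner == (thread_id << 1) | resize_in_progress
        //
        // Bit 0 set means a resize is in progress, and (owner >> 1) identifies the
        // resizing thread (truncated to c_nOwnerMask), so that same thread can
        // still acquire cell locks re-entrantly while it resizes.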

        lock_array_ptr              m_arrLocks;     ///< Lock array. The capacity of the array is specified in the constructor.
        atomics::atomic< owner_t >  m_Owner;        ///< owner mark (thread id + boolean flag)
        atomics::atomic<size_t>     m_nCapacity;    ///< Lock array capacity
        spinlock_type               m_access;       ///< access to m_arrLocks
        //@endcond

    protected:
        //@cond
        struct lock_array_disposer {
            void operator()( lock_array_type * pArr )
            {
                // There seems to be a false positive in std::shared_ptr deallocation in uninstrumented libc++;
                // see, for example, https://groups.google.com/forum/#!topic/thread-sanitizer/eHu4dE_z7Cc
                // and https://reviews.llvm.org/D21609
                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                lock_array_allocator().Delete( pArr );
                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
            }
        };

        lock_array_ptr create_lock_array( size_t nCapacity )
        {
            m_nCapacity.store( nCapacity, atomics::memory_order_relaxed );
            return lock_array_ptr( lock_array_allocator().New( nCapacity ), lock_array_disposer());
        }

        lock_type& acquire( size_t nHash )
        {
            owner_t me = (owner_t) cds::OS::get_current_thread_id();
            owner_t who;
            back_off bkoff;

            while ( true ) {
                // wait while resizing
                while ( true ) {
                    who = m_Owner.load( atomics::memory_order_acquire );
                    if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask))
                        break;
                    bkoff();
                }

                lock_array_ptr pLocks;
                {
                    scoped_spinlock sl(m_access);
                    pLocks = m_arrLocks;
                }

                lock_type& lock = pLocks->at( nHash & (pLocks->size() - 1));
                lock.lock();

                // Re-check that no resize started and the lock array was not replaced
                // while we were taking the lock; otherwise retry
                who = m_Owner.load( atomics::memory_order_acquire );
                if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask)) && m_arrLocks == pLocks )
                    return lock;
                lock.unlock();
            }
        }

        lock_array_ptr acquire_all()
        {
            owner_t me = (owner_t) cds::OS::get_current_thread_id();
            owner_t who;
            back_off bkoff;

            while ( true ) {
                // wait while resizing
                while ( true ) {
                    who = m_Owner.load( atomics::memory_order_acquire );
                    if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask))
                        break;
                    bkoff();
                }

                lock_array_ptr pLocks;
                {
                    scoped_spinlock sl(m_access);
                    pLocks = m_arrLocks;
                }

                pLocks->lock_all();

                // Re-check as in acquire(): retry if a resize began or the array changed
                who = m_Owner.load( atomics::memory_order_acquire );
                if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask)) && m_arrLocks == pLocks )
                    return pLocks;

                pLocks->unlock_all();
            }
        }

        void release_all( lock_array_ptr p )
        {
            p->unlock_all();
        }

        bool acquire_resize()
        {
            owner_t me = (owner_t) cds::OS::get_current_thread_id();
            back_off bkoff;

            for (unsigned int nAttempts = 0; nAttempts < 32; ++nAttempts ) {
                owner_t ownNull = 0;
                // Try to become the resize owner: publish our thread id with the busy bit set
                if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acquire, atomics::memory_order_relaxed )) {
                    lock_array_ptr pOldLocks = m_arrLocks;
                    size_t const nLockCount = pOldLocks->size();
                    // Wait until every cell lock has been released; after that no other
                    // thread can hold a lock until we clear the busy bit
                    for ( size_t i = 0; i < nLockCount; ++i ) {
                        typename lock_array_type::lock_type& lock = pOldLocks->at(i);
                        while ( !lock.try_lock())
                            bkoff();
                        lock.unlock();
                    }
                    return true;
                }
                else
                    bkoff();
            }
            return false;
        }

        void release_resize()
        {
            m_Owner.store( 0, atomics::memory_order_release );
        }
        //@endcond

    public:
        //@cond
        class scoped_cell_lock {
            std::unique_lock< lock_type >   m_guard;

        public:
            scoped_cell_lock( refinable& policy, size_t nHash )
                : m_guard( policy.acquire( nHash ), std::adopt_lock )
            {}
        };

        class scoped_full_lock {
            refinable&      m_Policy;
            lock_array_ptr  m_Locks;

        public:
            scoped_full_lock( refinable& policy )
                : m_Policy( policy )
            {
                m_Locks = policy.acquire_all();
            }
            ~scoped_full_lock()
            {
                m_Policy.release_all( m_Locks );
            }
        };

        class scoped_resize_lock {
            refinable&  m_Policy;
            bool        m_bSuccess;

        public:
            scoped_resize_lock( refinable& policy )
                : m_Policy( policy )
            {
                m_bSuccess = policy.acquire_resize();
            }

            ~scoped_resize_lock()
            {
                if ( m_bSuccess )
                    m_Policy.release_resize();
            }

            bool success() const
            {
                return m_bSuccess;
            }
        };
        //@endcond
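
        // A sketch of the intended resize protocol, inferred from the classes above
        // (nNewCapacity is a hypothetical value, not defined in this header):
        //
        //      scoped_resize_lock al( policy );
        //      if ( al.success() )
        //          policy.resize( nNewCapacity );  // swaps in a new, larger lock array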

    public:
        /// Constructor
        refinable(
            size_t nLockCount   ///< Initial size of lock array. Must be power of two.
        )
            : m_Owner( 0 )
            , m_nCapacity( nLockCount )
        {
            assert( cds::beans::is_power2( nLockCount ));
            m_arrLocks = create_lock_array( nLockCount );
        }

        /// Returns lock array size
        /**
            Lock array size is not a constant for \p refinable policy and can change when the set is resized.
        */
        size_t lock_count() const
        {
            return m_nCapacity.load( atomics::memory_order_relaxed );
        }

        /// Resize for new capacity
        void resize( size_t nNewCapacity )
        {
            // The caller is expected to hold scoped_resize_lock while resizing
            lock_array_ptr pNewArr = create_lock_array( nNewCapacity );
            scoped_spinlock sl( m_access );
            m_arrLocks.swap( pNewArr );
        }
    };

}}} // namespace cds::intrusive::striped_set

#endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_STRIPING_POLICY_H