/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_SYNC_SPINLOCK_H
#define CDSLIB_SYNC_SPINLOCK_H

#include <utility>  // std::declval, used in noexcept specifications below
#include <cds/algo/atomic.h>
#include <cds/os/thread.h>
#include <cds/algo/backoff_strategy.h>
namespace cds {
    /// Synchronization primitives
    namespace sync {

        /// Spin lock
        /**
            Simple and lightweight spin-lock critical section.
            It is useful for guarding small (short-duration) sections of code.

            Algorithm:

                TATAS (test-and-test-and-set)
                [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.

            No serialization is performed: any of the waiting threads may own the spin-lock.
            This spin-lock is NOT recursive: the thread that owns the lock cannot call \p lock() again without deadlock.
            The method \p unlock() can be called by any thread.

            DEBUG version: the spin-lock stores the owner thread id. An assertion is raised when:
            - a double lock attempt is made by the same thread (deadlock)
            - unlock is performed by a thread that does not own the lock

            While the spin-lock is locked, the \p Backoff algorithm is invoked. The predefined \p backoff::LockDefault
            class yields the current thread and repeats the lock attempt later.

            Template parameters:
            - \p Backoff - backoff strategy, used while the spin-lock is locked
        */
        template <typename Backoff >
        class spin_lock
        {
        public:
            typedef Backoff backoff_strategy;       ///< back-off strategy type

        private:
            atomics::atomic<bool>   m_spin;         ///< Spin
#       ifdef CDS_DEBUG
            typename OS::ThreadId   m_dbgOwnerId;   ///< Owner thread id (only for debug mode)
#       endif

        public:
            /// Construct free (unlocked) spin-lock
            spin_lock() CDS_NOEXCEPT
#       ifdef CDS_DEBUG
                : m_dbgOwnerId( OS::c_NullThreadId )
#       endif
            {
                m_spin.store( false, atomics::memory_order_relaxed );
            }
            /// Construct spin-lock in specified state
            /**
                In debug mode: if \p bLocked = true then the spin-lock is owned by the current thread
            */
            explicit spin_lock( bool bLocked ) CDS_NOEXCEPT
#       ifdef CDS_DEBUG
                : m_dbgOwnerId( bLocked ? cds::OS::get_current_thread_id() : cds::OS::c_NullThreadId )
#       endif
            {
                m_spin.store( bLocked, atomics::memory_order_relaxed );
            }
            /// Dummy copy constructor
            /**
                In theory, a spin-lock cannot be copied; in practice, however, non-copyability is inconvenient.
                Therefore, we provide a dummy copy constructor that performs no copy at all: it
                initializes the spin to the free (unlocked) state, like the default ctor.
            */
            spin_lock( const spin_lock<Backoff>& ) CDS_NOEXCEPT
                : m_spin( false )
#       ifdef CDS_DEBUG
                , m_dbgOwnerId( cds::OS::c_NullThreadId )
#       endif
            {}
            /// Destructor. In debug mode it checks whether the spin-lock is free
            ~spin_lock()
            {
                assert( !m_spin.load( atomics::memory_order_relaxed ));
            }
            /// Check if the spin is locked
            bool is_locked() const CDS_NOEXCEPT
            {
                return m_spin.load( atomics::memory_order_relaxed );
            }
            /// Try to lock the object
            /**
                Returns \p true if locking succeeded,
                otherwise (if the spin is already locked) returns \p false.

                Debug version: deadlock can be detected
            */
            bool try_lock() CDS_NOEXCEPT
            {
                bool bCurrent = false;

#       ifdef CDS_THREAD_SANITIZER_ENABLED
                if ( m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed )) {
                    CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( &m_spin );
                }
#       else
                m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );
#       endif

                CDS_DEBUG_ONLY(
                    if ( !bCurrent ) {
                        m_dbgOwnerId = OS::get_current_thread_id();
                    }
                )
                return !bCurrent;
            }
            /// Try to lock the object, repeating \p nTryCount times if it fails
            /**
                Returns \p true if locking succeeded,
                otherwise (if the spin is already locked) returns \p false.
            */
            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
            {
                backoff_strategy backoff;
                while ( nTryCount-- ) {
                    if ( try_lock())
                        return true;
                    backoff();      // back off before the next attempt
                }
                return false;
            }
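            /*
                Usage sketch for the bounded try_lock (illustrative only, not part of
                the original header; \p s and the fallback path are hypothetical):

                \code
                cds::sync::spin s;

                void do_work()
                {
                    if ( s.try_lock( 100 )) {   // at most 100 attempts, backing off between them
                        // ... short critical section ...
                        s.unlock();
                    }
                    else {
                        // contended: take a fallback path instead of spinning forever
                    }
                }
                \endcode
            */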
            /// Lock the spin-lock. Waits indefinitely while the spin-lock is locked. Debug version: deadlock may be detected
            void lock() CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
            {
                backoff_strategy backoff;

                // Deadlock detected: the current thread already owns the lock
                assert( m_dbgOwnerId != OS::get_current_thread_id());

                // TATAS algorithm: spin on a plain (relaxed) read until the lock
                // looks free, and only then retry the atomic read-modify-write
                while ( !try_lock()) {
                    while ( m_spin.load( atomics::memory_order_relaxed )) {
                        backoff();
                    }
                }
                assert( m_dbgOwnerId == OS::get_current_thread_id());
            }
            /// Unlock the spin-lock. Debug version: unlock by a thread that does not own the lock is detected
            void unlock() CDS_NOEXCEPT
            {
                assert( m_spin.load( atomics::memory_order_relaxed ));

                assert( m_dbgOwnerId == OS::get_current_thread_id());
                CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )

                m_spin.store( false, atomics::memory_order_release );
                CDS_TSAN_ANNOTATE_MUTEX_RELEASED( &m_spin );
            }
        };
        /// Spin-lock implementation default for the current platform
        typedef spin_lock<backoff::LockDefault > spin;
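        /*
            Usage sketch (illustrative, not part of the original header): \p spin_lock
            provides \p lock(), \p try_lock() and \p unlock(), so it satisfies the C++
            Lockable requirements and works with \p std::lock_guard. The shared counter
            below is a hypothetical example.

            \code
            #include <mutex>        // std::lock_guard
            #include <cds/sync/spinlock.h>

            cds::sync::spin theLock;    // TATAS spin-lock with default backoff
            int nSharedCounter = 0;     // hypothetical shared data

            void increment()
            {
                std::lock_guard<cds::sync::spin> guard( theLock ); // spins in lock() until acquired
                ++nSharedCounter;       // protected by the spin-lock
            }
            \endcode
        */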
        /// Recursive spin lock
        /**
            Allows recursive calls: the owner thread may recursively enter the critical section guarded by the spin-lock.

            Template parameters:
            - \p Integral - one of the integral atomic types: <tt>unsigned int</tt>, \p int, and others
            - \p Backoff - backoff strategy, used while the spin-lock is locked
        */
        template <typename Integral, class Backoff>
        class reentrant_spin_lock
        {
            typedef OS::ThreadId thread_id;     ///< The type of thread id

        public:
            typedef Integral integral_type;     ///< The integral type
            typedef Backoff  backoff_strategy;  ///< The backoff type

        private:
            atomics::atomic<integral_type> m_spin;      ///< spin-lock atomic (lock count)
            thread_id                      m_OwnerId;   ///< Owner thread id; when the spin-lock is not locked it usually equals \p OS::c_NullThreadId
        private:
            void take( thread_id tid ) CDS_NOEXCEPT
            {
                m_OwnerId = tid;
            }

            void free() CDS_NOEXCEPT
            {
                m_OwnerId = OS::c_NullThreadId;
            }

            bool is_taken( thread_id tid ) const CDS_NOEXCEPT
            {
                return m_OwnerId == tid;
            }

            bool try_taken_lock( thread_id tid ) CDS_NOEXCEPT
            {
                // Recursive acquisition: the owner just increments the lock count
                if ( is_taken( tid )) {
                    m_spin.fetch_add( 1, atomics::memory_order_relaxed );
                    return true;
                }
                return false;
            }
            bool try_acquire() CDS_NOEXCEPT
            {
                integral_type nCurrent = 0;
#       ifdef CDS_THREAD_SANITIZER_ENABLED
                if ( m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed )) {
                    CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( &m_spin );
                    return true;
                }
                return false;
#       else
                return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
#       endif
            }
            bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
            {
                backoff_strategy bkoff;

                while ( nTryCount-- ) {
                    if ( try_acquire())
                        return true;
                    bkoff();
                }
                return false;
            }
            void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
            {
                // TATAS algorithm
                backoff_strategy bkoff;
                while ( !try_acquire()) {
                    while ( m_spin.load( atomics::memory_order_relaxed ))
                        bkoff();
                }
            }

        public:
            /// Default constructor initializes spin to free (unlocked) state
            reentrant_spin_lock() CDS_NOEXCEPT
                : m_spin( 0 )
                , m_OwnerId( OS::c_NullThreadId )
            {}
            /// Dummy copy constructor
            /**
                In theory, a spin-lock cannot be copied; in practice, however, non-copyability is inconvenient.
                Therefore, we provide a dummy copy constructor that performs no copy at all: it
                initializes the spin to the free (unlocked) state, like the default ctor.
            */
            reentrant_spin_lock( const reentrant_spin_lock<Integral, Backoff>& ) CDS_NOEXCEPT
                : m_spin( 0 )
                , m_OwnerId( OS::c_NullThreadId )
            {}
            /// Construct object in specified state
            explicit reentrant_spin_lock( bool bLocked )
                : m_spin( 0 )
                , m_OwnerId( OS::c_NullThreadId )
            {
                if ( bLocked )
                    lock();
            }
            /// Checks if the spin is locked
            /**
                The spin is locked if the lock count > 0 and the current thread is not the owner of the lock.
                Otherwise (i.e. the lock count == 0 or the current thread owns the spin) the spin is unlocked.
            */
            bool is_locked() const CDS_NOEXCEPT
            {
                return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id()));
            }
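            /*
                Note the owner-relative semantics of \p is_locked() (sketch with
                hypothetical code, single-threaded for brevity):

                \code
                cds::sync::reentrant_spin rs;
                rs.lock();
                assert( !rs.is_locked());   // the owner thread sees its own lock as "not locked"
                // from any other thread, rs.is_locked() would return true here
                rs.unlock();
                \endcode
            */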
            /// Try to lock the object
            bool try_lock() CDS_NOEXCEPT
            {
                thread_id tid = OS::get_current_thread_id();
                if ( try_taken_lock( tid ))
                    return true;
                if ( try_acquire()) {
                    take( tid );
                    return true;
                }
                return false;
            }
            /// Try to lock the object, repeating \p nTryCount times if it fails
            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( std::declval<reentrant_spin_lock>().try_acquire( nTryCount )))
            {
                thread_id tid = OS::get_current_thread_id();
                if ( try_taken_lock( tid ))
                    return true;
                if ( try_acquire( nTryCount )) {
                    take( tid );
                    return true;
                }
                return false;
            }
            /// Lock the object; waits if it is busy
            void lock() CDS_NOEXCEPT_( noexcept( std::declval<reentrant_spin_lock>().acquire()))
            {
                thread_id tid = OS::get_current_thread_id();
                if ( !try_taken_lock( tid )) {
                    acquire();
                    take( tid );
                }
            }
            /// Unlock the spin-lock. Returns \p true if the current thread owns the spin-lock, \p false otherwise
            bool unlock() CDS_NOEXCEPT
            {
                if ( is_taken( OS::get_current_thread_id())) {
                    integral_type n = m_spin.load( atomics::memory_order_relaxed );
                    if ( n > 1 )
                        m_spin.store( n - 1, atomics::memory_order_relaxed );
                    else {
                        free();
                        m_spin.store( 0, atomics::memory_order_release );
                        CDS_TSAN_ANNOTATE_MUTEX_RELEASED( &m_spin );
                    }
                    return true;
                }
                return false;
            }
            /// Change the owner of the locked spin-lock. May be called only by the thread that owns the spin-lock
            bool change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
            {
                if ( is_taken( OS::get_current_thread_id())) {
                    assert( newOwnerId != OS::c_NullThreadId );
                    m_OwnerId = newOwnerId;
                    return true;
                }
                return false;
            }
        };
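        /*
            Sketch of an ownership handoff via \p change_owner() (hypothetical scenario;
            \p workerId is an assumed thread id, not an API of this header):

            \code
            cds::sync::reentrant_spin rs;

            void hand_off( cds::OS::ThreadId workerId )
            {
                rs.lock();                                    // current thread becomes the owner
                bool bChanged = rs.change_owner( workerId );  // now only workerId may unlock
                assert( bChanged );
                // the worker thread is expected to call rs.unlock() when done
            }
            \endcode
        */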
        /// Recursive 32-bit spin-lock
        typedef reentrant_spin_lock<uint32_t, backoff::LockDefault> reentrant_spin32;

        /// Default recursive spin-lock
        typedef reentrant_spin32 reentrant_spin;

        /// Recursive 64-bit spin-lock
        typedef reentrant_spin_lock<uint64_t, backoff::LockDefault> reentrant_spin64;
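        /*
            Recursive usage sketch (illustrative, not part of the original header):
            the owner thread may re-enter the lock, and each successful \p lock()
            must be balanced by a matching \p unlock().

            \code
            cds::sync::reentrant_spin rs;

            void inner()
            {
                rs.lock();      // same owner: lock count becomes 2, no deadlock
                // ... critical section ...
                rs.unlock();    // lock count drops back to 1, still owned
            }

            void outer()
            {
                rs.lock();      // lock count = 1, current thread becomes the owner
                inner();
                rs.unlock();    // lock count = 0, the lock is released
            }
            \endcode
        */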
    } // namespace sync
} // namespace cds

#endif // #ifndef CDSLIB_SYNC_SPINLOCK_H