/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_SYNC_SPINLOCK_H
#define CDSLIB_SYNC_SPINLOCK_H

#include <cds/algo/atomic.h>
#include <cds/os/thread.h>
#include <cds/algo/backoff_strategy.h>
#include <cassert>  // assert
#include <utility>  // std::declval, used in noexcept specifications below
namespace cds {
    /// Synchronization primitives
    namespace sync {

        /// Spin lock
        /**
            Simple and lightweight spin-lock critical section.
            It is useful for guarding small (short-lived) critical sections.

            Algorithm:

                TATAS (test-and-test-and-set)
                [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.

            No serialization is performed - any of the waiting threads may acquire the spin-lock.
            This spin-lock is NOT recursive: the thread that owns the lock cannot call \p lock() again without deadlocking.
            The method \p unlock() can be called by any thread.

            DEBUG version: the spin-lock stores the owner thread id. An assertion is raised when:
            - a double lock attempt is made by the same thread (deadlock)
            - the lock is released by a thread that does not own it

            While the spin-lock is locked, the \p Backoff algorithm is invoked. The predefined \p backoff::LockDefault
            class yields the current thread and repeats the lock attempt later.

            Template parameters:
            - \p Backoff - backoff strategy, used while the spin-lock is locked
        */
        template <typename Backoff >
        class spin_lock
        {
        public:
            typedef Backoff backoff_strategy;   ///< back-off strategy type

        private:
            atomics::atomic<bool> m_spin;       ///< Spin
#   ifdef CDS_DEBUG
            typename OS::ThreadId m_dbgOwnerId; ///< Owner thread id (only for debug mode)
#   endif

        public:
            /// Construct free (unlocked) spin-lock
            spin_lock() CDS_NOEXCEPT
#   ifdef CDS_DEBUG
                : m_dbgOwnerId( OS::c_NullThreadId )
#   endif
            {
                m_spin.store( false, atomics::memory_order_relaxed );
            }
            /// Construct spin-lock in specified state
            /**
                In debug mode: if \p bLocked = \p true then the spin-lock is made owned by the current thread
            */
            explicit spin_lock( bool bLocked ) CDS_NOEXCEPT
#   ifdef CDS_DEBUG
                : m_dbgOwnerId( bLocked ? cds::OS::get_current_thread_id() : cds::OS::c_NullThreadId )
#   endif
            {
                m_spin.store( bLocked, atomics::memory_order_relaxed );
            }
            /// Dummy copy constructor
            /**
                In theory, a spin-lock cannot be copied; in practice, however, that restriction is inconvenient.
                Therefore, we provide a dummy copy constructor that performs no copying at all:
                it initializes the spin to the free (unlocked) state, like the default ctor.
            */
            spin_lock( const spin_lock<Backoff>& ) CDS_NOEXCEPT
                : m_spin( false )
#   ifdef CDS_DEBUG
                , m_dbgOwnerId( cds::OS::c_NullThreadId )
#   endif
            {}
            /// Destructor. In debug build it checks whether the spin-lock is free
            ~spin_lock()
            {
                assert( !m_spin.load( atomics::memory_order_relaxed ));
            }
            /// Check if the spin is locked
            bool is_locked() const CDS_NOEXCEPT
            {
                return m_spin.load( atomics::memory_order_relaxed );
            }
            /// Try to lock the object
            /**
                Returns \p true if locking succeeded;
                otherwise (if the spin is already locked) returns \p false.

                Debug version: deadlock can be detected
            */
            bool try_lock() CDS_NOEXCEPT
            {
                bool bCurrent = false;
                m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );

                // On success the expected value \p bCurrent stays \p false
                CDS_DEBUG_ONLY( if ( !bCurrent ) m_dbgOwnerId = OS::get_current_thread_id(); )
                return !bCurrent;
            }

            /// Try to lock the object, repeat \p nTryCount times if failed
            /**
                Returns \p true if locking succeeded;
                otherwise (if the spin is still locked after \p nTryCount attempts) returns \p false.
            */
            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
            {
                backoff_strategy backoff;
                while ( nTryCount-- ) {
                    if ( try_lock()) {
                        CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( &m_spin );
                        return true;
                    }
                    backoff();
                }
                return false;
            }
            /// Lock the spin-lock. Waits until the spin-lock is released. Debug version: deadlock may be detected
            void lock() CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
            {
                backoff_strategy backoff;

                // Deadlock detection: the owner thread cannot lock again
                assert( m_dbgOwnerId != OS::get_current_thread_id());

                // TATAS algorithm: spin on a plain load, attempt CAS only when the lock looks free
                while ( !try_lock()) {
                    while ( m_spin.load( atomics::memory_order_relaxed )) {
                        backoff();
                    }
                }
                assert( m_dbgOwnerId == OS::get_current_thread_id());
            }

            /// Unlock the spin-lock. Debug version: unlocking a free lock, or unlocking by a non-owner thread, is detected
            void unlock() CDS_NOEXCEPT
            {
                assert( m_spin.load( atomics::memory_order_relaxed ));

                assert( m_dbgOwnerId == OS::get_current_thread_id());
                CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )

                m_spin.store( false, atomics::memory_order_release );
                CDS_TSAN_ANNOTATE_MUTEX_RELEASED( &m_spin );
            }
        };
        /// Spin-lock implementation default for the current platform
        typedef spin_lock<backoff::LockDefault> spin;
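        /*
            Usage sketch (illustrative only; worker() and the shared counter are
            hypothetical application code, not part of the library):

                cds::sync::spin theLock;   // TATAS spin-lock with default back-off
                int nShared = 0;

                void worker()
                {
                    theLock.lock();        // spins, applying back-off, while the lock is busy
                    ++nShared;             // keep the critical section short
                    theLock.unlock();

                    if ( theLock.try_lock( 10 )) {  // bounded: give up after 10 attempts
                        ++nShared;
                        theLock.unlock();
                    }
                }

            Any strategy from <cds/algo/backoff_strategy.h> may be substituted for the
            default, e.g. cds::sync::spin_lock<cds::backoff::yield>.
        */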
        /// Recursive spin lock
        /**
            Allows recursive calls: the owner thread may recursively enter the critical section guarded by the spin-lock.

            Template parameters:
            - \p Integral - an integral atomic type: <tt>unsigned int</tt>, \p int, and others
            - \p Backoff - backoff strategy, used while the spin-lock is locked
        */
        template <typename Integral, class Backoff>
        class reentrant_spin_lock
        {
            typedef OS::ThreadId thread_id;     ///< The type of thread id

        public:
            typedef Integral integral_type;     ///< The integral type (the lock counter)
            typedef Backoff  backoff_strategy;  ///< The backoff type

        private:
            atomics::atomic<integral_type> m_spin;    ///< spin-lock atomic (lock count)
            thread_id                      m_OwnerId; ///< Owner thread id; if the spin-lock is not locked it usually equals \p OS::c_NullThreadId
            void take( thread_id tid ) CDS_NOEXCEPT
            {
                m_OwnerId = tid;
            }

            void free() CDS_NOEXCEPT
            {
                m_OwnerId = OS::c_NullThreadId;
            }

            bool is_taken( thread_id tid ) const CDS_NOEXCEPT
            {
                return m_OwnerId == tid;
            }

            bool try_taken_lock( thread_id tid ) CDS_NOEXCEPT
            {
                // Recursive acquire: the owner thread just increments the lock count
                if ( is_taken( tid )) {
                    m_spin.fetch_add( 1, atomics::memory_order_relaxed );
                    return true;
                }
                return false;
            }
            bool try_acquire() CDS_NOEXCEPT
            {
                integral_type nCurrent = 0;
                return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
            }

            bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
            {
                backoff_strategy bkoff;

                while ( nTryCount-- ) {
                    if ( try_acquire()) {
                        CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( &m_spin );
                        return true;
                    }
                    bkoff();
                }
                return false;
            }

            void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
            {
                // TATAS algorithm: spin on a plain load, attempt CAS only when the lock looks free
                backoff_strategy bkoff;
                while ( !try_acquire()) {
                    while ( m_spin.load( atomics::memory_order_relaxed ))
                        bkoff();
                }
            }

        public:
            /// Default constructor initializes spin to free (unlocked) state
            reentrant_spin_lock() CDS_NOEXCEPT
                : m_spin( 0 )
                , m_OwnerId( OS::c_NullThreadId )
            {}

            /// Dummy copy constructor
            /**
                In theory, a spin-lock cannot be copied; in practice, however, that restriction is inconvenient.
                Therefore, we provide a dummy copy constructor that performs no copying at all:
                it initializes the spin to the free (unlocked) state, like the default ctor.
            */
            reentrant_spin_lock( const reentrant_spin_lock<Integral, Backoff>& ) CDS_NOEXCEPT
                : m_spin( 0 )
                , m_OwnerId( OS::c_NullThreadId )
            {}

            /// Construct object in specified state
            explicit reentrant_spin_lock( bool bLocked )
                : m_spin( 0 )
                , m_OwnerId( OS::c_NullThreadId )
            {
                if ( bLocked )
                    lock();
            }
            /// Checks if the spin is locked
            /**
                The spin is locked if the lock count > 0 and the current thread is not the owner of the lock.
                Otherwise (i.e. the lock count == 0, or the current thread owns the spin) the spin is unlocked.
            */
            bool is_locked() const CDS_NOEXCEPT
            {
                return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id()));
            }
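            /*
                Note (illustrative sketch, not library code): because ownership is taken
                into account, is_locked() answers "is the lock held by somebody else?":

                    cds::sync::reentrant_spin rs;
                    rs.lock();
                    assert( !rs.is_locked());  // the calling thread owns rs, so rs is not "locked" for it
                    // from any other thread, rs.is_locked() returns true at this point
                    rs.unlock();
            */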
            /// Try to lock the object
            bool try_lock() CDS_NOEXCEPT
            {
                thread_id tid = OS::get_current_thread_id();
                if ( try_taken_lock( tid ))
                    return true;
                if ( try_acquire()) {
                    take( tid );
                    return true;
                }
                return false;
            }
            /// Try to lock the object, repeat \p nTryCount times if failed
            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( std::declval<reentrant_spin_lock>().try_acquire( nTryCount )))
            {
                thread_id tid = OS::get_current_thread_id();
                if ( try_taken_lock( tid ))
                    return true;
                if ( try_acquire( nTryCount )) {
                    take( tid );
                    return true;
                }
                return false;
            }
            /// Lock the object; waits if it is busy
            void lock() CDS_NOEXCEPT_( noexcept( std::declval<reentrant_spin_lock>().acquire()))
            {
                thread_id tid = OS::get_current_thread_id();
                if ( !try_taken_lock( tid )) {
                    acquire();
                    take( tid );
                }
            }
            /// Unlock the spin-lock. Returns \p true if the current thread owns the spin-lock, \p false otherwise
            bool unlock() CDS_NOEXCEPT
            {
                if ( is_taken( OS::get_current_thread_id())) {
                    integral_type n = m_spin.load( atomics::memory_order_relaxed );
                    if ( n > 1 )
                        m_spin.store( n - 1, atomics::memory_order_relaxed );
                    else {
                        free();
                        m_spin.store( 0, atomics::memory_order_release );
                        CDS_TSAN_ANNOTATE_MUTEX_RELEASED( &m_spin );
                    }
                    return true;
                }
                return false;
            }
            /// Change the owner of the locked spin-lock. May be called only by the thread that owns the spin-lock
            bool change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
            {
                if ( is_taken( OS::get_current_thread_id())) {
                    assert( newOwnerId != OS::c_NullThreadId );
                    m_OwnerId = newOwnerId;
                    return true;
                }
                return false;
            }
        };
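        /*
            Illustrative sketch (hypothetical scenario, not library code): change_owner()
            hands a held lock over to another thread, e.g. when a guarded task migrates.
            workerThreadId is assumed to be obtained by the application beforehand.

                cds::sync::reentrant_spin rs;

                // in the current owner thread:
                rs.lock();
                // ... prepare shared state for the worker ...
                rs.change_owner( workerThreadId );
                // from now on the worker thread owns rs and must be the one to call rs.unlock()
        */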
        /// Recursive 32-bit spin-lock
        typedef reentrant_spin_lock<uint32_t, backoff::LockDefault> reentrant_spin32;

        /// Default recursive spin-lock
        typedef reentrant_spin32 reentrant_spin;
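        /*
            Usage sketch (illustrative only; process() and helper() are hypothetical user functions):

                cds::sync::reentrant_spin rs;

                void helper()
                {
                    rs.lock();      // re-entering from the owner thread just increments the lock count
                    // ...
                    rs.unlock();    // decrements the count; the lock is still held by process()
                }

                void process()
                {
                    rs.lock();
                    helper();       // recursive locking is allowed
                    rs.unlock();    // the count drops to zero - the lock is released
                }
        */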
        /// Recursive 64-bit spin-lock
        typedef reentrant_spin_lock<uint64_t, backoff::LockDefault> reentrant_spin64;

    }   // namespace sync
}   // namespace cds

#endif  // #ifndef CDSLIB_SYNC_SPINLOCK_H