#ifndef __CDS_LOCK_SPINLOCK_H
#define __CDS_LOCK_SPINLOCK_H

/*
    Defines spin-lock primitives.

    Editions:
        2012.01.23  1.1.0  khizmax  Refactoring: use C++11 atomics
        2010.01.22  0.6.0  khizmax  Refactoring: use cds::atomic namespace;
                                    explicit memory ordering specification (atomic::memory_order_xxx)
*/

#include <cassert>

#include <cds/cxx11_atomic.h>
#include <cds/os/thread.h>
#include <cds/algo/backoff_strategy.h>
#include <cds/lock/scoped_lock.h>

#include <cds/details/noncopyable.h>

namespace cds {
    /// Synchronization primitives
    namespace lock {

        /// Simple and light-weight spin-lock critical section
        /**
            Spin-lock is the simplest and lightest synchronization primitive.
            It is useful for guarding access to small (short-timed) code blocks.

            Algorithm:

                TATAS (test-and-test-and-set)
                [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.

            No serialization is performed: any of the waiting threads may acquire the spin-lock.
            This spin-lock is NOT recursive: the thread that owns the lock cannot call \p lock() again without deadlock.
            The \p unlock() method may be called by any thread.

            DEBUG version: the spin-lock stores the owner thread id. An assertion is raised when:
            - a double lock attempt is made by the same thread (deadlock)
            - the lock is unlocked by a thread that does not own it

            While the spin-lock is locked, the \p Backoff algorithm is invoked. The predefined backoff::LockDefault
            class yields the current thread and repeats the lock attempt later.

            Template parameters:
            - @p Backoff    back-off strategy, used while the spin-lock is locked

            A minimal usage sketch follows this comment block.
        */
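
        // Minimal usage sketch (illustrative only: the lock object, the counter and
        // the function below are assumed names, not part of this header):
        //
        //      cds::lock::Spin theLock;    // Spinlock< backoff::LockDefault >
        //      int theCounter = 0;
        //
        //      void increment()
        //      {
        //          theLock.lock();
        //          ++theCounter;           // short critical section
        //          theLock.unlock();
        //      }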

        template <typename Backoff >
        class Spinlock
        {
        public:
            typedef Backoff backoff_strategy;   ///< back-off strategy type

        private:
            atomics::atomic<bool>   m_spin;         ///< Spin
#   ifdef CDS_DEBUG
            typename OS::ThreadId   m_dbgOwnerId;   ///< Owner thread id (debug mode only)
#   endif

        public:
            /// Constructs a free (unlocked) spin-lock
            Spinlock() CDS_NOEXCEPT
#   ifdef CDS_DEBUG
                : m_dbgOwnerId( OS::c_NullThreadId )
#   endif
            {
                m_spin.store( false, atomics::memory_order_relaxed );
            }

            /// Constructs a spin-lock in the specified state
            /**
                In debug mode: if \p bLocked = true then the spin-lock is owned by the current thread
            */
            Spinlock( bool bLocked ) CDS_NOEXCEPT
#   ifdef CDS_DEBUG
                : m_dbgOwnerId( bLocked ? OS::getCurrentThreadId() : OS::c_NullThreadId )
#   endif
            {
                m_spin.store( bLocked, atomics::memory_order_relaxed );
            }

            /// Dummy copy constructor
            /**
                In theory, a spin-lock cannot be copied, but a strictly non-copyable spin-lock
                is not practical. Therefore, we provide a dummy copy constructor that in fact
                performs no copy: it initializes the spin-lock to the free (unlocked) state,
                like the default ctor.
            */
            Spinlock( const Spinlock<Backoff>& ) CDS_NOEXCEPT
                : m_spin( false )
#   ifdef CDS_DEBUG
                , m_dbgOwnerId( OS::c_NullThreadId )
#   endif
            {}

            /// Destructor. In debug mode it checks whether the spin-lock is free
            ~Spinlock()
            {
                assert( !m_spin.load( atomics::memory_order_relaxed ) );
            }

            /// Check if the spin is locked
            bool is_locked() const CDS_NOEXCEPT
            {
                return m_spin.load( atomics::memory_order_relaxed );
            }

            /// Try to lock the object
            /**
                Returns \p true if locking succeeded;
                otherwise (if the spin-lock is already locked) returns \p false.

                Debug version: deadlock can be detected
            */
            bool try_lock() CDS_NOEXCEPT
            {
                bool bCurrent = false;
                m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );

                CDS_DEBUG_ONLY(
                    if ( !bCurrent ) {
                        m_dbgOwnerId = OS::getCurrentThreadId();
                    }
                )
                return !bCurrent;
            }
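
            // Typical non-blocking usage sketch (\p sl and \p sharedData are assumed names):
            //
            //      if ( sl.try_lock() ) {
            //          // ... touch sharedData ...
            //          sl.unlock();
            //      }
            //      else {
            //          // the lock is busy - do something else
            //      }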

            /// Try to lock the object, repeat @p nTryCount times if failed
            /**
                Returns \p true if locking succeeded;
                otherwise (if the spin-lock is already locked) returns \p false.
            */
            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
            {
                backoff_strategy backoff;
                while ( nTryCount-- ) {
                    if ( try_lock() )
                        return true;
                    backoff();
                }
                return false;
            }

            /// Lock the spin-lock. Waits indefinitely while the spin-lock is locked. Debug version: deadlock may be detected
            void lock() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
            {
                backoff_strategy backoff;

                // Deadlock detected
                assert( m_dbgOwnerId != OS::getCurrentThreadId() );

                // TATAS algorithm: spin on a cheap relaxed load and attempt the CAS
                // only when the lock appears to be free
                while ( !try_lock() ) {
                    while ( m_spin.load( atomics::memory_order_relaxed ) ) {
                        backoff();
                    }
                }
                assert( m_dbgOwnerId == OS::getCurrentThreadId() );
            }

            /// Unlock the spin-lock. Debug version: unlocking by a thread that does not own the lock is detected
            void unlock() CDS_NOEXCEPT
            {
                assert( m_spin.load( atomics::memory_order_relaxed ) );

                assert( m_dbgOwnerId == OS::getCurrentThreadId() );
                CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )

                m_spin.store( false, atomics::memory_order_release );
            }
        };

        /// Default spin-lock implementation for the current platform
        typedef Spinlock< backoff::LockDefault >    Spin;

        /// Recursive spin-lock
        /**
            Allows recursive calls: the owner thread may re-enter the critical section guarded by the spin-lock.

            Template parameters:
            - @p Integral   an integral atomic type: <tt>unsigned int</tt>, <tt>int</tt>, and others
            - @p Backoff    back-off strategy, used while the spin-lock is locked

            A usage sketch follows this comment block.
        */
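
        // Recursive locking sketch (illustrative only; the lock object and the
        // function below are assumed names, not part of this header):
        //
        //      cds::lock::ReentrantSpin rsl;
        //
        //      void recurse( int n )
        //      {
        //          rsl.lock();             // the owner thread may lock repeatedly
        //          if ( n > 0 )
        //              recurse( n - 1 );
        //          rsl.unlock();           // every lock() needs a matching unlock()
        //      }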

        template <typename Integral, class Backoff>
        class ReentrantSpinT
        {
            typedef OS::ThreadId    thread_id;  ///< The type of thread id

        public:
            typedef Integral    integral_type;      ///< The integral type
            typedef Backoff     backoff_strategy;   ///< The back-off type

        private:
            atomics::atomic<integral_type>  m_spin;     ///< spin-lock atomic (lock/recursion count)
            thread_id                       m_OwnerId;  ///< Owner thread id. If the spin-lock is not locked it usually equals OS::c_NullThreadId

            void take( thread_id tid ) CDS_NOEXCEPT
            {
                m_OwnerId = tid;
            }

            void free() CDS_NOEXCEPT
            {
                m_OwnerId = OS::c_NullThreadId;
            }

            bool is_taken( thread_id tid ) const CDS_NOEXCEPT
            {
                return m_OwnerId == tid;
            }

            bool try_taken_lock( thread_id tid ) CDS_NOEXCEPT
            {
                if ( is_taken( tid )) {
                    m_spin.fetch_add( 1, atomics::memory_order_relaxed );
                    return true;
                }
                return false;
            }

            bool try_acquire() CDS_NOEXCEPT
            {
                integral_type nCurrent = 0;
                return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
            }

            bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
            {
                backoff_strategy bkoff;

                while ( nTryCount-- ) {
                    if ( try_acquire() )
                        return true;
                    bkoff();
                }
                return false;
            }

            void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
            {
                // TATAS algorithm
                backoff_strategy bkoff;
                while ( !try_acquire() ) {
                    while ( m_spin.load( atomics::memory_order_relaxed ) )
                        bkoff();
                }
            }

        public:
            /// Default constructor initializes the spin-lock to the free (unlocked) state
            ReentrantSpinT() CDS_NOEXCEPT
                : m_spin( 0 )
                , m_OwnerId( OS::c_NullThreadId )
            {}

            /// Dummy copy constructor
            /**
                In theory, a spin-lock cannot be copied, but a strictly non-copyable spin-lock
                is not practical. Therefore, we provide a dummy copy constructor that in fact
                performs no copy: it initializes the spin-lock to the free (unlocked) state,
                like the default ctor.
            */
            ReentrantSpinT( const ReentrantSpinT<Integral, Backoff>& ) CDS_NOEXCEPT
                : m_spin( 0 )
                , m_OwnerId( OS::c_NullThreadId )
            {}

            /// Constructs the object in the specified state
            ReentrantSpinT( bool bLocked ) CDS_NOEXCEPT
                : m_spin( 0 )
                , m_OwnerId( OS::c_NullThreadId )
            {
                if ( bLocked )
                    lock();
            }

            /// Checks if the spin-lock is locked
            /**
                The spin-lock is locked if the lock count > 0 and the current thread is not the owner of the lock.
                Otherwise (i.e. the lock count == 0, or the current thread owns the spin-lock) it is unlocked.
            */
            bool is_locked() const CDS_NOEXCEPT
            {
                return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::getCurrentThreadId() ));
            }
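
            // Note the owner-relative semantics (sketch; \p rsl is an assumed lock object):
            //
            //      rsl.lock();
            //      assert( !rsl.is_locked() );     // false for the owner thread itself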

            /// Try to lock the spin-lock
            bool try_lock() CDS_NOEXCEPT
            {
                thread_id tid = OS::getCurrentThreadId();
                if ( try_taken_lock( tid ) )
                    return true;
                if ( try_acquire()) {
                    take( tid );
                    return true;
                }
                return false;
            }

            /// Try to lock the object, repeat @p nTryCount times if failed
            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( try_acquire( nTryCount ) ) )
            {
                thread_id tid = OS::getCurrentThreadId();
                if ( try_taken_lock( tid ) )
                    return true;
                if ( try_acquire( nTryCount )) {
                    take( tid );
                    return true;
                }
                return false;
            }

            /// Lock the object; waits if it is busy
            void lock() CDS_NOEXCEPT_( noexcept( acquire() ))
            {
                thread_id tid = OS::getCurrentThreadId();
                if ( !try_taken_lock( tid ) ) {
                    acquire();
                    take( tid );
                }
            }

            /// Unlock the spin-lock. Returns @p true if the current thread owns the spin-lock, @p false otherwise
            bool unlock() CDS_NOEXCEPT
            {
                if ( is_taken( OS::getCurrentThreadId() ) ) {
                    integral_type n = m_spin.load( atomics::memory_order_relaxed );
                    if ( n > 1 )
                        m_spin.store( n - 1, atomics::memory_order_relaxed );
                    else {
                        free();
                        m_spin.store( 0, atomics::memory_order_release );
                    }
                    return true;
                }
                return false;
            }

            /// Change the owner of the locked spin-lock. May be called only by the thread that owns the spin-lock
            bool change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
            {
                if ( is_taken( OS::getCurrentThreadId() ) ) {
                    assert( newOwnerId != OS::c_NullThreadId );
                    m_OwnerId = newOwnerId;
                    return true;
                }
                return false;
            }
        };
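
        // change_owner() hands a held lock over to another thread, e.g. when a work
        // item migrates between worker threads. Sketch (the lock object and the
        // thread-id variable are assumed names, not part of this header):
        //
        //      cds::lock::ReentrantSpin rsl;
        //      // in the current owner thread:
        //      rsl.lock();
        //      rsl.change_owner( idWorkerThread );
        //      // from now on only idWorkerThread may successfully call rsl.unlock()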

        /// Recursive spin-lock based on the uint32_t type
        typedef ReentrantSpinT<uint32_t, backoff::LockDefault>  ReentrantSpin32;

        /// Recursive spin-lock based on the uint64_t type
        typedef ReentrantSpinT<uint64_t, backoff::LockDefault>  ReentrantSpin64;

        /// Recursive 32bit spin-lock (synonym for ReentrantSpin32)
        typedef ReentrantSpin32 ReentrantSpin;

        /// The best (for the current platform) auto spin-lock
        typedef scoped_lock<Spin>   AutoSpin;
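
        // AutoSpin is a scoped (RAII) guard: it locks in its constructor and unlocks
        // in its destructor. A minimal sketch (names are illustrative only):
        //
        //      cds::lock::Spin theLock;
        //
        //      void f()
        //      {
        //          cds::lock::AutoSpin guard( theLock );
        //          // ... critical section; unlocked automatically when guard is destroyed
        //      }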

    }   // namespace lock

    /// Standard (best for the current platform) spin-lock implementation
    typedef lock::Spin              SpinLock;

    /// Standard (best for the current platform) recursive spin-lock implementation
    typedef lock::ReentrantSpin     RecursiveSpinLock;

    /// 32bit recursive spin-lock shortcut
    typedef lock::ReentrantSpin32   RecursiveSpinLock32;

    /// 64bit recursive spin-lock shortcut
    typedef lock::ReentrantSpin64   RecursiveSpinLock64;

    /// Auto spin-lock shortcut
    typedef lock::AutoSpin          AutoSpinLock;

}   // namespace cds

#endif  // #ifndef __CDS_LOCK_SPINLOCK_H