2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_SYNC_SPINLOCK_H
32 #define CDSLIB_SYNC_SPINLOCK_H
34 #include <cds/algo/atomic.h>
35 #include <cds/os/thread.h>
36 #include <cds/algo/backoff_strategy.h>
39 /// Synchronization primitives
    Simple and light-weight spin-lock critical section.
    It is useful to gain access to small (short-timed) code sections.

    Algorithm:

        TATAS (test-and-test-and-set)
        [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.

    No serialization is performed - any of the waiting threads may own the spin-lock.
    This spin-lock is NOT recursive: a thread owning the lock cannot call \p lock() again without deadlock.
    The method \p unlock() can be called by any thread.
    DEBUG version: the spin-lock stores the owner thread id. An assertion is raised when:
    - a double lock attempt is made by the same thread (deadlock)
    - unlock is performed by another thread

    If the spin-lock is locked, the \p Backoff algorithm is called. The predefined \p backoff::LockDefault class yields the current
    thread and repeats the lock attempt later.

    Template parameters:
    - \p Backoff - backoff strategy. Used when the spin-lock is locked.
65 template <typename Backoff >
69 typedef Backoff backoff_strategy; ///< back-off strategy type
71 atomics::atomic<bool> m_spin; ///< Spin
73 typename OS::ThreadId m_dbgOwnerId; ///< Owner thread id (only for debug mode)
77 /// Construct free (unlocked) spin-lock
80 : m_dbgOwnerId( OS::c_NullThreadId )
83 m_spin.store( false, atomics::memory_order_release );
86 /// Construct spin-lock in specified state
88 In debug mode: if \p bLocked = true then spin-lock is made owned by current thread
90 explicit spin_lock( bool bLocked ) noexcept
92 : m_dbgOwnerId( bLocked ? cds::OS::get_current_thread_id() : cds::OS::c_NullThreadId )
95 m_spin.store( bLocked, atomics::memory_order_release );
98 /// Dummy copy constructor
100 The ctor initializes the spin to free (unlocked) state like the default ctor.
102 spin_lock(const spin_lock<Backoff>& ) noexcept
105 , m_dbgOwnerId( cds::OS::c_NullThreadId )
108 CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
111 /// Destructor. On debug time it checks whether spin-lock is free
114 assert( !m_spin.load( atomics::memory_order_relaxed ));
115 CDS_TSAN_ANNOTATE_MUTEX_DESTROY( this );
118 /// Check if the spin is locked
119 bool is_locked() const noexcept
121 return m_spin.load( atomics::memory_order_relaxed );
124 /// Try to lock the object
126 Returns \p true if locking is succeeded
127 otherwise (if the spin is already locked) returns \p false
129 Debug version: deadlock can be detected
131 bool try_lock() noexcept
133 # ifdef CDS_THREAD_SANITIZER_ENABLED
134 bool bCurrent = m_spin.exchange( true, atomics::memory_order_acq_rel );
136 CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( this );
138 bool bCurrent = m_spin.exchange( true, atomics::memory_order_acquire );
143 m_dbgOwnerId = OS::get_current_thread_id();
149 /// Try to lock the object, repeat \p nTryCount times if failed
151 Returns \p true if locking is succeeded
152 otherwise (if the spin is already locked) returns \p false
154 bool try_lock( unsigned int nTryCount ) noexcept( noexcept( backoff_strategy()()))
156 backoff_strategy backoff;
157 while ( nTryCount-- ) {
165 /// Lock the spin-lock. Waits infinitely while spin-lock is locked. Debug version: deadlock may be detected
166 void lock() noexcept(noexcept( backoff_strategy()()))
168 backoff_strategy backoff;
171 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
172 assert( m_dbgOwnerId != OS::get_current_thread_id());
173 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
176 while ( !try_lock()) {
177 while ( m_spin.load( atomics::memory_order_acquire ))
181 assert( m_dbgOwnerId == OS::get_current_thread_id());
184 /// Unlock the spin-lock. Debug version: deadlock may be detected
185 void unlock() noexcept
187 assert( m_spin.load( atomics::memory_order_relaxed ));
188 assert( m_dbgOwnerId == OS::get_current_thread_id());
189 CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )
191 CDS_TSAN_ANNOTATE_MUTEX_RELEASED( this );
192 m_spin.store( false, atomics::memory_order_release );
196 /// Spin-lock implementation default for the current platform
197 typedef spin_lock<backoff::LockDefault > spin;
/// Recursive spin lock
/**
    Allows recursive calls: the owner thread may recursively enter the critical section guarded by the spin-lock.

    Template parameters:
    - \p Integral - one of the integral atomic types: <tt>unsigned int</tt>, \p int, and others
    - \p Backoff - backoff strategy. Used when the spin-lock is locked.
*/
207 template <typename Integral, class Backoff>
208 class reentrant_spin_lock
210 typedef OS::ThreadId thread_id; ///< The type of thread id
213 typedef Integral integral_type; ///< The integral type
214 typedef Backoff backoff_strategy; ///< The backoff type
218 atomics::atomic<integral_type> m_spin; ///< spin-lock atomic
219 thread_id m_OwnerId; ///< Owner thread id. If spin-lock is not locked it usually equals to \p OS::c_NullThreadId
224 void take( thread_id tid ) noexcept
231 m_OwnerId = OS::c_NullThreadId;
234 bool is_taken( thread_id tid ) const noexcept
236 return m_OwnerId == tid;
239 bool try_taken_lock( thread_id tid ) noexcept
241 if ( is_taken( tid )) {
242 m_spin.fetch_add( 1, atomics::memory_order_relaxed );
248 bool try_acquire() noexcept
250 integral_type nCurrent = 0;
251 bool bRet = m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_acquire );
253 # ifdef CDS_THREAD_SANITIZER_ENABLED
255 CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( this );
261 bool try_acquire( unsigned int nTryCount ) noexcept( noexcept( backoff_strategy()()))
263 backoff_strategy bkoff;
265 while ( nTryCount-- ) {
273 void acquire() noexcept( noexcept( backoff_strategy()()))
276 backoff_strategy bkoff;
277 while ( !try_acquire()) {
278 while ( m_spin.load( atomics::memory_order_acquire ))
285 /// Default constructor initializes spin to free (unlocked) state
286 reentrant_spin_lock() noexcept
288 , m_OwnerId( OS::c_NullThreadId )
290 CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
293 /// Dummy copy constructor
295 In theory, spin-lock cannot be copied. However, it is not practical.
296 Therefore, we provide dummy copy constructor that do no copy in fact. The ctor
297 initializes the spin to free (unlocked) state like default ctor.
299 reentrant_spin_lock( const reentrant_spin_lock<Integral, Backoff>& ) noexcept
301 , m_OwnerId( OS::c_NullThreadId )
303 CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
306 /// Construct object in specified state
307 explicit reentrant_spin_lock( bool bLocked )
309 , m_OwnerId( OS::c_NullThreadId )
311 CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
316 /// Dtor. Spin-lock must be unlocked
317 ~reentrant_spin_lock()
319 assert( m_spin.load( atomics::memory_order_acquire ) == 0 );
320 assert( m_OwnerId == OS::c_NullThreadId );
322 CDS_TSAN_ANNOTATE_MUTEX_DESTROY( this );
325 /// Checks if the spin is locked
327 The spin is locked if lock count > 0 and the current thread is not an owner of the lock.
328 Otherwise (i.e. lock count == 0 or the curren thread owns the spin) the spin is unlocked.
330 bool is_locked() const noexcept
332 return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id()));
335 /// Try to lock the spin-lock
336 bool try_lock() noexcept( noexcept( std::declval<reentrant_spin_lock>().try_acquire()))
338 thread_id tid = OS::get_current_thread_id();
339 if ( try_taken_lock( tid ))
341 if ( try_acquire()) {
348 /// Try to lock up to \p nTryCount attempts
349 bool try_lock( unsigned int nTryCount ) noexcept( noexcept( std::declval<reentrant_spin_lock>().try_acquire( nTryCount )))
351 thread_id tid = OS::get_current_thread_id();
352 if ( try_taken_lock( tid ))
354 if ( try_acquire( nTryCount )) {
361 /// Lock the object waits if it is busy
362 void lock() noexcept( noexcept( std::declval<reentrant_spin_lock>().acquire()))
364 thread_id tid = OS::get_current_thread_id();
365 if ( !try_taken_lock( tid )) {
371 /// Unlock the spin-lock
372 void unlock() noexcept
374 assert( is_taken( OS::get_current_thread_id()));
376 integral_type n = m_spin.load( atomics::memory_order_relaxed );
378 m_spin.store( n - 1, atomics::memory_order_relaxed );
381 CDS_TSAN_ANNOTATE_MUTEX_RELEASED( this );
382 m_spin.store( 0, atomics::memory_order_release );
386 /// Change the owner of locked spin-lock. May be called by thread that owns spin-lock
387 void change_owner( OS::ThreadId newOwnerId ) noexcept
389 assert( is_taken( OS::get_current_thread_id()));
390 assert( newOwnerId != OS::c_NullThreadId );
392 m_OwnerId = newOwnerId;
396 /// Recursive 32bit spin-lock
397 typedef reentrant_spin_lock<uint32_t, backoff::LockDefault> reentrant_spin32;
399 /// Default recursive spin-lock
400 typedef reentrant_spin32 reentrant_spin;
402 /// Recursive 64bit spin-lock
403 typedef reentrant_spin_lock<uint64_t, backoff::LockDefault> reentrant_spin64;
407 #endif // #ifndef CDSLIB_SYNC_SPINLOCK_H