//$$CDS-header$$-2

#ifndef __CDS_LOCK_SPINLOCK_H
#define __CDS_LOCK_SPINLOCK_H

/*
    Defines spin-lock primitives
    Editions:
        2012.01.23  1.1.0 khizmax     Refactoring: use C++11 atomics
        2010.01.22  0.6.0 khizmax     Refactoring: use cds::atomic namespace
                                      Explicit memory ordering specification (atomic::memory_order_xxx)
        2006              khizmax     Created
*/

#include <cds/cxx11_atomic.h>
#include <cds/os/thread.h>
#include <cds/algo/backoff_strategy.h>
#include <cds/lock/scoped_lock.h>

#include <cds/details/noncopyable.h>

namespace cds {
    /// Synchronization primitives
    namespace lock {
        /// Spin lock.
        /**
            Simple and lightweight spin-lock critical section.
            It is useful for guarding small (short-lived) sections of code.

            Algorithm:

                TATAS (test-and-test-and-set)
                [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.

            No fairness is guaranteed: any of the waiting threads may acquire the spin-lock.
            This spin-lock is NOT recursive: the thread that owns the lock cannot call lock() again without deadlock.
            The unlock() method may be called by any thread.

            DEBUG version: the spin-lock stores the owner thread id. An assertion is raised when:
                - a double lock attempt is made by the same thread (deadlock)
                - unlock is called by a thread that does not own the lock

            If the spin-lock is locked, the back-off algorithm is invoked. The predefined backoff::LockDefault class
            yields the current thread and repeats the lock attempt later.

            Template parameters:
                - @p Backoff    back-off strategy. Used when the spin-lock is locked
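
            Usage sketch (illustrative; \p theLock and the critical sections are placeholders,
            \p cds::lock::Spin is the typedef defined below in this header):
            \code
            cds::lock::Spin theLock;

            theLock.lock();             // spins until the lock is acquired
            // ... short critical section ...
            theLock.unlock();

            if ( theLock.try_lock() ) { // single non-blocking attempt
                // ... short critical section ...
                theLock.unlock();
            }
            \endcode

            A custom back-off strategy sketch. The only interface this class requires of
            \p Backoff is a default constructor and a nullary \p operator(); the functor
            below is a hypothetical example, not a class provided by the library:
            \code
            struct busy_wait_backoff {
                void operator()() CDS_NOEXCEPT
                {}  // pure busy-wait: no pause between lock attempts
            };
            typedef cds::lock::Spinlock< busy_wait_backoff >    busy_wait_spinlock;
            \endcode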
        */
        template <typename Backoff >
        class Spinlock
        {
        public:
            typedef Backoff     backoff_strategy    ;       ///< back-off strategy type
        private:
            atomics::atomic<bool>    m_spin  ;       ///< Spin
#    ifdef CDS_DEBUG
            typename OS::ThreadId    m_dbgOwnerId        ;       ///< Owner thread id (debug mode only)
#    endif

        public:
            /// Construct a free (unlocked) spin-lock
            Spinlock() CDS_NOEXCEPT
#    ifdef CDS_DEBUG
                : m_dbgOwnerId( OS::c_NullThreadId )
#    endif
            {
                m_spin.store( false, atomics::memory_order_relaxed );
            }

            /// Construct a spin-lock in the specified state
            /**
                In debug mode: if \p bLocked = true then the spin-lock is made owned by the current thread
            */
            Spinlock( bool bLocked ) CDS_NOEXCEPT
#    ifdef CDS_DEBUG
                : m_dbgOwnerId( bLocked ? OS::getCurrentThreadId() : OS::c_NullThreadId )
#    endif
            {
                m_spin.store( bLocked, atomics::memory_order_relaxed );
            }

            /// Dummy copy constructor
            /**
                In theory, a spin-lock cannot be copied; in practice, however, strict non-copyability
                is not always convenient. Therefore, we provide a dummy copy constructor that performs
                no copying at all: it initializes the spin to the free (unlocked) state, like the default ctor.
            */
            Spinlock( const Spinlock<Backoff>& ) CDS_NOEXCEPT
                : m_spin( false )
#   ifdef CDS_DEBUG
                , m_dbgOwnerId( OS::c_NullThreadId )
#   endif
            {}

            /// Destructor. In debug mode it asserts that the spin-lock is free
            ~Spinlock()
            {
                assert( !m_spin.load( atomics::memory_order_relaxed ) );
            }

            /// Check whether the spin-lock is locked
            bool is_locked() const CDS_NOEXCEPT
            {
                return m_spin.load( atomics::memory_order_relaxed );
            }

            /// Try to lock the object
            /**
                Returns \p true if locking succeeded;
                otherwise (if the spin-lock is already locked) returns \p false.

                Debug version: a deadlock can be detected
            */
            bool try_lock() CDS_NOEXCEPT
            {
                // One CAS attempt; on failure, bCurrent receives the current value (true)
                bool bCurrent = false;
                m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );

                CDS_DEBUG_ONLY(
                    if ( !bCurrent ) {
                        m_dbgOwnerId = OS::getCurrentThreadId();
                    }
                )
                return !bCurrent;
            }

            /// Try to lock the object; repeat the attempt @p nTryCount times if failed
            /**
                Returns \p true if locking succeeded;
                otherwise (if the spin-lock is still locked after \p nTryCount attempts) returns \p false.
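
                A bounded-retry sketch (\p theLock and the retry count 50 are illustrative):
                \code
                cds::lock::Spin theLock;
                if ( theLock.try_lock( 50 )) {  // up to 50 attempts with back-off between them
                    // ... short critical section ...
                    theLock.unlock();
                }
                \endcode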
            */
            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
            {
                backoff_strategy backoff;
                while ( nTryCount-- ) {
                    if ( try_lock() )
                        return true;
                    backoff();
                }
                return false;
            }

            /// Lock the spin-lock. Spins until the lock is acquired. Debug version: a deadlock may be detected
            void lock() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
            {
                backoff_strategy backoff;

                // Deadlock detected
                assert( m_dbgOwnerId != OS::getCurrentThreadId() );

                // TATAS algorithm
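                // The inner loop waits on a cheap relaxed load, so waiting threads spin on
                // their locally cached copy of m_spin instead of generating CAS (and hence
                // cache-coherence) traffic; try_lock() issues a CAS only when the lock is
                // observed free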
                while ( !try_lock() ) {
                    while ( m_spin.load( atomics::memory_order_relaxed ) ) {
                        backoff();
                    }
                }
                assert( m_dbgOwnerId == OS::getCurrentThreadId() );
            }

            /// Unlock the spin-lock. Debug version: unlocking by a non-owner thread is detected
            void unlock() CDS_NOEXCEPT
            {
                assert( m_spin.load( atomics::memory_order_relaxed ) );

                assert( m_dbgOwnerId == OS::getCurrentThreadId() );
                CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )

                m_spin.store( false, atomics::memory_order_release );
            }
        };

        /// Default spin-lock implementation for the current platform
        typedef Spinlock<backoff::LockDefault >                 Spin;

        /// Recursive spin lock.
        /**
            Allows recursive calls: the owner thread may re-enter the critical section guarded by the spin-lock.

            Template parameters:
                - @p Integral       an integral type suitable for atomic operations: <tt>unsigned int</tt>, <tt>int</tt>, and others
                - @p Backoff        back-off strategy. Used when the spin-lock is locked
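
            Usage sketch (the lock name is illustrative; \p ReentrantSpin below is a
            ready-to-use instantiation of this class):
            \code
            cds::lock::ReentrantSpin theLock;

            theLock.lock();     // lock count == 1
            theLock.lock();     // the owner may re-enter: lock count == 2
            // ...
            theLock.unlock();   // lock count == 1, the lock is still held
            theLock.unlock();   // lock count == 0, the lock is released
            \endcode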
        */
        template <typename Integral, class Backoff>
        class ReentrantSpinT
        {
            typedef OS::ThreadId    thread_id    ;        ///< The type of thread id

        public:
            typedef Integral        integral_type       ; ///< The integral type
            typedef Backoff         backoff_strategy    ; ///< The back-off strategy type

        private:
            atomics::atomic<integral_type>   m_spin      ; ///< spin-lock atomic (the recursive lock count)
            thread_id                        m_OwnerId   ; ///< Owner thread id. If the spin-lock is not locked, it usually equals OS::c_NullThreadId

        private:
            //@cond
            void take( thread_id tid ) CDS_NOEXCEPT
            {
                m_OwnerId = tid;
            }

            void free() CDS_NOEXCEPT
            {
                m_OwnerId = OS::c_NullThreadId;
            }

            bool is_taken( thread_id tid ) const CDS_NOEXCEPT
            {
                return m_OwnerId == tid;
            }

            bool try_taken_lock( thread_id tid ) CDS_NOEXCEPT
            {
                // Recursive acquisition: the owner thread just increments the lock count
                if ( is_taken( tid )) {
                    m_spin.fetch_add( 1, atomics::memory_order_relaxed );
                    return true;
                }
                return false;
            }

            bool try_acquire() CDS_NOEXCEPT
            {
                // Weak CAS may fail spuriously; that is acceptable here since the callers
                // either retry or are allowed to report failure
                integral_type nCurrent = 0;
                return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
            }

            bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
            {
                backoff_strategy bkoff;

                while ( nTryCount-- ) {
                    if ( try_acquire() )
                        return true;
                    bkoff();
                }
                return false;
            }

            void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
            {
                // TATAS algorithm
                backoff_strategy bkoff;
                while ( !try_acquire() ) {
                    while ( m_spin.load( atomics::memory_order_relaxed ) )
                        bkoff();
                }
            }
            //@endcond

        public:
            /// Default constructor initializes the spin-lock to the free (unlocked) state
            ReentrantSpinT() CDS_NOEXCEPT
                : m_spin(0)
                , m_OwnerId( OS::c_NullThreadId )
            {}

            /// Dummy copy constructor
            /**
                In theory, a spin-lock cannot be copied; in practice, however, strict non-copyability
                is not always convenient. Therefore, we provide a dummy copy constructor that performs
                no copying at all: it initializes the spin to the free (unlocked) state, like the default ctor.
            */
            ReentrantSpinT( const ReentrantSpinT<Integral, Backoff>& ) CDS_NOEXCEPT
                : m_spin(0)
                , m_OwnerId( OS::c_NullThreadId )
            {}

            /// Construct the object in the specified state
            ReentrantSpinT( bool bLocked ) CDS_NOEXCEPT
                : m_spin(0)
                , m_OwnerId( OS::c_NullThreadId )
            {
                if ( bLocked )
                    lock();
            }

            /// Checks whether the spin-lock is locked
            /**
                The spin-lock is locked if the lock count > 0 and the current thread is not the owner of the lock.
                Otherwise (i.e. the lock count == 0, or the current thread owns the spin-lock) it is considered unlocked.
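
                Illustration of this semantics (the lock name is a placeholder):
                \code
                cds::lock::ReentrantSpin s;
                s.lock();
                assert( !s.is_locked() );   // the calling thread owns s, so s is not "locked" for it
                // from any other thread, s.is_locked() returns true at this point
                s.unlock();
                \endcode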
            */
            bool is_locked() const CDS_NOEXCEPT
            {
                return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::getCurrentThreadId() ));
            }

            /// Try to lock the spin-lock
            bool try_lock() CDS_NOEXCEPT
            {
                thread_id tid = OS::getCurrentThreadId();
                if ( try_taken_lock( tid ) )
                    return true;
                if ( try_acquire()) {
                    take( tid );
                    return true;
                }
                return false;
            }

            /// Try to lock the spin-lock; repeat the attempt @p nTryCount times if failed
            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( try_acquire( nTryCount ) ) )
            {
                thread_id tid = OS::getCurrentThreadId();
                if ( try_taken_lock( tid ) )
                    return true;
                if ( try_acquire( nTryCount )) {
                    take( tid );
                    return true;
                }
                return false;
            }

            /// Lock the spin-lock; waits while it is busy
            void lock() CDS_NOEXCEPT
            {
                thread_id tid = OS::getCurrentThreadId();
                if ( !try_taken_lock( tid ) ) {
                    acquire();      // waits until the lock is released, then takes it
                    take( tid );
                }
            }

            /// Unlock the spin-lock. Returns @p true if the current thread owns the spin-lock, @p false otherwise
            bool unlock() CDS_NOEXCEPT
            {
                if ( is_taken( OS::getCurrentThreadId() ) ) {
                    integral_type n = m_spin.load( atomics::memory_order_relaxed );
                    if ( n > 1 )
                        m_spin.store( n - 1, atomics::memory_order_relaxed );   // recursive unlock: just decrement the count
                    else {
                        free();
                        m_spin.store( 0, atomics::memory_order_release );       // last unlock: release the lock
                    }
                    return true;
                }
                return false;
            }

            /// Change the owner of the locked spin-lock. May be called only by the thread that owns the spin-lock
            bool change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
            {
                if ( is_taken( OS::getCurrentThreadId() ) ) {
                    assert( newOwnerId != OS::c_NullThreadId );
                    m_OwnerId = newOwnerId;
                    return true;
                }
                return false;
            }
        };

        /// Recursive spin-lock based on the uint32_t type
        typedef ReentrantSpinT<uint32_t, backoff::LockDefault>   ReentrantSpin32;

        /// Recursive spin-lock based on the uint64_t type
        typedef ReentrantSpinT<uint64_t, backoff::LockDefault>   ReentrantSpin64;

        /// Default recursive spin-lock (synonym for ReentrantSpin32)
        typedef ReentrantSpin32                                     ReentrantSpin;

        /// Auto spin-lock: scoped lock based on the default spin-lock for the current platform
        typedef scoped_lock<Spin>   AutoSpin;
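
        /*
            A scoped-lock usage sketch. Illustrative assumption: scoped_lock (declared in
            <cds/lock/scoped_lock.h>) is the usual RAII guard that locks the mutex in its
            constructor and unlocks it in its destructor.

            \code
            cds::lock::Spin theLock;
            {
                cds::lock::AutoSpin aGuard( theLock ); // acquires theLock
                // ... short critical section ...
            }   // aGuard is destroyed here and releases theLock
            \endcode
        */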

    }    // namespace lock

    /// Standard (best for the current platform) spin-lock implementation
    typedef lock::Spin              SpinLock;

    /// Standard (best for the current platform) recursive spin-lock implementation
    typedef lock::ReentrantSpin     RecursiveSpinLock;

    /// 32bit recursive spin-lock shortcut
    typedef lock::ReentrantSpin32   RecursiveSpinLock32;

    /// 64bit recursive spin-lock shortcut
    typedef lock::ReentrantSpin64   RecursiveSpinLock64;

    /// Auto spin-lock shortcut
    typedef lock::AutoSpin          AutoSpinLock;

} // namespace cds

#endif  // #ifndef __CDS_LOCK_SPINLOCK_H