Uses different pass count for different parallel queue test cases
diff --git a/cds/sync/spinlock.h b/cds/sync/spinlock.h
index 753fe07e7ae1d98995aa4025889bdaa1fb560ef8..f3a915840bcfc3dfe8b5940ce72aa38a82e346be 100644
@@ -1,4 +1,32 @@
-//$$CDS-header$$-2
+/*
+    This file is a part of libcds - Concurrent Data Structures library
+
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
+
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 
 #ifndef CDSLIB_SYNC_SPINLOCK_H
 #define CDSLIB_SYNC_SPINLOCK_H
@@ -21,18 +49,18 @@ namespace cds {
                 [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.
 
             No serialization performed - any of the waiting threads may own the spin-lock.
-            This spin-lock is NOT recursive: the thread owned the lock cannot call lock() method withod deadlock.
-            The method unlock() can call any thread
+            This spin-lock is NOT recursive: the thread that owns the lock cannot call \p lock() again without deadlock.
+            The method \p unlock() can be called by any thread
 
             DEBUG version: The spin-lock stores the owner thread id. An assertion is raised when:
                 - a double lock attempt is made by the same thread (deadlock)
                 - unlock is called by another thread
 
-            If spin-lock is locked the Backoff algorithm is called. Predefined backoff::LockDefault class yields current
+            If the spin-lock is locked, the \p Backoff algorithm is called. The predefined \p backoff::LockDefault class yields the current
             thread and repeats the lock attempt later
 
             Template parameters:
-                - @p Backoff    backoff strategy. Used when spin lock is locked
+                - \p Backoff - backoff strategy. Used when spin lock is locked
         */
         template <typename Backoff >
         class spin_lock
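
A minimal usage sketch of the class documented above, assuming the `cds::sync::spin` typedef (a `spin_lock` over `backoff::LockDefault`) that this header is expected to provide; `bump()` and `sharedValue` are illustrative:

```cpp
#include <cds/sync/spinlock.h>
#include <mutex>    // std::lock_guard

cds::sync::spin theLock;    // assumed typedef: spin_lock<backoff::LockDefault>
int sharedValue = 0;

void bump()
{
    // spin_lock models the Lockable concept (lock/unlock/try_lock),
    // so the standard guard works. The lock is NOT recursive:
    // calling bump() again from inside the guard would deadlock.
    std::lock_guard<cds::sync::spin> guard( theLock );
    ++sharedValue;
}
```
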
@@ -49,41 +77,42 @@ namespace cds {
             /// Construct free (unlocked) spin-lock
             spin_lock() CDS_NOEXCEPT
 #    ifdef CDS_DEBUG
-                :m_dbgOwnerId( OS::c_NullThreadId )
+                : m_dbgOwnerId( OS::c_NullThreadId )
 #    endif
             {
-                m_spin.store( false, atomics::memory_order_relaxed );
+                m_spin.store( false, atomics::memory_order_release );
             }
 
             /// Construct spin-lock in specified state
             /**
                In debug mode: if \p bLocked = true then the spin-lock is owned by the current thread
             */
-            spin_lock( bool bLocked ) CDS_NOEXCEPT
+            explicit spin_lock( bool bLocked ) CDS_NOEXCEPT
 #    ifdef CDS_DEBUG
                 : m_dbgOwnerId( bLocked ? cds::OS::get_current_thread_id() : cds::OS::c_NullThreadId )
 #    endif
             {
-                m_spin.store( bLocked, atomics::memory_order_relaxed );
+                m_spin.store( bLocked, atomics::memory_order_release );
             }
 
             /// Dummy copy constructor
             /**
-                In theory, spin-lock cannot be copied. However, it is not practical.
-                Therefore, we provide dummy copy constructor that do no copy in fact. The ctor
-                initializes the spin to free (unlocked) state like default ctor.
+                The ctor initializes the spin-lock to the free (unlocked) state, like the default ctor.
             */
             spin_lock(const spin_lock<Backoff>& ) CDS_NOEXCEPT
                 : m_spin( false )
 #   ifdef CDS_DEBUG
                 , m_dbgOwnerId( cds::OS::c_NullThreadId )
 #   endif
-            {}
+            {
+                CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
+            }
 
             /// Destructor. In debug mode it checks that the spin-lock is free
             ~spin_lock()
             {
-                assert( !m_spin.load( atomics::memory_order_relaxed ) );
+                assert( !m_spin.load( atomics::memory_order_relaxed ));
+                CDS_TSAN_ANNOTATE_MUTEX_DESTROY( this );
             }
 
             /// Check if the spin is locked
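
The dummy copy constructor documented above keeps types that embed a spin-lock copyable; the copy always starts in the free state. A short illustration (the `Counter` type is hypothetical, and the `is_locked()` accessor is assumed from the comment above):

```cpp
#include <cds/sync/spinlock.h>
#include <cassert>

struct Counter {
    cds::sync::spin lock;   // hypothetical type embedding a spin-lock
    int value = 0;
};

void copy_is_unlocked()
{
    Counter a;
    a.lock.lock();
    Counter b = a;          // dummy copy ctor: b.lock starts free
    assert( !b.lock.is_locked());
    a.lock.unlock();
}
```
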
@@ -101,8 +130,13 @@ namespace cds {
             */
             bool try_lock() CDS_NOEXCEPT
             {
-                bool bCurrent = false;
-                m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );
+#           ifdef CDS_THREAD_SANITIZER_ENABLED
+                bool bCurrent = m_spin.exchange( true, atomics::memory_order_acq_rel );
+                if ( !bCurrent )
+                    CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( this );
+#           else
+                bool bCurrent = m_spin.exchange( true, atomics::memory_order_acquire );
+#           endif
 
                 CDS_DEBUG_ONLY(
                     if ( !bCurrent ) {
@@ -112,16 +146,16 @@ namespace cds {
                 return !bCurrent;
             }
 
-            /// Try to lock the object, repeat @p nTryCount times if failed
+            /// Try to lock the object, repeat \p nTryCount times if failed
             /**
                Returns \p true if locking succeeded;
                otherwise (if the spin is already locked) returns \p false
             */
-            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
+            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
             {
                 backoff_strategy backoff;
                 while ( nTryCount-- ) {
-                    if ( try_lock() )
+                    if ( try_lock())
                         return true;
                     backoff();
                 }
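
A bounded-attempt sketch for the `try_lock( nTryCount )` overload above: retry a fixed number of times, with the Backoff strategy running between attempts, and give up if the lock stays busy (the helper function is illustrative):

```cpp
#include <cds/sync/spinlock.h>

// Illustrative helper: do the work only if the lock can be taken
// within a bounded number of attempts.
bool with_lock_or_skip( cds::sync::spin& s )
{
    if ( !s.try_lock( 100 ))   // up to 100 attempts, Backoff runs in between
        return false;          // still busy: caller can do something else
    // ... critical section ...
    s.unlock();
    return true;
}
```
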
@@ -129,30 +163,32 @@ namespace cds {
             }
 
             /// Lock the spin-lock. Waits indefinitely while the spin-lock is locked. Debug version: deadlock may be detected
-            void lock() CDS_NOEXCEPT_(noexcept( backoff_strategy()() ))
+            void lock() CDS_NOEXCEPT_(noexcept( backoff_strategy()()))
             {
                 backoff_strategy backoff;
 
                 // Deadlock detected
-                assert( m_dbgOwnerId != OS::get_current_thread_id() );
+                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
+                assert( m_dbgOwnerId != OS::get_current_thread_id());
+                CDS_TSAN_ANNOTATE_IGNORE_READS_END;
 
                 // TATAS algorithm
-                while ( !try_lock() ) {
-                    while ( m_spin.load( atomics::memory_order_relaxed ) ) {
+                while ( !try_lock()) {
+                    while ( m_spin.load( atomics::memory_order_acquire ))
                         backoff();
-                    }
                 }
-                assert( m_dbgOwnerId == OS::get_current_thread_id() );
+
+                assert( m_dbgOwnerId == OS::get_current_thread_id());
             }
 
             /// Unlock the spin-lock. Debug version: deadlock may be detected
             void unlock() CDS_NOEXCEPT
             {
-                assert( m_spin.load( atomics::memory_order_relaxed ) );
-
-                assert( m_dbgOwnerId == OS::get_current_thread_id() );
+                assert( m_spin.load( atomics::memory_order_relaxed ));
+                assert( m_dbgOwnerId == OS::get_current_thread_id());
                 CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )
 
+                CDS_TSAN_ANNOTATE_MUTEX_RELEASED( this );
                 m_spin.store( false, atomics::memory_order_release );
             }
         };
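
`lock()` above is the classic TATAS (test-and-test-and-set) loop: one atomic exchange to take the lock, then a read-only spin until it looks free. A self-contained sketch of the same pattern on a plain `std::atomic<bool>`, using the memory orders this patch settles on:

```cpp
#include <atomic>
#include <thread>

std::atomic<bool> spin{ false };

void tatas_lock()
{
    // test-and-set: exchange returns the previous value,
    // so false means the lock was free and is now ours
    while ( spin.exchange( true, std::memory_order_acquire )) {
        // read-only "test" loop: cheaper than hammering exchange,
        // since a plain load does not keep invalidating the cache line
        while ( spin.load( std::memory_order_acquire ))
            std::this_thread::yield();  // stand-in for the Backoff strategy
    }
}

void tatas_unlock()
{
    spin.store( false, std::memory_order_release );
}
```
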
@@ -165,21 +201,23 @@ namespace cds {
             Allows recursive calls: the owner thread may recursively enter the critical section guarded by the spin-lock.
 
             Template parameters:
-                - @p Integral       one of integral atomic type: <tt>unsigned int</tt>, <tt>int</tt>, and others
-                - @p Backoff        backoff strategy. Used when spin lock is locked
+                - \p Integral       one of the integral atomic types: <tt>unsigned int</tt>, \p int, and others
+                - \p Backoff        backoff strategy. Used when spin lock is locked
         */
         template <typename Integral, class Backoff>
         class reentrant_spin_lock
         {
-            typedef OS::ThreadId    thread_id    ;        ///< The type of thread id
+            typedef OS::ThreadId    thread_id;          ///< The type of thread id
 
         public:
-            typedef Integral        integral_type       ; ///< The integral type
-            typedef Backoff         backoff_strategy    ; ///< The backoff type
+            typedef Integral        integral_type;      ///< The integral type
+            typedef Backoff         backoff_strategy;   ///< The backoff type
 
         private:
-            atomics::atomic<integral_type>   m_spin      ; ///< spin-lock atomic
-            thread_id                        m_OwnerId   ; ///< Owner thread id. If spin-lock is not locked it usually equals to OS::c_NullThreadId
+            //@cond
+            atomics::atomic<integral_type>  m_spin;    ///< spin-lock atomic
+            thread_id                       m_OwnerId; ///< Owner thread id. If the spin-lock is not locked it usually equals \p OS::c_NullThreadId
+            //@endcond
 
         private:
             //@cond
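
The reentrant lock documented above pairs a recursion counter (`m_spin`) with the owner's thread id (`m_OwnerId`): repeated `lock()` calls from the owner only bump the counter. A usage sketch, assuming the `cds::sync::reentrant_spin` typedef over `unsigned int` that this header is expected to provide:

```cpp
#include <cds/sync/spinlock.h>

void inner( cds::sync::reentrant_spin& rs )
{
    rs.lock();      // owner re-enters: count 1 -> 2, no deadlock
    // ... protected work ...
    rs.unlock();    // count 2 -> 1, still owned
}

void outer( cds::sync::reentrant_spin& rs )
{
    rs.lock();      // count 0 -> 1, owner = this thread
    inner( rs );    // recursive entry is allowed for the owner
    rs.unlock();    // count 1 -> 0, lock released
}
```
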
@@ -210,27 +248,34 @@ namespace cds {
             bool try_acquire() CDS_NOEXCEPT
             {
                 integral_type nCurrent = 0;
-                return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
+                bool bRet = m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_acquire );
+
+#           ifdef CDS_THREAD_SANITIZER_ENABLED
+                if ( bRet )
+                    CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( this );
+#           endif
+
+                return bRet;
             }
 
-            bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
+            bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
             {
                 backoff_strategy bkoff;
 
                 while ( nTryCount-- ) {
-                    if ( try_acquire() )
+                    if ( try_acquire())
                         return true;
                     bkoff();
                 }
                 return false;
             }
 
-            void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
+            void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
             {
                 // TATAS algorithm
                 backoff_strategy bkoff;
-                while ( !try_acquire() ) {
-                    while ( m_spin.load( atomics::memory_order_relaxed ) )
+                while ( !try_acquire()) {
+                    while ( m_spin.load( atomics::memory_order_acquire ))
                         bkoff();
                 }
             }
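
`try_acquire()` above takes the lock only when the counter is exactly 0 (free); the weak CAS may fail spuriously, which is harmless because every caller retries. A standalone sketch of that counting acquire on `std::atomic<unsigned int>`:

```cpp
#include <atomic>

std::atomic<unsigned int> counter{ 0 };

bool try_acquire_sketch()
{
    unsigned int expected = 0;               // only a free lock (0) can be taken
    // weak CAS: may fail spuriously even when counter == 0,
    // so callers are expected to loop (as acquire() above does)
    return counter.compare_exchange_weak(
        expected, 1,
        std::memory_order_acquire,           // on success we own the lock
        std::memory_order_acquire );         // failure order, as in the patch
}
```
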
@@ -241,7 +286,9 @@ namespace cds {
             reentrant_spin_lock() CDS_NOEXCEPT
                 : m_spin(0)
                 , m_OwnerId( OS::c_NullThreadId )
-            {}
+            {
+                CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
+            }
 
             /// Dummy copy constructor
             /**
@@ -252,17 +299,29 @@ namespace cds {
             reentrant_spin_lock( const reentrant_spin_lock<Integral, Backoff>& ) CDS_NOEXCEPT
                 : m_spin(0)
                 , m_OwnerId( OS::c_NullThreadId )
-            {}
+            {
+                CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
+            }
 
-            /// Construct object for specified state
-            reentrant_spin_lock( bool bLocked ) CDS_NOEXCEPT
+            /// Construct object in specified state
+            explicit reentrant_spin_lock( bool bLocked )
                 : m_spin(0)
                 , m_OwnerId( OS::c_NullThreadId )
             {
+                CDS_TSAN_ANNOTATE_MUTEX_CREATE( this );
                 if ( bLocked )
                     lock();
             }
 
+            /// Destructor. The spin-lock must be unlocked
+            ~reentrant_spin_lock()
+            {
+                assert( m_spin.load( atomics::memory_order_acquire ) == 0 );
+                assert( m_OwnerId == OS::c_NullThreadId );
+
+                CDS_TSAN_ANNOTATE_MUTEX_DESTROY( this );
+            }
+
             /// Checks if the spin is locked
             /**
                The spin is locked if the lock count > 0 and the current thread is not the owner of the lock.
@@ -270,14 +329,14 @@ namespace cds {
             */
             bool is_locked() const CDS_NOEXCEPT
             {
-                return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id() ));
+                return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id()));
             }
 
-            /// Try to lock the spin-lock (synonym for \ref try_lock)
-            bool try_lock() CDS_NOEXCEPT
+            /// Try to lock the spin-lock
+            bool try_lock() CDS_NOEXCEPT_( noexcept( std::declval<reentrant_spin_lock>().try_acquire()))
             {
                 thread_id tid = OS::get_current_thread_id();
-                if ( try_taken_lock( tid ) )
+                if ( try_taken_lock( tid ))
                     return true;
                 if ( try_acquire()) {
                     take( tid );
@@ -286,11 +345,11 @@ namespace cds {
                 return false;
             }
 
-            /// Try to lock the object
-            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( try_acquire( nTryCount ) ) )
+            /// Try to lock the object, making up to \p nTryCount attempts
+            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( std::declval<reentrant_spin_lock>().try_acquire( nTryCount )))
             {
                 thread_id tid = OS::get_current_thread_id();
-                if ( try_taken_lock( tid ) )
+                if ( try_taken_lock( tid ))
                     return true;
                 if ( try_acquire( nTryCount )) {
                     take( tid );
@@ -300,40 +359,37 @@ namespace cds {
             }
 
             /// Lock the object; waits if it is busy
-            void lock() CDS_NOEXCEPT
+            void lock() CDS_NOEXCEPT_( noexcept( std::declval<reentrant_spin_lock>().acquire()))
             {
                 thread_id tid = OS::get_current_thread_id();
-                if ( !try_taken_lock( tid ) ) {
+                if ( !try_taken_lock( tid )) {
                     acquire();
                     take( tid );
                 }
             }
 
-            /// Unlock the spin-lock. Return @p true if the current thread is owner of spin-lock @p false otherwise
-            bool unlock() CDS_NOEXCEPT
+            /// Unlock the spin-lock
+            void unlock() CDS_NOEXCEPT
             {
-                if ( is_taken( OS::get_current_thread_id() ) ) {
-                    integral_type n = m_spin.load( atomics::memory_order_relaxed );
-                    if ( n > 1 )
-                        m_spin.store( n - 1, atomics::memory_order_relaxed );
-                    else {
-                        free();
-                        m_spin.store( 0, atomics::memory_order_release );
-                    }
-                    return true;
+                assert( is_taken( OS::get_current_thread_id()));
+
+                integral_type n = m_spin.load( atomics::memory_order_relaxed );
+                if ( n > 1 )
+                    m_spin.store( n - 1, atomics::memory_order_relaxed );
+                else {
+                    free();
+                    CDS_TSAN_ANNOTATE_MUTEX_RELEASED( this );
+                    m_spin.store( 0, atomics::memory_order_release );
                 }
-                return false;
             }
 
-            /// Change the owner of locked spin-lock. May be called by thread that is owner of the spin-lock
-            bool change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
+            /// Change the owner of the locked spin-lock. May be called only by the thread that owns the spin-lock
+            void change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
             {
-                if ( is_taken( OS::get_current_thread_id() ) ) {
-                    assert( newOwnerId != OS::c_NullThreadId );
-                    m_OwnerId = newOwnerId;
-                    return true;
-                }
-                return false;
+                assert( is_taken( OS::get_current_thread_id()));
+                assert( newOwnerId != OS::c_NullThreadId );
+
+                m_OwnerId = newOwnerId;
             }
         };
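
With this patch, `unlock()` and `change_owner()` assert their preconditions instead of returning a status. A sketch of an ownership handoff (the `handoff` function is illustrative; after the call, only the new owner may re-enter or unlock the lock):

```cpp
#include <cds/sync/spinlock.h>

// Illustrative handoff: this thread takes the lock, then transfers
// ownership so that only newOwner may re-enter or unlock it.
void handoff( cds::sync::reentrant_spin& rs, cds::OS::ThreadId newOwner )
{
    rs.lock();                     // caller becomes the owner
    rs.change_owner( newOwner );   // preconditions (asserted in debug builds):
                                   // the caller owns the lock, newOwner != null
}
```
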