Merge branch 'flat_combinig_add_stress_and_unint_tests' of https://github.com/mgalimu...
[libcds.git] / cds / sync / pool_monitor.h
index f38edc907ebaba2a0f06ec0d8d9e7b05ab960057..1b73b4042b166fb6e4ff4cf433c1570e8e39d6a0 100644 (file)
@@ -1,4 +1,32 @@
-//$$CDS-header$$
+/*
+    This file is a part of libcds - Concurrent Data Structures library
+
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
+
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 
 #ifndef CDSLIB_SYNC_POOL_MONITOR_H
 #define CDSLIB_SYNC_POOL_MONITOR_H
 
 #ifndef CDSLIB_SYNC_POOL_MONITOR_H
 #define CDSLIB_SYNC_POOL_MONITOR_H
@@ -33,17 +61,17 @@ namespace cds { namespace sync {
             typedef Counter event_counter; ///< measure type
 
             event_counter m_nLockCount;         ///< Number of monitor \p lock() call
             typedef Counter event_counter; ///< measure type
 
             event_counter m_nLockCount;         ///< Number of monitor \p lock() call
-            event_counter m_nUnlockCount;       ///< Number of monitor \p unlock call
+            event_counter m_nUnlockCount;       ///< Number of monitor \p unlock() call
             event_counter m_nMaxLocked;         ///< Max number of simuntaneously locked mutexes
             event_counter m_nLockContention;    ///< Number of \p lock() contenton
             event_counter m_nUnlockContention;  ///< Number of \p unlock() contention
             event_counter m_nLockAllocation;    ///< Number of the lock allocation from the pool
             event_counter m_nLockDeallocation;  ///< Number of the lock deallocation
             event_counter m_nMaxLocked;         ///< Max number of simuntaneously locked mutexes
             event_counter m_nLockContention;    ///< Number of \p lock() contenton
             event_counter m_nUnlockContention;  ///< Number of \p unlock() contention
             event_counter m_nLockAllocation;    ///< Number of the lock allocation from the pool
             event_counter m_nLockDeallocation;  ///< Number of the lock deallocation
-            event_counter m_nMaxAllocated;      ///< Max number of sumultanouusly allocated mutexes
+            event_counter m_nMaxAllocated;      ///< Max number of simultaneously allocated mutexes
 
             //@cond
             void onLock()
 
             //@cond
             void onLock()
-            { 
+            {
                 ++m_nLockCount;
                 int nDiff = static_cast<int>( m_nLockCount.get() - m_nUnlockCount.get());
                 if ( nDiff > 0 && m_nMaxLocked.get() < static_cast<typename event_counter::value_type>( nDiff ))
                 ++m_nLockCount;
                 int nDiff = static_cast<int>( m_nLockCount.get() - m_nUnlockCount.get());
                 if ( nDiff > 0 && m_nMaxLocked.get() < static_cast<typename event_counter::value_type>( nDiff ))
@@ -53,10 +81,10 @@ namespace cds { namespace sync {
             void onLockContention()     { ++m_nLockContention;  }
             void onUnlockContention()   { ++m_nUnlockContention;}
             void onLockAllocation()
             void onLockContention()     { ++m_nLockContention;  }
             void onUnlockContention()   { ++m_nUnlockContention;}
             void onLockAllocation()
-            { 
-                ++m_nLockAllocation;  
+            {
+                ++m_nLockAllocation;
                 int nDiff = static_cast<int>( m_nLockAllocation.get() - m_nLockDeallocation.get());
                 int nDiff = static_cast<int>( m_nLockAllocation.get() - m_nLockDeallocation.get());
-                if ( nDiff > 0 && m_nMaxAllocated.get() < static_cast<typename event_counter::value_type>( nDiff ) )
+                if ( nDiff > 0 && m_nMaxAllocated.get() < static_cast<typename event_counter::value_type>( nDiff ))
                     m_nMaxAllocated = static_cast<typename event_counter::value_type>( nDiff );
             }
             void onLockDeallocation()   { ++m_nLockDeallocation;}
                     m_nMaxAllocated = static_cast<typename event_counter::value_type>( nDiff );
             }
             void onLockDeallocation()   { ++m_nLockDeallocation;}
@@ -70,7 +98,7 @@ namespace cds { namespace sync {
         The monitor is intended for reducing the number of system mutexes for
         huge containers like a tree. The monitor allocates the mutex from the pool \p LockPool
         only when container's node should be locked. Lifetime of node's mutex is managed by
         The monitor is intended for reducing the number of system mutexes for
         huge containers like a tree. The monitor allocates the mutex from the pool \p LockPool
         only when container's node should be locked. Lifetime of node's mutex is managed by
-        reference counter. When the reference counter to node's mutex becomes zero, 
+        reference counter. When the reference counter to node's mutex becomes zero,
         the mutex is given back to the pool.
 
         The monitor is blocked: the access to node's mutex is performed under the spin-lock.
         the mutex is given back to the pool.
 
         The monitor is blocked: the access to node's mutex is performed under the spin-lock.
@@ -94,18 +122,18 @@ namespace cds { namespace sync {
     public:
         typedef LockPool pool_type; ///< Pool type
         typedef typename pool_type::value_type lock_type; ///< node lock type
     public:
         typedef LockPool pool_type; ///< Pool type
         typedef typename pool_type::value_type lock_type; ///< node lock type
-        typedef typename std::conditional< 
-            std::is_same< BackOff, cds::opt::none >::value, 
+        typedef typename std::conditional<
+            std::is_same< BackOff, cds::opt::none >::value,
             cds::backoff::yield,
             BackOff
         >::type  back_off;  ///< back-off strategy for spinning
         typedef uint32_t refspin_type;  ///< Reference counter + spin-lock bit
 
         /// Internal statistics
             cds::backoff::yield,
             BackOff
         >::type  back_off;  ///< back-off strategy for spinning
         typedef uint32_t refspin_type;  ///< Reference counter + spin-lock bit
 
         /// Internal statistics
-        typedef typename std::conditional< 
-            Stat, 
-            typename pool_monitor_traits::stat<>, 
-            typename pool_monitor_traits::empty_stat 
+        typedef typename std::conditional<
+            Stat,
+            typename pool_monitor_traits::stat<>,
+            typename pool_monitor_traits::empty_stat
         >::type internal_stat;
 
         /// Pool's default capacity
         >::type internal_stat;
 
         /// Pool's default capacity
@@ -127,6 +155,7 @@ namespace cds { namespace sync {
             mutable atomics::atomic<refspin_type>   m_RefSpin;  ///< Spin-lock for \p m_pLock (bit 0) + reference counter
             mutable lock_type *                     m_pLock;    ///< Node-level lock
 
             mutable atomics::atomic<refspin_type>   m_RefSpin;  ///< Spin-lock for \p m_pLock (bit 0) + reference counter
             mutable lock_type *                     m_pLock;    ///< Node-level lock
 
+            //@cond
             node_injection()
                 : m_RefSpin( 0 )
                 , m_pLock( nullptr )
             node_injection()
                 : m_RefSpin( 0 )
                 , m_pLock( nullptr )
@@ -137,6 +166,12 @@ namespace cds { namespace sync {
                 assert( m_pLock == nullptr );
                 assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
             }
                 assert( m_pLock == nullptr );
                 assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
             }
+
+            bool check_free() const
+            {
+                return m_pLock == nullptr && m_RefSpin.load( atomics::memory_order_acquire ) == 0;
+            }
+            //@endcond
         };
 
         /// Initializes the pool of 256 preallocated mutexes
         };
 
         /// Initializes the pool of 256 preallocated mutexes
@@ -160,7 +195,7 @@ namespace cds { namespace sync {
             // try lock spin and increment reference counter
             refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
             if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
             // try lock spin and increment reference counter
             refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
             if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
-                atomics::memory_order_acquire, atomics::memory_order_relaxed ) ) 
+                atomics::memory_order_acquire, atomics::memory_order_relaxed ))
             {
                 back_off bkoff;
                 do {
             {
                 back_off bkoff;
                 do {
@@ -175,7 +210,9 @@ namespace cds { namespace sync {
             // If the node has no lock, allocate it from pool
             pLock = p.m_SyncMonitorInjection.m_pLock;
             if ( !pLock ) {
             // If the node has no lock, allocate it from pool
             pLock = p.m_SyncMonitorInjection.m_pLock;
             if ( !pLock ) {
+                assert( cur == 0 );
                 pLock = p.m_SyncMonitorInjection.m_pLock = m_Pool.allocate( 1 );
                 pLock = p.m_SyncMonitorInjection.m_pLock = m_Pool.allocate( 1 );
+                assert( pLock != nullptr );
                 m_Stat.onLockAllocation();
             }
 
                 m_Stat.onLockAllocation();
             }
 
@@ -199,15 +236,15 @@ namespace cds { namespace sync {
 
             // try lock spin
             refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
 
             // try lock spin
             refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
-            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
-                atomics::memory_order_acquire, atomics::memory_order_relaxed ) ) 
+            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
+                atomics::memory_order_acquire, atomics::memory_order_relaxed ))
             {
                 back_off bkoff;
                 do {
                     m_Stat.onUnlockContention();
                     bkoff();
                     cur &= ~c_nSpinBit;
             {
                 back_off bkoff;
                 do {
                     m_Stat.onUnlockContention();
                     bkoff();
                     cur &= ~c_nSpinBit;
-                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
+                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
                     atomics::memory_order_acquire, atomics::memory_order_relaxed ));
             }
 
                     atomics::memory_order_acquire, atomics::memory_order_relaxed ));
             }
 
@@ -236,9 +273,9 @@ namespace cds { namespace sync {
         /// Returns the reference to internal statistics
         /**
             If class' template argument \p Stat is \p false,
         /// Returns the reference to internal statistics
         /**
             If class' template argument \p Stat is \p false,
-            the function returns \ref empty_stat "dummy statistics".
-            Otherwise, it returns the reference to monitor's internal statistics 
-            of type \ref stat.
+            the function returns \ref pool_monitor_traits::empty_stat "dummy statistics".
+            Otherwise, it returns the reference to monitor's internal statistics
+            of type \ref pool_monitor_traits::stat.
         */
         internal_stat const& statistics() const
         {
         */
         internal_stat const& statistics() const
         {