#ifndef CDSLIB_SYNC_POOL_MONITOR_H
#define CDSLIB_SYNC_POOL_MONITOR_H

#include <cds/sync/monitor.h>
#include <cds/algo/atomic.h>
#include <cds/algo/backoff_strategy.h>
#include <cds/opt/options.h> // opt::none

namespace cds { namespace sync {
    /// \p pool_monitor traits
    struct pool_monitor_traits {

        /// Dummy internal statistics, used if the \p Stat template parameter is \p false
        struct empty_stat {
            void onLock()             const {}
            void onUnlock()           const {}
            void onLockContention()   const {}
            void onUnlockContention() const {}
            void onLockAllocation()   const {}
            void onLockDeallocation() const {}
        };
        /// Monitor's internal statistics, used if the \p Stat template parameter is \p true
        template <typename Counter = cds::atomicity::event_counter >
        struct stat {
            typedef Counter event_counter; ///< Event counter type

            event_counter m_nLockCount;        ///< Number of monitor \p lock() calls
            event_counter m_nUnlockCount;      ///< Number of monitor \p unlock() calls
            event_counter m_nMaxLocked;        ///< Max number of simultaneously locked mutexes
            event_counter m_nLockContention;   ///< Number of \p lock() contentions
            event_counter m_nUnlockContention; ///< Number of \p unlock() contentions
            event_counter m_nLockAllocation;   ///< Number of lock allocations from the pool
            event_counter m_nLockDeallocation; ///< Number of lock deallocations
            event_counter m_nMaxAllocated;     ///< Max number of simultaneously allocated mutexes
            void onLock()
            {
                ++m_nLockCount;
                int nDiff = static_cast<int>( m_nLockCount.get() - m_nUnlockCount.get());
                if ( nDiff > 0 && m_nMaxLocked.get() < static_cast<typename event_counter::value_type>( nDiff ))
                    m_nMaxLocked = static_cast<typename event_counter::value_type>( nDiff );
            }
            void onUnlock()            { ++m_nUnlockCount; }
            void onLockContention()    { ++m_nLockContention; }
            void onUnlockContention()  { ++m_nUnlockContention; }
            void onLockAllocation()
            {
                ++m_nLockAllocation;
                int nDiff = static_cast<int>( m_nLockAllocation.get() - m_nLockDeallocation.get());
                if ( nDiff > 0 && m_nMaxAllocated.get() < static_cast<typename event_counter::value_type>( nDiff ))
                    m_nMaxAllocated = static_cast<typename event_counter::value_type>( nDiff );
            }
            void onLockDeallocation()  { ++m_nLockDeallocation; }
        };
    };
    /// @ref cds_sync_monitor "Monitor" that allocates a node's lock only when needed
    /**
        The monitor is intended to reduce the number of system mutexes required by
        huge containers such as trees. The monitor allocates a mutex from the pool \p LockPool
        only when a container node actually has to be locked. The lifetime of a node's mutex
        is managed by a reference counter: when the counter drops to zero, the mutex is
        returned to the pool.

        The monitor is blocking: access to the node's mutex pointer is performed under a
        per-node spin-lock. However, the node locking/unlocking itself is performed outside
        the spin-lock.
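
        Each protected node carries a single 32-bit word (\p refspin_type) that combines the
        spin-lock and the reference counter. A rough sketch of the layout, matching the
        \p c_nSpinBit / \p c_nRefIncrement constants used by the implementation:
        \code
        // bit 0      - spin-lock guarding the node's mutex pointer
        // bits 1..31 - reference counter: threads currently locking or waiting on the node
        //
        // lock():    CAS refspin -> refspin + c_nRefIncrement + c_nSpinBit, then clear the spin bit
        // unlock():  CAS refspin -> refspin | c_nSpinBit, then store refspin - c_nRefIncrement
        \endcode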

        Template arguments:
        - \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
            objects of type \p std::mutex or similar. Access to the pool is not synchronized by the monitor.
        - \p BackOff - back-off strategy for spinning; default is \p cds::backoff::yield
        - \p Stat - enable (\p true) or disable (\p false, the default) monitor's internal statistics

        <b>How to use</b>
        \code
        typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
        typedef cds::sync::pool_monitor< pool_type > sync_monitor;
        \endcode
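
        A node guarded by \p pool_monitor must embed a \p node_injection member named
        \p m_SyncMonitorInjection; that member name is what \p lock() / \p unlock() expect.
        A minimal usage sketch (the node type \p my_node and its payload are illustrative
        assumptions, not part of the library):
        \code
        struct my_node {
            int payload;
            sync_monitor::node_injection m_SyncMonitorInjection; // required member name
        };

        sync_monitor monitor;
        my_node node;
        {
            // RAII lock of the node; a mutex is taken from the pool on demand
            sync_monitor::scoped_lock<my_node> guard( monitor, node );
            // ... exclusive access to node ...
        } // when the last owner unlocks, the mutex is returned to the pool
        \endcode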
    */
    template <class LockPool, typename BackOff = cds::backoff::yield, bool Stat = false >
    class pool_monitor
    {
    public:
        typedef LockPool pool_type;                        ///< Pool type
        typedef typename pool_type::value_type lock_type;  ///< Node lock type
        typedef typename std::conditional<
            std::is_same< BackOff, cds::opt::none >::value,
            cds::backoff::yield,
            BackOff
        >::type back_off;               ///< Back-off strategy for spinning
        typedef uint32_t refspin_type;  ///< Reference counter + spin-lock bit
        /// Internal statistics
        typedef typename std::conditional<
            Stat,
            typename pool_monitor_traits::stat<>,
            typename pool_monitor_traits::empty_stat
        >::type internal_stat;
        /// Pool's default capacity
        static CDS_CONSTEXPR size_t const c_nDefaultCapacity = 256;
    private:
        static CDS_CONSTEXPR refspin_type const c_nSpinBit      = 1;  // bit 0: spin-lock flag
        static CDS_CONSTEXPR refspin_type const c_nRefIncrement = 2;  // +1 to the reference counter (bits 1..31)
        mutable pool_type      m_Pool;  // pool of mutexes
        mutable internal_stat  m_Stat;  // internal statistics
    public:
        /// Node injection: must be embedded into each container node guarded by the monitor
        struct node_injection
        {
            mutable atomics::atomic<refspin_type> m_RefSpin; ///< Spin-lock for \p m_pLock (bit 0) + reference counter
            mutable lock_type *                   m_pLock;   ///< Node-level lock

            ~node_injection()
            {
                assert( m_pLock == nullptr );
                assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
            }
        };
        /// Initializes the pool with 256 preallocated mutexes
        pool_monitor()
            : m_Pool( c_nDefaultCapacity )
        {}

        /// Initializes the pool with \p nPoolCapacity preallocated mutexes
        pool_monitor( size_t nPoolCapacity )
            : m_Pool( nPoolCapacity ? nPoolCapacity : c_nDefaultCapacity )
        {}
        /// Makes exclusive access to node \p p
        template <typename Node>
        void lock( Node const& p ) const
        {
            lock_type * pLock;
            m_Stat.onLock();

            // try to lock the spin bit and increment the reference counter
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ))
            {
                back_off bkoff;
                do {
                    m_Stat.onLockContention();
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }

            // spin bit is set; if the node has no lock, allocate it from the pool
            pLock = p.m_SyncMonitorInjection.m_pLock;
            if ( !pLock ) {
                pLock = p.m_SyncMonitorInjection.m_pLock = m_Pool.allocate( 1 );
                m_Stat.onLockAllocation();
            }

            // release the spin bit, keeping the incremented reference counter
            p.m_SyncMonitorInjection.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );

            // lock the node's mutex outside the spin-lock
            pLock->lock();
        }
        /// Unlocks the node \p p
        template <typename Node>
        void unlock( Node const& p ) const
        {
            lock_type * pLock = nullptr;
            m_Stat.onUnlock();

            // unlock the node's mutex outside the spin-lock
            assert( p.m_SyncMonitorInjection.m_pLock != nullptr );
            p.m_SyncMonitorInjection.m_pLock->unlock();

            // try to lock the spin bit
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ))
            {
                back_off bkoff;
                do {
                    m_Stat.onUnlockContention();
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }

            // spin bit is set; if we are the unique owner, detach the lock
            if ( cur == c_nRefIncrement ) {
                pLock = p.m_SyncMonitorInjection.m_pLock;
                p.m_SyncMonitorInjection.m_pLock = nullptr;
            }

            // decrement the reference counter and release the spin bit
            p.m_SyncMonitorInjection.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );

            // return the mutex to the pool
            if ( pLock ) {
                m_Pool.deallocate( pLock, 1 );
                m_Stat.onLockDeallocation();
            }
        }
        /// Scoped lock
        template <typename Node>
        using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;
        /// Returns the reference to internal statistics
        /**
            If the class' template argument \p Stat is \p false,
            the function returns a reference to \ref empty_stat "dummy statistics";
            otherwise it returns a reference to the monitor's internal statistics.
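
            For example, with \p Stat enabled the counters can be read after a run
            (a sketch only; \p pool_type is the pool typedef from the example above,
            and the printing code is purely illustrative):
            \code
            typedef cds::sync::pool_monitor< pool_type, cds::backoff::yield, true > stat_monitor;
            stat_monitor monitor;
            // ... run the workload ...
            auto const& st = monitor.statistics();
            std::cout << "locks: "           << st.m_nLockCount.get()
                      << ", contentions: "   << st.m_nLockContention.get()
                      << ", max allocated: " << st.m_nMaxAllocated.get() << std::endl;
            \endcode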
        */
        internal_stat const& statistics() const
        {
            return m_Stat;
        }
    };

}} // namespace cds::sync
#endif // #ifndef CDSLIB_SYNC_POOL_MONITOR_H