#ifndef CDSLIB_SYNC_POOL_MONITOR_H
#define CDSLIB_SYNC_POOL_MONITOR_H

#include <cassert>
#include <cstddef>
#include <cds/sync/monitor.h>
#include <cds/algo/atomic.h>
#include <cds/algo/backoff_strategy.h>

namespace cds { namespace sync {
    /// @ref cds_sync_monitor "Monitor" that allocates a node's lock only when needed
    /**
        The monitor is intended to reduce the number of system mutexes needed for
        huge containers such as trees. It allocates a mutex from the pool \p LockPool
        only when a container node has to be locked. The lifetime of a node's mutex is
        managed by a reference counter: when the counter drops to zero, the mutex is
        returned to the pool.

        The monitor is blocking: access to the node's mutex pointer is performed under
        a spin-lock. However, the node's mutex itself is locked and unlocked outside
        that spin-lock.

        Template arguments:
        - \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
          objects of type \p std::mutex or similar. Access to the pool is not synchronized.
        - \p BackOff - back-off strategy for spinning; the default is \p cds::backoff::LockDefault

        <b>How to use</b>
        \code
        typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
        typedef cds::sync::pool_monitor< pool_type > sync_monitor;
        \endcode
    */
    template <class LockPool, typename BackOff = cds::backoff::LockDefault >
    class pool_monitor
    {
    public:
        typedef LockPool pool_type;                        ///< Pool type
        typedef typename pool_type::value_type lock_type;  ///< Node lock type
        typedef BackOff back_off;                          ///< Back-off strategy for spinning
        typedef unsigned int refspin_type;                 ///< Spin-lock bit + reference counter

    private:
        // Bit 0 of m_RefSpin is the spin-lock bit; the reference counter occupies
        // the remaining bits and is therefore changed in steps of c_nRefIncrement.
        static constexpr refspin_type c_nSpinBit = 1;       ///< Spin-lock bit mask
        static constexpr refspin_type c_nRefIncrement = 2;  ///< Reference counter increment

        mutable pool_type m_Pool;  ///< Pool of node-level mutexes

    public:
        /// Monitor injection into the container's node
        struct node_injection {
            mutable atomics::atomic<refspin_type> m_RefSpin; ///< Spin-lock for \p m_pLock (bit 0) + reference counter
            mutable lock_type * m_pLock;                     ///< Node-level lock

            node_injection()
                : m_RefSpin( 0 )
                , m_pLock( nullptr )
            {}

            ~node_injection()
            {
                // the node must not be locked or referenced when it is destroyed
                assert( m_pLock == nullptr );
                assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
            }
        };
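        // Illustrative lifetime trace (a sketch, not part of the original header;
        // thread names A and B are hypothetical). Two threads locking the same node:
        // the first lock() allocates the mutex, later locks only bump the counter,
        // and the last unlock() returns the mutex to the pool.
        //
        //   A: lock()   : m_RefSpin 0 -> 2, m_pLock = m_Pool.allocate( 1 )
        //   B: lock()   : m_RefSpin 2 -> 4, m_pLock reused
        //   A: unlock() : m_RefSpin 4 -> 2, m_pLock kept
        //   B: unlock() : m_RefSpin 2 -> 0, m_pLock returned via m_Pool.deallocate( pLock, 1 )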
        /// Initializes the pool of 256 preallocated mutexes
        pool_monitor()
            : m_Pool( 256 )
        {}

        /// Initializes the pool of \p nPoolCapacity preallocated mutexes
        pool_monitor( size_t nPoolCapacity )
            : m_Pool( nPoolCapacity )
        {}
        /// Makes exclusive access to node \p p
        template <typename Node>
        void lock( Node const& p ) const
        {
            // try to acquire the spin-lock bit and increment the reference counter
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ))
            {
                back_off bkoff;
                do {
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }

            // the spin-lock is held here.
            // If the node has no lock, allocate it from the pool
            lock_type * pLock = p.m_SyncMonitorInjection.m_pLock;
            if ( !pLock )
                pLock = p.m_SyncMonitorInjection.m_pLock = m_Pool.allocate( 1 );

            // release the spin-lock; the incremented reference counter keeps the mutex alive
            p.m_SyncMonitorInjection.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );

            // lock the node's mutex outside the spin-lock
            pLock->lock();
        }
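        // Note on the spin protocol used by lock() above and unlock() below: bit 0
        // of m_RefSpin acts as a spin-lock guarding m_pLock. Each CAS expects a value
        // with bit 0 clear and installs one with bit 0 set, which acquires the
        // spin-lock; the plain release-store afterwards clears bit 0 again. The
        // back-off strategy only shapes the retry loop; it does not change the protocol.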
        /// Unlocks the node \p p
        template <typename Node>
        void unlock( Node const& p ) const
        {
            lock_type * pLock = nullptr;

            assert( p.m_SyncMonitorInjection.m_pLock != nullptr );
            p.m_SyncMonitorInjection.m_pLock->unlock();

            // try to acquire the spin-lock bit
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ))
            {
                back_off bkoff;
                do {
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }

            // the spin-lock is held here.
            // If we are the unique owner, detach the mutex so it can be returned to the pool
            if ( cur == c_nRefIncrement ) {
                pLock = p.m_SyncMonitorInjection.m_pLock;
                p.m_SyncMonitorInjection.m_pLock = nullptr;
            }

            // release the spin-lock and decrement the reference counter
            p.m_SyncMonitorInjection.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );

            // return the detached mutex to the pool outside the spin-lock
            if ( pLock )
                m_Pool.deallocate( pLock, 1 );
        }
        /// Scoped-lock helper: locks the node in the constructor, unlocks it in the destructor
        template <typename Node>
        using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;
    };
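    // Usage sketch (hypothetical, for illustration only): a container node embeds
    // the monitor's injection as a member named m_SyncMonitorInjection, which is
    // the name lock()/unlock() expect. `my_node`, `mon`, and `n` are invented
    // names, and the scoped_lock constructor signature is assumed to be
    // ( monitor&, node const& ) as in cds/sync/monitor.h.
    //
    //   struct my_node {
    //       int value;
    //       sync_monitor::node_injection m_SyncMonitorInjection;
    //   };
    //
    //   typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
    //   typedef cds::sync::pool_monitor< pool_type >         sync_monitor;
    //
    //   sync_monitor mon;
    //   my_node n;
    //   {
    //       sync_monitor::scoped_lock< my_node > guard( mon, n ); // locks n; unlocks at scope exit
    //   }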
}} // namespace cds::sync

#endif // #ifndef CDSLIB_SYNC_POOL_MONITOR_H