#ifndef CDSLIB_SYNC_POOL_MONITOR_H
#define CDSLIB_SYNC_POOL_MONITOR_H

#include <cds/sync/monitor.h>
#include <cds/algo/atomic.h>
#include <cds/algo/backoff_strategy.h>

namespace cds { namespace sync {
    /// @ref cds_sync_monitor "Monitor" that allocates a node's lock only when needed
    /**
        The monitor is intended to reduce the number of system mutexes needed by
        huge containers such as trees. The monitor allocates a mutex from the pool \p LockPool
        only when a container node actually has to be locked. The lifetime of a node's mutex
        is managed by a reference counter: when the counter drops to zero, the mutex is
        returned to the pool.

        The monitor is blocking: access to a node's mutex pointer is performed under a
        per-node spin-lock, but the node locking/unlocking itself is performed outside
        that spin-lock.

        Template arguments:
        - \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
            objects of type \p std::mutex or similar. Access to the pool is not synchronized.
        - \p BackOff - back-off strategy for spinning; the default is \p cds::backoff::LockDefault

        <b>How to use</b>
        \code
        typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
        typedef cds::sync::pool_monitor< pool_type > sync_monitor;
        \endcode
    */
    template <class LockPool, typename BackOff = cds::backoff::LockDefault >
    class pool_monitor
    {
    public:
        typedef LockPool                         pool_type;    ///< Pool type
        typedef typename pool_type::value_type   lock_type;    ///< Node lock type
        typedef BackOff                          back_off;     ///< Back-off strategy for spinning
        typedef uint32_t                         refspin_type; ///< Reference counter + spin-lock bit
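
        // refspin_type layout: bit 0 is the spin-lock bit protecting the node's lock pointer
        // (c_nSpinBit); the remaining bits hold the reference counter, which is therefore
        // stepped by c_nRefIncrement (= 2).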
    private:
        static CDS_CONSTEXPR refspin_type const c_nSpinBit      = 1;
        static CDS_CONSTEXPR refspin_type const c_nRefIncrement = 2;

        mutable pool_type   m_Pool;     ///< Lock pool
    public:
        /// Per-node data injected by the monitor into a container node
        struct node_injection {
            mutable atomics::atomic<refspin_type> m_RefSpin{ 0 };     ///< Spin-lock for \p m_pLock (bit 0) + reference counter
            mutable lock_type *                   m_pLock = nullptr;  ///< Node-level lock

            ~node_injection()
            {
                assert( m_pLock == nullptr );
                assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
            }
        };
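
        // A container node is expected to expose this data as a member named
        // m_SyncMonitorInjection: lock()/unlock() below access p.m_SyncMonitorInjection directly.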
        /// Initializes the pool with 256 preallocated mutexes
        pool_monitor() : m_Pool( 256 ) {}

        /// Initializes the pool with \p nPoolCapacity preallocated mutexes
        pool_monitor( size_t nPoolCapacity )
            : m_Pool( nPoolCapacity )
        {}
        /// Makes exclusive access to node \p p
        template <typename Node>
        void lock( Node const& p ) const
        {
            lock_type * pLock;

            // Try to lock the spin and increment the reference counter in a single CAS
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ))
            {
                back_off bkoff;
                do {
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }
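
            // At this point the spin-lock bit is owned by this thread and the reference
            // counter has been incremented; cur holds the pre-CAS value with the spin bit cleared.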
            // If the node has no lock yet, allocate it from the pool
            pLock = p.m_SyncMonitorInjection.m_pLock;
            if ( !pLock )
                pLock = p.m_SyncMonitorInjection.m_pLock = m_Pool.allocate( 1 );

            // Release the spin-lock, keeping the incremented reference counter,
            // then lock the node's mutex outside the spin-lock
            p.m_SyncMonitorInjection.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );
            pLock->lock();
        }
        /// Unlocks the node \p p
        template <typename Node>
        void unlock( Node const& p ) const
        {
            lock_type * pLock = nullptr;

            // Unlock the node's mutex first, outside the spin-lock
            assert( p.m_SyncMonitorInjection.m_pLock != nullptr );
            p.m_SyncMonitorInjection.m_pLock->unlock();

            // Try to lock the spin
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ))
            {
                back_off bkoff;
                do {
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }
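
            // Spin-lock acquired; cur holds the reference counter value before the decrement
            // (spin bit cleared), so cur == c_nRefIncrement means this thread holds the last reference.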
            // If we are the unique owner, detach the lock from the node
            if ( cur == c_nRefIncrement ) {
                pLock = p.m_SyncMonitorInjection.m_pLock;
                p.m_SyncMonitorInjection.m_pLock = nullptr;
            }

            // Release the spin-lock with the decremented reference counter
            p.m_SyncMonitorInjection.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );
            // Return the detached lock to the pool, if any
            if ( pLock )
                m_Pool.deallocate( pLock, 1 );
        }
        /// Scoped lock: locks the node in the constructor, unlocks it in the destructor
        template <typename Node>
        using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;
    };
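
    /*
        A minimal usage sketch (not part of the library; my_node is a hypothetical
        example type, and the scoped_lock construction assumes monitor_scoped_lock
        takes the monitor and the node). A container node must embed the monitor's
        per-node data as a member named m_SyncMonitorInjection:

            typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
            typedef cds::sync::pool_monitor< pool_type > sync_monitor;

            struct my_node {
                sync_monitor::node_injection m_SyncMonitorInjection;
                // ... node payload ...
            };

            sync_monitor mon;
            my_node node;
            {
                sync_monitor::scoped_lock< my_node > guard( mon, node ); // locks the node
                // ... exclusive access to the node ...
            }   // unlocks the node; its mutex may be returned to the pool
    */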
}} // namespace cds::sync

#endif // #ifndef CDSLIB_SYNC_POOL_MONITOR_H