3 #ifndef CDSLIB_SYNC_POOL_MONITOR_H
4 #define CDSLIB_SYNC_POOL_MONITOR_H
6 #include <cds/sync/monitor.h>
7 #include <cds/algo/atomic.h>
8 #include <cds/algo/backoff_strategy.h>
10 namespace cds { namespace sync {
    /// @ref cds_sync_monitor "Monitor" that allocates node's lock when needed
    /**
        The monitor is intended for reducing the number of system mutexes for
        huge containers like a tree. The monitor allocates the mutex from the pool \p LockPool
        only when container's node should be locked. Lifetime of node's mutex is managed by
        reference counter. When the reference counter to node's mutex becomes zero,
        the mutex is given back to the pool.

        The monitor is blocked: the access to node's mutex is performed under the spin-lock.
        However, node locking/unlocking is performed beyond the spin-lock.

        Template arguments:
        - \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
            the objects of type \p std::mutex or similar. The access to the pool is not synchronized.
        - \p BackOff - back-off strategy for spinning, default is \p cds::backoff::LockDefault

        <b>How to use</b>
        \code
        typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
        typedef cds::sync::pool_monitor< pool_type > sync_monitor;
        \endcode
    */
34 template <class LockPool, typename BackOff = cds::backoff::LockDefault >
38 typedef LockPool pool_type; ///< Pool type
39 typedef typename pool_type::value_type lock_type; ///< node lock type
40 typedef BackOff back_off; ///< back-off strategy for spinning
48 /// Monitor injection into \p Node
49 template <typename Node>
50 class node_injection : public Node
53 typedef unsigned int refspin_type;
54 static CDS_CONSTEXPR refspin_type const c_nSpinBit = 1;
55 static CDS_CONSTEXPR refspin_type const c_nRefIncrement = 2;
59 atomics::atomic<refspin_type> m_RefSpin; ///< Spin-lock for \p m_pLock (bit 0) + reference counter
60 lock_type * m_pLock; ///< Node-level lock
69 assert( m_pLock == nullptr );
70 assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
76 mutable injection m_Access; ///< injected data
78 # ifdef CDS_CXX11_INHERITING_CTOR
81 // Inheriting ctor emulation
82 template <typename... Args>
83 node_injection( Args&&... args )
84 : Node( std::forward<Args>( args )... )
89 /// Initializes the pool of 256 preallocated mutexes
94 /// Initializes the pool of \p nPoolCapacity preallocated mutexes
95 pool_monitor( size_t nPoolCapacity )
96 : m_Pool( nPoolCapacity)
99 /// Makes exclusive access to node \p p
100 template <typename Node>
101 void lock( Node const& p ) const
105 // try lock spin and increment reference counter
106 refspin_type cur = p.m_Access.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
107 if ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
108 atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
114 } while ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
115 atomics::memory_order_acquire, atomics::memory_order_relaxed );
119 // If the node has no lock, allocate it from pool
120 pLock = p.m_Access.m_pLock;
122 pLock = p.m_Access.m_pLock = m_Pool.allocate( 1 );
125 p.m_Access.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );
131 /// Unlocks the node \p p
132 template <typename Node>
133 void unlock( Node const& p ) const
135 lock_type * pLock = nullptr;
137 assert( p.m_Access.m_pLock != nullptr );
138 p.m_Access.m_pLock->unlock();
141 refspin_type cur = p.m_Access.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
142 if ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
143 atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
149 } while ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
150 atomics::memory_order_acquire, atomics::memory_order_relaxed );
155 // If we are the unique owner - deallocate lock
156 if ( cur == c_nRefIncrement ) {
157 pLock = p.m_Access.m_pLock;
158 p.m_Access.m_pLock = nullptr;
162 p.m_Access.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );
166 m_Pool.deallocate( pLock, 1 );
170 template <typename Node>
171 using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;
174 }} // namespace cds::sync
176 #endif // #ifndef CDSLIB_SYNC_POOL_MONITOR_H