3 #ifndef CDSLIB_SYNC_POOL_MONITOR_H
4 #define CDSLIB_SYNC_POOL_MONITOR_H
6 #include <cds/sync/monitor.h>
7 #include <cds/algo/atomic.h>
8 #include <cds/algo/backoff_strategy.h>
10 namespace cds { namespace sync {
12 /// @ref cds_sync_monitor "Monitor" that allocates node's lock when needed
14 The monitor is intended for reducing the number of system mutexes for
15 huge containers like a tree. The monitor allocates the mutex from the pool \p LockPool
16 only when container's node should be locked. Lifetime of node's mutex is managed by
17 reference counter. When the reference counter to node's mutex becomes zero,
18 the mutex is given back to the pool.
20 The monitor is blocking: access to the node's mutex pointer is performed under a spin-lock.
21 However, the node's mutex itself is locked and unlocked outside of that spin-lock.
24 - \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
25 the objects of type \p std::mutex or similar. The access to the pool is not synchronized.
26 - \p BackOff - back-off strategy for spinning, default is \p cds::backoff::LockDefault
30 typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
31 typedef cds::sync::pool_monitor< pool_type > sync_monitor;
34 template <class LockPool, typename BackOff = cds::backoff::LockDefault >
// NOTE(review): this is a partial extract — the class-head line of pool_monitor
// between the template declaration and these typedefs is outside the visible span.
// The typedefs below re-export the template arguments as member types.
38 typedef LockPool pool_type; ///< Pool type
39 typedef typename pool_type::value_type lock_type; ///< node lock type
40 typedef BackOff back_off; ///< back-off strategy for spinning
48 /// Monitor injection into \p Node
49 template <typename Node>
50 class node_injection : public Node
// refspin_type packs two things into one atomic word:
//   bit 0            — spin-lock flag guarding m_pLock
//   bits 1 and above — reference counter (each reference adds c_nRefIncrement)
53 typedef unsigned int refspin_type;
54 static CDS_CONSTEXPR refspin_type const c_nSpinBit = 1;
// One reference == +2, so counting never disturbs the spin bit.
55 static CDS_CONSTEXPR refspin_type const c_nRefIncrement = 2;
// NOTE(review): m_RefSpin/m_pLock appear to be members of a nested `injection`
// struct whose declaration lines fall outside this extract — confirm in the full file.
59 atomics::atomic<refspin_type> m_RefSpin; ///< Spin-lock for \p m_pLock (bit 0) + reference counter
60 lock_type * m_pLock; ///< Node-level lock
70 mutable injection m_Access; ///< injected data
// When the compiler supports inheriting constructors, `using Node::Node;` is used
// (branch body outside this extract); otherwise a variadic perfect-forwarding
// constructor emulates it below.
72 # ifdef CDS_CXX11_INHERITING_CTOR
75 // Inheriting ctor emulation
76 template <typename... Args>
77 node_injection( Args&&... args )
78 : Node( std::forward<Args>( args )... )
83 /// Initializes the pool of 256 preallocated mutexes
// NOTE(review): the default-ctor declaration/body lines are outside this extract.
88 /// Initializes the pool of \p nPoolCapacity preallocated mutexes
// \p nPoolCapacity is forwarded to the lock pool; m_Pool is presumably the
// pool_type member declared outside the visible span — confirm in the full file.
89 pool_monitor( size_t nPoolCapacity )
90 : m_Pool( nPoolCapacity)
93 /// Makes exclusive access to node \p p
94 template <typename Node>
95 void lock( Node const& p ) const
99 // try lock spin and increment reference counter
100 refspin_type cur = p.m_Access.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
101 if ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
102 atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
108 } while ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
109 atomics::memory_order_acquire, atomics::memory_order_relaxed );
113 // If the node has no lock, allocate it from pool
114 pLock = p.m_Access.m_pLock;
116 pLock = p.m_Access.m_pLock = m_Pool.allocate( 1 );
119 p.m_Access.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );
125 /// Unlocks the node \p p
126 template <typename Node>
127 void unlock( Node const& p ) const
129 lock_type * pLock = nullptr;
131 assert( p.m_Access.m_pLock != nullptr );
132 p.m_Access.m_pLock->unlock();
135 refspin_type cur = p.m_Access.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
136 if ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
137 atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
143 } while ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
144 atomics::memory_order_acquire, atomics::memory_order_relaxed );
149 // If we are the unique owner - deallocate lock
150 if ( cur == c_nRefIncrement ) {
151 pLock = p.m_Access.m_pLock;
152 p.m_Access.m_pLock = nullptr;
156 p.m_Access.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );
160 m_Pool.deallocate( pLock, 1 );
// RAII wrapper over lock()/unlock() for a single node; monitor_scoped_lock is
// declared in cds/sync/monitor.h — presumably locks in its ctor and unlocks in
// its dtor; confirm against that header.
164 template <typename Node>
165 using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;
168 }} // namespace cds::sync
170 #endif // #ifndef CDSLIB_SYNC_POOL_MONITOR_H