//$$CDS-header$$

#ifndef CDSLIB_SYNC_POOL_MONITOR_H
#define CDSLIB_SYNC_POOL_MONITOR_H

#include <cds/sync/monitor.h>
#include <cds/algo/atomic.h>
#include <cds/algo/backoff_strategy.h>
#include <cds/opt/options.h> // opt::none

namespace cds { namespace sync {

    /// \p pool_monitor traits
    struct pool_monitor_traits {

        /// Dummy internal statistics used if the \p Stat template parameter is \p false
        struct empty_stat
        {
            //@cond
            void onLock()              const {}
            void onUnlock()            const {}
            void onLockContention()    const {}
            void onUnlockContention()  const {}
            void onLockAllocation()    const {}
            void onLockDeallocation()  const {}
            //@endcond
        };

        /// Monitor's internal statistics, used if the \p Stat template parameter is \p true
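        /// The statistics type is selected by \p pool_monitor's third template parameter.
        /// A minimal sketch of enabling it (illustration only; \p pool_type reuses the
        /// typedef from the \p pool_monitor example below):
        /// \code
        /// typedef cds::sync::pool_monitor< pool_type, cds::backoff::yield, true > stat_monitor;
        /// \endcode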
        template <typename Counter = cds::atomicity::event_counter >
        struct stat
        {
            typedef Counter event_counter; ///< measure type

            event_counter m_nLockCount;         ///< Number of monitor \p lock() calls
            event_counter m_nUnlockCount;       ///< Number of monitor \p unlock() calls
            event_counter m_nMaxLocked;         ///< Maximum number of simultaneously locked mutexes
            event_counter m_nLockContention;    ///< Number of \p lock() contentions
            event_counter m_nUnlockContention;  ///< Number of \p unlock() contentions
            event_counter m_nLockAllocation;    ///< Number of lock allocations from the pool
            event_counter m_nLockDeallocation;  ///< Number of lock deallocations
            event_counter m_nMaxAllocated;      ///< Maximum number of simultaneously allocated mutexes

            //@cond
            void onLock()
            {
                ++m_nLockCount;
                int nDiff = static_cast<int>( m_nLockCount.get() - m_nUnlockCount.get());
                if ( nDiff > 0 && m_nMaxLocked.get() < static_cast<typename event_counter::value_type>( nDiff ))
                    m_nMaxLocked = static_cast<typename event_counter::value_type>( nDiff );
            }
            void onUnlock()             { ++m_nUnlockCount;     }
            void onLockContention()     { ++m_nLockContention;  }
            void onUnlockContention()   { ++m_nUnlockContention;}
            void onLockAllocation()
            {
                ++m_nLockAllocation;
                int nDiff = static_cast<int>( m_nLockAllocation.get() - m_nLockDeallocation.get());
                if ( nDiff > 0 && m_nMaxAllocated.get() < static_cast<typename event_counter::value_type>( nDiff ) )
                    m_nMaxAllocated = static_cast<typename event_counter::value_type>( nDiff );
            }
            void onLockDeallocation()   { ++m_nLockDeallocation;}
            //@endcond
        };
    };

    /// @ref cds_sync_monitor "Monitor" that allocates a node's lock only when needed
    /**
        The monitor is intended to reduce the number of system mutexes for
        large containers such as trees. The monitor allocates a mutex from the pool \p LockPool
        only when a container node actually needs to be locked. The lifetime of a node's mutex
        is managed by a reference counter: when the counter drops to zero,
        the mutex is returned to the pool.

        The monitor is blocking: access to a node's mutex is guarded by a spin-lock.
        However, locking and unlocking the node's mutex itself is performed outside the spin-lock.

        Template arguments:
        - \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
            objects of type \p std::mutex or similar. Access to the pool is not synchronized.
        - \p BackOff - back-off strategy for spinning, default is \p cds::backoff::yield
        - \p Stat - enables (\p true) or disables (\p false, the default) the monitor's internal statistics.

        <b>How to use</b>
        \code
        typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
        typedef cds::sync::pool_monitor< pool_type > sync_monitor;
        \endcode
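
        Continuing the typedefs above, the sketch below is an illustration only: \p my_node
        is a hypothetical node type whose only monitor-related requirement, as \p lock() /
        \p unlock() show, is a public member named \p m_SyncMonitorInjection of type
        \p node_injection:
        \code
        // hypothetical node type
        struct my_node {
            int value;
            sync_monitor::node_injection m_SyncMonitorInjection;
        };

        void example()
        {
            sync_monitor monitor;    // pool with the default capacity of 256
            my_node node;

            monitor.lock( node );    // the node's mutex is allocated from the pool on demand
            // ... exclusive access to the node ...
            monitor.unlock( node );  // the last owner returns the mutex to the pool
        }
        \endcode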
    */
    template <class LockPool, typename BackOff = cds::backoff::yield, bool Stat = false >
    class pool_monitor
    {
    public:
        typedef LockPool pool_type; ///< Pool type
        typedef typename pool_type::value_type lock_type; ///< node lock type
        typedef typename std::conditional<
            std::is_same< BackOff, cds::opt::none >::value,
            cds::backoff::yield,
            BackOff
        >::type  back_off;  ///< back-off strategy for spinning
        typedef uint32_t refspin_type;  ///< Reference counter + spin-lock bit

        /// Internal statistics
        typedef typename std::conditional<
            Stat,
            typename pool_monitor_traits::stat<>,
            typename pool_monitor_traits::empty_stat
        >::type internal_stat;

        /// Pool's default capacity
        static CDS_CONSTEXPR size_t const c_nDefaultCapacity = 256;

    private:
        //@cond
        static CDS_CONSTEXPR refspin_type const c_nSpinBit = 1;
        static CDS_CONSTEXPR refspin_type const c_nRefIncrement = 2;
        mutable pool_type      m_Pool;
        mutable internal_stat  m_Stat;
        //@endcond

    public:

        /// Node injection
        struct node_injection
        {
            mutable atomics::atomic<refspin_type>   m_RefSpin;  ///< Spin-lock for \p m_pLock (bit 0) + reference counter
            mutable lock_type *                     m_pLock;    ///< Node-level lock

            //@cond
            node_injection()
                : m_RefSpin( 0 )
                , m_pLock( nullptr )
            {}

            ~node_injection()
            {
                assert( m_pLock == nullptr );
                assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
            }

            bool check_free() const
            {
                return m_pLock == nullptr && m_RefSpin.load( atomics::memory_order_acquire ) == 0;
            }
            //@endcond
        };

        /// Initializes the pool of 256 preallocated mutexes
        pool_monitor()
            : m_Pool( c_nDefaultCapacity )
        {}

        /// Initializes the pool of \p nPoolCapacity preallocated mutexes
        pool_monitor( size_t nPoolCapacity )
            : m_Pool( nPoolCapacity ? nPoolCapacity : c_nDefaultCapacity )
        {}

        /// Acquires exclusive access to node \p p
        template <typename Node>
        void lock( Node const& p ) const
        {
            lock_type * pLock;

            m_Stat.onLock();

            // try to acquire the spin-lock bit and increment the reference counter
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
            {
                back_off bkoff;
                do {
                    m_Stat.onLockContention();
                    bkoff();
                    // on CAS failure cur holds the observed value; clear the spin bit
                    // so that the next attempt expects an unlocked spin-lock
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }

            // spin-lock acquired; cur holds the reference counter value before our increment.
            // If the node has no lock object yet, allocate it from the pool
            pLock = p.m_SyncMonitorInjection.m_pLock;
            if ( !pLock ) {
                assert( cur == 0 );
                pLock = p.m_SyncMonitorInjection.m_pLock = m_Pool.allocate( 1 );
                m_Stat.onLockAllocation();
            }

            // release the spin-lock, keeping the incremented reference counter
            p.m_SyncMonitorInjection.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );

            // lock the node
            pLock->lock();
        }

        /// Unlocks the node \p p
        template <typename Node>
        void unlock( Node const& p ) const
        {
            lock_type * pLock = nullptr;

            m_Stat.onUnlock();

            assert( p.m_SyncMonitorInjection.m_pLock != nullptr );
            p.m_SyncMonitorInjection.m_pLock->unlock();

            // try to acquire the spin-lock bit
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
            {
                back_off bkoff;
                do {
                    m_Stat.onUnlockContention();
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }

            // spin-lock acquired; cur holds the current reference counter value

            // If we are the unique owner, detach the lock object for deallocation
            if ( cur == c_nRefIncrement ) {
                pLock = p.m_SyncMonitorInjection.m_pLock;
                p.m_SyncMonitorInjection.m_pLock = nullptr;
            }

            // release the spin-lock and decrement the reference counter
            p.m_SyncMonitorInjection.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );

            // return the detached lock object, if any, to the pool
            if ( pLock ) {
                m_Pool.deallocate( pLock, 1 );
                m_Stat.onLockDeallocation();
            }
        }

        /// Scoped lock
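        /// A sketch of intended use (illustration only; \p my_node stands for any node type
        /// carrying a \p node_injection member as in the class-level example, and it is
        /// assumed, per \p monitor_scoped_lock declared in <tt>cds/sync/monitor.h</tt>, that
        /// the guard locks the node in its constructor and unlocks it in its destructor):
        /// \code
        /// void foo( sync_monitor& monitor, my_node& node )
        /// {
        ///     sync_monitor::scoped_lock<my_node> guard( monitor, node );
        ///     // ... the node is locked here ...
        /// }   // the node is unlocked when guard goes out of scope
        /// \endcode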
        template <typename Node>
        using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;

        /// Returns a reference to the internal statistics
        /**
            If the class template argument \p Stat is \p false,
            the function returns \ref empty_stat "dummy statistics".
            Otherwise, it returns a reference to the monitor's internal statistics
            of type \ref stat.
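
            A sketch of reading the counters (illustration only; it assumes the monitor
            was instantiated with \p Stat = \p true and reuses the \p pool_type typedef
            from the class-level example):
            \code
            typedef cds::sync::pool_monitor< pool_type, cds::backoff::yield, true > stat_monitor;
            stat_monitor monitor;
            // ... use the monitor ...
            auto nMaxLocked = monitor.statistics().m_nMaxLocked.get();
            auto nLockCount = monitor.statistics().m_nLockCount.get();
            \endcode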
        */
        internal_stat const& statistics() const
        {
            return m_Stat;
        }
    };

}} // namespace cds::sync

#endif // #ifndef CDSLIB_SYNC_POOL_MONITOR_H