small refactoring of pool monitor
libcds.git: cds/sync/pool_monitor.h
//$$CDS-header$$

#ifndef CDSLIB_SYNC_POOL_MONITOR_H
#define CDSLIB_SYNC_POOL_MONITOR_H

#include <cds/sync/monitor.h>
#include <cds/algo/atomic.h>
#include <cds/algo/backoff_strategy.h>
#include <cds/opt/options.h> // opt::none

namespace cds { namespace sync {

    /// \p pool_monitor traits
    struct pool_monitor_traits {

        /// Dummy internal statistics if \p Stat template parameter is \p false
        struct empty_stat
        {
            //@cond
            void onLock()              const {}
            void onUnlock()            const {}
            void onLockContention()    const {}
            void onUnlockContention()  const {}
            void onLockAllocation()    const {}
            void onLockDeallocation()  const {}
            //@endcond
        };

        /// Monitor's internal statistics, used if \p Stat template parameter is \p true
        template <typename Counter = cds::atomicity::event_counter >
        struct stat
        {
            typedef Counter event_counter; ///< measure type

            event_counter m_nLockCount;         ///< Number of monitor \p lock() calls
            event_counter m_nUnlockCount;       ///< Number of monitor \p unlock() calls
            event_counter m_nMaxLocked;         ///< Maximum number of simultaneously locked mutexes
            event_counter m_nLockContention;    ///< Number of \p lock() contentions
            event_counter m_nUnlockContention;  ///< Number of \p unlock() contentions
            event_counter m_nLockAllocation;    ///< Number of lock allocations from the pool
            event_counter m_nLockDeallocation;  ///< Number of lock deallocations
            event_counter m_nMaxAllocated;      ///< Maximum number of simultaneously allocated mutexes

            //@cond
            void onLock()
            {
                ++m_nLockCount;
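                // best-effort maximum: the lock/unlock counters may be updated concurrently,
                // so m_nMaxLocked is an approximation of the peak number of locked mutexes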
                int nDiff = static_cast<int>( m_nLockCount.get() - m_nUnlockCount.get());
                if ( nDiff > 0 && m_nMaxLocked.get() < static_cast<typename event_counter::value_type>( nDiff ))
                    m_nMaxLocked = static_cast<typename event_counter::value_type>( nDiff );
            }
            void onUnlock()             { ++m_nUnlockCount;     }
            void onLockContention()     { ++m_nLockContention;  }
            void onUnlockContention()   { ++m_nUnlockContention;}
            void onLockAllocation()
            {
                ++m_nLockAllocation;
                int nDiff = static_cast<int>( m_nLockAllocation.get() - m_nLockDeallocation.get());
                if ( nDiff > 0 && m_nMaxAllocated.get() < static_cast<typename event_counter::value_type>( nDiff ) )
                    m_nMaxAllocated = static_cast<typename event_counter::value_type>( nDiff );
            }
            void onLockDeallocation()   { ++m_nLockDeallocation;}
            //@endcond
        };
    };


    /// @ref cds_sync_monitor "Monitor" that allocates a node's lock only when needed
    /**
        The monitor is intended to reduce the number of system mutexes needed by
        huge containers such as trees. The monitor allocates a mutex from the pool \p LockPool
        only when a container node has to be locked. The lifetime of a node's mutex is managed by
        a reference counter: when the counter drops to zero, the mutex is returned to the pool.

        The monitor is blocking: access to a node's mutex is guarded by a spin-lock.
        However, the node's mutex itself is locked and unlocked outside that spin-lock.

        Template arguments:
        - \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
            objects of type \p std::mutex or similar. Access to the pool is not synchronized.
        - \p BackOff - back-off strategy for spinning, default is \p cds::backoff::yield
        - \p Stat - enable (\p true) or disable (\p false, the default) monitor's internal statistics.

        <b>How to use</b>
        \code
        typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
        typedef cds::sync::pool_monitor< pool_type > sync_monitor;
        \endcode
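
        The node type passed to \p lock() / \p unlock() must contain a member named
        \p m_SyncMonitorInjection of type \p node_injection. The sketch below is
        illustrative only; \p my_node is a hypothetical node type:
        \code
        struct my_node {
            int data;
            // per-node storage used by the monitor: spin-lock bit, reference counter, mutex pointer
            sync_monitor::node_injection m_SyncMonitorInjection;
        };

        sync_monitor mon;       // pool with the default capacity (256 mutexes)
        my_node node;

        mon.lock( node );       // a mutex is taken from the pool on first use
        // ... exclusive access to the node ...
        mon.unlock( node );     // the last owner returns the mutex to the pool
        \endcode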
    */
    template <class LockPool, typename BackOff = cds::backoff::yield, bool Stat = false >
    class pool_monitor
    {
    public:
        typedef LockPool pool_type; ///< Pool type
        typedef typename pool_type::value_type lock_type; ///< node lock type
        typedef typename std::conditional<
            std::is_same< BackOff, cds::opt::none >::value,
            cds::backoff::yield,
            BackOff
        >::type  back_off;  ///< back-off strategy for spinning
        typedef uint32_t refspin_type;  ///< Reference counter + spin-lock bit

        /// Internal statistics
        typedef typename std::conditional<
            Stat,
            typename pool_monitor_traits::stat<>,
            typename pool_monitor_traits::empty_stat
        >::type internal_stat;

        /// Pool's default capacity
        static CDS_CONSTEXPR size_t const c_nDefaultCapacity = 256;

    private:
        //@cond
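        // Layout of node_injection::m_RefSpin: bit 0 is the spin-lock flag, the remaining bits
        // hold the reference counter, so the counter changes in steps of c_nRefIncrement (2).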
        static CDS_CONSTEXPR refspin_type const c_nSpinBit = 1;
        static CDS_CONSTEXPR refspin_type const c_nRefIncrement = 2;
        mutable pool_type      m_Pool;
        mutable internal_stat  m_Stat;
        //@endcond

    public:

        /// Node injection: per-node data the monitor embeds into each container node
        struct node_injection
        {
            mutable atomics::atomic<refspin_type>   m_RefSpin;  ///< Spin-lock for \p m_pLock (bit 0) + reference counter
            mutable lock_type *                     m_pLock;    ///< Node-level lock

            node_injection()
                : m_RefSpin( 0 )
                , m_pLock( nullptr )
            {}

            ~node_injection()
            {
                assert( m_pLock == nullptr );
                assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
            }
        };

        /// Initializes the pool of 256 preallocated mutexes
        pool_monitor()
            : m_Pool( c_nDefaultCapacity )
        {}

        /// Initializes the pool of \p nPoolCapacity preallocated mutexes
        pool_monitor( size_t nPoolCapacity )
            : m_Pool( nPoolCapacity ? nPoolCapacity : c_nDefaultCapacity )
        {}

        /// Acquires exclusive access to node \p p
        template <typename Node>
        void lock( Node const& p ) const
        {
            lock_type * pLock;

            m_Stat.onLock();

            // try to acquire the spin-lock and increment the reference counter in one CAS
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
            {
                back_off bkoff;
                do {
                    m_Stat.onLockContention();
                    bkoff();
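                    // on failure, compare_exchange_weak has stored the current value into cur;
                    // clear the spin bit so the next attempt expects the spin-lock to be free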
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }

            // spin-lock acquired
            // If the node has no lock yet, allocate one from the pool
            pLock = p.m_SyncMonitorInjection.m_pLock;
            if ( !pLock ) {
                assert( cur == 0 );
                pLock = p.m_SyncMonitorInjection.m_pLock = m_Pool.allocate( 1 );
                m_Stat.onLockAllocation();
            }

            // release the spin-lock and publish the incremented reference counter
            p.m_SyncMonitorInjection.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );

            // lock the node's mutex (outside the spin-lock)
            pLock->lock();
        }

        /// Unlocks the node \p p
        template <typename Node>
        void unlock( Node const& p ) const
        {
            lock_type * pLock = nullptr;

            m_Stat.onUnlock();

            assert( p.m_SyncMonitorInjection.m_pLock != nullptr );
            p.m_SyncMonitorInjection.m_pLock->unlock();

            // try to acquire the spin-lock
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
            {
                back_off bkoff;
                do {
                    m_Stat.onUnlockContention();
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }

            // spin-lock acquired

            // If we are the last owner (the reference counter is 1), detach the node's mutex for deallocation
            if ( cur == c_nRefIncrement ) {
                pLock = p.m_SyncMonitorInjection.m_pLock;
                p.m_SyncMonitorInjection.m_pLock = nullptr;
            }

            // release the spin-lock and publish the decremented reference counter
            p.m_SyncMonitorInjection.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );

            // return the detached mutex to the pool outside the spin-lock
            if ( pLock ) {
                m_Pool.deallocate( pLock, 1 );
                m_Stat.onLockDeallocation();
            }
        }

        /// Scoped lock
        template <typename Node>
        using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;

        /// Returns the reference to internal statistics
        /**
            If the class' template argument \p Stat is \p false,
            the function returns \ref empty_stat "dummy statistics".
            Otherwise, it returns the reference to the monitor's internal statistics
            of type \ref stat.
        */
        internal_stat const& statistics() const
        {
            return m_Stat;
        }
    };

}} // namespace cds::sync

#endif // #ifndef CDSLIB_SYNC_POOL_MONITOR_H
