//$$CDS-header$$

#ifndef CDSLIB_SYNC_POOL_MONITOR_H
#define CDSLIB_SYNC_POOL_MONITOR_H

#include <cds/sync/monitor.h>
#include <cds/algo/atomic.h>
#include <cds/algo/backoff_strategy.h>

namespace cds { namespace sync {

    /// @ref cds_sync_monitor "Monitor" that allocates node's lock only when needed
    /**
        The monitor is intended to reduce the number of system mutexes required by
        huge containers such as trees. The monitor allocates a mutex from the pool \p LockPool
        only when a container node must be locked. The lifetime of a node's mutex is managed
        by a reference counter: when the counter drops to zero, the mutex is returned to the pool.

        The monitor is blocking: access to the node's mutex pointer is protected by a per-node
        spin-lock, while locking and unlocking of the node's mutex itself happen outside
        that spin-lock.

        Template arguments:
        - \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
            objects of type \p std::mutex or similar. Access to the pool is not synchronized.
        - \p BackOff - the back-off strategy for spinning; the default is \p cds::backoff::LockDefault

        <b>How to use</b>
        \code
        typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
        typedef cds::sync::pool_monitor< pool_type > sync_monitor;
        \endcode
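
        A node is then locked via \p scoped_lock. A minimal sketch, assuming a hypothetical
        node type \p MyNode that stores the injected data in a member named
        \p m_SyncMonitorInjection (the member name the monitor accesses):
        \code
        struct MyNode {
            sync_monitor::node_injection m_SyncMonitorInjection;  // per-node lock data
            // ... node payload ...
        };

        sync_monitor mon;    // pool of 256 preallocated mutexes
        MyNode node;
        {
            sync_monitor::scoped_lock<MyNode> lk( mon, node );  // locks the node
            // ... exclusive access to the node ...
        }   // unlocked; the mutex is returned to the pool when the last owner leaves
        \endcode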
    */
    template <class LockPool, typename BackOff = cds::backoff::LockDefault >
    class pool_monitor
    {
    public:
        typedef LockPool pool_type; ///< Pool type
        typedef typename pool_type::value_type lock_type; ///< node lock type
        typedef BackOff  back_off;  ///< back-off strategy for spinning

    private:
        //@cond
        // m_RefSpin layout: bit 0 is the spin-lock bit, the remaining bits hold
        // the reference counter, so the counter advances in steps of c_nRefIncrement
        typedef unsigned int refspin_type;
        static CDS_CONSTEXPR refspin_type const c_nSpinBit = 1;
        static CDS_CONSTEXPR refspin_type const c_nRefIncrement = 2;

        // mutable: lock()/unlock() are const but allocate from / return mutexes to the pool
        mutable pool_type   m_Pool;
        //@endcond

    public:

        /// Node injection
        struct node_injection
        {
            mutable atomics::atomic<refspin_type>   m_RefSpin;  ///< Spin-lock for \p m_pLock (bit 0) + reference counter
            mutable lock_type *                     m_pLock;    ///< Node-level lock

            node_injection()
                : m_RefSpin( 0 )
                , m_pLock( nullptr )
            {}

            ~node_injection()
            {
                assert( m_pLock == nullptr );
                assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
            }
        };

        /// Initializes the pool of 256 preallocated mutexes
        pool_monitor()
            : m_Pool( 256 )
        {}

        /// Initializes the pool of \p nPoolCapacity preallocated mutexes
        pool_monitor( size_t nPoolCapacity )
            : m_Pool( nPoolCapacity )
        {}

        /// Makes exclusive access to node \p p
        template <typename Node>
        void lock( Node const& p ) const
        {
            lock_type * pLock;

            // try to lock the spin-lock and increment the reference counter in one CAS
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ))
            {
                back_off bkoff;
                do {
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }

            // spin-lock is acquired
            // If the node has no lock object, allocate one from the pool
            pLock = p.m_SyncMonitorInjection.m_pLock;
            if ( !pLock )
                pLock = p.m_SyncMonitorInjection.m_pLock = m_Pool.allocate( 1 );

            // release the spin-lock, keeping the incremented reference counter
            p.m_SyncMonitorInjection.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );

            // lock the node's mutex outside the spin-lock
            pLock->lock();
        }

        /// Unlocks the node \p p
        template <typename Node>
        void unlock( Node const& p ) const
        {
            lock_type * pLock = nullptr;

            assert( p.m_SyncMonitorInjection.m_pLock != nullptr );
            p.m_SyncMonitorInjection.m_pLock->unlock();

            // try to lock the spin-lock
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ))
            {
                back_off bkoff;
                do {
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }

            // spin-lock is acquired

            // If we are the unique owner, take the lock object for deallocation
            if ( cur == c_nRefIncrement ) {
                pLock = p.m_SyncMonitorInjection.m_pLock;
                p.m_SyncMonitorInjection.m_pLock = nullptr;
            }

            // decrement the reference counter and release the spin-lock
            p.m_SyncMonitorInjection.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );

            // return the lock object to the pool outside the spin-lock
            if ( pLock )
                m_Pool.deallocate( pLock, 1 );
        }

        /// Scoped lock
        template <typename Node>
        using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;
    };

}} // namespace cds::sync

#endif // #ifndef CDSLIB_SYNC_POOL_MONITOR_H