//$$CDS-header$$

#ifndef CDSLIB_SYNC_POOL_MONITOR_H
#define CDSLIB_SYNC_POOL_MONITOR_H

#include <cds/sync/monitor.h>
#include <cds/algo/atomic.h>
#include <cds/algo/backoff_strategy.h>

namespace cds { namespace sync {

    /// @ref cds_sync_monitor "Monitor" that allocates a node's lock only when needed
    /**
        The monitor is intended to reduce the number of system mutexes needed by
        huge containers such as trees. The monitor allocates a mutex from the pool \p LockPool
        only when a container node actually has to be locked. The lifetime of a node's mutex is
        managed by a reference counter: when the counter drops to zero, the mutex is returned
        to the pool.

        The monitor is blocking: access to the node's mutex pointer is performed under a spin-lock.
        However, locking and unlocking of the node's mutex itself happen outside that spin-lock.

        Template arguments:
        - \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
            objects of type \p std::mutex or a similar lockable type. Access to the pool is not synchronized.
        - \p BackOff - back-off strategy for spinning; the default is \p cds::backoff::LockDefault

        <b>How to use</b>
        \code
        typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
        typedef cds::sync::pool_monitor< pool_type > sync_monitor;
        \endcode
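
        Each container node protected by the monitor must contain a member named
        \p m_SyncMonitorInjection of type \p node_injection (see \p lock() / \p unlock() below).
        A minimal usage sketch, assuming a hypothetical user-defined node type \p my_node:
        \code
        struct my_node {
            int data;
            // member injected for the monitor; the member name is required by pool_monitor
            sync_monitor::node_injection m_SyncMonitorInjection;
        };

        sync_monitor monitor( 1024 );   // pool of 1024 preallocated mutexes
        my_node n;

        monitor.lock( n );      // allocates n's mutex from the pool on the first lock
        // ... exclusive access to n ...
        monitor.unlock( n );    // returns the mutex to the pool when the last owner unlocks
        \endcode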
    */
    template <class LockPool, typename BackOff = cds::backoff::LockDefault >
    class pool_monitor
    {
    public:
        typedef LockPool pool_type; ///< Pool type
        typedef typename pool_type::value_type lock_type; ///< node lock type
        typedef BackOff  back_off;  ///< back-off strategy for spinning
        typedef uint32_t refspin_type;  ///< Reference counter + spin-lock bit

    private:
        //@cond
        static CDS_CONSTEXPR refspin_type const c_nSpinBit = 1;
        static CDS_CONSTEXPR refspin_type const c_nRefIncrement = 2;
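        // Layout of node_injection::m_RefSpin: bit 0 is the spin-lock flag (c_nSpinBit),
        // the remaining bits hold the reference counter, so the counter is changed
        // in steps of c_nRefIncrement (== 2).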
        mutable pool_type   m_Pool;
        //@endcond

    public:

        /// Node injection
        struct node_injection
        {
            mutable atomics::atomic<refspin_type>   m_RefSpin;  ///< Spin-lock for \p m_pLock (bit 0) + reference counter
            mutable lock_type *                     m_pLock;    ///< Node-level lock

            node_injection()
                : m_RefSpin( 0 )
                , m_pLock( nullptr )
            {}

            ~node_injection()
            {
                assert( m_pLock == nullptr );
                assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
            }
        };

        /// Initializes the pool with 256 preallocated mutexes
        pool_monitor()
            : m_Pool( 256 )
        {}

        /// Initializes the pool with \p nPoolCapacity preallocated mutexes
        pool_monitor( size_t nPoolCapacity )
            : m_Pool( nPoolCapacity )
        {}

        /// Acquires exclusive access to node \p p
        template <typename Node>
        void lock( Node const& p ) const
        {
            lock_type * pLock;

            // try to acquire the spin-lock and increment the reference counter in one CAS
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
            {
                back_off bkoff;
                do {
                    bkoff();
                    // on CAS failure cur holds the observed value; clear the spin bit and retry
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }

            // spin-lock acquired
            // If the node has no lock, allocate it from the pool
            pLock = p.m_SyncMonitorInjection.m_pLock;
            if ( !pLock )
                pLock = p.m_SyncMonitorInjection.m_pLock = m_Pool.allocate( 1 );

            // release the spin-lock, keeping the incremented reference counter
            p.m_SyncMonitorInjection.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );

            // lock the node
            pLock->lock();
        }

        /// Unlocks the node \p p
        template <typename Node>
        void unlock( Node const& p ) const
        {
            lock_type * pLock = nullptr;

            assert( p.m_SyncMonitorInjection.m_pLock != nullptr );
            p.m_SyncMonitorInjection.m_pLock->unlock();

            // try to acquire the spin-lock
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
            {
                back_off bkoff;
                do {
                    bkoff();
                    // on CAS failure cur holds the observed value; clear the spin bit and retry
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }

            // spin-lock acquired

            // If we are the unique owner, detach the lock so it can be returned to the pool
            if ( cur == c_nRefIncrement ) {
                pLock = p.m_SyncMonitorInjection.m_pLock;
                p.m_SyncMonitorInjection.m_pLock = nullptr;
            }

            // release the spin-lock and decrement the reference counter
            p.m_SyncMonitorInjection.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );

            // give the detached lock back to the pool
            if ( pLock )
                m_Pool.deallocate( pLock, 1 );
        }

        /// Scoped lock
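        /// An RAII guard built on \p monitor_scoped_lock from <tt>cds/sync/monitor.h</tt>.
        /// A minimal sketch, reusing the \p sync_monitor and \p my_node types from the
        /// class-level example and assuming \p monitor_scoped_lock is constructed from
        /// a monitor reference and a node reference:
        /// \code
        /// sync_monitor monitor;
        /// my_node n;
        /// {
        ///     sync_monitor::scoped_lock< my_node > guard( monitor, n );
        ///     // ... exclusive access to n ...
        /// }   // n is unlocked here
        /// \endcode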
        template <typename Node>
        using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;
    };

}} // namespace cds::sync

#endif // #ifndef CDSLIB_SYNC_POOL_MONITOR_H