From 4fce70a611dfe8f338896c5e3bf64165b2dec0b7 Mon Sep 17 00:00:00 2001
From: khizmax
Date: Tue, 3 Feb 2015 12:40:07 +0300
Subject: [PATCH] Added sync monitor for pool of mutexes

Simplified monitor interface
Fixed doc bugs
---
 cds/memory/vyukov_queue_pool.h        |   9 +-
 cds/sync/injecting_monitor.h          |  46 +------
 cds/sync/lock_array.h                 |   6 +-
 cds/sync/monitor.h                    |  49 ++++++--
 cds/sync/pool_monitor.h               | 171 ++++++++++++++++++++++++++
 projects/Win/vc12/cds.vcxproj         |   1 +
 projects/Win/vc12/cds.vcxproj.filters |   3 +
 7 files changed, 229 insertions(+), 56 deletions(-)
 create mode 100644 cds/sync/pool_monitor.h

diff --git a/cds/memory/vyukov_queue_pool.h b/cds/memory/vyukov_queue_pool.h
index 02e80cb2..7bb3f749 100644
--- a/cds/memory/vyukov_queue_pool.h
+++ b/cds/memory/vyukov_queue_pool.h
@@ -16,12 +16,13 @@ namespace cds { namespace memory {
         /// Allocator type
         typedef CDS_DEFAULT_ALLOCATOR allocator;
     };
+
     /// Free-list based on bounded lock-free queue cds::intrusive::VyukovMPMCCycleQueue
     /** @ingroup cds_memory_pool
         Template parameters:
         - \p T - the type of object maintaining by free-list
-        - \p Traits - traits for cds::intrusive::VyukovMPMCCycleQueue class plus
-            cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits
+        - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus
+            \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits

         \b Internals
@@ -120,7 +121,7 @@ namespace cds { namespace memory {
         /**
             \p nCapacity argument is the queue capacity. It should be passed
             if the queue is based on dynamically-allocated buffer.
-            See cds::intrusive::VyukovMPMCCycleQueue for explanation.
+            See \p cds::intrusive::VyukovMPMCCycleQueue for explanation.
         */
         vyukov_queue_pool( size_t nCapacity = 0 )
             : m_Queue( nCapacity )
@@ -413,7 +414,7 @@ namespace cds { namespace memory {
         /**
             \p nCapacity argument is the queue capacity. It should be passed
             if the queue is based on dynamically-allocated buffer.
-            See \p cds::intrusive::VyukovMPMCCycleQueue for explanation.
+            See \p cds::intrusive::VyukovMPMCCycleQueue for explanation.
         */
         bounded_vyukov_queue_pool( size_t nCapacity = 0 )
             : m_Queue( nCapacity )
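
For reference, the free-list API this patch relies on is small. A typical
allocate/deallocate cycle looks like the sketch below (a minimal sketch only:
the \p std::mutex payload and the capacity of 256 are illustrative assumptions,
while allocate( 1 ) and deallocate( p, 1 ) match the calls pool_monitor makes
later in this patch):

\code
#include <mutex>
#include <cds/memory/vyukov_queue_pool.h>

// Free-list of mutexes; the capacity is used if the underlying
// queue is based on a dynamically-allocated buffer
typedef cds::memory::vyukov_queue_pool< std::mutex > mutex_pool;

void example()
{
    mutex_pool pool( 256 );

    std::mutex * p = pool.allocate( 1 );   // take an object from the free-list
    p->lock();
    // ... critical section ...
    p->unlock();
    pool.deallocate( p, 1 );               // give it back to the free-list
}
\endcode
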
diff --git a/cds/sync/injecting_monitor.h b/cds/sync/injecting_monitor.h
index d7bf7c2f..a2610e59 100644
--- a/cds/sync/injecting_monitor.h
+++ b/cds/sync/injecting_monitor.h
@@ -12,7 +12,7 @@ namespace cds { namespace sync {
     /// @ref cds_sync_monitor "Monitor" that injects the lock into each node
     /**
-        This monitor injects the lock object of type \p Lock into each node.
+        This simple monitor injects the lock object of type \p Lock into each node.
         The monitor is designed for user-space locking primitives like \ref sync::spin_lock "spin-lock".

         Template arguments:
@@ -25,7 +25,8 @@ namespace cds { namespace sync {
         typedef Lock lock_type; ///< Lock type

         /// Monitor injection into \p Node
-        struct node_injection
+        template <typename Node>
+        struct node_injection: public Node
         {
 #       ifdef CDS_CXX11_INHERITING_CTOR
             using Node::Node;
 #       else
@@ -37,60 +38,25 @@ namespace cds { namespace sync {
             {}
 #       endif
             mutable lock_type m_Lock; ///< Node-level lock
-
-            /// Makes exclusive access to the node
-            void lock() const
-            {
-                m_Lock.lock;
-            }
-
-            /// Unlocks the node
-            void unlock() const
-            {
-                m_Lock.unlock();
-            }
         };

         /// Makes exclusive access to node \p p
-        /**
-            \p p must have method \p lock()
-        */
         template <typename Node>
         void lock( Node const& p ) const
         {
-            p.lock();
+            p.m_Lock.lock();
         }

         /// Unlocks the node \p p
-        /**
-            \p p must have method \p unlock()
-        */
         template <typename Node>
         void unlock( Node const& p ) const
         {
-            p.unlock();
+            p.m_Lock.unlock();
         }

         /// Scoped lock
         template <typename Node>
-        class scoped_lock
-        {
-            Node const& m_Locked; ///< Our locked node
-
-        public:
-            /// Makes exclusive access to node \p p
-            scoped_lock( injecting_monitor const&, Node const& p )
-                : m_Locked( p )
-            {
-                p.lock();
-            }
-
-            /// Unlocks the node
-            ~scoped_lock()
-            {
-                p.unlock();
-            }
-        };
+        using scoped_lock = monitor_scoped_lock< injecting_monitor, Node >;
     };
 }} // namespace cds::sync

diff --git a/cds/sync/lock_array.h b/cds/sync/lock_array.h
index 60044917..d3f47dc5 100644
--- a/cds/sync/lock_array.h
+++ b/cds/sync/lock_array.h
@@ -9,7 +9,7 @@ namespace cds { namespace sync {

-    /// Trivial lock \ref array selection policy
+    /// Trivial lock \ref lock_array selection policy
     struct trivial_select_policy
     {
         /// Returns \p nWhat
@@ -28,7 +28,7 @@ namespace cds { namespace sync {
         }
     };

-    /// The lock \ref array cell selection policy "division by modulo"
+    /// The lock \ref lock_array cell selection policy "division by modulo"
     struct mod_select_policy
     {
         /// Returns nWhat % nCapacity
@@ -44,7 +44,7 @@ namespace cds { namespace sync {
         }
     };

-    /// The lock \ref array cell selection policy "division by modulo of power of 2"
+    /// The lock \ref lock_array cell selection policy "division by modulo of power of 2"
     /**
         This policy may be used if the size of lock array is equal to power of two.
     */
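
To make the simplified interface concrete, here is a usage sketch for
injecting_monitor. The \p list_node type is hypothetical, and \p std::mutex
stands in for the user-space spin-lock the monitor is designed for:

\code
#include <mutex>
#include <cds/sync/injecting_monitor.h>

struct list_node {            // a plain container node
    int         key;
    list_node * next;
};

typedef cds::sync::injecting_monitor< std::mutex > monitor_type;

// node_injection derives from the node and embeds the lock into it
typedef monitor_type::node_injection< list_node > node_type;

void update( monitor_type& mon, node_type const& n )
{
    // RAII: locks n via the monitor, unlocks when guard dies
    monitor_type::scoped_lock< node_type > guard( mon, n );
    // ... read or modify the node under its per-node lock ...
}
\endcode
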
diff --git a/cds/sync/monitor.h b/cds/sync/monitor.h
index 82577420..2ce48846 100644
--- a/cds/sync/monitor.h
+++ b/cds/sync/monitor.h
@@ -30,6 +30,10 @@ namespace cds { namespace sync {
         - \p sync::injecting_monitor injects the lock object into each node.
             That mock monitor is designed for user-space locking primitive like
             \ref sync::spin_lock "spin-lock".
+        - \p sync::pool_monitor is the monitor that allocates the lock object
+            for a node from the pool when needed. When the node is unlocked,
+            the lock assigned to it is returned to the pool if no thread
+            references that node.

         How to use
@@ -61,15 +65,7 @@ namespace cds { namespace sync {
             // Scoped lock applies RAII to Monitor
             template <typename Node>
-            class scoped_lock
-            {
-            public:
-                // Locks node by monitor mon
-                scoped_lock( Monitor& mon, Node& node );
-
-                // Unlocks the node locked by ctor
-                ~scoped_lock();
-            };
+            using scoped_lock = monitor_scoped_lock< Monitor, Node >;
         };
         \endcode
         The monitor should be a member of your container:
@@ -84,6 +80,41 @@ namespace cds { namespace sync {
         \endcode
     */

+    /// Monitor scoped lock (RAII)
+    /**
+        Template arguments:
+        - \p Monitor - monitor type
+        - \p Node - node type
+    */
+    template <typename Monitor, typename Node>
+    struct monitor_scoped_lock
+    {
+    public:
+        typedef Monitor monitor_type;   ///< Monitor type
+        typedef Node    node_type;      ///< Node type
+
+    private:
+        //@cond
+        monitor_type&    m_Monitor;     ///< Monitor
+        node_type const& m_Node;        ///< Our locked node
+        //@endcond
+
+    public:
+        /// Makes exclusive access to the node \p p by \p monitor
+        monitor_scoped_lock( monitor_type& monitor, node_type const& p )
+            : m_Monitor( monitor )
+            , m_Node( p )
+        {
+            monitor.lock( p );
+        }
+
+        /// Unlocks the node
+        ~monitor_scoped_lock()
+        {
+            m_Monitor.unlock( m_Node );
+        }
+    };
+
 }} // namespace cds::sync

 #endif // #ifndef CDSLIB_SYNC_MONITOR_H
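
The intended embedding described above looks roughly like the following sketch
(\p my_container and \p plain_node are hypothetical names, not part of libcds):

\code
#include <mutex>
#include <cds/sync/injecting_monitor.h>

template <typename SyncMonitor>
class my_container
{
    struct plain_node
    {
        int value;
    };

    // The monitor decorates the node with whatever data it needs
    typedef typename SyncMonitor::template node_injection< plain_node > node_type;

    SyncMonitor m_Monitor;   // the monitor is a member of the container

    void update( node_type const& n )
    {
        // Node-level locking always goes through the monitor
        typename SyncMonitor::template scoped_lock< node_type > guard( m_Monitor, n );
        // ... modify the node ...
    }
};

typedef my_container< cds::sync::injecting_monitor< std::mutex >> container_type;
\endcode
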
diff --git a/cds/sync/pool_monitor.h b/cds/sync/pool_monitor.h
new file mode 100644
index 00000000..cf877166
--- /dev/null
+++ b/cds/sync/pool_monitor.h
@@ -0,0 +1,171 @@
+//$$CDS-header$$
+
+#ifndef CDSLIB_SYNC_POOL_MONITOR_H
+#define CDSLIB_SYNC_POOL_MONITOR_H
+
+#include <cds/sync/monitor.h>
+#include <cds/algo/atomic.h>
+#include <cds/algo/backoff_strategy.h>
+
+namespace cds { namespace sync {
+
+    /// @ref cds_sync_monitor "Monitor" that allocates node's lock when needed
+    /**
+        The monitor is intended for reducing the number of system mutexes for
+        huge containers like a tree. The monitor allocates the mutex from the pool \p LockPool
+        only when a container's node should be locked. The lifetime of a node's mutex is managed
+        by a reference counter: when the reference counter to the node's mutex becomes zero,
+        the mutex is given back to the pool.
+
+        The monitor is blocking: access to a node's mutex is performed under a spin-lock.
+        However, node locking/unlocking is performed outside the spin-lock.
+
+        Template arguments:
+        - \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
+            objects of type \p std::mutex or similar. Access to the pool is not synchronized.
+        - \p BackOff - back-off strategy for spinning, default is \p cds::backoff::LockDefault
+
+        How to use
+        \code
+        typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
+        typedef cds::sync::pool_monitor< pool_type > sync_monitor;
+        \endcode
+    */
+    template <class LockPool, typename BackOff = cds::backoff::LockDefault>
+    class pool_monitor
+    {
+    public:
+        typedef LockPool pool_type;                         ///< Pool type
+        typedef typename pool_type::value_type lock_type;   ///< node lock type
+        typedef BackOff back_off;                           ///< back-off strategy for spinning
+
+    private:
+        //@cond
+        typedef unsigned int refspin_type;
+        static constexpr refspin_type c_nSpinBit = 1;       // bit 0 - spin-lock
+        static constexpr refspin_type c_nRefIncrement = 2;  // one reference
+
+        mutable pool_type m_Pool;
+        //@endcond
+
+    public:
+        /// Monitor injection into \p Node
+        template <typename Node>
+        class node_injection : public Node
+        {
+            //@cond
+            struct injection
+            {
+                atomics::atomic< refspin_type > m_RefSpin;  ///< Spin-lock for \p m_pLock (bit 0) + reference counter
+                lock_type *                     m_pLock;    ///< Node-level lock
+
+                injection()
+                    : m_RefSpin( 0 )
+                    , m_pLock( nullptr )
+                {}
+            };
+            //@endcond
+
+        public:
+            mutable injection m_Access; ///< injected data
+
+#       ifdef CDS_CXX11_INHERITING_CTOR
+            using Node::Node;
+#       else
+            // Inheriting ctor emulation
+            template <typename... Args>
+            node_injection( Args&&... args )
+                : Node( std::forward<Args>( args )... )
+            {}
+#       endif
+        };
+
+        /// Initializes the pool of 256 preallocated mutexes
+        pool_monitor()
+            : m_Pool( 256 )
+        {}
+
+        /// Initializes the pool of \p nPoolCapacity preallocated mutexes
+        pool_monitor( size_t nPoolCapacity )
+            : m_Pool( nPoolCapacity )
+        {}
+
+        /// Makes exclusive access to node \p p
+        template <typename Node>
+        void lock( Node const& p ) const
+        {
+            lock_type * pLock;
+
+            // try to lock the spin-bit and increment the reference counter
+            refspin_type cur = p.m_Access.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
+            if ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
+                atomics::memory_order_acquire, atomics::memory_order_relaxed ))
+            {
+                back_off bkoff;
+                do {
+                    bkoff();
+                    cur &= ~c_nSpinBit;
+                } while ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
+                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
+            }
+
+            // spin locked
+            // If the node has no lock, allocate it from the pool
+            pLock = p.m_Access.m_pLock;
+            if ( !pLock )
+                pLock = p.m_Access.m_pLock = m_Pool.allocate( 1 );
+
+            // unlock spin
+            p.m_Access.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );
+
+            // lock the node
+            pLock->lock();
+        }
+
+        /// Unlocks the node \p p
+        template <typename Node>
+        void unlock( Node const& p ) const
+        {
+            lock_type * pLock = nullptr;
+
+            assert( p.m_Access.m_pLock != nullptr );
+            p.m_Access.m_pLock->unlock();
+
+            // try to lock the spin-bit
+            refspin_type cur = p.m_Access.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
+            if ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
+                atomics::memory_order_acquire, atomics::memory_order_relaxed ))
+            {
+                back_off bkoff;
+                do {
+                    bkoff();
+                    cur &= ~c_nSpinBit;
+                } while ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
+                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
+            }
+
+            // spin locked now
+
+            // If we are the unique owner - deallocate the lock
+            if ( cur == c_nRefIncrement ) {
+                pLock = p.m_Access.m_pLock;
+                p.m_Access.m_pLock = nullptr;
+            }
+
+            // unlock spin and decrement the reference counter
+            p.m_Access.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );
+
+            // free pLock
+            if ( pLock )
+                m_Pool.deallocate( pLock, 1 );
+        }
+
+        /// Scoped lock
+        template <typename Node>
+        using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;
+    };
+
+}} // namespace cds::sync
+
+#endif // #ifndef CDSLIB_SYNC_POOL_MONITOR_H
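
The word packing used by pool_monitor is worth spelling out: bit 0 of
\p m_RefSpin is the spin-lock and the remaining bits hold the reference
counter, so one reference equals \p c_nRefIncrement == 2 and both the spin-bit
and the counter can be manipulated by a single CAS. The following standalone
sketch (illustration only, using plain \p std::atomic instead of the libcds
atomics wrapper) walks one lock()-side pass through that arithmetic:

\code
#include <atomic>
#include <cassert>

typedef unsigned int refspin_type;
static const refspin_type c_nSpinBit      = 1;  // bit 0 - spin-lock
static const refspin_type c_nRefIncrement = 2;  // one reference

int main()
{
    std::atomic< refspin_type > refSpin( 0 );

    // lock() path: acquire the spin-bit and add one reference in one CAS.
    // The CAS can only succeed against a value with the spin-bit cleared.
    refspin_type cur = refSpin.load( std::memory_order_relaxed ) & ~c_nSpinBit;
    while ( !refSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
        std::memory_order_acquire, std::memory_order_relaxed ))
    {
        cur &= ~c_nSpinBit;   // retry against an unlocked value
    }

    // ... allocate or reuse the node's mutex here ...

    // drop the spin-bit, keep the new reference
    refSpin.store( cur + c_nRefIncrement, std::memory_order_release );

    // On the unlock() side, the last owner observes cur == c_nRefIncrement
    // and may return the mutex to the pool.
    assert( refSpin.load( std::memory_order_relaxed ) == c_nRefIncrement );
    return 0;
}
\endcode
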
diff --git a/projects/Win/vc12/cds.vcxproj b/projects/Win/vc12/cds.vcxproj
index 81c05c82..9f3752a4 100644
--- a/projects/Win/vc12/cds.vcxproj
+++ b/projects/Win/vc12/cds.vcxproj
@@ -797,6 +797,7 @@
+    <ClInclude Include="..\..\..\cds\sync\pool_monitor.h" />
diff --git a/projects/Win/vc12/cds.vcxproj.filters b/projects/Win/vc12/cds.vcxproj.filters
index 37cf189b..c3667980 100644
--- a/projects/Win/vc12/cds.vcxproj.filters
+++ b/projects/Win/vc12/cds.vcxproj.filters
@@ -1181,5 +1181,8 @@
       <Filter>Header Files\cds\sync</Filter>
     </ClInclude>
+    <ClInclude Include="..\..\..\cds\sync\pool_monitor.h">
+      <Filter>Header Files\cds\sync</Filter>
+    </ClInclude>
   </ItemGroup>
 </Project>
\ No newline at end of file
-- 
2.34.1