/projects/Win/vc14/cds.vcxproj.user
/projects/Win/vc14/*.opensdf
/projects/Win/vc14/.vs/cds/v14
+/build/build-mingw-amd64.bat
+/build/build-mingw-amd64.log
/// BronsonAVLTree related declarations
namespace bronson_avltree {
- template <typename Key, typename T>
+ template <typename Key, typename T, typename SyncMonitor >
struct node;
//@cond
- template <typename Node>
+ template <typename Node, typename SyncMonitor>
struct link_node
{
typedef Node node_type;
atomics::atomic<node_type *> m_pParent; ///< Parent node
atomics::atomic<node_type *> m_pLeft; ///< Left child
atomics::atomic<node_type *> m_pRight; ///< Right child
+ typename SyncMonitor::node_injection m_SyncMonitorInjection; ///< @ref cds_sync_monitor "synchronization monitor" injected data
public:
//@cond
//@endcond
// BronsonAVLTree internal node
- template <typename Key, typename T>
- struct node<Key, T*>: public link_node< node<Key, T*>>
+ template <typename Key, typename T, typename SyncMonitor >
+ struct node<Key, T*, SyncMonitor>: public link_node< node<Key, T*, SyncMonitor>, SyncMonitor >
{
- typedef link_node< node<Key, T*>> base_class;
+ typedef link_node< node<Key, T*, SyncMonitor>, SyncMonitor > base_class;
typedef Key key_type; ///< key type
typedef T mapped_type; ///< value type
protected:
//@cond
- typedef typename sync_monitor::template node_injection< bronson_avltree::node< key_type, mapped_type >> node_type;
+ typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
typedef typename node_type::version_type version_type;
typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
{
- return static_cast<node_type *>(pNode->child( nDir ).load( order ));
+ return pNode->child( nDir ).load( order );
}
static node_type * parent( node_type * pNode, atomics::memory_order order )
{
- return static_cast<node_type *>(pNode->m_pParent.load( order ));
+ return pNode->m_pParent.load( order );
}
// RCU safe disposer
public:
typedef Lock lock_type; ///< Lock type
- /// Monitor injection into \p Node
- template <typename Node>
- struct node_injection: public Node
- {
-# ifdef CDS_CXX11_INHERITING_CTOR
- using Node::Node;
-# else
- // Inheriting ctor emulation
- template <typename... Args>
- node_injection( Args&&... args )
- : Node( std::forward<Args>( args )... )
- {}
-# endif
- mutable lock_type m_Lock; ///< Node-level lock
+ /// Node injection
+ struct node_injection {
+ mutable lock_type m_Lock; ///< Node spin-lock
};
/// Makes exclusive access to node \p p
template <typename Node>
void lock( Node const& p ) const
{
- p.m_Lock.lock();
+ p.m_SyncMonitorInjection.m_Lock.lock();
}
/// Unlocks the node \p p
template <typename Node>
void unlock( Node const& p ) const
{
- p.m_Lock.unlock();
+ p.m_SyncMonitorInjection.m_Lock.unlock();
}
/// Scoped lock
For huge trees containing millions of nodes it can be very inefficient to inject
the lock object into each node. Moreover, some operating systems may not support
the millions of system objects like mutexes per user process.
-
The monitor strategy is intended to solve that problem.
When the node should be locked, the monitor is called to allocate appropriate
- lock object for the node if it's needed, and to lock the node.
+ lock object for the node if needed, and to lock the node.
The monitor strategy can significantly reduce the number of system objects
required for data structure.
using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;
};
\endcode
+
+ Monitor's data should be injected into the container's node as the \p m_SyncMonitorInjection data member:
+ \code
+ template <typename SyncMonitor>
+ struct my_node
+ {
+ typename SyncMonitor::node_injection m_SyncMonitorInjection;
+ };
+ \endcode
+
The monitor should be a member of your container:
\code
template <typename GC, typename T, typename Traits>
class my_container {
// ...
typedef typename Traits::sync_monitor sync_monitor;
+ typedef my_node<sync_monitor> node_type;
sync_monitor m_Monitor;
//...
};
\endcode
+
*/
/// Monitor scoped lock (RAII)
//@endcond
public:
- /// Monitor injection into \p Node
- template <typename Node>
- class node_injection : public Node
+
+ /// Node injection
+ struct node_injection
{
- //@cond
- typedef unsigned int refspin_type;
- static CDS_CONSTEXPR refspin_type const c_nSpinBit = 1;
- static CDS_CONSTEXPR refspin_type const c_nRefIncrement = 2;
+ mutable atomics::atomic<refspin_type> m_RefSpin; ///< Spin-lock for \p m_pLock (bit 0) + reference counter
+ mutable lock_type * m_pLock; ///< Node-level lock
- struct injection
- {
- atomics::atomic<refspin_type> m_RefSpin; ///< Spin-lock for \p m_pLock (bit 0) + reference counter
- lock_type * m_pLock; ///< Node-level lock
-
- injection()
- : m_Access( 0 )
- , m_pLock( nullptr )
- {}
-
- ~injection()
- {
- assert( m_pLock == nullptr );
- assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
- }
- };
- //@endcond
-
- public:
- mutable injection m_Access; ///< injected data
-
-# ifdef CDS_CXX11_INHERITING_CTOR
- using Node::Node;
-# else
- // Inheriting ctor emulation
- template <typename... Args>
- node_injection( Args&&... args )
- : Node( std::forward<Args>( args )... )
+ node_injection()
+ : m_RefSpin( 0 )
+ , m_pLock( nullptr )
{}
-# endif
+
+ ~node_injection()
+ {
+ assert( m_pLock == nullptr );
+ assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
+ }
};
/// Initializes the pool of 256 preallocated mutexes
lock_type * pLock;
// try lock spin and increment reference counter
- refspin_type cur = p.m_Access.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
- if ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
+ refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
+ if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
{
back_off bkoff;
do {
bkoff();
cur &= ~c_nSpinBit;
- } while ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
+ } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
atomics::memory_order_acquire, atomics::memory_order_relaxed );
}
// spin locked
// If the node has no lock, allocate it from pool
- pLock = p.m_Access.m_pLock;
+ pLock = p.m_SyncMonitorInjection.m_pLock;
if ( !pLock )
- pLock = p.m_Access.m_pLock = m_Pool.allocate( 1 );
+ pLock = p.m_SyncMonitorInjection.m_pLock = m_Pool.allocate( 1 );
// unlock spin
- p.m_Access.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );
+ p.m_SyncMonitorInjection.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );
// lock the node
pLock->lock();
{
lock_type * pLock = nullptr;
- assert( p.m_Access.m_pLock != nullptr );
- p.m_Access.m_pLock->unlock();
+ assert( p.m_SyncMonitorInjection.m_pLock != nullptr );
+ p.m_SyncMonitorInjection.m_pLock->unlock();
// try lock spin
- refspin_type cur = p.m_Access.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
- if ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
+ refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
+ if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
{
back_off bkoff;
do {
bkoff();
cur &= ~c_nSpinBit;
- } while ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
+ } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
atomics::memory_order_acquire, atomics::memory_order_relaxed );
}
// If we are the unique owner - deallocate lock
if ( cur == c_nRefIncrement ) {
- pLock = p.m_Access.m_pLock;
- p.m_Access.m_pLock = nullptr;
+ pLock = p.m_SyncMonitorInjection.m_pLock;
+ p.m_SyncMonitorInjection.m_pLock = nullptr;
}
// unlock spin
- p.m_Access.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );
+ p.m_SyncMonitorInjection.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );
// free pLock
if ( pLock )
<ClInclude Include="..\..\..\cds\intrusive\striped_set\resizing_policy.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\striped_set\striping_policy.h" />\r
<ClInclude Include="..\..\..\cds\lock\array.h" />\r
- <ClInclude Include="..\..\..\cds\memory\mapper.h" />\r
<ClInclude Include="..\..\..\cds\memory\pool_allocator.h" />\r
<ClInclude Include="..\..\..\cds\memory\vyukov_queue_pool.h" />\r
<ClInclude Include="..\..\..\cds\os\osx\timer.h" />\r
<ClInclude Include="..\..\..\cds\compiler\clang\defs.h">\r
<Filter>Header Files\cds\compiler\clang</Filter>\r
</ClInclude>\r
- <ClInclude Include="..\..\..\cds\memory\mapper.h">\r
- <Filter>Header Files\cds\memory</Filter>\r
- </ClInclude>\r
<ClInclude Include="..\..\..\cds\intrusive\basket_queue.h">\r
<Filter>Header Files\cds\intrusive</Filter>\r
</ClInclude>\r