/*
This file is a part of libcds - Concurrent Data Structures library
- (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
Source code repo: http://github.com/khizmax/libcds/
Download: http://sourceforge.net/projects/libcds/files/
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_SYNC_POOL_MONITOR_H
void onLockAllocation()
{
++m_nLockAllocation;
int nDiff = static_cast<int>( m_nLockAllocation.get() - m_nLockDeallocation.get());
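// The counters are updated concurrently, so the signed difference may
// transiently be negative; the nDiff > 0 guard below filters that out.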
- if ( nDiff > 0 && m_nMaxAllocated.get() < static_cast<typename event_counter::value_type>( nDiff ) )
+ if ( nDiff > 0 && m_nMaxAllocated.get() < static_cast<typename event_counter::value_type>( nDiff ))
m_nMaxAllocated = static_cast<typename event_counter::value_type>( nDiff );
}
void onLockDeallocation() { ++m_nLockDeallocation; }
Template arguments:
- \p LockPool - the @ref cds_memory_pool "pool type". The pool must manage
objects of type \p std::mutex or similar. Access to the pool is not synchronized.
- - \p BackOff - back-off strategy for spinning, default is \p cds::backoff::yield
+ - \p BackOff - back-off strategy for spinning, default is \p cds::backoff::Default
- \p Stat - enable (\p true) or disable (\p false, the default) the monitor's internal statistics.
<b>How to use</b>
typedef cds::sync::pool_monitor< pool_type > sync_monitor;
\endcode
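A fuller sketch of the typedef above, assuming \p pool_type is built from
\p cds::memory::vyukov_queue_pool (one of the library's pool implementations;
any pool satisfying the @ref cds_memory_pool "pool interface" can be used):
\code
#include <mutex>
#include <cds/memory/vyukov_queue_pool.h>
#include <cds/sync/pool_monitor.h>

// Pool of std::mutex objects. pool_monitor does not synchronize access
// to the pool, so a concurrency-friendly pool such as vyukov_queue_pool
// is a natural choice here.
typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;

// Node-level monitor that allocates a mutex from the pool on demand.
typedef cds::sync::pool_monitor< pool_type > sync_monitor;
\endcode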
*/
- template <class LockPool, typename BackOff = cds::backoff::yield, bool Stat = false >
+ template <class LockPool, typename BackOff = cds::backoff::Default, bool Stat = false >
class pool_monitor
{
public:
//@cond
node_injection()
- : m_RefSpin( 0 )
- , m_pLock( nullptr )
- {}
+ : m_pLock( nullptr )
+ {
+ m_RefSpin.store( 0, atomics::memory_order_release );
+ }
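// Note: the release store above is an atomic store of the initial value,
// but a constructor alone does not synchronize with other threads; the code
// that publishes the node must still provide the necessary ordering.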
~node_injection()
{
    assert( m_pLock == nullptr );
    assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
}
bool check_free() const
{
- return m_pLock == nullptr && m_RefSpin.load( atomics::memory_order_acquire ) == 0;
+ return m_pLock == nullptr && m_RefSpin.load( atomics::memory_order_relaxed ) == 0;
}
//@endcond
};
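// A note on the m_RefSpin encoding assumed by the code below: bit 0
// (c_nSpinBit) is a one-bit spin lock protecting m_pLock, and the remaining
// bits hold a reference counter advanced in units of c_nRefIncrement that
// counts the threads currently interested in the node's lock. This reading
// follows from the masks used in the CAS loops below.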
// try lock spin and increment reference counter
refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
- atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
+ atomics::memory_order_acq_rel, atomics::memory_order_acquire ))
{
back_off bkoff;
do {
bkoff();
cur &= ~c_nSpinBit;
} while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
- atomics::memory_order_acquire, atomics::memory_order_relaxed ));
+ atomics::memory_order_acq_rel, atomics::memory_order_acquire ));
}
// spin locked
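// On CAS failure, compare_exchange_weak reloads the current value into cur;
// clearing c_nSpinBit then makes the next attempt expect an unlocked spin
// bit instead of spinning on a stale expected value.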
// try lock spin
refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
- atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
+ atomics::memory_order_acquire, atomics::memory_order_acquire ))
{
back_off bkoff;
do {
bkoff();
cur &= ~c_nSpinBit;
} while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
- atomics::memory_order_acquire, atomics::memory_order_relaxed ));
+ atomics::memory_order_acquire, atomics::memory_order_acquire ));
}
// spin locked now
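// Unlike the loop above, this path only sets c_nSpinBit and leaves the
// reference counter untouched: cur | c_nSpinBit locks the spin bit while
// preserving whatever reference count the upper bits currently hold.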