/// Returns the singleton instance.
/// @return pointer to the singleton, obtained from base_class::instance()
///         and downcast to general_buffered (safe only if the base singleton
///         was constructed as this derived type — presumably guaranteed by
///         the construction path; confirm against base_class)
static general_buffered * instance()
{
- return static_cast<general_buffered *>( base_class::instance() );
+ return static_cast<general_buffered *>( base_class::instance());
}
/// Checks if the singleton is created and ready to use
static bool isUsed()
p.free();
}
else {
- push_buffer( std::move(p) );
+ push_buffer( std::move(p));
break;
}
}
bool push_buffer( epoch_retired_ptr&& ep )
{
bool bPushed = m_Buffer.push( ep );
- if ( !bPushed || m_Buffer.size() >= capacity() ) {
+ if ( !bPushed || m_Buffer.size() >= capacity()) {
synchronize();
if ( !bPushed ) {
ep.free();
/// Destroys singleton object
static void Destruct( bool bDetachAll = false )
{
- if ( isUsed() ) {
+ if ( isUsed()) {
instance()->clear_buffer( std::numeric_limits< uint64_t >::max());
if ( bDetachAll )
instance()->m_ThreadList.detach_all();
while ( itFirst != itLast ) {
epoch_retired_ptr ep( *itFirst, nEpoch );
++itFirst;
- push_buffer( std::move(ep) );
+ push_buffer( std::move(ep));
}
}
atomics::atomic_thread_fence( atomics::memory_order_acquire );
{
std::unique_lock<lock_type> sl( m_Lock );
- if ( ep.m_p && m_Buffer.push( ep ) )
+ if ( ep.m_p && m_Buffer.push( ep ))
return false;
nEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_relaxed );
flip_and_wait();