-//$$CDS-header$$
+/*
+ This file is a part of libcds - Concurrent Data Structures library
-#ifndef __CDS_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H
-#define __CDS_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H
+#define CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H
#include <cds/details/allocator.h>
#include <cds/intrusive/vyukov_mpmc_cycle_queue.h>
+#include <cds/details/throw_exception.h>
namespace cds { namespace memory {
/// Allocator type
typedef CDS_DEFAULT_ALLOCATOR allocator;
};
- /// Free-list based on bounded lock-free queue cds::intrusive::VyukovMPMCCycleQueue
+
+ /// Free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue
/** @ingroup cds_memory_pool
Template parameters:
- - \p T - the type of object maintaining by free-list
- - \p Traits - traits for cds::intrusive::VyukovMPMCCycleQueue class plus
- cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits
+ - \p T - the type of object maintained by the free-list. \p T must be default constructible.
+ - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus
+ \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits
\b Internals
// Pool of Foo object of size 1024.
struct pool_traits: public cds::memory::vyukov_queue_pool_traits
{
- typedef cds::opt::v::static_buffer< Foo, 1024 > buffer;
+ typedef cds::opt::v::uninitialized_static_buffer< Foo, 1024 > buffer;
};
typedef cds::memory::vyukov_queue_pool< Foo, pool_traits > pool_type;
static pool_type thePool;
typedef T value_type ; ///< Value type
typedef Traits traits; ///< Traits type
typedef typename traits::allocator::template rebind<value_type>::other allocator_type ; ///< allocator type
+ typedef typename traits::back_off back_off; ///< back-off strategy
protected:
//@cond
+ typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator;
+ typedef typename cxx_allocator::allocator_type std_allocator;
+
queue_type m_Queue;
value_type * m_pFirst;
value_type * m_pLast;
//@cond
void preallocate_pool()
{
- m_pFirst = allocator_type().allocate( m_Queue.capacity() );
+ m_pFirst = std_allocator().allocate( m_Queue.capacity());
m_pLast = m_pFirst + m_Queue.capacity();
- for ( value_type * p = m_pFirst; p < m_pLast; ++p )
+ for ( value_type * p = m_pFirst; p < m_pLast; ++p ) {
CDS_VERIFY( m_Queue.push( *p )) ; // must be true
+ }
}
bool from_pool( value_type * p ) const
/**
\p nCapacity argument is the queue capacity. It should be passed
if the queue is based on dynamically-allocated buffer.
- See cds::intrusive::VyukovMPMCCycleQueue for explanation.
+ See \p cds::intrusive::VyukovMPMCCycleQueue for explanation.
*/
vyukov_queue_pool( size_t nCapacity = 0 )
: m_Queue( nCapacity )
~vyukov_queue_pool()
{
m_Queue.clear();
- allocator_type().deallocate( m_pFirst, m_Queue.capacity() );
+ std_allocator().deallocate( m_pFirst, m_Queue.capacity());
}
/// Allocates an object from pool
/**
The pool supports allocation only single object (\p n = 1).
- If \p n > 1 the behaviour is undefined.
+ If \p n > 1 the behavior is undefined.
If the queue is not empty, the popped value is returned.
Otherwise, a new value allocated.
value_type * allocate( size_t n )
{
assert( n == 1 );
+ CDS_UNUSED(n);
value_type * p = m_Queue.pop();
if ( p ) {
- assert( from_pool(p) );
- return p;
+ assert( from_pool(p));
+ return new( p ) value_type;
}
-
- return allocator_type().allocate( n );
+ // The pool is empty - allocate a new object from the heap
+ return cxx_allocator().New();
}
/// Deallocated the object \p p
/**
The pool supports allocation only single object (\p n = 1).
- If \p n > 1 the behaviour is undefined.
+ If \p n > 1 the behavior is undefined.
If \p p is from preallocated pool, it pushes into the queue.
Otherwise, \p p is deallocated by allocator provided.
void deallocate( value_type * p, size_t n )
{
assert( n == 1 );
+ CDS_UNUSED(n);
if ( p ) {
- if ( from_pool( p ) )
- m_Queue.push( *p );
+ if ( from_pool(p)) {
+ p->~value_type();
+ // The queue may spuriously report that it is full,
+ // so we retry the push in a loop
+ back_off bkoff;
+ while ( !m_Queue.push( *p ))
+ bkoff();
+ }
else
- allocator_type().deallocate( p, n );
+ cxx_allocator().Delete( p );
}
}
};
- /// Lazy free-list based on bounded lock-free queue cds::intrusive::VyukovMPMCCycleQueue
+ /// Lazy free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue
/** @ingroup cds_memory_pool
Template parameters:
- - \p T - the type of object maintaining by free-list
- - \p Traits - traits for cds::intrusive::VyukovMPMCCycleQueue class plus
- cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits
+ - \p T - the type of object maintained by the free-list. \p T must be default constructible
+ - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus
+ \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits
\b Internals
protected:
//@cond
+ typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator;
+ typedef typename cxx_allocator::allocator_type std_allocator;
+
queue_type m_Queue;
//@endcond
/// Deallocates all objects from the pool
~lazy_vyukov_queue_pool()
{
- allocator_type a;
- while ( !m_Queue.empty() ) {
- value_type * p = m_Queue.pop();
- a.deallocate( p, 1 );
- }
+ std_allocator a;
+ while ( !m_Queue.empty())
+ a.deallocate( m_Queue.pop(), 1 );
}
/// Allocates an object from pool
/**
The pool supports allocation only single object (\p n = 1).
- If \p n > 1 the behaviour is undefined.
+ If \p n > 1 the behavior is undefined.
If the queue is not empty, the popped value is returned.
Otherwise, a new value allocated.
value_type * allocate( size_t n )
{
assert( n == 1 );
+ CDS_UNUSED(n);
value_type * p = m_Queue.pop();
if ( p )
- return p;
+ return new( p ) value_type;
- return allocator_type().allocate( n );
+ return cxx_allocator().New();
}
- /// Deallocated the object \p p
+ /// Deallocates the object \p p
/**
The pool supports allocation only single object (\p n = 1).
If \p n > 1 the behaviour is undefined.
void deallocate( value_type * p, size_t n )
{
assert( n == 1 );
+ CDS_UNUSED(n);
if ( p ) {
+ p->~value_type();
+ // Here a spurious "queue is full" state is ignored: the object is freed instead
if ( !m_Queue.push( *p ))
- allocator_type().deallocate( p, n );
+ std_allocator().deallocate( p, 1 );
}
}
};
- /// Bounded free-list based on bounded lock-free queue cds::intrusive::VyukovMPMCCycleQueue
+ /// Bounded free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue
/** @ingroup cds_memory_pool
Template parameters:
- - \p T - the type of object maintaining by free-list
- - \p Traits - traits for cds::intrusive::VyukovMPMCCycleQueue class plus
- cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits
+ - \p T - the type of object maintained by the free-list. \p T must be default-constructible
+ - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus
+ \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits
\b Internals
// Pool of Foo object of size 1024.
struct pool_traits: public cds::memory::vyukov_queue_pool_traits
{
- typedef cds::opt::v::static_buffer< Foo, 1024 > buffer;
+ typedef cds::opt::v::uninitialized_static_buffer< Foo, 1024 > buffer;
};
typedef cds::memory::bounded_vyukov_queue_pool< Foo, pool_traits > pool_type;
static pool_type thePool;
template <typename T, typename Traits = vyukov_queue_pool_traits >
class bounded_vyukov_queue_pool
{
+ //@cond
+ struct internal_traits : public Traits {
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ //@endcond
public:
- typedef cds::intrusive::VyukovMPMCCycleQueue< T, Traits > queue_type ; ///< Queue type
+ typedef cds::intrusive::VyukovMPMCCycleQueue< T, internal_traits > queue_type ; ///< Queue type
public:
typedef T value_type; ///< Value type
typedef Traits traits; ///< Pool traits
typedef typename traits::allocator::template rebind<value_type>::other allocator_type ; ///< allocator type
+ typedef typename traits::back_off back_off; ///< back-off strategy
protected:
//@cond
+ typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator;
+ typedef typename cxx_allocator::allocator_type std_allocator;
+
queue_type m_Queue;
value_type * m_pFirst;
value_type * m_pLast;
//@cond
void preallocate_pool()
{
- m_pFirst = allocator_type().allocate( m_Queue.capacity() );
- m_pLast = m_pFirst + m_Queue.capacity();
+ size_t const nCount = m_Queue.capacity();
+ m_pFirst = std_allocator().allocate( nCount );
+ m_pLast = m_pFirst + nCount;
for ( value_type * p = m_pFirst; p < m_pLast; ++p )
CDS_VERIFY( m_Queue.push( *p )) ; // must be true
/**
\p nCapacity argument is the queue capacity. It should be passed
if the queue is based on dynamically-allocated buffer.
- See cds::intrusive::VyukovMPMCCycleQueue for explanation.
+ See \p cds::intrusive::VyukovMPMCCycleQueue for explanation.
*/
bounded_vyukov_queue_pool( size_t nCapacity = 0 )
: m_Queue( nCapacity )
~bounded_vyukov_queue_pool()
{
m_Queue.clear();
- allocator_type().deallocate( m_pFirst, m_Queue.capacity() );
+ std_allocator().deallocate( m_pFirst, m_Queue.capacity());
}
/// Allocates an object from pool
value_type * allocate( size_t n )
{
assert( n == 1 );
+ CDS_UNUSED( n );
value_type * p = m_Queue.pop();
- if ( p ) {
- assert( from_pool(p) );
- return p;
+
+ if ( !p ) {
+ back_off bkoff;
+ while ( m_Queue.size()) {
+ p = m_Queue.pop();
+ if ( p )
+ goto ok;
+ bkoff();
+ }
+
+ // The pool is empty
+ CDS_THROW_EXCEPTION( std::bad_alloc());
}
- throw std::bad_alloc();
+ ok:
+ assert( from_pool(p));
+ return p;
}
- /// Deallocated the object \p p
+ /// Deallocates the object \p p
/**
The pool supports allocation only single object (\p n = 1).
If \p n > 1 the behaviour is undefined.
- \p should be from preallocated pool.
+ \p p should be from preallocated pool.
*/
void deallocate( value_type * p, size_t n )
{
assert( n == 1 );
+ CDS_UNUSED( n );
if ( p ) {
assert( from_pool( p ));
- m_Queue.push( *p );
+ back_off bkoff;
+ // The queue may spuriously report that it is full,
+ // so we retry the push in a loop
+ while ( !m_Queue.push(*p))
+ bkoff();
}
}
};
}} // namespace cds::memory
-#endif // #ifndef __CDS_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H
+#endif // #ifndef CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H