-//$$CDS-header$$
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
#ifndef CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H
#define CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H
#include <cds/details/allocator.h>
#include <cds/intrusive/vyukov_mpmc_cycle_queue.h>
+#include <cds/details/throw_exception.h>
namespace cds { namespace memory {
{
/// Allocator type
typedef CDS_DEFAULT_ALLOCATOR allocator;
-
- /// Back-off stratey
- typedef cds::backoff::yield back_off;
};
/// Free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue
/** @ingroup cds_memory_pool
Template parameters:
- - \p T - the type of object maintaining by free-list
+ - \p T - the type of object maintained by free-list. \p T must be default constructible.
- \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus
\p cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits
// Pool of Foo object of size 1024.
struct pool_traits: public cds::memory::vyukov_queue_pool_traits
{
- typedef cds::opt::v::static_buffer< Foo, 1024 > buffer;
+ typedef cds::opt::v::uninitialized_static_buffer< Foo, 1024 > buffer;
};
typedef cds::memory::vyukov_queue_pool< Foo, pool_traits > pool_type;
static pool_type thePool;
protected:
//@cond
typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator;
+ typedef typename cxx_allocator::allocator_type std_allocator;
queue_type m_Queue;
value_type * m_pFirst;
//@cond
void preallocate_pool()
{
- m_pFirst = cxx_allocator().NewArray( m_Queue.capacity() );
+ m_pFirst = std_allocator().allocate( m_Queue.capacity());
m_pLast = m_pFirst + m_Queue.capacity();
for ( value_type * p = m_pFirst; p < m_pLast; ++p ) {
~vyukov_queue_pool()
{
m_Queue.clear();
- cxx_allocator().Delete( m_pFirst, m_Queue.capacity());
+ std_allocator().deallocate( m_pFirst, m_Queue.capacity());
}
/// Allocates an object from pool
/**
The pool supports allocation only single object (\p n = 1).
- If \p n > 1 the behaviour is undefined.
+ If \p n > 1 the behavior is undefined.
If the queue is not empty, the popped value is returned.
Otherwise, a new value allocated.
value_type * p = m_Queue.pop();
if ( p ) {
- assert( from_pool(p) );
- return p;
+ assert( from_pool(p));
+ return new( p ) value_type;
}
-
+ // The pool is empty - allocate a new object from the heap
return cxx_allocator().New();
}
/// Deallocated the object \p p
/**
The pool supports allocation only single object (\p n = 1).
- If \p n > 1 the behaviour is undefined.
+ If \p n > 1 the behavior is undefined.
If \p p is from preallocated pool, it pushes into the queue.
Otherwise, \p p is deallocated by allocator provided.
CDS_UNUSED(n);
if ( p ) {
- if ( from_pool(p) ) {
+ if ( from_pool(p)) {
+ p->~value_type();
// The queue can notify about false fullness state
- // so we push in loop
+ // so we push in loop
back_off bkoff;
while ( !m_Queue.push( *p ))
bkoff();
};
- /// Lazy free-list based on bounded lock-free queue cds::intrusive::VyukovMPMCCycleQueue
+ /// Lazy free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue
/** @ingroup cds_memory_pool
Template parameters:
- - \p T - the type of object maintaining by free-list
- - \p Traits - traits for cds::intrusive::VyukovMPMCCycleQueue class plus
- cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits
+ - \p T - the type of object maintained by free-list. \p T must be default constructible
+ - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus
+ \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits
\b Internals
protected:
//@cond
typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator;
+ typedef typename cxx_allocator::allocator_type std_allocator;
queue_type m_Queue;
//@endcond
/// Deallocates all objects from the pool
~lazy_vyukov_queue_pool()
{
- cxx_allocator a;
- while ( !m_Queue.empty() )
- a.Delete( m_Queue.pop());
+ std_allocator a;
+ while ( !m_Queue.empty())
+ a.deallocate( m_Queue.pop(), 1 );
}
/// Allocates an object from pool
/**
The pool supports allocation only single object (\p n = 1).
- If \p n > 1 the behaviour is undefined.
+ If \p n > 1 the behavior is undefined.
If the queue is not empty, the popped value is returned.
Otherwise, a new value allocated.
value_type * p = m_Queue.pop();
if ( p )
- return p;
+ return new( p ) value_type;
return cxx_allocator().New();
}
- /// Deallocated the object \p p
+ /// Deallocates the object \p p
/**
The pool supports allocation only single object (\p n = 1).
If \p n > 1 the behaviour is undefined.
CDS_UNUSED(n);
if ( p ) {
+ p->~value_type();
// Here we ignore false fullness state of the queue
if ( !m_Queue.push( *p ))
- cxx_allocator().Delete( p );
+ std_allocator().deallocate( p, 1 );
}
}
};
- /// Bounded free-list based on bounded lock-free queue cds::intrusive::VyukovMPMCCycleQueue
+ /// Bounded free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue
/** @ingroup cds_memory_pool
Template parameters:
- - \p T - the type of object maintaining by free-list
- - \p Traits - traits for cds::intrusive::VyukovMPMCCycleQueue class plus
- cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits
+ - \p T - the type of object maintained by free-list. \p T must be default-constructible
+ - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus
+ \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits
\b Internals
// Pool of Foo object of size 1024.
struct pool_traits: public cds::memory::vyukov_queue_pool_traits
{
- typedef cds::opt::v::static_buffer< Foo, 1024 > buffer;
+ typedef cds::opt::v::uninitialized_static_buffer< Foo, 1024 > buffer;
};
typedef cds::memory::bounded_vyukov_queue_pool< Foo, pool_traits > pool_type;
static pool_type thePool;
template <typename T, typename Traits = vyukov_queue_pool_traits >
class bounded_vyukov_queue_pool
{
+ //@cond
+ struct internal_traits : public Traits {
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ //@endcond
public:
- typedef cds::intrusive::VyukovMPMCCycleQueue< T, Traits > queue_type ; ///< Queue type
+ typedef cds::intrusive::VyukovMPMCCycleQueue< T, internal_traits > queue_type ; ///< Queue type
public:
typedef T value_type; ///< Value type
protected:
//@cond
- typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator;
+ typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator;
+ typedef typename cxx_allocator::allocator_type std_allocator;
queue_type m_Queue;
value_type * m_pFirst;
void preallocate_pool()
{
size_t const nCount = m_Queue.capacity();
- m_pFirst = cxx_allocator().NewArray( nCount );
+ m_pFirst = std_allocator().allocate( nCount );
m_pLast = m_pFirst + nCount;
for ( value_type * p = m_pFirst; p < m_pLast; ++p )
~bounded_vyukov_queue_pool()
{
m_Queue.clear();
- cxx_allocator().Delete( m_pFirst, m_Queue.capacity() );
+ std_allocator().deallocate( m_pFirst, m_Queue.capacity());
}
/// Allocates an object from pool
CDS_UNUSED( n );
value_type * p = m_Queue.pop();
- if ( p ) {
- assert( from_pool(p) );
- return p;
+
+ if ( !p ) {
+ back_off bkoff;
+ while ( m_Queue.size()) {
+ p = m_Queue.pop();
+ if ( p )
+ goto ok;
+ bkoff();
+ }
+
+ // The pool is empty
+ CDS_THROW_EXCEPTION( std::bad_alloc());
}
- throw std::bad_alloc();
+ ok:
+ assert( from_pool(p));
+ return p;
}
- /// Deallocated the object \p p
+ /// Deallocates the object \p p
/**
The pool supports allocation only single object (\p n = 1).
If \p n > 1 the behaviour is undefined.
- \p should be from preallocated pool.
+ \p p should be from preallocated pool.
*/
void deallocate( value_type * p, size_t n )
{
back_off bkoff;
// The queue can notify it is full but that is false fullness state
// So, we push in loop
- while ( !m_Queue.push(*p) )
+ while ( !m_Queue.push(*p))
bkoff();
}
}