- \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
or \p opt::v::sequential_consistent (sequentially consistent memory model).
- Example: declare \p %WeakRingBuffer with static iternal buffer of size 1024:
+ Example: declare \p %WeakRingBuffer with static internal buffer for 1024 objects:
\code
typedef cds::container::WeakRingBuffer< Foo,
typename cds::container::weak_ringbuffer::make_traits<
There is a specialization \ref cds_nonintrusive_WeakRingBuffer_void "WeakRingBuffer<void, Traits>"
that is not a queue but a "memory pool" between producer and consumer threads.
- \p WeakRingBuffer<void> supports data of different size.
+ \p WeakRingBuffer<void> supports variable-sized data.
+
+ @warning: \p %WeakRingBuffer is designed for 64-bit architectures.
+ On 32-bit platforms an integer overflow of the internal counters is possible.
*/
template <typename T, typename Traits = weak_ringbuffer::traits>
class WeakRingBuffer: public cds::bounded_container
/// Single-producer single-consumer ring buffer for untyped variable-sized data
/** @ingroup cds_nonintrusive_queue
@anchor cds_nonintrusive_WeakRingBuffer_void
+
+ This SPSC ring buffer is intended for data of variable size. The producer
+ allocates a buffer from the ring, fills it with data and pushes it back to the ring.
+ The consumer thread reads the data at the front end and then pops it:
+ \code
+ // allocates 1M ring buffer
+ WeakRingBuffer<void> theRing( 1024 * 1024 );
+
+ void producer_thread()
+ {
+ // Get data of size N bytes
+ size_t size;
+ void* data;
+
+ while ( true ) {
+ // Get external data
+ std::tie( data, size ) = get_data();
+
+ if ( data == nullptr )
+ break;
+
+ // Allocates a buffer from the ring
+ void* buf = theRing.back( size );
+ if ( !buf ) {
+ std::cout << "The ring is full" << std::endl;
+ break;
+ }
+
+ memcpy( buf, data, size );
+
+ // Push data into the ring
+ theRing.push_back();
+ }
+ }
+
+ void consumer_thread()
+ {
+ while ( true ) {
+ auto buf = theRing.front();
+
+ if ( buf.first == nullptr ) {
+ std::cout << "The ring is empty" << std::endl;
+ break;
+ }
+
+ // Process data
+ process_data( buf.first, buf.second );
+
+ // Free buffer
+ theRing.pop_front();
+ }
+ }
+ \endcode
+
+ @warning: \p %WeakRingBuffer is designed for 64-bit architectures.
+ On 32-bit platforms an integer overflow of the internal counters is possible.
*/
#ifdef CDS_DOXYGEN_INVOKED
template <typename Traits = weak_ringbuffer::traits>
/// [producer] Reserve \p size bytes
void* back( size_t size )
{
+ assert( size > 0 );
+
// Any data is rounded to 8-byte boundary
size_t real_size = calc_real_size( size );
}
}
+ back_.store( back, memory_model::memory_order_release );
reserved = buffer_.buffer();
}
size = *reinterpret_cast<size_t*>( buf );
assert( !is_tail( size ) );
+ assert( buf == buffer_.buffer() );
}
#ifdef _DEBUG
}
#endif
- return std::make_pair( reinterpret_cast<void*>( buf + sizeof( size_t ) ), size );
+ return std::make_pair( reinterpret_cast<void*>( buf + sizeof( size_t )), size );
}
/// [consumer] Pops top data
assert( ( reinterpret_cast<uintptr_t>( buf ) & ( sizeof( uintptr_t ) - 1 ) ) == 0 );
size_t size = *reinterpret_cast<size_t*>( buf );
- assert( !is_tail( size ) );
-
- size_t real_size = calc_real_size( size );
+ size_t real_size = calc_real_size( untail( size ));
#ifdef _DEBUG
if ( cback_ - front < real_size ) {
}
private:
+ //@cond
static size_t calc_real_size( size_t size )
{
size_t real_size = (( size + sizeof( uintptr_t ) - 1 ) & ~( sizeof( uintptr_t ) - 1 )) + sizeof( size_t );
return size | ( size_t( 1 ) << ( sizeof( size_t ) * 8 - 1 ));
}
static size_t untail( size_t size )
{
    // Clear the most significant bit, which is used as the "tail" marker
    // (see calc_real_size()/is_tail()); the remaining bits are the real size.
    return size & ~( size_t( 1 ) << ( sizeof( size_t ) * 8 - 1 ));
}
+ //@endcond
+
private:
//@cond
atomics::atomic<size_t> front_;
ASSERT_CONTAINER_SIZE( q, 0 );
// enqueue/dequeue
- for ( size_t i = 0; i < nSize; ++i ) {
- it = static_cast<value_type>(i);
- ASSERT_TRUE( q.enqueue( it ));
- ASSERT_CONTAINER_SIZE( q, i + 1 );
- }
- ASSERT_FALSE( q.empty());
- ASSERT_CONTAINER_SIZE( q, nSize );
- ASSERT_FALSE( q.enqueue( static_cast<value_type>( nSize ) * 2 ) );
+ for ( unsigned pass = 0; pass < 3; ++pass ) {
+ for ( size_t i = 0; i < nSize; ++i ) {
+ it = static_cast<value_type>( i );
+ ASSERT_TRUE( q.enqueue( it ) );
+ ASSERT_CONTAINER_SIZE( q, i + 1 );
+ }
+ ASSERT_FALSE( q.empty() );
+ ASSERT_CONTAINER_SIZE( q, nSize );
+ ASSERT_FALSE( q.enqueue( static_cast<value_type>( nSize ) * 2 ) );
- for ( size_t i = 0; i < nSize; ++i ) {
- it = -1;
- ASSERT_TRUE( q.dequeue( it ));
- ASSERT_EQ( it, static_cast<value_type>( i ));
- ASSERT_CONTAINER_SIZE( q, nSize - i - 1 );
+ for ( size_t i = 0; i < nSize; ++i ) {
+ it = -1;
+ ASSERT_TRUE( q.dequeue( it ) );
+ ASSERT_EQ( it, static_cast<value_type>( i ) );
+ ASSERT_CONTAINER_SIZE( q, nSize - i - 1 );
+ }
+ ASSERT_TRUE( q.empty() );
+ ASSERT_CONTAINER_SIZE( q, 0 );
}
- ASSERT_TRUE( q.empty());
- ASSERT_CONTAINER_SIZE( q, 0 );
// push/pop
- for ( size_t i = 0; i < nSize; ++i ) {
- it = static_cast<value_type>(i);
- ASSERT_TRUE( q.push( it ));
- ASSERT_CONTAINER_SIZE( q, i + 1 );
- }
- ASSERT_FALSE( q.empty());
- ASSERT_CONTAINER_SIZE( q, nSize );
+ for ( unsigned pass = 0; pass < 3; ++pass ) {
+ for ( size_t i = 0; i < nSize; ++i ) {
+ it = static_cast<value_type>( i );
+ ASSERT_TRUE( q.push( it ) );
+ ASSERT_CONTAINER_SIZE( q, i + 1 );
+ }
+ ASSERT_FALSE( q.empty() );
+ ASSERT_CONTAINER_SIZE( q, nSize );
- for ( size_t i = 0; i < nSize; ++i ) {
- it = -1;
- ASSERT_TRUE( q.pop( it ));
- ASSERT_EQ( it, static_cast<value_type>( i ));
- ASSERT_CONTAINER_SIZE( q, nSize - i - 1 );
+ for ( size_t i = 0; i < nSize; ++i ) {
+ it = -1;
+ ASSERT_TRUE( q.pop( it ) );
+ ASSERT_EQ( it, static_cast<value_type>( i ) );
+ ASSERT_CONTAINER_SIZE( q, nSize - i - 1 );
+ }
+ ASSERT_TRUE( q.empty() );
+ ASSERT_CONTAINER_SIZE( q, 0 );
}
- ASSERT_TRUE( q.empty());
- ASSERT_CONTAINER_SIZE( q, 0 );
// push/pop with lambda
- for ( size_t i = 0; i < nSize; ++i ) {
- it = static_cast<value_type>(i);
- ASSERT_NE( it, -1 );
- auto f = [&it]( value_type& dest ) { dest = it; it = -1; };
- if ( i & 1 )
- ASSERT_TRUE( q.enqueue_with( f ));
- else
- ASSERT_TRUE( q.push_with( f ));
- ASSERT_EQ( it, -1 );
- ASSERT_CONTAINER_SIZE( q, i + 1 );
- }
- ASSERT_FALSE( q.empty());
- ASSERT_CONTAINER_SIZE( q, nSize );
+ for ( unsigned pass = 0; pass < 3; ++pass ) {
+ for ( size_t i = 0; i < nSize; ++i ) {
+ it = static_cast<value_type>( i );
+ ASSERT_NE( it, -1 );
+ auto f = [&it]( value_type& dest ) { dest = it; it = -1; };
+ if ( i & 1 )
+ ASSERT_TRUE( q.enqueue_with( f ) );
+ else
+ ASSERT_TRUE( q.push_with( f ) );
+ ASSERT_EQ( it, -1 );
+ ASSERT_CONTAINER_SIZE( q, i + 1 );
+ }
+ ASSERT_FALSE( q.empty() );
+ ASSERT_CONTAINER_SIZE( q, nSize );
- for ( size_t i = 0; i < nSize; ++i ) {
- it = -1;
- auto f = [&it]( value_type& src ) { it = src; src = -1; };
- if ( i & 1 )
- ASSERT_TRUE( q.pop_with( f ));
- else
- ASSERT_TRUE( q.dequeue_with( f ));
- ASSERT_EQ( it, static_cast<value_type>( i ));
- ASSERT_CONTAINER_SIZE( q, nSize - i - 1 );
+ for ( size_t i = 0; i < nSize; ++i ) {
+ it = -1;
+ auto f = [&it]( value_type& src ) { it = src; src = -1; };
+ if ( i & 1 )
+ ASSERT_TRUE( q.pop_with( f ) );
+ else
+ ASSERT_TRUE( q.dequeue_with( f ) );
+ ASSERT_EQ( it, static_cast<value_type>( i ) );
+ ASSERT_CONTAINER_SIZE( q, nSize - i - 1 );
+ }
+ ASSERT_TRUE( q.empty() );
+ ASSERT_CONTAINER_SIZE( q, 0u );
}
- ASSERT_TRUE( q.empty());
- ASSERT_CONTAINER_SIZE( q, 0u );
for ( size_t i = 0; i < nSize; ++i ) {
ASSERT_TRUE( q.push( static_cast<value_type>(i)));
#include "test_bounded_queue.h"
#include <cds/container/weak_ringbuffer.h>
+#include <cds_test/fixture.h>
namespace {
namespace cc = cds::container;
{
value_type el[nArrSize];
- // batch push
- for ( size_t i = 0; i < nSize; i += nArrSize ) {
- for ( size_t k = 0; k < nArrSize; ++k )
- el[k] = static_cast<value_type>( i + k );
+ for ( unsigned pass = 0; pass < 3; ++pass ) {
+ // batch push
+ for ( size_t i = 0; i < nSize; i += nArrSize ) {
+ for ( size_t k = 0; k < nArrSize; ++k )
+ el[k] = static_cast<value_type>( i + k );
+
+ if ( i + nArrSize <= nSize ) {
+ ASSERT_TRUE( q.push( el, nArrSize ) );
+ }
+ else {
+ ASSERT_FALSE( q.push( el, nArrSize ) );
+ }
+ }
- if ( i + nArrSize <= nSize ) {
- ASSERT_TRUE( q.push( el, nArrSize ) );
+ ASSERT_TRUE( !q.empty() );
+ if ( nSize % nArrSize != 0 ) {
+ ASSERT_FALSE( q.full() );
+ ASSERT_CONTAINER_SIZE( q, nArrCount * nArrSize );
+ for ( size_t i = nArrCount * nArrSize; i < nSize; ++i ) {
+ ASSERT_TRUE( q.enqueue( static_cast<value_type>( i ) ) );
+ }
}
- else {
- ASSERT_FALSE( q.push( el, nArrSize ) );
+ ASSERT_TRUE( q.full() );
+ ASSERT_CONTAINER_SIZE( q, nSize );
+
+ // batch pop
+ value_type expected = 0;
+ while ( q.pop( el, nArrSize ) ) {
+ for ( size_t i = 0; i < nArrSize; ++i ) {
+ ASSERT_EQ( el[i], expected );
+ ++expected;
+ }
}
- }
- ASSERT_TRUE( !q.empty() );
- if ( nSize % nArrSize != 0 ) {
- ASSERT_FALSE( q.full() );
- ASSERT_CONTAINER_SIZE( q, nArrCount * nArrSize );
- for ( size_t i = nArrCount * nArrSize; i < nSize; ++i ) {
- ASSERT_TRUE( q.enqueue( static_cast<value_type>( i ) ) );
+ if ( nSize % nArrSize == 0 ) {
+ ASSERT_TRUE( q.empty() );
}
- }
- ASSERT_TRUE( q.full() );
- ASSERT_CONTAINER_SIZE( q, nSize );
-
- // batch pop
- value_type expected = 0;
- while ( q.pop( el, nArrSize ) ) {
- for ( size_t i = 0; i < nArrSize; ++i ) {
- ASSERT_EQ( el[i], expected );
- ++expected;
+ else {
+ ASSERT_FALSE( q.empty() );
+ ASSERT_CONTAINER_SIZE( q, nSize % nArrSize );
+ q.clear();
}
- }
-
- if ( nSize % nArrSize == 0 ) {
ASSERT_TRUE( q.empty() );
+ ASSERT_FALSE( q.full() );
+ ASSERT_CONTAINER_SIZE( q, 0u );
}
- else {
- ASSERT_FALSE( q.empty() );
- ASSERT_CONTAINER_SIZE( q, nSize % nArrSize );
- q.clear();
- }
- ASSERT_TRUE( q.empty() );
- ASSERT_FALSE( q.full() );
- ASSERT_CONTAINER_SIZE( q, 0u );
}
{
size_t el[nArrSize];
auto func_push = []( value_type& dest, size_t src ) { dest = static_cast<value_type>( src * 10 ); };
- for ( size_t i = 0; i < nSize; i += nArrSize ) {
- for ( size_t k = 0; k < nArrSize; ++k )
- el[k] = i + k;
- if ( i + nArrSize <= nSize ) {
- ASSERT_TRUE( q.push( el, nArrSize, func_push ) );
+ for ( unsigned pass = 0; pass < 3; ++pass ) {
+ for ( size_t i = 0; i < nSize; i += nArrSize ) {
+ for ( size_t k = 0; k < nArrSize; ++k )
+ el[k] = i + k;
+
+ if ( i + nArrSize <= nSize ) {
+ ASSERT_TRUE( q.push( el, nArrSize, func_push ) );
+ }
+ else {
+ ASSERT_FALSE( q.push( el, nArrSize, func_push ) );
+ }
}
- else {
- ASSERT_FALSE( q.push( el, nArrSize, func_push ) );
- }
- }
- ASSERT_TRUE( !q.empty() );
- if ( nSize % nArrSize != 0 ) {
- ASSERT_FALSE( q.full() );
- ASSERT_CONTAINER_SIZE( q, nArrCount * nArrSize );
- for ( size_t i = nArrCount * nArrSize; i < nSize; ++i ) {
- ASSERT_TRUE( q.push( &i, 1, func_push ) );
+ ASSERT_TRUE( !q.empty() );
+ if ( nSize % nArrSize != 0 ) {
+ ASSERT_FALSE( q.full() );
+ ASSERT_CONTAINER_SIZE( q, nArrCount * nArrSize );
+ for ( size_t i = nArrCount * nArrSize; i < nSize; ++i ) {
+ ASSERT_TRUE( q.push( &i, 1, func_push ) );
+ }
}
- }
- ASSERT_TRUE( q.full() );
- ASSERT_CONTAINER_SIZE( q, nSize );
-
- // batch pop with functor
- auto func_pop = []( size_t& dest, value_type src ) { dest = static_cast<size_t>( src / 10 ); };
- size_t expected = 0;
- while ( q.pop( el, nArrSize, func_pop ) ) {
- for ( size_t i = 0; i < nArrSize; ++i ) {
- ASSERT_EQ( el[i], expected );
- ++expected;
+ ASSERT_TRUE( q.full() );
+ ASSERT_CONTAINER_SIZE( q, nSize );
+
+ // batch pop with functor
+ auto func_pop = []( size_t& dest, value_type src ) { dest = static_cast<size_t>( src / 10 ); };
+ size_t expected = 0;
+ while ( q.pop( el, nArrSize, func_pop ) ) {
+ for ( size_t i = 0; i < nArrSize; ++i ) {
+ ASSERT_EQ( el[i], expected );
+ ++expected;
+ }
}
- }
- if ( nSize % nArrSize == 0 ) {
- ASSERT_TRUE( q.empty() );
- }
- else {
- ASSERT_FALSE( q.empty() );
- ASSERT_CONTAINER_SIZE( q, nSize % nArrSize );
- size_t v;
- while ( q.pop( &v, 1, func_pop ) ) {
- ASSERT_EQ( v, expected );
- ++expected;
+ if ( nSize % nArrSize == 0 ) {
+ ASSERT_TRUE( q.empty() );
}
+ else {
+ ASSERT_FALSE( q.empty() );
+ ASSERT_CONTAINER_SIZE( q, nSize % nArrSize );
+ size_t v;
+ while ( q.pop( &v, 1, func_pop ) ) {
+ ASSERT_EQ( v, expected );
+ ++expected;
+ }
+ }
+ ASSERT_TRUE( q.empty() );
+ ASSERT_FALSE( q.full() );
+ ASSERT_CONTAINER_SIZE( q, 0u );
}
- ASSERT_TRUE( q.empty() );
- ASSERT_FALSE( q.full() );
- ASSERT_CONTAINER_SIZE( q, 0u );
// front/pop_front
- for ( size_t i = 0; i < nSize; i += nArrSize ) {
- for ( size_t k = 0; k < nArrSize; ++k )
- el[k] = i + k;
+ for ( unsigned pass = 0; pass < 3; ++pass ) {
+ for ( size_t i = 0; i < nSize; i += nArrSize ) {
+ for ( size_t k = 0; k < nArrSize; ++k )
+ el[k] = i + k;
+
+ if ( i + nArrSize <= nSize ) {
+ ASSERT_TRUE( q.push( el, nArrSize, func_push ) );
+ }
+ else {
+ ASSERT_FALSE( q.push( el, nArrSize, func_push ) );
+ }
+ }
- if ( i + nArrSize <= nSize ) {
- ASSERT_TRUE( q.push( el, nArrSize, func_push ) );
+ ASSERT_TRUE( !q.empty() );
+ if ( nSize % nArrSize != 0 ) {
+ ASSERT_FALSE( q.full() );
+ ASSERT_CONTAINER_SIZE( q, nArrCount * nArrSize );
+ for ( size_t i = nArrCount * nArrSize; i < nSize; ++i ) {
+ ASSERT_TRUE( q.push( &i, 1, func_push ) );
+ }
}
- else {
- ASSERT_FALSE( q.push( el, nArrSize, func_push ) );
+ ASSERT_TRUE( q.full() );
+ ASSERT_CONTAINER_SIZE( q, nSize );
+
+ value_type cur = 0;
+ while ( !q.empty() ) {
+ value_type* front = q.front();
+ ASSERT_TRUE( front != nullptr );
+ ASSERT_EQ( cur, *front );
+ ASSERT_TRUE( q.pop_front() );
+ cur += 10;
}
- }
- ASSERT_TRUE( !q.empty() );
- if ( nSize % nArrSize != 0 ) {
- ASSERT_FALSE( q.full() );
- ASSERT_CONTAINER_SIZE( q, nArrCount * nArrSize );
- for ( size_t i = nArrCount * nArrSize; i < nSize; ++i ) {
- ASSERT_TRUE( q.push( &i, 1, func_push ) );
- }
- }
- ASSERT_TRUE( q.full() );
- ASSERT_CONTAINER_SIZE( q, nSize );
-
- value_type cur = 0;
- while ( !q.empty() ) {
- value_type* front = q.front();
- ASSERT_TRUE( front != nullptr );
- ASSERT_EQ( cur, *front );
- ASSERT_TRUE( q.pop_front() );
- cur += 10;
+ ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.front() == nullptr );
+ ASSERT_FALSE( q.pop_front() );
}
+ }
+ }
+
+ template <typename Queue>
+ void test_varsize_buffer( Queue& q )
+ {
+ size_t const capacity = q.capacity();
+
+ ASSERT_TRUE( q.empty() );
+ ASSERT_EQ( q.size(), 0u );
+ ASSERT_TRUE( q.front().first == nullptr );
+ ASSERT_FALSE( q.pop_front() );
+
+ size_t total_push = 0;
+ uint8_t chfill = 0;
+ while ( total_push < capacity * 4 ) {
+ unsigned buf_size = cds_test::fixture::rand( static_cast<unsigned>( capacity / 4 )) + 1;
+ total_push += buf_size;
+
+ void* buf = q.back( buf_size );
+ ASSERT_TRUE( buf != nullptr );
+
+ memset( buf, chfill, buf_size );
+ q.push_back();
+
+ ASSERT_GE( q.size(), buf_size );
+
+ auto pair = q.front();
+ ASSERT_TRUE( pair.first != nullptr );
+ ASSERT_EQ( pair.second, buf_size );
+ for ( size_t i = 0; i < pair.second; ++i )
+ ASSERT_EQ( *reinterpret_cast<uint8_t*>( pair.first ), chfill );
- ASSERT_TRUE( q.empty() );
- ASSERT_TRUE( q.front() == nullptr );
+ ASSERT_TRUE( q.pop_front() );
ASSERT_FALSE( q.pop_front() );
}
+
+ ASSERT_TRUE( q.empty() );
+ ASSERT_EQ( q.size(), 0u );
+ ASSERT_TRUE( q.front().first == nullptr );
+ ASSERT_FALSE( q.pop_front() );
}
};
test_array( q );
}
+ TEST_F( WeakRingBuffer, var_sized )
+ {
+ typedef cds::container::WeakRingBuffer< void > test_queue;
+
+ test_queue q( 1024 * 64 );
+ test_varsize_buffer( q );
+ }
+
+ TEST_F( WeakRingBuffer, var_sized_static )
+ {
+ struct traits: public cds::container::weak_ringbuffer::traits
+ {
+ typedef cds::opt::v::uninitialized_static_buffer<int, 1024 * 64> buffer;
+ };
+ typedef cds::container::WeakRingBuffer< void, traits > test_queue;
+
+ test_queue q;
+ test_varsize_buffer( q );
+ }
+
} // namespace
\ No newline at end of file