\p WeakRingBuffer<void> supports variable-sized data.
@warning: \p %WeakRingBuffer is developed for 64-bit architecture.
- On 32-bit platform an integer overflow of internal counters is possible.
+ A 32-bit platform must provide support for 64-bit atomics (the internal counters are 64-bit).
*/
template <typename T, typename Traits = weak_ringbuffer::traits>
class WeakRingBuffer: public cds::bounded_container
private:
//@cond
typedef typename traits::buffer::template rebind< value_type >::other buffer;
+ typedef uint64_t counter_type;
//@endcond
public:
~WeakRingBuffer()
{
value_cleaner cleaner;
- size_t back = back_.load( memory_model::memory_order_relaxed );
- for ( size_t front = front_.load( memory_model::memory_order_relaxed ); front != back; ++front )
+ counter_type back = back_.load( memory_model::memory_order_relaxed );
+ for ( counter_type front = front_.load( memory_model::memory_order_relaxed ); front != back; ++front )
cleaner( buffer_[ buffer_.mod( front ) ] );
}
bool push( Q* arr, size_t count, CopyFunc copy )
{
assert( count < capacity() );
- size_t back = back_.load( memory_model::memory_order_relaxed );
+ counter_type back = back_.load( memory_model::memory_order_relaxed );
- assert( back - pfront_ <= capacity() );
+ assert( static_cast<size_t>( back - pfront_ ) <= capacity() );
- if ( pfront_ + capacity() - back < count ) {
+ if ( static_cast<size_t>( pfront_ + capacity() - back ) < count ) {
pfront_ = front_.load( memory_model::memory_order_acquire );
- if ( pfront_ + capacity() - back < count ) {
+ if ( static_cast<size_t>( pfront_ + capacity() - back ) < count ) {
// not enough space
return false;
}
typename std::enable_if< std::is_constructible<value_type, Args...>::value, bool>::type
emplace( Args&&... args )
{
- size_t back = back_.load( memory_model::memory_order_relaxed );
+ counter_type back = back_.load( memory_model::memory_order_relaxed );
- assert( back - pfront_ <= capacity() );
+ assert( static_cast<size_t>( back - pfront_ ) <= capacity() );
if ( pfront_ + capacity() - back < 1 ) {
pfront_ = front_.load( memory_model::memory_order_acquire );
template <typename Func>
bool enqueue_with( Func f )
{
- size_t back = back_.load( memory_model::memory_order_relaxed );
+ counter_type back = back_.load( memory_model::memory_order_relaxed );
- assert( back - pfront_ <= capacity() );
+ assert( static_cast<size_t>( back - pfront_ ) <= capacity() );
if ( pfront_ + capacity() - back < 1 ) {
pfront_ = front_.load( memory_model::memory_order_acquire );
{
assert( count < capacity() );
- size_t front = front_.load( memory_model::memory_order_relaxed );
- assert( cback_ - front < capacity() );
+ counter_type front = front_.load( memory_model::memory_order_relaxed );
+ assert( static_cast<size_t>( cback_ - front ) < capacity() );
- if ( cback_ - front < count ) {
+ if ( static_cast<size_t>( cback_ - front ) < count ) {
cback_ = back_.load( memory_model::memory_order_acquire );
- if ( cback_ - front < count )
+ if ( static_cast<size_t>( cback_ - front ) < count )
return false;
}
template <typename Func>
bool dequeue_with( Func f )
{
- size_t front = front_.load( memory_model::memory_order_relaxed );
- assert( cback_ - front < capacity() );
+ counter_type front = front_.load( memory_model::memory_order_relaxed );
+ assert( static_cast<size_t>( cback_ - front ) < capacity() );
if ( cback_ - front < 1 ) {
cback_ = back_.load( memory_model::memory_order_acquire );
*/
value_type* front()
{
- size_t front = front_.load( memory_model::memory_order_relaxed );
- assert( cback_ - front < capacity() );
+ counter_type front = front_.load( memory_model::memory_order_relaxed );
+ assert( static_cast<size_t>( cback_ - front ) < capacity() );
if ( cback_ - front < 1 ) {
cback_ = back_.load( memory_model::memory_order_acquire );
*/
bool pop_front()
{
- size_t front = front_.load( memory_model::memory_order_relaxed );
- assert( cback_ - front <= capacity() );
+ counter_type front = front_.load( memory_model::memory_order_relaxed );
+ assert( static_cast<size_t>( cback_ - front ) <= capacity() );
if ( cback_ - front < 1 ) {
cback_ = back_.load( memory_model::memory_order_acquire );
/// Returns the current size of ring buffer
size_t size() const
{
- return back_.load( memory_model::memory_order_relaxed ) - front_.load( memory_model::memory_order_relaxed );
+ return static_cast<size_t>( back_.load( memory_model::memory_order_relaxed ) - front_.load( memory_model::memory_order_relaxed ));
}
/// Returns capacity of the ring buffer
private:
//@cond
- atomics::atomic<size_t> front_;
- typename opt::details::apply_padding< atomics::atomic<size_t>, traits::padding >::padding_type pad1_;
- atomics::atomic<size_t> back_;
- typename opt::details::apply_padding< atomics::atomic<size_t>, traits::padding >::padding_type pad2_;
- size_t pfront_;
- typename opt::details::apply_padding< size_t, traits::padding >::padding_type pad3_;
- size_t cback_;
- typename opt::details::apply_padding< size_t, traits::padding >::padding_type pad4_;
+ atomics::atomic<counter_type> front_;
+ typename opt::details::apply_padding< atomics::atomic<counter_type>, traits::padding >::padding_type pad1_;
+ atomics::atomic<counter_type> back_;
+ typename opt::details::apply_padding< atomics::atomic<counter_type>, traits::padding >::padding_type pad2_;
+ counter_type pfront_;
+ typename opt::details::apply_padding< counter_type, traits::padding >::padding_type pad3_;
+ counter_type cback_;
+ typename opt::details::apply_padding< counter_type, traits::padding >::padding_type pad4_;
buffer buffer_;
//@endcond
\endcode
@warning: \p %WeakRingBuffer is developed for 64-bit architecture.
- On 32-bit platform an integer overflow of internal counters is possible.
+ A 32-bit platform must provide support for 64-bit atomics (the internal counters are 64-bit).
*/
#ifdef CDS_DOXYGEN_INVOKED
template <typename Traits = weak_ringbuffer::traits>
private:
//@cond
typedef typename traits::buffer::template rebind< uint8_t >::other buffer;
+ typedef uint64_t counter_type;
//@endcond
public:
// check if we can reserve read_size bytes
assert( real_size < capacity() );
- size_t back = back_.load( memory_model::memory_order_relaxed );
+ counter_type back = back_.load( memory_model::memory_order_relaxed );
- assert( back - pfront_ <= capacity() );
+ assert( static_cast<size_t>( back - pfront_ ) <= capacity() );
- if ( pfront_ + capacity() - back < real_size ) {
+ if ( static_cast<size_t>( pfront_ + capacity() - back ) < real_size ) {
pfront_ = front_.load( memory_model::memory_order_acquire );
- if ( pfront_ + capacity() - back < real_size ) {
+ if ( static_cast<size_t>( pfront_ + capacity() - back ) < real_size ) {
// not enough space
return nullptr;
}
uint8_t* reserved = buffer_.buffer() + buffer_.mod( back );
// Check if the buffer free space is enough for storing real_size bytes
- size_t tail_size = capacity() - buffer_.mod( back );
+ size_t tail_size = capacity() - static_cast<size_t>( buffer_.mod( back ));
if ( tail_size < real_size ) {
// make unused tail
assert( tail_size >= sizeof( size_t ) );
// We must be in beginning of buffer
assert( buffer_.mod( back ) == 0 );
- if ( pfront_ + capacity() - back < real_size ) {
+ if ( static_cast<size_t>( pfront_ + capacity() - back ) < real_size ) {
pfront_ = front_.load( memory_model::memory_order_acquire );
- if ( pfront_ + capacity() - back < real_size ) {
+ if ( static_cast<size_t>( pfront_ + capacity() - back ) < real_size ) {
// not enough space
return nullptr;
}
*/
void push_back()
{
- size_t back = back_.load( memory_model::memory_order_relaxed );
+ counter_type back = back_.load( memory_model::memory_order_relaxed );
uint8_t* reserved = buffer_.buffer() + buffer_.mod( back );
size_t real_size = calc_real_size( *reinterpret_cast<size_t*>( reserved ) );
*/
std::pair<void*, size_t> front()
{
- size_t front = front_.load( memory_model::memory_order_relaxed );
- assert( cback_ - front < capacity() );
+ counter_type front = front_.load( memory_model::memory_order_relaxed );
+ assert( static_cast<size_t>( cback_ - front ) < capacity() );
if ( cback_ - front < sizeof( size_t )) {
cback_ = back_.load( memory_model::memory_order_acquire );
#ifdef _DEBUG
size_t real_size = calc_real_size( size );
- if ( cback_ - front < real_size ) {
+ if ( static_cast<size_t>( cback_ - front ) < real_size ) {
cback_ = back_.load( memory_model::memory_order_acquire );
- assert( cback_ - front >= real_size );
+ assert( static_cast<size_t>( cback_ - front ) >= real_size );
}
#endif
*/
bool pop_front()
{
- size_t front = front_.load( memory_model::memory_order_relaxed );
- assert( cback_ - front <= capacity() );
+ counter_type front = front_.load( memory_model::memory_order_relaxed );
+ assert( static_cast<size_t>( cback_ - front ) <= capacity() );
if ( cback_ - front < sizeof(size_t) ) {
cback_ = back_.load( memory_model::memory_order_acquire );
size_t real_size = calc_real_size( untail( size ));
#ifdef _DEBUG
- if ( cback_ - front < real_size ) {
+ if ( static_cast<size_t>( cback_ - front ) < real_size ) {
cback_ = back_.load( memory_model::memory_order_acquire );
- assert( cback_ - front >= real_size );
+ assert( static_cast<size_t>( cback_ - front ) >= real_size );
}
#endif
/// Returns the current size of ring buffer
size_t size() const
{
- return back_.load( memory_model::memory_order_relaxed ) - front_.load( memory_model::memory_order_relaxed );
+ return static_cast<size_t>( back_.load( memory_model::memory_order_relaxed ) - front_.load( memory_model::memory_order_relaxed ));
}
/// Returns capacity of the ring buffer
private:
//@cond
- atomics::atomic<size_t> front_;
- typename opt::details::apply_padding< atomics::atomic<size_t>, traits::padding >::padding_type pad1_;
- atomics::atomic<size_t> back_;
- typename opt::details::apply_padding< atomics::atomic<size_t>, traits::padding >::padding_type pad2_;
- size_t pfront_;
- typename opt::details::apply_padding< size_t, traits::padding >::padding_type pad3_;
- size_t cback_;
- typename opt::details::apply_padding< size_t, traits::padding >::padding_type pad4_;
+ atomics::atomic<counter_type> front_;
+ typename opt::details::apply_padding< atomics::atomic<counter_type>, traits::padding >::padding_type pad1_;
+ atomics::atomic<counter_type> back_;
+ typename opt::details::apply_padding< atomics::atomic<counter_type>, traits::padding >::padding_type pad2_;
+ counter_type pfront_;
+ typename opt::details::apply_padding< counter_type, traits::padding >::padding_type pad3_;
+ counter_type cback_;
+ typename opt::details::apply_padding< counter_type, traits::padding >::padding_type pad4_;
buffer buffer_;
//@endcond