/// Alignment of critical data, default is cache line alignment. See cds::opt::alignment option specification
enum { alignment = opt::cache_line_alignment };
+ /// Padding of segment data, default is no special padding
+ /** @copydetails cds::intrusive::segmented_queue::traits::padding
+ */
+ enum { padding = cds::intrusive::segmented_queue::traits::padding };
+
/// Segment allocator. Default is \ref CDS_DEFAULT_ALLOCATOR
typedef CDS_DEFAULT_ALLOCATOR allocator;
- \p opt::memory_model - memory model, default is \p opt::v::relaxed_ordering.
See option description for the full list of possible models
- \p opt::alignment - the alignment of critical data, see option description for explanation
+ - \p opt::padding - the padding of segment data, default is no special padding.
+ See \p traits::padding for explanation.
- \p opt::allocator - the allocator used to maintain segments.
- \p opt::lock_type - a mutual exclusion lock type used to maintain internal list of allocated
segments. Default is \p cds::opt::Spin, \p std::mutex is also suitable.
/// Alignment of critical data, default is cache line alignment. See cds::opt::alignment option specification
enum { alignment = opt::cache_line_alignment };
+ /// Padding of segment data, default is no special padding
+ /**
+ The segment is just an array of atomic data pointers,
+ so, the high load leads to false sharing and performance degradation.
+ A padding of segment data can eliminate false sharing issue.
+ On the other hand, the padding leads to increase segment size.
+ */
+ enum { padding = opt::no_special_padding };
+
/// Segment allocator. Default is \ref CDS_DEFAULT_ALLOCATOR
typedef CDS_DEFAULT_ALLOCATOR allocator;
- \p opt::memory_model - memory model, default is \p opt::v::relaxed_ordering.
See option description for the full list of possible models
- \p opt::alignment - the alignment for critical data, see option description for explanation
+ - \p opt::padding - the padding of segment data, default is no special padding.
+ See \p traits::padding for explanation.
- \p opt::allocator - the allocator to be used for maintaining segments.
- \p opt::lock_type - a mutual exclusion lock type used to maintain internal list of allocated
segments. Default is \p cds::opt::Spin, \p std::mutex is also suitable.
protected:
//@cond
// Segment cell. LSB is used as deleted mark
- typedef cds::details::marked_ptr< value_type, 1 > cell;
+ typedef cds::details::marked_ptr< value_type, 1 > regular_cell;
+ typedef atomics::atomic< regular_cell > atomic_cell;
+ typedef typename cds::opt::details::apply_padding< atomic_cell, traits::padding >::type cell;
// Segment
struct segment: public boost::intrusive::slist_base_hook<>
{
- atomics::atomic< cell > * cells; // Cell array of size \ref m_nQuasiFactor
- size_t version; // version tag (ABA prevention tag)
+ cell * cells; // Cell array of size \ref m_nQuasiFactor
+ size_t version; // version tag (ABA prevention tag)
// cell array is placed here in one continuous memory block
// Initializes the segment
segment( size_t nCellCount )
// MSVC warning C4355: 'this': used in base member initializer list
- : cells( reinterpret_cast< atomics::atomic< cell > * >( this + 1 ))
+ : cells( reinterpret_cast< cell *>( this + 1 ))
, version( 0 )
{
init( nCellCount );
void init( size_t nCellCount )
{
- atomics::atomic< cell > * pLastCell = cells + nCellCount;
- for ( atomics::atomic< cell > * pCell = cells; pCell < pLastCell; ++pCell )
- pCell->store( cell(), atomics::memory_order_relaxed );
+ cell * pLastCell = cells + nCellCount;
+ for ( cell* pCell = cells; pCell < pLastCell; ++pCell )
+ pCell->data.store( regular_cell(), atomics::memory_order_relaxed );
atomics::atomic_thread_fence( memory_model::memory_order_release );
}
};
bool populated( segment const& s ) const
{
// The lock should be held
- atomics::atomic< cell > const * pLastCell = s.cells + quasi_factor();
- for ( atomics::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
- if ( !pCell->load( memory_model::memory_order_relaxed ).all() )
+ cell const * pLastCell = s.cells + quasi_factor();
+ for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
+ if ( !pCell->data.load( memory_model::memory_order_relaxed ).all() )
return false;
}
return true;
bool exhausted( segment const& s ) const
{
// The lock should be held
- atomics::atomic< cell > const * pLastCell = s.cells + quasi_factor();
- for ( atomics::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
- if ( !pCell->load( memory_model::memory_order_relaxed ).bits() )
+ cell const * pLastCell = s.cells + quasi_factor();
+ for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
+ if ( !pCell->data.load( memory_model::memory_order_relaxed ).bits() )
return false;
}
return true;
do {
typename permutation_generator::integer_type i = gen;
CDS_DEBUG_ONLY( ++nLoopCount );
- if ( pTailSegment->cells[i].load(memory_model::memory_order_relaxed).all() ) {
+ if ( pTailSegment->cells[i].data.load(memory_model::memory_order_relaxed).all() ) {
// Cell is not empty, go next
m_Stat.onPushPopulated();
}
else {
// Empty cell found, try to enqueue here
- cell nullCell;
- if ( pTailSegment->cells[i].compare_exchange_strong( nullCell, cell( &val ),
+ regular_cell nullCell;
+ if ( pTailSegment->cells[i].data.compare_exchange_strong( nullCell, regular_cell( &val ),
memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
// Ok to push item
}
bool bHadNullValue = false;
- cell item;
+ regular_cell item;
CDS_DEBUG_ONLY( size_t nLoopCount = 0 );
do {
typename permutation_generator::integer_type i = gen;
// Guard the item
// In segmented queue the cell cannot be reused
// So no loop is needed here to protect the cell
- item = pHeadSegment->cells[i].load( memory_model::memory_order_relaxed );
+ item = pHeadSegment->cells[i].data.load( memory_model::memory_order_relaxed );
itemGuard.assign( item.ptr() );
// Check if this cell is empty, which means an element
// If the item is not deleted yet
if ( !item.bits() ) {
// Try to mark the cell as deleted
- if ( pHeadSegment->cells[i].compare_exchange_strong( item, item | 1,
+ if ( pHeadSegment->cells[i].data.compare_exchange_strong( item, item | 1,
memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
--m_ItemCounter;
} // namespace details
//@endcond
+ /// Special padding constants for \p cds::opt::padding option
+ enum special_padding {
+ no_special_padding = 0, ///< no special padding
+ cache_line_padding = 1, ///< use cache line size defined in cds/user_setup/cache_line.h
+
+ /// Apply padding only for tiny data of size less than required padding
+ /**
+ The flag means that if your data size is less than the cache line size, the padding is applied.
+ Otherwise no padding will be applied.
+
+ This flag is applied for padding value:
+ \code
+ cds::opt::padding< cds::opt::cache_line_padding | cds::opt::padding_tiny_data_only >;
+ cds::opt::padding< 256 | cds::opt::padding_tiny_data_only >;
+ \endcode
+ */
+ padding_tiny_data_only = 0x80000000,
+
+ //@cond
+ padding_flags = padding_tiny_data_only
+ //@endcond
+ };
+
+ /// [value-option] Padding option setter
+ /**
+ The padding for the internal data of some containers. May be useful to solve false sharing problem.
+ \p Value defines desired padding and it may be power of two integer or predefined values from
+ \p special_padding enum.
+ */
+ template <unsigned int Value>
+ struct padding {
+ //@cond
+ template <typename Base> struct pack: public Base
+ {
+ enum { padding = Value };
+ };
+ //@endcond
+ };
+
+ //@cond
+ namespace details {
+ enum padding_vs_datasize {
+ padding_datasize_less,
+ padding_datasize_equal,
+ padding_datasize_greater
+ };
+
+ template < typename T, unsigned int Padding, bool NoPadding, padding_vs_datasize Relation, bool TinyOnly >
+ struct apply_padding_helper;
+
+ template <typename T, padding_vs_datasize Relation, bool TinyOnly >
+ struct apply_padding_helper < T, 0, true, Relation, TinyOnly >
+ {
+ struct type {
+ T data;
+ };
+ };
+
+ template <typename T, unsigned int Padding, bool TinyOnly >
+ struct apply_padding_helper < T, Padding, false, padding_datasize_equal, TinyOnly >
+ {
+ struct type {
+ T data;
+ };
+ };
+
+ template <typename T, unsigned int Padding, bool TinyOnly >
+ struct apply_padding_helper < T, Padding, false, padding_datasize_less, TinyOnly >
+ {
+ struct type {
+ T data;
+ uint8_t pad_[Padding - sizeof( T )];
+ };
+ };
+
+ template <typename T, unsigned int Padding >
+ struct apply_padding_helper < T, Padding, false, padding_datasize_greater, false >
+ {
+ struct type {
+ T data;
+ uint8_t pad_[Padding - sizeof( T ) % Padding];
+ };
+ };
+
+ template <typename T, unsigned int Padding >
+ struct apply_padding_helper < T, Padding, false, padding_datasize_greater, true >
+ {
+ struct type {
+ T data;
+ };
+ };
+
+ template <typename T, unsigned int Padding >
+ struct apply_padding
+ {
+ private:
+ enum { padding = Padding & ~padding_flags };
+
+ public:
+ static CDS_CONSTEXPR const size_t c_nPadding =
+ padding == cache_line_padding ? cds::c_nCacheLineSize :
+ padding == no_special_padding ? 0 : padding ;
+
+ static_assert( (c_nPadding & (c_nPadding - 1)) == 0, "Padding must be a power-of-two number" );
+
+ typedef typename apply_padding_helper< T,
+ c_nPadding,
+ c_nPadding == 0,
+ sizeof( T ) < c_nPadding ? padding_datasize_less : sizeof( T ) == c_nPadding ? padding_datasize_equal : padding_datasize_greater,
+ (Padding & padding_tiny_data_only) != 0
+ >::type type;
+ };
+
+ } // namespace details
+ //@endcond
+
+
/// [type-option] Generic option setter for statisitcs
/**
This option sets a type to gather statistics.
cds::container::TsigasCycleQueue
cds::container::VyukovMPMCCycleQueue
- Added: new member functions push_with(Func) and pop_with(Func) to cds::container::MSPriorityQueue
+ - SegmentedQueue: add padding into segmented_queue::traits to eliminate false sharing.
1.6.0 23.09.2014
General release
namespace queue {
- class HdrIntrusiveSegmentedQueue: public CppUnitMini::TestCase
+ class HdrIntrusiveSegmentedQueue : public CppUnitMini::TestCase
{
struct item {
int nValue;
size_t nDispose2Count;
item()
- : nValue(0)
- , nDisposeCount(0)
- , nDispose2Count(0)
+ : nValue( 0 )
+ , nDisposeCount( 0 )
+ , nDispose2Count( 0 )
{}
item( int nVal )
- : nValue(nVal)
- , nDisposeCount(0)
- , nDispose2Count(0)
+ : nValue( nVal )
+ , nDisposeCount( 0 )
+ , nDispose2Count( 0 )
{}
};
+ struct big_item : public item
+ {
+ big_item()
+ {}
+
+ big_item( int nVal )
+ : item( nVal )
+ {}
+
+ int arr[80];
+ };
+
struct Disposer
{
void operator()( item * p )
void SegmQueue_HP_mutex();
void SegmQueue_HP_shuffle();
void SegmQueue_HP_stat();
+ void SegmQueue_HP_cacheline_padding();
+ void SegmQueue_HP_mutex_cacheline_padding();
+ void SegmQueue_HP_shuffle_cacheline_padding();
+ void SegmQueue_HP_stat_cacheline_padding();
+ void SegmQueue_HP_256_padding();
+ void SegmQueue_HP_mutex_256_padding();
+ void SegmQueue_HP_shuffle_256_padding();
+ void SegmQueue_HP_stat_256_padding();
+ void SegmQueue_HP_cacheline_padding_bigdata();
+ void SegmQueue_HP_mutex_cacheline_padding_bigdata();
+ void SegmQueue_HP_shuffle_cacheline_padding_bigdata();
+ void SegmQueue_HP_stat_cacheline_padding_bigdata();
void SegmQueue_DHP();
void SegmQueue_DHP_mutex();
void SegmQueue_DHP_shuffle();
void SegmQueue_DHP_stat();
+ void SegmQueue_DHP_cacheline_padding();
+ void SegmQueue_DHP_mutex_cacheline_padding();
+ void SegmQueue_DHP_shuffle_cacheline_padding();
+ void SegmQueue_DHP_stat_cacheline_padding();
+ void SegmQueue_DHP_256_padding();
+ void SegmQueue_DHP_mutex_256_padding();
+ void SegmQueue_DHP_shuffle_256_padding();
+ void SegmQueue_DHP_stat_256_padding();
+ void SegmQueue_DHP_cacheline_padding_bigdata();
+ void SegmQueue_DHP_mutex_cacheline_padding_bigdata();
+ void SegmQueue_DHP_shuffle_cacheline_padding_bigdata();
+ void SegmQueue_DHP_stat_cacheline_padding_bigdata();
CPPUNIT_TEST_SUITE(HdrIntrusiveSegmentedQueue)
CPPUNIT_TEST( SegmQueue_HP )
CPPUNIT_TEST( SegmQueue_HP_mutex )
CPPUNIT_TEST( SegmQueue_HP_shuffle )
CPPUNIT_TEST( SegmQueue_HP_stat )
+ CPPUNIT_TEST( SegmQueue_HP_cacheline_padding )
+ CPPUNIT_TEST( SegmQueue_HP_mutex_cacheline_padding )
+ CPPUNIT_TEST( SegmQueue_HP_shuffle_cacheline_padding )
+ CPPUNIT_TEST( SegmQueue_HP_stat_cacheline_padding )
+ CPPUNIT_TEST( SegmQueue_HP_256_padding )
+ CPPUNIT_TEST( SegmQueue_HP_mutex_256_padding )
+ CPPUNIT_TEST( SegmQueue_HP_shuffle_256_padding )
+ CPPUNIT_TEST( SegmQueue_HP_stat_256_padding )
+ CPPUNIT_TEST( SegmQueue_HP_cacheline_padding_bigdata )
+ CPPUNIT_TEST( SegmQueue_HP_mutex_cacheline_padding_bigdata )
+ CPPUNIT_TEST( SegmQueue_HP_shuffle_cacheline_padding_bigdata )
+ CPPUNIT_TEST( SegmQueue_HP_stat_cacheline_padding_bigdata )
CPPUNIT_TEST( SegmQueue_DHP )
CPPUNIT_TEST( SegmQueue_DHP_mutex )
CPPUNIT_TEST( SegmQueue_DHP_shuffle )
CPPUNIT_TEST( SegmQueue_DHP_stat )
+ CPPUNIT_TEST( SegmQueue_DHP_cacheline_padding )
+ CPPUNIT_TEST( SegmQueue_DHP_mutex_cacheline_padding )
+ CPPUNIT_TEST( SegmQueue_DHP_shuffle_cacheline_padding )
+ CPPUNIT_TEST( SegmQueue_DHP_stat_cacheline_padding )
+ CPPUNIT_TEST( SegmQueue_DHP_256_padding )
+ CPPUNIT_TEST( SegmQueue_DHP_mutex_256_padding )
+ CPPUNIT_TEST( SegmQueue_DHP_shuffle_256_padding )
+ CPPUNIT_TEST( SegmQueue_DHP_stat_256_padding )
+ CPPUNIT_TEST( SegmQueue_DHP_cacheline_padding_bigdata )
+ CPPUNIT_TEST( SegmQueue_DHP_mutex_cacheline_padding_bigdata )
+ CPPUNIT_TEST( SegmQueue_DHP_shuffle_cacheline_padding_bigdata )
+ CPPUNIT_TEST( SegmQueue_DHP_stat_cacheline_padding_bigdata )
CPPUNIT_TEST_SUITE_END()
};
test<queue_type>();
}
+ void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_cacheline_padding()
+ {
+ struct queue_traits : public cds::intrusive::segmented_queue::traits
+ {
+ typedef Disposer disposer;
+ enum { padding = cds::opt::cache_line_padding };
+ };
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_mutex_cacheline_padding()
+ {
+ struct queue_traits : public
+ cds::intrusive::segmented_queue::make_traits <
+ cds::intrusive::opt::disposer< Disposer >
+ , cds::opt::padding< cds::opt::cache_line_padding >
+ ,cds::opt::lock_type < std::mutex >
+ > ::type
+ {};
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_shuffle_cacheline_padding()
+ {
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item,
+ cds::intrusive::segmented_queue::make_traits<
+ cds::intrusive::opt::disposer< Disposer >
+ ,cds::opt::item_counter< cds::atomicity::item_counter >
+ ,cds::opt::permutation_generator< cds::opt::v::random_shuffle_permutation<> >
+ , cds::opt::padding< cds::opt::cache_line_padding >
+ >::type
+ > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_stat_cacheline_padding()
+ {
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item,
+ cds::intrusive::segmented_queue::make_traits<
+ cds::intrusive::opt::disposer< Disposer >
+ ,cds::opt::item_counter< cds::atomicity::item_counter >
+ ,cds::opt::permutation_generator< cds::opt::v::random_permutation<> >
+ ,cds::opt::stat< cds::intrusive::segmented_queue::stat<> >
+ , cds::opt::padding< cds::opt::cache_line_padding >
+ >::type
+ > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_256_padding()
+ {
+ struct queue_traits : public cds::intrusive::segmented_queue::traits
+ {
+ typedef Disposer disposer;
+ enum { padding = 256 };
+ };
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_mutex_256_padding()
+ {
+ struct queue_traits : public
+ cds::intrusive::segmented_queue::make_traits <
+ cds::intrusive::opt::disposer< Disposer >
+ , cds::opt::padding< 256 >
+ ,cds::opt::lock_type < std::mutex >
+ > ::type
+ {};
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_shuffle_256_padding()
+ {
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item,
+ cds::intrusive::segmented_queue::make_traits<
+ cds::intrusive::opt::disposer< Disposer >
+ ,cds::opt::item_counter< cds::atomicity::item_counter >
+ ,cds::opt::permutation_generator< cds::opt::v::random_shuffle_permutation<> >
+ , cds::opt::padding< 256 >
+ >::type
+ > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_stat_256_padding()
+ {
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item,
+ cds::intrusive::segmented_queue::make_traits<
+ cds::intrusive::opt::disposer< Disposer >
+ ,cds::opt::item_counter< cds::atomicity::item_counter >
+ ,cds::opt::permutation_generator< cds::opt::v::random_permutation<> >
+ ,cds::opt::stat< cds::intrusive::segmented_queue::stat<> >
+ , cds::opt::padding< 256 >
+ >::type
+ > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_cacheline_padding_bigdata()
+ {
+ struct queue_traits : public cds::intrusive::segmented_queue::traits
+ {
+ typedef Disposer disposer;
+ enum { padding = cds::opt::cache_line_padding };
+ };
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, big_item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_mutex_cacheline_padding_bigdata()
+ {
+ struct queue_traits : public
+ cds::intrusive::segmented_queue::make_traits <
+ cds::intrusive::opt::disposer< Disposer >
+ , cds::opt::padding< cds::opt::cache_line_padding >
+ ,cds::opt::lock_type < std::mutex >
+ > ::type
+ {};
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, big_item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_shuffle_cacheline_padding_bigdata()
+ {
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, big_item,
+ cds::intrusive::segmented_queue::make_traits<
+ cds::intrusive::opt::disposer< Disposer >
+ ,cds::opt::item_counter< cds::atomicity::item_counter >
+ ,cds::opt::permutation_generator< cds::opt::v::random_shuffle_permutation<> >
+ , cds::opt::padding< cds::opt::cache_line_padding >
+ >::type
+ > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_stat_cacheline_padding_bigdata()
+ {
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, big_item,
+ cds::intrusive::segmented_queue::make_traits<
+ cds::intrusive::opt::disposer< Disposer >
+ ,cds::opt::item_counter< cds::atomicity::item_counter >
+ ,cds::opt::permutation_generator< cds::opt::v::random_permutation<> >
+ ,cds::opt::stat< cds::intrusive::segmented_queue::stat<> >
+ , cds::opt::padding< cds::opt::cache_line_padding >
+ >::type
+ > queue_type;
+
+ test<queue_type>();
+ }
+
} // namespace queue
test<queue_type>();
}
+ void HdrIntrusiveSegmentedQueue::SegmQueue_HP_cacheline_padding()
+ {
+ struct queue_traits : public cds::intrusive::segmented_queue::traits
+ {
+ typedef Disposer disposer;
+ enum { padding = cds::opt::cache_line_padding };
+ };
+ typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_HP_mutex_cacheline_padding()
+ {
+ struct queue_traits : public
+ cds::intrusive::segmented_queue::make_traits <
+ cds::intrusive::opt::disposer< Disposer >
+ ,cds::opt::lock_type < std::mutex >
+ ,cds::opt::padding< cds::opt::cache_line_padding >
+ > ::type
+ {};
+ typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_HP_shuffle_cacheline_padding()
+ {
+ typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item,
+ cds::intrusive::segmented_queue::make_traits<
+ cds::intrusive::opt::disposer< Disposer >
+ ,cds::opt::item_counter< cds::atomicity::item_counter >
+ , cds::opt::padding< cds::opt::cache_line_padding >
+ ,cds::opt::permutation_generator< cds::opt::v::random_shuffle_permutation<> >
+ >::type
+ > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_HP_stat_cacheline_padding()
+ {
+ typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item,
+ cds::intrusive::segmented_queue::make_traits<
+ cds::intrusive::opt::disposer< Disposer >
+ , cds::opt::padding< cds::opt::cache_line_padding >
+ ,cds::opt::item_counter< cds::atomicity::item_counter >
+ ,cds::opt::permutation_generator< cds::opt::v::random_permutation<> >
+ ,cds::opt::stat< cds::intrusive::segmented_queue::stat<> >
+ >::type
+ > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_HP_256_padding()
+ {
+ struct queue_traits : public cds::intrusive::segmented_queue::traits
+ {
+ typedef Disposer disposer;
+ enum { padding = 256 };
+ };
+ typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_HP_mutex_256_padding()
+ {
+ struct queue_traits : public
+ cds::intrusive::segmented_queue::make_traits <
+ cds::intrusive::opt::disposer< Disposer >
+ ,cds::opt::lock_type < std::mutex >
+ ,cds::opt::padding< 256 >
+ > ::type
+ {};
+ typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_HP_shuffle_256_padding()
+ {
+ typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item,
+ cds::intrusive::segmented_queue::make_traits<
+ cds::intrusive::opt::disposer< Disposer >
+ ,cds::opt::item_counter< cds::atomicity::item_counter >
+ , cds::opt::padding< 256 >
+ ,cds::opt::permutation_generator< cds::opt::v::random_shuffle_permutation<> >
+ >::type
+ > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_HP_stat_256_padding()
+ {
+ typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item,
+ cds::intrusive::segmented_queue::make_traits<
+ cds::intrusive::opt::disposer< Disposer >
+ , cds::opt::padding< 256 >
+ ,cds::opt::item_counter< cds::atomicity::item_counter >
+ ,cds::opt::permutation_generator< cds::opt::v::random_permutation<> >
+ ,cds::opt::stat< cds::intrusive::segmented_queue::stat<> >
+ >::type
+ > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_HP_cacheline_padding_bigdata()
+ {
+ struct queue_traits : public cds::intrusive::segmented_queue::traits
+ {
+ typedef Disposer disposer;
+ enum { padding = cds::opt::cache_line_padding | cds::opt::padding_tiny_data_only };
+ };
+ typedef cds::intrusive::SegmentedQueue< cds::gc::HP, big_item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_HP_mutex_cacheline_padding_bigdata()
+ {
+ struct queue_traits : public
+ cds::intrusive::segmented_queue::make_traits <
+ cds::intrusive::opt::disposer< Disposer >
+ ,cds::opt::lock_type < std::mutex >
+ , cds::opt::padding< cds::opt::cache_line_padding | cds::opt::padding_tiny_data_only >
+ > ::type
+ {};
+ typedef cds::intrusive::SegmentedQueue< cds::gc::HP, big_item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_HP_shuffle_cacheline_padding_bigdata()
+ {
+ typedef cds::intrusive::SegmentedQueue< cds::gc::HP, big_item,
+ cds::intrusive::segmented_queue::make_traits<
+ cds::intrusive::opt::disposer< Disposer >
+ ,cds::opt::item_counter< cds::atomicity::item_counter >
+ , cds::opt::padding< cds::opt::cache_line_padding | cds::opt::padding_tiny_data_only >
+ ,cds::opt::permutation_generator< cds::opt::v::random_shuffle_permutation<> >
+ >::type
+ > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrIntrusiveSegmentedQueue::SegmQueue_HP_stat_cacheline_padding_bigdata()
+ {
+ typedef cds::intrusive::SegmentedQueue< cds::gc::HP, big_item,
+ cds::intrusive::segmented_queue::make_traits<
+ cds::intrusive::opt::disposer< Disposer >
+ , cds::opt::padding< cds::opt::cache_line_padding | cds::opt::padding_tiny_data_only >
+ ,cds::opt::item_counter< cds::atomicity::item_counter >
+ ,cds::opt::permutation_generator< cds::opt::v::random_permutation<> >
+ ,cds::opt::stat< cds::intrusive::segmented_queue::stat<> >
+ >::type
+ > queue_type;
+
+ test<queue_type>();
+ }
+
} // namespace queue
void SegmQueue_HP_mutex();
void SegmQueue_HP_shuffle();
void SegmQueue_HP_stat();
+ void SegmQueue_HP_cacheline_padding();
+ void SegmQueue_HP_mutex_cacheline_padding();
+ void SegmQueue_HP_shuffle_cacheline_padding();
+ void SegmQueue_HP_stat_cacheline_padding();
void SegmQueue_DHP();
void SegmQueue_DHP_mutex();
void SegmQueue_DHP_shuffle();
void SegmQueue_DHP_stat();
+ void SegmQueue_DHP_cacheline_padding();
+ void SegmQueue_DHP_mutex_cacheline_padding();
+ void SegmQueue_DHP_shuffle_cacheline_padding();
+ void SegmQueue_DHP_stat_cacheline_padding();
CPPUNIT_TEST_SUITE(HdrSegmentedQueue)
CPPUNIT_TEST( SegmQueue_HP )
CPPUNIT_TEST( SegmQueue_HP_mutex )
CPPUNIT_TEST( SegmQueue_HP_shuffle )
CPPUNIT_TEST( SegmQueue_HP_stat )
+ CPPUNIT_TEST( SegmQueue_HP_cacheline_padding )
+ CPPUNIT_TEST( SegmQueue_HP_mutex_cacheline_padding )
+ CPPUNIT_TEST( SegmQueue_HP_shuffle_cacheline_padding )
+ CPPUNIT_TEST( SegmQueue_HP_stat_cacheline_padding )
CPPUNIT_TEST( SegmQueue_DHP )
CPPUNIT_TEST( SegmQueue_DHP_mutex )
CPPUNIT_TEST( SegmQueue_DHP_shuffle )
CPPUNIT_TEST( SegmQueue_DHP_stat )
- CPPUNIT_TEST_SUITE_END()
+ CPPUNIT_TEST( SegmQueue_DHP_cacheline_padding )
+ CPPUNIT_TEST( SegmQueue_DHP_mutex_cacheline_padding )
+ CPPUNIT_TEST( SegmQueue_DHP_shuffle_cacheline_padding )
+ CPPUNIT_TEST( SegmQueue_DHP_stat_cacheline_padding )
+ CPPUNIT_TEST_SUITE_END()
};
} // namespace queue
test<queue_type>();
}
+ void HdrSegmentedQueue::SegmQueue_DHP_cacheline_padding()
+ {
+ struct queue_traits : public cds::container::segmented_queue::traits
+ {
+ enum { padding = cds::opt::cache_line_padding };
+ };
+
+ typedef cds::container::SegmentedQueue< cds::gc::DHP, item, queue_traits > queue_type;
+ test<queue_type>();
+ }
+
+ void HdrSegmentedQueue::SegmQueue_DHP_mutex_cacheline_padding()
+ {
+ typedef cds::container::SegmentedQueue< cds::gc::DHP, item,
+ cds::container::segmented_queue::make_traits<
+ cds::opt::lock_type< std::mutex >
+ , cds::opt::padding< cds::opt::cache_line_padding >
+ >::type
+ > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrSegmentedQueue::SegmQueue_DHP_shuffle_cacheline_padding()
+ {
+ struct queue_traits : public cds::container::segmented_queue::traits
+ {
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::opt::v::random_shuffle_permutation<> permutation_generator;
+ enum { padding = cds::opt::cache_line_padding };
+ };
+ typedef cds::container::SegmentedQueue< cds::gc::DHP, item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrSegmentedQueue::SegmQueue_DHP_stat_cacheline_padding()
+ {
+ struct queue_traits : public
+ cds::container::segmented_queue::make_traits <
+ cds::opt::item_counter< cds::atomicity::item_counter >
+ , cds::opt::permutation_generator< cds::opt::v::random_permutation<> >
+ , cds::opt::stat < cds::container::segmented_queue::stat<> >
+ , cds::opt::padding< cds::opt::cache_line_padding >
+ > ::type
+ {};
+ typedef cds::container::SegmentedQueue< cds::gc::DHP, item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
} // namespace queue
test<queue_type>();
}
+ void HdrSegmentedQueue::SegmQueue_HP_cacheline_padding()
+ {
+ struct queue_traits : public cds::container::segmented_queue::traits
+ {
+ enum { padding = cds::opt::cache_line_padding };
+ };
+
+ typedef cds::container::SegmentedQueue< cds::gc::HP, item, queue_traits > queue_type;
+ test<queue_type>();
+ }
+
+ void HdrSegmentedQueue::SegmQueue_HP_mutex_cacheline_padding()
+ {
+ typedef cds::container::SegmentedQueue< cds::gc::HP, item,
+ cds::container::segmented_queue::make_traits<
+ cds::opt::lock_type< std::mutex >
+ , cds::opt::padding< cds::opt::cache_line_padding >
+ >::type
+ > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrSegmentedQueue::SegmQueue_HP_shuffle_cacheline_padding()
+ {
+ struct queue_traits : public cds::container::segmented_queue::traits
+ {
+ typedef cds::atomicity::item_counter item_counter;
+ typedef cds::opt::v::random_shuffle_permutation<> permutation_generator;
+ enum { padding = cds::opt::cache_line_padding };
+ };
+ typedef cds::container::SegmentedQueue< cds::gc::HP, item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
+ void HdrSegmentedQueue::SegmQueue_HP_stat_cacheline_padding()
+ {
+ struct queue_traits : public
+ cds::container::segmented_queue::make_traits <
+ cds::opt::item_counter< cds::atomicity::item_counter >
+ , cds::opt::permutation_generator< cds::opt::v::random_permutation<> >
+ , cds::opt::stat < cds::container::segmented_queue::stat<> >
+ , cds::opt::padding< cds::opt::cache_line_padding >
+ > ::type
+ {};
+ typedef cds::container::SegmentedQueue< cds::gc::HP, item, queue_traits > queue_type;
+
+ test<queue_type>();
+ }
+
} // namespace queue
// SegmentedQueue
#define CDSUNIT_DECLARE_SegmentedQueue \
TEST_SEGMENTED( SegmentedQueue_HP_spin ) \
+ TEST_SEGMENTED( SegmentedQueue_HP_spin_padding ) \
TEST_SEGMENTED( SegmentedQueue_HP_spin_stat ) \
TEST_SEGMENTED( SegmentedQueue_HP_mutex ) \
+ TEST_SEGMENTED( SegmentedQueue_HP_mutex_padding ) \
TEST_SEGMENTED( SegmentedQueue_HP_mutex_stat ) \
- TEST_SEGMENTED( SegmentedQueue_PTB_spin ) \
- TEST_SEGMENTED( SegmentedQueue_PTB_spin_stat ) \
- TEST_SEGMENTED( SegmentedQueue_PTB_mutex ) \
- TEST_SEGMENTED( SegmentedQueue_PTB_mutex_stat )
+ TEST_SEGMENTED( SegmentedQueue_DHP_spin ) \
+ TEST_SEGMENTED( SegmentedQueue_DHP_spin_padding ) \
+ TEST_SEGMENTED( SegmentedQueue_DHP_spin_stat ) \
+ TEST_SEGMENTED( SegmentedQueue_DHP_mutex ) \
+ TEST_SEGMENTED( SegmentedQueue_DHP_mutex_padding ) \
+ TEST_SEGMENTED( SegmentedQueue_DHP_mutex_stat )
#define CDSUNIT_TEST_SegmentedQueue \
CPPUNIT_TEST( SegmentedQueue_HP_spin ) \
+ CPPUNIT_TEST( SegmentedQueue_HP_spin_padding ) \
CPPUNIT_TEST( SegmentedQueue_HP_spin_stat ) \
CPPUNIT_TEST( SegmentedQueue_HP_mutex ) \
+ CPPUNIT_TEST( SegmentedQueue_HP_mutex_padding ) \
CPPUNIT_TEST( SegmentedQueue_HP_mutex_stat ) \
- CPPUNIT_TEST( SegmentedQueue_PTB_spin ) \
- CPPUNIT_TEST( SegmentedQueue_PTB_spin_stat ) \
- CPPUNIT_TEST( SegmentedQueue_PTB_mutex ) \
- CPPUNIT_TEST( SegmentedQueue_PTB_mutex_stat )
+ CPPUNIT_TEST( SegmentedQueue_DHP_spin ) \
+ CPPUNIT_TEST( SegmentedQueue_DHP_spin_padding ) \
+ CPPUNIT_TEST( SegmentedQueue_DHP_spin_stat ) \
+ CPPUNIT_TEST( SegmentedQueue_DHP_mutex ) \
+ CPPUNIT_TEST( SegmentedQueue_DHP_mutex_padding ) \
+ CPPUNIT_TEST( SegmentedQueue_DHP_mutex_stat )
// BoostSList
cds::opt::stat< cds::intrusive::segmented_queue::stat<> >
>::type
{};
- class traits_SegmentedQueue_mutex_stat:
+ class traits_SegmentedQueue_spin_padding :
+ public cds::intrusive::segmented_queue::make_traits<
+ cds::opt::padding< cds::opt::cache_line_padding >
+ >::type
+ {};
+ class traits_SegmentedQueue_mutex_stat :
public cds::intrusive::segmented_queue::make_traits<
cds::opt::stat< cds::intrusive::segmented_queue::stat<> >
,cds::opt::lock_type< std::mutex >
cds::opt::lock_type< std::mutex >
>::type
{};
+ class traits_SegmentedQueue_mutex_padding:
+ public cds::intrusive::segmented_queue::make_traits<
+ cds::opt::lock_type< std::mutex >
+ ,cds::opt::padding< cds::opt::cache_line_padding >
+ >::type
+ {};
typedef cds::intrusive::SegmentedQueue< cds::gc::HP, T > SegmentedQueue_HP_spin;
+ // _padding variants use traits with cds::opt::padding = cache_line_padding,
+ // padding segment data to a cache line to reduce false sharing between cells.
+ typedef cds::intrusive::SegmentedQueue< cds::gc::HP, T, traits_SegmentedQueue_spin_padding > SegmentedQueue_HP_spin_padding;
typedef cds::intrusive::SegmentedQueue< cds::gc::HP, T, traits_SegmentedQueue_spin_stat > SegmentedQueue_HP_spin_stat;
typedef cds::intrusive::SegmentedQueue< cds::gc::HP, T, traits_SegmentedQueue_mutex > SegmentedQueue_HP_mutex;
+ typedef cds::intrusive::SegmentedQueue< cds::gc::HP, T, traits_SegmentedQueue_mutex_padding > SegmentedQueue_HP_mutex_padding;
typedef cds::intrusive::SegmentedQueue< cds::gc::HP, T, traits_SegmentedQueue_mutex_stat > SegmentedQueue_HP_mutex_stat;
- typedef cds::intrusive::SegmentedQueue< cds::gc::PTB, T > SegmentedQueue_PTB_spin;
- typedef cds::intrusive::SegmentedQueue< cds::gc::PTB, T, traits_SegmentedQueue_spin_stat > SegmentedQueue_PTB_spin_stat;
- typedef cds::intrusive::SegmentedQueue< cds::gc::PTB, T, traits_SegmentedQueue_mutex > SegmentedQueue_PTB_mutex;
- typedef cds::intrusive::SegmentedQueue< cds::gc::PTB, T, traits_SegmentedQueue_mutex_stat > SegmentedQueue_PTB_mutex_stat;
+ // gc::PTB-based typedefs are replaced by their gc::DHP counterparts below.
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, T > SegmentedQueue_DHP_spin;
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, T, traits_SegmentedQueue_spin_padding > SegmentedQueue_DHP_spin_padding;
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, T, traits_SegmentedQueue_spin_stat > SegmentedQueue_DHP_spin_stat;
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, T, traits_SegmentedQueue_mutex > SegmentedQueue_DHP_mutex;
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, T, traits_SegmentedQueue_mutex_padding > SegmentedQueue_DHP_mutex_padding;
+ typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, T, traits_SegmentedQueue_mutex_stat > SegmentedQueue_DHP_mutex_stat;
// Boost SList
typedef details::BoostSList< T, std::mutex > BoostSList_mutex;
// SegmentedQueue
+ // Declares one TEST_SEGMENTED case per SegmentedQueue variant, including the
+ // new _padding variants. NOTE: comments must stay outside the macro body --
+ // a // comment would swallow the backslash line continuation.
#define CDSUNIT_DECLARE_SegmentedQueue( ITEM_TYPE ) \
TEST_SEGMENTED( SegmentedQueue_HP_spin, ITEM_TYPE ) \
+ TEST_SEGMENTED( SegmentedQueue_HP_spin_padding, ITEM_TYPE ) \
TEST_SEGMENTED( SegmentedQueue_HP_spin_stat, ITEM_TYPE ) \
TEST_SEGMENTED( SegmentedQueue_HP_mutex, ITEM_TYPE ) \
+ TEST_SEGMENTED( SegmentedQueue_HP_mutex_padding, ITEM_TYPE ) \
TEST_SEGMENTED( SegmentedQueue_HP_mutex_stat, ITEM_TYPE ) \
TEST_SEGMENTED( SegmentedQueue_DHP_spin, ITEM_TYPE ) \
+ TEST_SEGMENTED( SegmentedQueue_DHP_spin_padding, ITEM_TYPE ) \
TEST_SEGMENTED( SegmentedQueue_DHP_spin_stat, ITEM_TYPE ) \
TEST_SEGMENTED( SegmentedQueue_DHP_mutex, ITEM_TYPE ) \
+ TEST_SEGMENTED( SegmentedQueue_DHP_mutex_padding, ITEM_TYPE ) \
TEST_SEGMENTED( SegmentedQueue_DHP_mutex_stat, ITEM_TYPE )
+ // Registers the CPPUNIT test entry for every SegmentedQueue variant declared
+ // by CDSUNIT_DECLARE_SegmentedQueue; the two lists must stay in sync.
#define CDSUNIT_TEST_SegmentedQueue \
CPPUNIT_TEST( SegmentedQueue_HP_spin ) \
+ CPPUNIT_TEST( SegmentedQueue_HP_spin_padding ) \
CPPUNIT_TEST( SegmentedQueue_HP_spin_stat ) \
CPPUNIT_TEST( SegmentedQueue_HP_mutex ) \
+ CPPUNIT_TEST( SegmentedQueue_HP_mutex_padding ) \
CPPUNIT_TEST( SegmentedQueue_HP_mutex_stat ) \
CPPUNIT_TEST( SegmentedQueue_DHP_spin ) \
+ CPPUNIT_TEST( SegmentedQueue_DHP_spin_padding ) \
CPPUNIT_TEST( SegmentedQueue_DHP_spin_stat ) \
CPPUNIT_TEST( SegmentedQueue_DHP_mutex ) \
+ CPPUNIT_TEST( SegmentedQueue_DHP_mutex_padding ) \
CPPUNIT_TEST( SegmentedQueue_DHP_mutex_stat )
-
// std::queue
#define CDSUNIT_DECLARE_StdQueue( ITEM_TYPE ) \
TEST_CASE( StdQueue_deque_Spinlock, ITEM_TYPE ) \
cds::opt::stat< cds::intrusive::segmented_queue::stat<> >
>::type
{};
+ /// SegmentedQueue traits: default spin lock + cache-line padding of segment data
+ /// (padding reduces false sharing at the cost of larger segments; see opt::padding)
+ class traits_SegmentedQueue_spin_padding:
+ public cds::container::segmented_queue::make_traits<
+ cds::opt::padding< cds::opt::cache_line_padding >
+ >::type
+ {};
/// SegmentedQueue traits: std::mutex lock for the segment list + internal statistics
class traits_SegmentedQueue_mutex_stat:
public cds::container::segmented_queue::make_traits<
cds::opt::stat< cds::intrusive::segmented_queue::stat<> >
,cds::opt::lock_type< std::mutex >
>::type
{};
+ /// SegmentedQueue traits: std::mutex lock + cache-line padding of segment data
+ class traits_SegmentedQueue_mutex_padding:
+ public cds::container::segmented_queue::make_traits<
+ cds::opt::lock_type< std::mutex >
+ , cds::opt::padding< cds::opt::cache_line_padding >
+ >::type
+ {};
typedef cds::container::SegmentedQueue< cds::gc::HP, Value > SegmentedQueue_HP_spin;
+ // _padding variants mirror the intrusive test set: segment data is padded to
+ // a cache line (opt::padding = cache_line_padding) to reduce false sharing.
+ typedef cds::container::SegmentedQueue< cds::gc::HP, Value, traits_SegmentedQueue_spin_padding > SegmentedQueue_HP_spin_padding;
typedef cds::container::SegmentedQueue< cds::gc::HP, Value, traits_SegmentedQueue_spin_stat > SegmentedQueue_HP_spin_stat;
typedef cds::container::SegmentedQueue< cds::gc::HP, Value, traits_SegmentedQueue_mutex > SegmentedQueue_HP_mutex;
+ typedef cds::container::SegmentedQueue< cds::gc::HP, Value, traits_SegmentedQueue_mutex_padding > SegmentedQueue_HP_mutex_padding;
typedef cds::container::SegmentedQueue< cds::gc::HP, Value, traits_SegmentedQueue_mutex_stat > SegmentedQueue_HP_mutex_stat;
typedef cds::container::SegmentedQueue< cds::gc::DHP, Value > SegmentedQueue_DHP_spin;
+ typedef cds::container::SegmentedQueue< cds::gc::DHP, Value, traits_SegmentedQueue_spin_padding > SegmentedQueue_DHP_spin_padding;
typedef cds::container::SegmentedQueue< cds::gc::DHP, Value, traits_SegmentedQueue_spin_stat > SegmentedQueue_DHP_spin_stat;
typedef cds::container::SegmentedQueue< cds::gc::DHP, Value, traits_SegmentedQueue_mutex > SegmentedQueue_DHP_mutex;
+ typedef cds::container::SegmentedQueue< cds::gc::DHP, Value, traits_SegmentedQueue_mutex_padding > SegmentedQueue_DHP_mutex_padding;
typedef cds::container::SegmentedQueue< cds::gc::DHP, Value, traits_SegmentedQueue_mutex_stat > SegmentedQueue_DHP_mutex_stat;
-
-
};
}