From aedb7b04a9f5c986ab614dc7c56333eb324f4b91 Mon Sep 17 00:00:00 2001
From: khizmax
Date: Mon, 3 Nov 2014 16:57:59 +0300
Subject: [PATCH] Add padding option to SegmentedQueue to eliminate false sharing

---
 cds/container/segmented_queue.h | 7 +
 cds/intrusive/segmented_queue.h | 51 ++++--
 cds/opt/options.h | 117 +++++++++++++
 change.log | 1 +
 .../queue/hdr_intrusive_segmented_queue.h | 74 +++++++-
 .../hdr_intrusive_segmented_queue_dhp.cpp | 165 ++++++++++++++++++
 .../hdr_intrusive_segmented_queue_hp.cpp | 165 ++++++++++++++++++
 tests/test-hdr/queue/hdr_segmented_queue.h | 18 +-
 .../queue/hdr_segmented_queue_dhp.cpp | 51 ++++++
 .../test-hdr/queue/hdr_segmented_queue_hp.cpp | 51 ++++++
 tests/unit/queue/intrusive_queue_defs.h | 24 ++-
 tests/unit/queue/intrusive_queue_type.h | 25 ++-
 tests/unit/queue/queue_defs.h | 9 +-
 tests/unit/queue/queue_type.h | 17 +-
 14 files changed, 732 insertions(+), 43 deletions(-)

diff --git a/cds/container/segmented_queue.h b/cds/container/segmented_queue.h
index 1b2a1b38..59104fa9 100644
--- a/cds/container/segmented_queue.h
+++ b/cds/container/segmented_queue.h
@@ -46,6 +46,11 @@ namespace cds { namespace container {
 /// Alignment of critical data, default is cache line alignment. See cds::opt::alignment option specification
 enum { alignment = opt::cache_line_alignment };
+ /// Padding of segment data, default is no special padding
+ /** @copydetails cds::intrusive::segmented_queue::traits::padding
+ */
+ enum { padding = cds::intrusive::segmented_queue::traits::padding };
+
 /// Segment allocator. Default is \ref CDS_DEFAULT_ALLOCATOR
 typedef CDS_DEFAULT_ALLOCATOR allocator;
@@ -76,6 +81,8 @@ namespace cds { namespace container {
 - \p opt::memory_model - memory model, default is \p opt::v::relaxed_ordering. See option description for the full list of possible models
 - \p opt::alignment - the alignment of critical data, see option description for explanation
+ - \p opt::padding - the padding of segment data, default is no special padding.
+ See \p traits::padding for explanation.
 - \p opt::allocator - the allocator used to maintain segments.
 - \p opt::lock_type - a mutual exclusion lock type used to maintain internal list of allocated segments.
 Default is \p cds::opt::Spin, \p std::mutex is also suitable.
diff --git a/cds/intrusive/segmented_queue.h b/cds/intrusive/segmented_queue.h
index 8ba29ca2..3b715127 100644
--- a/cds/intrusive/segmented_queue.h
+++ b/cds/intrusive/segmented_queue.h
@@ -92,6 +92,15 @@ namespace cds { namespace intrusive {
 /// Alignment of critical data, default is cache line alignment. See cds::opt::alignment option specification
 enum { alignment = opt::cache_line_alignment };
+ /// Padding of segment data, default is no special padding
+ /**
+ The segment is just an array of atomic data pointers,
+ so under high load the adjacent cells suffer from false sharing and performance degrades.
+ Padding the segment data can eliminate this false sharing issue;
+ on the other hand, padding increases the segment size.
+ */
+ enum { padding = opt::no_special_padding };
+
 /// Segment allocator. Default is \ref CDS_DEFAULT_ALLOCATOR
 typedef CDS_DEFAULT_ALLOCATOR allocator;
@@ -122,6 +131,8 @@ namespace cds { namespace intrusive {
 - \p opt::memory_model - memory model, default is \p opt::v::relaxed_ordering. See option description for the full list of possible models
 - \p opt::alignment - the alignment for critical data, see option description for explanation
+ - \p opt::padding - the padding of segment data, default is no special padding.
+ See \p traits::padding for explanation. - \p opt::allocator - the allocator to be used for maintaining segments. - \p opt::lock_type - a mutual exclusion lock type used to maintain internal list of allocated segments. Default is \p cds::opt::Spin, \p std::mutex is also suitable. @@ -205,19 +216,21 @@ namespace cds { namespace intrusive { protected: //@cond // Segment cell. LSB is used as deleted mark - typedef cds::details::marked_ptr< value_type, 1 > cell; + typedef cds::details::marked_ptr< value_type, 1 > regular_cell; + typedef atomics::atomic< regular_cell > atomic_cell; + typedef typename cds::opt::details::apply_padding< atomic_cell, traits::padding >::type cell; // Segment struct segment: public boost::intrusive::slist_base_hook<> { - atomics::atomic< cell > * cells; // Cell array of size \ref m_nQuasiFactor - size_t version; // version tag (ABA prevention tag) + cell * cells; // Cell array of size \ref m_nQuasiFactor + size_t version; // version tag (ABA prevention tag) // cell array is placed here in one continuous memory block // Initializes the segment segment( size_t nCellCount ) // MSVC warning C4355: 'this': used in base member initializer list - : cells( reinterpret_cast< atomics::atomic< cell > * >( this + 1 )) + : cells( reinterpret_cast< cell *>( this + 1 )) , version( 0 ) { init( nCellCount ); @@ -227,9 +240,9 @@ namespace cds { namespace intrusive { void init( size_t nCellCount ) { - atomics::atomic< cell > * pLastCell = cells + nCellCount; - for ( atomics::atomic< cell > * pCell = cells; pCell < pLastCell; ++pCell ) - pCell->store( cell(), atomics::memory_order_relaxed ); + cell * pLastCell = cells + nCellCount; + for ( cell* pCell = cells; pCell < pLastCell; ++pCell ) + pCell->data.store( regular_cell(), atomics::memory_order_relaxed ); atomics::atomic_thread_fence( memory_model::memory_order_release ); } }; @@ -300,9 +313,9 @@ namespace cds { namespace intrusive { bool populated( segment const& s ) const { // The lock should be held - atomics::atomic< cell > const * pLastCell = s.cells + quasi_factor(); - for ( atomics::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) { - if ( !pCell->load( memory_model::memory_order_relaxed ).all() ) + cell const * pLastCell = s.cells + quasi_factor(); + for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) { + if ( !pCell->data.load( memory_model::memory_order_relaxed ).all() ) return false; } return true; @@ -310,9 +323,9 @@ namespace cds { namespace intrusive { bool exhausted( segment const& s ) const { // The lock should be held - atomics::atomic< cell > const * pLastCell = s.cells + quasi_factor(); - for ( atomics::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) { - if ( !pCell->load( memory_model::memory_order_relaxed ).bits() ) + cell const * pLastCell = s.cells + quasi_factor(); + for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) { + if ( !pCell->data.load( memory_model::memory_order_relaxed ).bits() ) return false; } return true; @@ -466,14 +479,14 @@ namespace cds { namespace intrusive { do { typename permutation_generator::integer_type i = gen; CDS_DEBUG_ONLY( ++nLoopCount ); - if ( pTailSegment->cells[i].load(memory_model::memory_order_relaxed).all() ) { + if ( pTailSegment->cells[i].data.load(memory_model::memory_order_relaxed).all() ) { // Cell is not empty, go next m_Stat.onPushPopulated(); } else { // Empty cell found, try to enqueue here - cell nullCell; - if ( pTailSegment->cells[i].compare_exchange_strong( nullCell, cell( &val ), + regular_cell 
nullCell;
+ if ( pTailSegment->cells[i].data.compare_exchange_strong( nullCell, regular_cell( &val ),
 memory_model::memory_order_release, atomics::memory_order_relaxed ))
 {
 // Ok to push item
@@ -620,7 +633,7 @@ namespace cds { namespace intrusive {
 }
 bool bHadNullValue = false;
- cell item;
+ regular_cell item;
 CDS_DEBUG_ONLY( size_t nLoopCount = 0 );
 do {
 typename permutation_generator::integer_type i = gen;
@@ -629,7 +642,7 @@ namespace cds { namespace intrusive {
 // Guard the item
 // In segmented queue the cell cannot be reused
 // So no loop is needed here to protect the cell
- item = pHeadSegment->cells[i].load( memory_model::memory_order_relaxed );
+ item = pHeadSegment->cells[i].data.load( memory_model::memory_order_relaxed );
 itemGuard.assign( item.ptr() );
 // Check if this cell is empty, which means an element
@@ -640,7 +653,7 @@ namespace cds { namespace intrusive {
 // If the item is not deleted yet
 if ( !item.bits() ) {
 // Try to mark the cell as deleted
- if ( pHeadSegment->cells[i].compare_exchange_strong( item, item | 1,
+ if ( pHeadSegment->cells[i].data.compare_exchange_strong( item, item | 1,
 memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
 {
 --m_ItemCounter;
diff --git a/cds/opt/options.h b/cds/opt/options.h
index bfe2b008..458a9102 100644
--- a/cds/opt/options.h
+++ b/cds/opt/options.h
@@ -398,6 +398,123 @@ namespace opt {
 } // namespace details
 //@endcond
+ /// Special padding constants for \p cds::opt::padding option
+ enum special_padding {
+ no_special_padding = 0, ///< no special padding
+ cache_line_padding = 1, ///< use cache line size defined in cds/user_setup/cache_line.h
+
+ /// Apply padding only for tiny data whose size is less than the required padding
+ /**
+ The flag means that if your data size is less than the cache line size, the padding is applied.
+ Otherwise no padding will be applied.
+
+ The flag is combined (OR-ed) with the padding value:
+ \code
+ cds::opt::padding< cds::opt::cache_line_padding | cds::opt::padding_tiny_data_only >;
+ cds::opt::padding< 256 | cds::opt::padding_tiny_data_only >;
+ \endcode
+ */
+ padding_tiny_data_only = 0x80000000,
+
+ //@cond
+ padding_flags = padding_tiny_data_only
+ //@endcond
+ };
+
+ /// [value-option] Padding option setter
+ /**
+ The padding for the internal data of some containers. It may be useful for solving the false sharing problem.
+ \p Value defines the desired padding; it may be a power-of-two integer or one of the predefined values from
+ the \p special_padding enum.
+ */
+ template <int Value>
+ struct padding {
+ //@cond
+ template <typename Base> struct pack: public Base
+ {
+ enum { padding = Value };
+ };
+ //@endcond
+ };
+
+ //@cond
+ namespace details {
+ enum padding_vs_datasize {
+ padding_datasize_less,
+ padding_datasize_equal,
+ padding_datasize_greater
+ };
+
+ template < typename T, unsigned int Padding, bool NoPadding, padding_vs_datasize Relation, bool TinyOnly >
+ struct apply_padding_helper;
+
+ template <typename T, padding_vs_datasize Relation, bool TinyOnly>
+ struct apply_padding_helper < T, 0, true, Relation, TinyOnly >
+ {
+ struct type {
+ T data;
+ };
+ };
+
+ template <typename T, unsigned int Padding, bool TinyOnly>
+ struct apply_padding_helper < T, Padding, false, padding_datasize_equal, TinyOnly >
+ {
+ struct type {
+ T data;
+ };
+ };
+
+ template <typename T, unsigned int Padding, bool TinyOnly>
+ struct apply_padding_helper < T, Padding, false, padding_datasize_less, TinyOnly >
+ {
+ struct type {
+ T data;
+ uint8_t pad_[Padding - sizeof( T )];
+ };
+ };
+
+ template <typename T, unsigned int Padding>
+ struct apply_padding_helper < T, Padding, false, padding_datasize_greater, false >
+ {
+ struct type {
+ T data;
+ uint8_t pad_[Padding - sizeof( T ) % Padding];
+ };
+ };
+
+ template <typename T, unsigned int Padding>
+ struct apply_padding_helper < T, Padding, false, padding_datasize_greater, true >
+ {
+ struct type {
+ T data;
+ };
+ };
+
+ template <typename T, unsigned int Padding>
+ struct apply_padding
+ {
+ private:
+ enum { padding = Padding & ~padding_flags };
+
+ public:
+ static CDS_CONSTEXPR const size_t c_nPadding =
+ padding == cache_line_padding ? cds::c_nCacheLineSize :
+ padding == no_special_padding ? 0 : padding ;
+
+ static_assert( (c_nPadding & (c_nPadding - 1)) == 0, "Padding must be a power-of-two number" );
+
+ typedef typename apply_padding_helper< T,
+ c_nPadding,
+ c_nPadding == 0,
+ sizeof( T ) < c_nPadding ? padding_datasize_less : sizeof( T ) == c_nPadding ? padding_datasize_equal : padding_datasize_greater,
+ (Padding & padding_tiny_data_only) != 0
+ >::type type;
+ };
+
+ } // namespace details
+ //@endcond
+
+
 /// [type-option] Generic option setter for statisitcs
 /**
 This option sets a type to gather statistics.
diff --git a/change.log b/change.log
index c2919570..0fda6337 100644
--- a/change.log
+++ b/change.log
@@ -14,6 +14,7 @@
 cds::container::TsigasCycleQueue
 cds::container::VyukovMPMCCycleQueue
 - Added: new member functions push_with(Func) and pop_with(Func) to cds::container::MSPriorityQueue
+ - SegmentedQueue: add padding option to segmented_queue::traits to eliminate false sharing.
1.6.0 23.09.2014 General release diff --git a/tests/test-hdr/queue/hdr_intrusive_segmented_queue.h b/tests/test-hdr/queue/hdr_intrusive_segmented_queue.h index 5847af6e..b3578ca4 100644 --- a/tests/test-hdr/queue/hdr_intrusive_segmented_queue.h +++ b/tests/test-hdr/queue/hdr_intrusive_segmented_queue.h @@ -9,7 +9,7 @@ namespace queue { - class HdrIntrusiveSegmentedQueue: public CppUnitMini::TestCase + class HdrIntrusiveSegmentedQueue : public CppUnitMini::TestCase { struct item { int nValue; @@ -18,18 +18,30 @@ namespace queue { size_t nDispose2Count; item() - : nValue(0) - , nDisposeCount(0) - , nDispose2Count(0) + : nValue( 0 ) + , nDisposeCount( 0 ) + , nDispose2Count( 0 ) {} item( int nVal ) - : nValue(nVal) - , nDisposeCount(0) - , nDispose2Count(0) + : nValue( nVal ) + , nDisposeCount( 0 ) + , nDispose2Count( 0 ) {} }; + struct big_item : public item + { + big_item() + {} + + big_item( int nVal ) + : item( nVal ) + {} + + int arr[80]; + }; + struct Disposer { void operator()( item * p ) @@ -184,22 +196,70 @@ namespace queue { void SegmQueue_HP_mutex(); void SegmQueue_HP_shuffle(); void SegmQueue_HP_stat(); + void SegmQueue_HP_cacheline_padding(); + void SegmQueue_HP_mutex_cacheline_padding(); + void SegmQueue_HP_shuffle_cacheline_padding(); + void SegmQueue_HP_stat_cacheline_padding(); + void SegmQueue_HP_256_padding(); + void SegmQueue_HP_mutex_256_padding(); + void SegmQueue_HP_shuffle_256_padding(); + void SegmQueue_HP_stat_256_padding(); + void SegmQueue_HP_cacheline_padding_bigdata(); + void SegmQueue_HP_mutex_cacheline_padding_bigdata(); + void SegmQueue_HP_shuffle_cacheline_padding_bigdata(); + void SegmQueue_HP_stat_cacheline_padding_bigdata(); void SegmQueue_DHP(); void SegmQueue_DHP_mutex(); void SegmQueue_DHP_shuffle(); void SegmQueue_DHP_stat(); + void SegmQueue_DHP_cacheline_padding(); + void SegmQueue_DHP_mutex_cacheline_padding(); + void SegmQueue_DHP_shuffle_cacheline_padding(); + void SegmQueue_DHP_stat_cacheline_padding(); + void SegmQueue_DHP_256_padding(); + void SegmQueue_DHP_mutex_256_padding(); + void SegmQueue_DHP_shuffle_256_padding(); + void SegmQueue_DHP_stat_256_padding(); + void SegmQueue_DHP_cacheline_padding_bigdata(); + void SegmQueue_DHP_mutex_cacheline_padding_bigdata(); + void SegmQueue_DHP_shuffle_cacheline_padding_bigdata(); + void SegmQueue_DHP_stat_cacheline_padding_bigdata(); CPPUNIT_TEST_SUITE(HdrIntrusiveSegmentedQueue) CPPUNIT_TEST( SegmQueue_HP ) CPPUNIT_TEST( SegmQueue_HP_mutex ) CPPUNIT_TEST( SegmQueue_HP_shuffle ) CPPUNIT_TEST( SegmQueue_HP_stat ) + CPPUNIT_TEST( SegmQueue_HP_cacheline_padding ) + CPPUNIT_TEST( SegmQueue_HP_mutex_cacheline_padding ) + CPPUNIT_TEST( SegmQueue_HP_shuffle_cacheline_padding ) + CPPUNIT_TEST( SegmQueue_HP_stat_cacheline_padding ) + CPPUNIT_TEST( SegmQueue_HP_256_padding ) + CPPUNIT_TEST( SegmQueue_HP_mutex_256_padding ) + CPPUNIT_TEST( SegmQueue_HP_shuffle_256_padding ) + CPPUNIT_TEST( SegmQueue_HP_stat_256_padding ) + CPPUNIT_TEST( SegmQueue_HP_cacheline_padding_bigdata ) + CPPUNIT_TEST( SegmQueue_HP_mutex_cacheline_padding_bigdata ) + CPPUNIT_TEST( SegmQueue_HP_shuffle_cacheline_padding_bigdata ) + CPPUNIT_TEST( SegmQueue_HP_stat_cacheline_padding_bigdata ) CPPUNIT_TEST( SegmQueue_DHP ) CPPUNIT_TEST( SegmQueue_DHP_mutex ) CPPUNIT_TEST( SegmQueue_DHP_shuffle ) CPPUNIT_TEST( SegmQueue_DHP_stat ) + CPPUNIT_TEST( SegmQueue_DHP_cacheline_padding ) + CPPUNIT_TEST( SegmQueue_DHP_mutex_cacheline_padding ) + CPPUNIT_TEST( SegmQueue_DHP_shuffle_cacheline_padding ) + CPPUNIT_TEST( SegmQueue_DHP_stat_cacheline_padding 
) + CPPUNIT_TEST( SegmQueue_DHP_256_padding ) + CPPUNIT_TEST( SegmQueue_DHP_mutex_256_padding ) + CPPUNIT_TEST( SegmQueue_DHP_shuffle_256_padding ) + CPPUNIT_TEST( SegmQueue_DHP_stat_256_padding ) + CPPUNIT_TEST( SegmQueue_DHP_cacheline_padding_bigdata ) + CPPUNIT_TEST( SegmQueue_DHP_mutex_cacheline_padding_bigdata ) + CPPUNIT_TEST( SegmQueue_DHP_shuffle_cacheline_padding_bigdata ) + CPPUNIT_TEST( SegmQueue_DHP_stat_cacheline_padding_bigdata ) CPPUNIT_TEST_SUITE_END() }; diff --git a/tests/test-hdr/queue/hdr_intrusive_segmented_queue_dhp.cpp b/tests/test-hdr/queue/hdr_intrusive_segmented_queue_dhp.cpp index 23f4cfef..3dfb1115 100644 --- a/tests/test-hdr/queue/hdr_intrusive_segmented_queue_dhp.cpp +++ b/tests/test-hdr/queue/hdr_intrusive_segmented_queue_dhp.cpp @@ -57,4 +57,169 @@ namespace queue { test(); } + void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_cacheline_padding() + { + struct queue_traits : public cds::intrusive::segmented_queue::traits + { + typedef Disposer disposer; + enum { padding = cds::opt::cache_line_padding }; + }; + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item, queue_traits > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_mutex_cacheline_padding() + { + struct queue_traits : public + cds::intrusive::segmented_queue::make_traits < + cds::intrusive::opt::disposer< Disposer > + , cds::opt::padding< cds::opt::cache_line_padding > + ,cds::opt::lock_type < std::mutex > + > ::type + {}; + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item, queue_traits > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_shuffle_cacheline_padding() + { + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item, + cds::intrusive::segmented_queue::make_traits< + cds::intrusive::opt::disposer< Disposer > + ,cds::opt::item_counter< cds::atomicity::item_counter > + ,cds::opt::permutation_generator< cds::opt::v::random_shuffle_permutation<> > + , cds::opt::padding< cds::opt::cache_line_padding > + >::type + > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_stat_cacheline_padding() + { + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item, + cds::intrusive::segmented_queue::make_traits< + cds::intrusive::opt::disposer< Disposer > + ,cds::opt::item_counter< cds::atomicity::item_counter > + ,cds::opt::permutation_generator< cds::opt::v::random_permutation<> > + ,cds::opt::stat< cds::intrusive::segmented_queue::stat<> > + , cds::opt::padding< cds::opt::cache_line_padding > + >::type + > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_256_padding() + { + struct queue_traits : public cds::intrusive::segmented_queue::traits + { + typedef Disposer disposer; + enum { padding = 256 }; + }; + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item, queue_traits > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_mutex_256_padding() + { + struct queue_traits : public + cds::intrusive::segmented_queue::make_traits < + cds::intrusive::opt::disposer< Disposer > + , cds::opt::padding< 256 > + ,cds::opt::lock_type < std::mutex > + > ::type + {}; + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item, queue_traits > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_shuffle_256_padding() + { + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item, + cds::intrusive::segmented_queue::make_traits< + cds::intrusive::opt::disposer< Disposer > + ,cds::opt::item_counter< cds::atomicity::item_counter > 
+ ,cds::opt::permutation_generator< cds::opt::v::random_shuffle_permutation<> > + , cds::opt::padding< 256 > + >::type + > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_stat_256_padding() + { + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, item, + cds::intrusive::segmented_queue::make_traits< + cds::intrusive::opt::disposer< Disposer > + ,cds::opt::item_counter< cds::atomicity::item_counter > + ,cds::opt::permutation_generator< cds::opt::v::random_permutation<> > + ,cds::opt::stat< cds::intrusive::segmented_queue::stat<> > + , cds::opt::padding< 256 > + >::type + > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_cacheline_padding_bigdata() + { + struct queue_traits : public cds::intrusive::segmented_queue::traits + { + typedef Disposer disposer; + enum { padding = cds::opt::cache_line_padding }; + }; + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, big_item, queue_traits > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_mutex_cacheline_padding_bigdata() + { + struct queue_traits : public + cds::intrusive::segmented_queue::make_traits < + cds::intrusive::opt::disposer< Disposer > + , cds::opt::padding< cds::opt::cache_line_padding > + ,cds::opt::lock_type < std::mutex > + > ::type + {}; + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, big_item, queue_traits > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_shuffle_cacheline_padding_bigdata() + { + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, big_item, + cds::intrusive::segmented_queue::make_traits< + cds::intrusive::opt::disposer< Disposer > + ,cds::opt::item_counter< cds::atomicity::item_counter > + ,cds::opt::permutation_generator< cds::opt::v::random_shuffle_permutation<> > + , cds::opt::padding< cds::opt::cache_line_padding > + >::type + > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_DHP_stat_cacheline_padding_bigdata() + { + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, big_item, + cds::intrusive::segmented_queue::make_traits< + cds::intrusive::opt::disposer< Disposer > + ,cds::opt::item_counter< cds::atomicity::item_counter > + ,cds::opt::permutation_generator< cds::opt::v::random_permutation<> > + ,cds::opt::stat< cds::intrusive::segmented_queue::stat<> > + , cds::opt::padding< cds::opt::cache_line_padding > + >::type + > queue_type; + + test(); + } + } // namespace queue diff --git a/tests/test-hdr/queue/hdr_intrusive_segmented_queue_hp.cpp b/tests/test-hdr/queue/hdr_intrusive_segmented_queue_hp.cpp index 4eea7bd0..4a0fead2 100644 --- a/tests/test-hdr/queue/hdr_intrusive_segmented_queue_hp.cpp +++ b/tests/test-hdr/queue/hdr_intrusive_segmented_queue_hp.cpp @@ -57,4 +57,169 @@ namespace queue { test(); } + void HdrIntrusiveSegmentedQueue::SegmQueue_HP_cacheline_padding() + { + struct queue_traits : public cds::intrusive::segmented_queue::traits + { + typedef Disposer disposer; + enum { padding = cds::opt::cache_line_padding }; + }; + typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item, queue_traits > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_HP_mutex_cacheline_padding() + { + struct queue_traits : public + cds::intrusive::segmented_queue::make_traits < + cds::intrusive::opt::disposer< Disposer > + ,cds::opt::lock_type < std::mutex > + ,cds::opt::padding< cds::opt::cache_line_padding > + > ::type + {}; + typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item, queue_traits > queue_type; + + test(); + } 
+ + void HdrIntrusiveSegmentedQueue::SegmQueue_HP_shuffle_cacheline_padding() + { + typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item, + cds::intrusive::segmented_queue::make_traits< + cds::intrusive::opt::disposer< Disposer > + ,cds::opt::item_counter< cds::atomicity::item_counter > + , cds::opt::padding< cds::opt::cache_line_padding > + ,cds::opt::permutation_generator< cds::opt::v::random_shuffle_permutation<> > + >::type + > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_HP_stat_cacheline_padding() + { + typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item, + cds::intrusive::segmented_queue::make_traits< + cds::intrusive::opt::disposer< Disposer > + , cds::opt::padding< cds::opt::cache_line_padding > + ,cds::opt::item_counter< cds::atomicity::item_counter > + ,cds::opt::permutation_generator< cds::opt::v::random_permutation<> > + ,cds::opt::stat< cds::intrusive::segmented_queue::stat<> > + >::type + > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_HP_256_padding() + { + struct queue_traits : public cds::intrusive::segmented_queue::traits + { + typedef Disposer disposer; + enum { padding = 256 }; + }; + typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item, queue_traits > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_HP_mutex_256_padding() + { + struct queue_traits : public + cds::intrusive::segmented_queue::make_traits < + cds::intrusive::opt::disposer< Disposer > + ,cds::opt::lock_type < std::mutex > + ,cds::opt::padding< 256 > + > ::type + {}; + typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item, queue_traits > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_HP_shuffle_256_padding() + { + typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item, + cds::intrusive::segmented_queue::make_traits< + cds::intrusive::opt::disposer< Disposer > + ,cds::opt::item_counter< cds::atomicity::item_counter > + , cds::opt::padding< 256 > + ,cds::opt::permutation_generator< cds::opt::v::random_shuffle_permutation<> > + >::type + > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_HP_stat_256_padding() + { + typedef cds::intrusive::SegmentedQueue< cds::gc::HP, item, + cds::intrusive::segmented_queue::make_traits< + cds::intrusive::opt::disposer< Disposer > + , cds::opt::padding< 256 > + ,cds::opt::item_counter< cds::atomicity::item_counter > + ,cds::opt::permutation_generator< cds::opt::v::random_permutation<> > + ,cds::opt::stat< cds::intrusive::segmented_queue::stat<> > + >::type + > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_HP_cacheline_padding_bigdata() + { + struct queue_traits : public cds::intrusive::segmented_queue::traits + { + typedef Disposer disposer; + enum { padding = cds::opt::cache_line_padding | cds::opt::padding_tiny_data_only }; + }; + typedef cds::intrusive::SegmentedQueue< cds::gc::HP, big_item, queue_traits > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_HP_mutex_cacheline_padding_bigdata() + { + struct queue_traits : public + cds::intrusive::segmented_queue::make_traits < + cds::intrusive::opt::disposer< Disposer > + ,cds::opt::lock_type < std::mutex > + , cds::opt::padding< cds::opt::cache_line_padding | cds::opt::padding_tiny_data_only > + > ::type + {}; + typedef cds::intrusive::SegmentedQueue< cds::gc::HP, big_item, queue_traits > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_HP_shuffle_cacheline_padding_bigdata() + { + typedef 
cds::intrusive::SegmentedQueue< cds::gc::HP, big_item, + cds::intrusive::segmented_queue::make_traits< + cds::intrusive::opt::disposer< Disposer > + ,cds::opt::item_counter< cds::atomicity::item_counter > + , cds::opt::padding< cds::opt::cache_line_padding | cds::opt::padding_tiny_data_only > + ,cds::opt::permutation_generator< cds::opt::v::random_shuffle_permutation<> > + >::type + > queue_type; + + test(); + } + + void HdrIntrusiveSegmentedQueue::SegmQueue_HP_stat_cacheline_padding_bigdata() + { + typedef cds::intrusive::SegmentedQueue< cds::gc::HP, big_item, + cds::intrusive::segmented_queue::make_traits< + cds::intrusive::opt::disposer< Disposer > + , cds::opt::padding< cds::opt::cache_line_padding | cds::opt::padding_tiny_data_only > + ,cds::opt::item_counter< cds::atomicity::item_counter > + ,cds::opt::permutation_generator< cds::opt::v::random_permutation<> > + ,cds::opt::stat< cds::intrusive::segmented_queue::stat<> > + >::type + > queue_type; + + test(); + } + } // namespace queue diff --git a/tests/test-hdr/queue/hdr_segmented_queue.h b/tests/test-hdr/queue/hdr_segmented_queue.h index 712d8a81..cc53cbb5 100644 --- a/tests/test-hdr/queue/hdr_segmented_queue.h +++ b/tests/test-hdr/queue/hdr_segmented_queue.h @@ -194,23 +194,39 @@ namespace queue { void SegmQueue_HP_mutex(); void SegmQueue_HP_shuffle(); void SegmQueue_HP_stat(); + void SegmQueue_HP_cacheline_padding(); + void SegmQueue_HP_mutex_cacheline_padding(); + void SegmQueue_HP_shuffle_cacheline_padding(); + void SegmQueue_HP_stat_cacheline_padding(); void SegmQueue_DHP(); void SegmQueue_DHP_mutex(); void SegmQueue_DHP_shuffle(); void SegmQueue_DHP_stat(); + void SegmQueue_DHP_cacheline_padding(); + void SegmQueue_DHP_mutex_cacheline_padding(); + void SegmQueue_DHP_shuffle_cacheline_padding(); + void SegmQueue_DHP_stat_cacheline_padding(); CPPUNIT_TEST_SUITE(HdrSegmentedQueue) CPPUNIT_TEST( SegmQueue_HP ) CPPUNIT_TEST( SegmQueue_HP_mutex ) CPPUNIT_TEST( SegmQueue_HP_shuffle ) CPPUNIT_TEST( SegmQueue_HP_stat ) + CPPUNIT_TEST( SegmQueue_HP_cacheline_padding ) + CPPUNIT_TEST( SegmQueue_HP_mutex_cacheline_padding ) + CPPUNIT_TEST( SegmQueue_HP_shuffle_cacheline_padding ) + CPPUNIT_TEST( SegmQueue_HP_stat_cacheline_padding ) CPPUNIT_TEST( SegmQueue_DHP ) CPPUNIT_TEST( SegmQueue_DHP_mutex ) CPPUNIT_TEST( SegmQueue_DHP_shuffle ) CPPUNIT_TEST( SegmQueue_DHP_stat ) - CPPUNIT_TEST_SUITE_END() + CPPUNIT_TEST( SegmQueue_DHP_cacheline_padding ) + CPPUNIT_TEST( SegmQueue_DHP_mutex_cacheline_padding ) + CPPUNIT_TEST( SegmQueue_DHP_shuffle_cacheline_padding ) + CPPUNIT_TEST( SegmQueue_DHP_stat_cacheline_padding ) + CPPUNIT_TEST_SUITE_END() }; } // namespace queue diff --git a/tests/test-hdr/queue/hdr_segmented_queue_dhp.cpp b/tests/test-hdr/queue/hdr_segmented_queue_dhp.cpp index cbe87dd9..c55f3bfa 100644 --- a/tests/test-hdr/queue/hdr_segmented_queue_dhp.cpp +++ b/tests/test-hdr/queue/hdr_segmented_queue_dhp.cpp @@ -49,4 +49,55 @@ namespace queue { test(); } + void HdrSegmentedQueue::SegmQueue_DHP_cacheline_padding() + { + struct queue_traits : public cds::container::segmented_queue::traits + { + enum { padding = cds::opt::cache_line_padding }; + }; + + typedef cds::container::SegmentedQueue< cds::gc::DHP, item, queue_traits > queue_type; + test(); + } + + void HdrSegmentedQueue::SegmQueue_DHP_mutex_cacheline_padding() + { + typedef cds::container::SegmentedQueue< cds::gc::DHP, item, + cds::container::segmented_queue::make_traits< + cds::opt::lock_type< std::mutex > + , cds::opt::padding< cds::opt::cache_line_padding > + >::type + > 
queue_type; + + test(); + } + + void HdrSegmentedQueue::SegmQueue_DHP_shuffle_cacheline_padding() + { + struct queue_traits : public cds::container::segmented_queue::traits + { + typedef cds::atomicity::item_counter item_counter; + typedef cds::opt::v::random_shuffle_permutation<> permutation_generator; + enum { padding = cds::opt::cache_line_padding }; + }; + typedef cds::container::SegmentedQueue< cds::gc::DHP, item, queue_traits > queue_type; + + test(); + } + + void HdrSegmentedQueue::SegmQueue_DHP_stat_cacheline_padding() + { + struct queue_traits : public + cds::container::segmented_queue::make_traits < + cds::opt::item_counter< cds::atomicity::item_counter > + , cds::opt::permutation_generator< cds::opt::v::random_permutation<> > + , cds::opt::stat < cds::container::segmented_queue::stat<> > + , cds::opt::padding< cds::opt::cache_line_padding > + > ::type + {}; + typedef cds::container::SegmentedQueue< cds::gc::DHP, item, queue_traits > queue_type; + + test(); + } + } // namespace queue diff --git a/tests/test-hdr/queue/hdr_segmented_queue_hp.cpp b/tests/test-hdr/queue/hdr_segmented_queue_hp.cpp index 7e2a72bc..68cddc81 100644 --- a/tests/test-hdr/queue/hdr_segmented_queue_hp.cpp +++ b/tests/test-hdr/queue/hdr_segmented_queue_hp.cpp @@ -49,4 +49,55 @@ namespace queue { test(); } + void HdrSegmentedQueue::SegmQueue_HP_cacheline_padding() + { + struct queue_traits : public cds::container::segmented_queue::traits + { + enum { padding = cds::opt::cache_line_padding }; + }; + + typedef cds::container::SegmentedQueue< cds::gc::HP, item, queue_traits > queue_type; + test(); + } + + void HdrSegmentedQueue::SegmQueue_HP_mutex_cacheline_padding() + { + typedef cds::container::SegmentedQueue< cds::gc::HP, item, + cds::container::segmented_queue::make_traits< + cds::opt::lock_type< std::mutex > + , cds::opt::padding< cds::opt::cache_line_padding > + >::type + > queue_type; + + test(); + } + + void HdrSegmentedQueue::SegmQueue_HP_shuffle_cacheline_padding() + { + struct queue_traits : public cds::container::segmented_queue::traits + { + typedef cds::atomicity::item_counter item_counter; + typedef cds::opt::v::random_shuffle_permutation<> permutation_generator; + enum { padding = cds::opt::cache_line_padding }; + }; + typedef cds::container::SegmentedQueue< cds::gc::HP, item, queue_traits > queue_type; + + test(); + } + + void HdrSegmentedQueue::SegmQueue_HP_stat_cacheline_padding() + { + struct queue_traits : public + cds::container::segmented_queue::make_traits < + cds::opt::item_counter< cds::atomicity::item_counter > + , cds::opt::permutation_generator< cds::opt::v::random_permutation<> > + , cds::opt::stat < cds::container::segmented_queue::stat<> > + , cds::opt::padding< cds::opt::cache_line_padding > + > ::type + {}; + typedef cds::container::SegmentedQueue< cds::gc::HP, item, queue_traits > queue_type; + + test(); + } + } // namespace queue diff --git a/tests/unit/queue/intrusive_queue_defs.h b/tests/unit/queue/intrusive_queue_defs.h index 43444fee..4bcffb99 100644 --- a/tests/unit/queue/intrusive_queue_defs.h +++ b/tests/unit/queue/intrusive_queue_defs.h @@ -127,23 +127,31 @@ // SegmentedQueue #define CDSUNIT_DECLARE_SegmentedQueue \ TEST_SEGMENTED( SegmentedQueue_HP_spin ) \ + TEST_SEGMENTED( SegmentedQueue_HP_spin_padding ) \ TEST_SEGMENTED( SegmentedQueue_HP_spin_stat ) \ TEST_SEGMENTED( SegmentedQueue_HP_mutex ) \ + TEST_SEGMENTED( SegmentedQueue_HP_mutex_padding ) \ TEST_SEGMENTED( SegmentedQueue_HP_mutex_stat ) \ - TEST_SEGMENTED( SegmentedQueue_PTB_spin ) \ - TEST_SEGMENTED( 
SegmentedQueue_PTB_spin_stat ) \ - TEST_SEGMENTED( SegmentedQueue_PTB_mutex ) \ - TEST_SEGMENTED( SegmentedQueue_PTB_mutex_stat ) + TEST_SEGMENTED( SegmentedQueue_DHP_spin ) \ + TEST_SEGMENTED( SegmentedQueue_DHP_spin_padding ) \ + TEST_SEGMENTED( SegmentedQueue_DHP_spin_stat ) \ + TEST_SEGMENTED( SegmentedQueue_DHP_mutex ) \ + TEST_SEGMENTED( SegmentedQueue_DHP_mutex_padding ) \ + TEST_SEGMENTED( SegmentedQueue_DHP_mutex_stat ) #define CDSUNIT_TEST_SegmentedQueue \ CPPUNIT_TEST( SegmentedQueue_HP_spin ) \ + CPPUNIT_TEST( SegmentedQueue_HP_spin_padding ) \ CPPUNIT_TEST( SegmentedQueue_HP_spin_stat ) \ CPPUNIT_TEST( SegmentedQueue_HP_mutex ) \ + CPPUNIT_TEST( SegmentedQueue_HP_mutex_padding ) \ CPPUNIT_TEST( SegmentedQueue_HP_mutex_stat ) \ - CPPUNIT_TEST( SegmentedQueue_PTB_spin ) \ - CPPUNIT_TEST( SegmentedQueue_PTB_spin_stat ) \ - CPPUNIT_TEST( SegmentedQueue_PTB_mutex ) \ - CPPUNIT_TEST( SegmentedQueue_PTB_mutex_stat ) + CPPUNIT_TEST( SegmentedQueue_DHP_spin ) \ + CPPUNIT_TEST( SegmentedQueue_DHP_spin_padding ) \ + CPPUNIT_TEST( SegmentedQueue_DHP_spin_stat ) \ + CPPUNIT_TEST( SegmentedQueue_DHP_mutex ) \ + CPPUNIT_TEST( SegmentedQueue_DHP_mutex_padding ) \ + CPPUNIT_TEST( SegmentedQueue_DHP_mutex_stat ) // BoostSList diff --git a/tests/unit/queue/intrusive_queue_type.h b/tests/unit/queue/intrusive_queue_type.h index a87226c4..8e588253 100644 --- a/tests/unit/queue/intrusive_queue_type.h +++ b/tests/unit/queue/intrusive_queue_type.h @@ -425,7 +425,12 @@ namespace queue { cds::opt::stat< cds::intrusive::segmented_queue::stat<> > >::type {}; - class traits_SegmentedQueue_mutex_stat: + class traits_SegmentedQueue_spin_padding : + public cds::intrusive::segmented_queue::make_traits< + cds::opt::padding< cds::opt::cache_line_padding > + >::type + {}; + class traits_SegmentedQueue_mutex_stat : public cds::intrusive::segmented_queue::make_traits< cds::opt::stat< cds::intrusive::segmented_queue::stat<> > ,cds::opt::lock_type< std::mutex > @@ -436,16 +441,26 @@ namespace queue { cds::opt::lock_type< std::mutex > >::type {}; + class traits_SegmentedQueue_mutex_padding: + public cds::intrusive::segmented_queue::make_traits< + cds::opt::lock_type< std::mutex > + ,cds::opt::padding< cds::opt::cache_line_padding > + >::type + {}; typedef cds::intrusive::SegmentedQueue< cds::gc::HP, T > SegmentedQueue_HP_spin; + typedef cds::intrusive::SegmentedQueue< cds::gc::HP, T, traits_SegmentedQueue_spin_padding > SegmentedQueue_HP_spin_padding; typedef cds::intrusive::SegmentedQueue< cds::gc::HP, T, traits_SegmentedQueue_spin_stat > SegmentedQueue_HP_spin_stat; typedef cds::intrusive::SegmentedQueue< cds::gc::HP, T, traits_SegmentedQueue_mutex > SegmentedQueue_HP_mutex; + typedef cds::intrusive::SegmentedQueue< cds::gc::HP, T, traits_SegmentedQueue_mutex_padding > SegmentedQueue_HP_mutex_padding; typedef cds::intrusive::SegmentedQueue< cds::gc::HP, T, traits_SegmentedQueue_mutex_stat > SegmentedQueue_HP_mutex_stat; - typedef cds::intrusive::SegmentedQueue< cds::gc::PTB, T > SegmentedQueue_PTB_spin; - typedef cds::intrusive::SegmentedQueue< cds::gc::PTB, T, traits_SegmentedQueue_spin_stat > SegmentedQueue_PTB_spin_stat; - typedef cds::intrusive::SegmentedQueue< cds::gc::PTB, T, traits_SegmentedQueue_mutex > SegmentedQueue_PTB_mutex; - typedef cds::intrusive::SegmentedQueue< cds::gc::PTB, T, traits_SegmentedQueue_mutex_stat > SegmentedQueue_PTB_mutex_stat; + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, T > SegmentedQueue_DHP_spin; + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, T, 
traits_SegmentedQueue_spin_padding > SegmentedQueue_DHP_spin_padding; + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, T, traits_SegmentedQueue_spin_stat > SegmentedQueue_DHP_spin_stat; + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, T, traits_SegmentedQueue_mutex > SegmentedQueue_DHP_mutex; + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, T, traits_SegmentedQueue_mutex_padding > SegmentedQueue_DHP_mutex_padding; + typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, T, traits_SegmentedQueue_mutex_stat > SegmentedQueue_DHP_mutex_stat; // Boost SList typedef details::BoostSList< T, std::mutex > BoostSList_mutex; diff --git a/tests/unit/queue/queue_defs.h b/tests/unit/queue/queue_defs.h index 0ea6863d..fa3b951a 100644 --- a/tests/unit/queue/queue_defs.h +++ b/tests/unit/queue/queue_defs.h @@ -203,25 +203,32 @@ // SegmentedQueue #define CDSUNIT_DECLARE_SegmentedQueue( ITEM_TYPE ) \ TEST_SEGMENTED( SegmentedQueue_HP_spin, ITEM_TYPE ) \ + TEST_SEGMENTED( SegmentedQueue_HP_spin_padding, ITEM_TYPE ) \ TEST_SEGMENTED( SegmentedQueue_HP_spin_stat, ITEM_TYPE ) \ TEST_SEGMENTED( SegmentedQueue_HP_mutex, ITEM_TYPE ) \ + TEST_SEGMENTED( SegmentedQueue_HP_mutex_padding, ITEM_TYPE ) \ TEST_SEGMENTED( SegmentedQueue_HP_mutex_stat, ITEM_TYPE ) \ TEST_SEGMENTED( SegmentedQueue_DHP_spin, ITEM_TYPE ) \ + TEST_SEGMENTED( SegmentedQueue_DHP_spin_padding, ITEM_TYPE ) \ TEST_SEGMENTED( SegmentedQueue_DHP_spin_stat, ITEM_TYPE ) \ TEST_SEGMENTED( SegmentedQueue_DHP_mutex, ITEM_TYPE ) \ + TEST_SEGMENTED( SegmentedQueue_DHP_mutex_padding, ITEM_TYPE ) \ TEST_SEGMENTED( SegmentedQueue_DHP_mutex_stat, ITEM_TYPE ) #define CDSUNIT_TEST_SegmentedQueue \ CPPUNIT_TEST( SegmentedQueue_HP_spin ) \ + CPPUNIT_TEST( SegmentedQueue_HP_spin_padding ) \ CPPUNIT_TEST( SegmentedQueue_HP_spin_stat ) \ CPPUNIT_TEST( SegmentedQueue_HP_mutex ) \ + CPPUNIT_TEST( SegmentedQueue_HP_mutex_padding ) \ CPPUNIT_TEST( SegmentedQueue_HP_mutex_stat ) \ CPPUNIT_TEST( SegmentedQueue_DHP_spin ) \ + CPPUNIT_TEST( SegmentedQueue_DHP_spin_padding ) \ CPPUNIT_TEST( SegmentedQueue_DHP_spin_stat ) \ CPPUNIT_TEST( SegmentedQueue_DHP_mutex ) \ + CPPUNIT_TEST( SegmentedQueue_DHP_mutex_padding ) \ CPPUNIT_TEST( SegmentedQueue_DHP_mutex_stat ) - // std::queue #define CDSUNIT_DECLARE_StdQueue( ITEM_TYPE ) \ TEST_CASE( StdQueue_deque_Spinlock, ITEM_TYPE ) \ diff --git a/tests/unit/queue/queue_type.h b/tests/unit/queue/queue_type.h index 947c5482..dc2826cc 100644 --- a/tests/unit/queue/queue_type.h +++ b/tests/unit/queue/queue_type.h @@ -468,6 +468,11 @@ namespace queue { cds::opt::stat< cds::intrusive::segmented_queue::stat<> > >::type {}; + class traits_SegmentedQueue_spin_padding: + public cds::container::segmented_queue::make_traits< + cds::opt::padding< cds::opt::cache_line_padding > + >::type + {}; class traits_SegmentedQueue_mutex_stat: public cds::container::segmented_queue::make_traits< cds::opt::stat< cds::intrusive::segmented_queue::stat<> > @@ -479,18 +484,26 @@ namespace queue { cds::opt::lock_type< std::mutex > >::type {}; + class traits_SegmentedQueue_mutex_padding: + public cds::container::segmented_queue::make_traits< + cds::opt::lock_type< std::mutex > + , cds::opt::padding< cds::opt::cache_line_padding > + >::type + {}; typedef cds::container::SegmentedQueue< cds::gc::HP, Value > SegmentedQueue_HP_spin; + typedef cds::container::SegmentedQueue< cds::gc::HP, Value, traits_SegmentedQueue_spin_padding > SegmentedQueue_HP_spin_padding; typedef cds::container::SegmentedQueue< cds::gc::HP, Value, traits_SegmentedQueue_spin_stat > 
SegmentedQueue_HP_spin_stat; typedef cds::container::SegmentedQueue< cds::gc::HP, Value, traits_SegmentedQueue_mutex > SegmentedQueue_HP_mutex; + typedef cds::container::SegmentedQueue< cds::gc::HP, Value, traits_SegmentedQueue_mutex_padding > SegmentedQueue_HP_mutex_padding; typedef cds::container::SegmentedQueue< cds::gc::HP, Value, traits_SegmentedQueue_mutex_stat > SegmentedQueue_HP_mutex_stat; typedef cds::container::SegmentedQueue< cds::gc::DHP, Value > SegmentedQueue_DHP_spin; + typedef cds::container::SegmentedQueue< cds::gc::DHP, Value, traits_SegmentedQueue_spin_padding > SegmentedQueue_DHP_spin_padding; typedef cds::container::SegmentedQueue< cds::gc::DHP, Value, traits_SegmentedQueue_spin_stat > SegmentedQueue_DHP_spin_stat; typedef cds::container::SegmentedQueue< cds::gc::DHP, Value, traits_SegmentedQueue_mutex > SegmentedQueue_DHP_mutex; + typedef cds::container::SegmentedQueue< cds::gc::DHP, Value, traits_SegmentedQueue_mutex_padding > SegmentedQueue_DHP_mutex_padding; typedef cds::container::SegmentedQueue< cds::gc::DHP, Value, traits_SegmentedQueue_mutex_stat > SegmentedQueue_DHP_mutex_stat; - - }; } -- 2.34.1
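
For reference, a minimal usage sketch of the new option from application code, modelled on the header tests added above. It assumes libcds built with this patch; the value type Foo, the trait names padded_traits and tiny_padded_traits, and the quasi-factor of 16 are illustrative placeholders, and the Hazard Pointer setup follows the usual libcds initialization pattern rather than anything introduced here.

#include <cds/init.h>
#include <cds/gc/hp.h>
#include <cds/container/segmented_queue.h>

struct Foo {
    int value;
};

// Variant 1: derive from the default traits and override the padding enum,
// as the header tests above do.
struct padded_traits: public cds::container::segmented_queue::traits
{
    enum { padding = cds::opt::cache_line_padding };
};

// Variant 2: build the traits with make_traits. The padding_tiny_data_only
// flag applies the padding only when a segment cell is smaller than the
// requested padding value.
typedef cds::container::segmented_queue::make_traits<
    cds::opt::padding< cds::opt::cache_line_padding | cds::opt::padding_tiny_data_only >
>::type tiny_padded_traits;

typedef cds::container::SegmentedQueue< cds::gc::HP, Foo, padded_traits > padded_queue;

int main()
{
    cds::Initialize();
    {
        cds::gc::HP hpGC;                           // Hazard Pointer SMR
        cds::threading::Manager::attachThread();    // attach the current thread to libcds

        padded_queue q( 16 );                       // quasi-factor: 16 cells per segment
        q.enqueue( Foo{ 42 } );

        Foo f;
        q.dequeue( f );

        cds::threading::Manager::detachThread();
    }
    cds::Terminate();
    return 0;
}

The trade-off is memory: with 64-byte cache lines, a segment with quasi-factor 16 grows from 16 pointer-sized cells (128 bytes on a 64-bit platform) to 16 padded cells (1 KiB). The padding_tiny_data_only flag covers the opposite case: cells that are already at least as large as the requested padding are left unpadded.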