for ( auto& i : arr )
list.put( &i );
- pool.add( new Worker<FreeList>( pool, list ), s_nThreadCount );
-
- propout() << std::make_pair( "work_thread", s_nThreadCount )
- << std::make_pair( "pass_count", s_nPassCount );
-
- std::chrono::milliseconds duration = pool.run();
-
- propout() << std::make_pair( "duration", duration );
+ std::unique_ptr<Worker<FreeList>> worker(
+ new Worker<FreeList>(pool, list));
+ worker->test();
// analyze result
size_t nTotal = 0;
for ( auto const& i : arr )
nTotal += i.counter;
- size_t nSuccess = 0;
- for ( size_t threadNo = 0; threadNo < pool.size(); ++threadNo )
- nSuccess += static_cast<Worker<FreeList>&>( pool.get( threadNo )).m_nSuccess;
-
- EXPECT_EQ( nSuccess, nTotal );
+ EXPECT_EQ( worker->m_nSuccess, nTotal );
list.clear( []( typename FreeList::node* ) {} );
}
size_t put_get::s_nThreadCount = 1;
size_t put_get::s_nPassCount = 100000;
-#define CDSSTRESS_FREELIST_F( name, freelist_type ) \
- TEST_F( put_get, name ) \
- { \
- freelist_type fl; \
- test( fl ); \
- }
+#define CDSSTRESS_FREELIST_F(name, freelist_type) \
+ TEST_F(put_get, name) { \
+ std::unique_ptr<freelist_type> fl(new freelist_type()); \
+ test(*fl); \
+ }
CDSSTRESS_FREELIST_F( FreeList, cds::intrusive::FreeList )
atomics::atomic<tagged_ptr> tp;
if ( tp.is_lock_free()) {
- cds::intrusive::TaggedFreeList fl;
- test( fl );
+ using FL = cds::intrusive::TaggedFreeList;
+ std::unique_ptr<FL> fl(new FL());
+ test( *fl );
}
else
std::cout << "Double-width CAS is not supported\n";
for ( size_t pass = 0; pass < s_nPassCount; ++pass ) {
item_type* p;
while ( (p = static_cast<item_type*>( m_FreeList.get())) == nullptr );
p->counter.fetch_add( 1, atomics::memory_order_relaxed );
m_FreeList.put( p );
}
}
list.put( &item );
- pool.add( new Worker<FreeList>( pool, list ), s_nThreadCount );
-
- propout() << std::make_pair( "work_thread", s_nThreadCount )
- << std::make_pair( "pass_count", s_nPassCount );
-
- std::chrono::milliseconds duration = pool.run();
-
- propout() << std::make_pair( "duration", duration );
-
- // analyze result
- EXPECT_EQ( item.counter.load( atomics::memory_order_relaxed ), s_nPassCount * s_nThreadCount );
-
+ std::unique_ptr<Worker<FreeList>> worker(
+ new Worker<FreeList>(pool, list));
+ worker->test();
+ // analyze result: one sequential worker performs s_nPassCount increments
+ EXPECT_EQ( item.counter.load( atomics::memory_order_relaxed ), s_nPassCount );
list.clear( []( typename FreeList::node* ) {} );
}
};
size_t put_get_single::s_nThreadCount = 1;
size_t put_get_single::s_nPassCount = 100000;
-#define CDSSTRESS_FREELIST_F( name, freelist_type ) \
- TEST_F( put_get_single, name ) \
- { \
- freelist_type fl; \
- test( fl ); \
- }
+#define CDSSTRESS_FREELIST_F(name, freelist_type) \
+ TEST_F(put_get_single, name) { \
+ std::unique_ptr<freelist_type> fl(new freelist_type()); \
+ test(*fl); \
+ }
CDSSTRESS_FREELIST_F( FreeList, cds::intrusive::FreeList )
atomics::atomic<tagged_ptr> tp;
if ( tp.is_lock_free()) {
- cds::intrusive::TaggedFreeList fl;
- test( fl );
+ using FL = cds::intrusive::TaggedFreeList;
+ std::unique_ptr<FL> fl(new FL());
+ test( *fl );
}
else
std::cout << "Double-width CAS is not supported\n";
#define CDSSTRESS_Sequential_Queue_F(test_fixture, type_name) \
TEST_F(test_fixture, type_name) { \
typedef queue::Types<value_type>::type_name queue_type; \
- queue_type queue(s_nQueueSize); \
- test(queue, s_nVyukovQueuePushCount); \
+ std::unique_ptr<queue_type> queue(new queue_type(s_nQueueSize)); \
+ test(*queue, s_nVyukovQueuePushCount); \
}
#define CDSSTRESS_Sequential_VyukovQueue(test_fixture) \
#define CDSSTRESS_Sequential_Queue_F(test_fixture, type_name) \
TEST_F(test_fixture, type_name) { \
typedef queue::Types<value_type>::type_name queue_type; \
- queue_type queue; \
- test(queue); \
+ std::unique_ptr<queue_type> queue(new queue_type()); \
+ test(*queue); \
}
#define CDSSTRESS_Sequential_MSQueue(test_fixture) \
#define CDSSTRESS_Sequential_Queue_F(test_fixture, type_name) \
TEST_F(test_fixture, type_name) { \
typedef queue::Types<value_type>::type_name queue_type; \
- queue_type queue(s_nBufferSize); \
- test(queue); \
+ std::unique_ptr<queue_type> queue(new queue_type(s_nBufferSize)); \
+ test(*queue); \
}
CDSSTRESS_WeakRingBuffer_void(sequential_weak_ring_buffer)
#define CDSSTRESS_SequentialTreiberStack_F(test_fixture, type_name) \
TEST_F(test_fixture, type_name) { \
typedef stack::Types<value_type>::type_name stack_type; \
- stack_type stack; \
- test(stack); \
+ std::unique_ptr<stack_type> stack(new stack_type()); \
+ test(*stack); \
}
#define CDSSTRESS_SequentialEliminationStack_F(test_fixture, type_name) \
TEST_F(test_fixture, type_name) { \
typedef stack::Types<value_type>::type_name stack_type; \
- stack_type stack(s_nSequentialEliminationSize); \
- test(stack); \
+ std::unique_ptr<stack_type> stack( \
+ new stack_type(s_nSequentialEliminationSize)); \
+ test(*stack); \
}
#define CDSSTRESS_SequentialTreiberStack(test_fixture) \
GetConfig(SequentialEliminationSize);
}
- template <typename Stack> void test(Stack &stack) {
+ template <typename Stack>
+ void test(Stack &stack) {
size_t push_error_cnt = 0;
size_t pop_sum = 0;
value_type v;
}
};
-CDSSTRESS_SequentialTreiberStack(sequential_stack)
- CDSSTRESS_SequentialEliminationStack(sequential_stack)
+CDSSTRESS_SequentialTreiberStack(sequential_stack);
+CDSSTRESS_SequentialEliminationStack(sequential_stack);
} // namespace