//$$CDS-header$$
-#ifndef __CDS_ALGO_FLAT_COMBINING_H
-#define __CDS_ALGO_FLAT_COMBINING_H
+#ifndef CDSLIB_ALGO_FLAT_COMBINING_H
+#define CDSLIB_ALGO_FLAT_COMBINING_H
-#include <cds/cxx11_atomic.h>
+#include <mutex>
+#include <cds/algo/atomic.h>
#include <cds/details/allocator.h>
#include <cds/algo/backoff_strategy.h>
-#include <cds/lock/spinlock.h>
-#include <cds/details/std/mutex.h> // lock_guard
+#include <cds/sync/spinlock.h>
#include <cds/opt/options.h>
-#include <cds/int_algo.h>
+#include <cds/algo/int_algo.h>
#include <boost/thread/tss.hpp> // thread_specific_ptr
namespace cds { namespace algo {
Each data structure based on flat combining contains a class derived from \p %publication_record
*/
struct publication_record {
- CDS_ATOMIC::atomic<unsigned int> nRequest; ///< Request field (depends on data structure)
- CDS_ATOMIC::atomic<unsigned int> nState; ///< Record state: inactive, active, removed
- unsigned int nAge; ///< Age of the record
- CDS_ATOMIC::atomic<publication_record *> pNext; ///< Next record in publication list
+ atomics::atomic<unsigned int> nRequest; ///< Request field (depends on data structure)
+ atomics::atomic<unsigned int> nState; ///< Record state: inactive, active, removed
+ atomics::atomic<unsigned int> nAge; ///< Age of the record
+ atomics::atomic<publication_record *> pNext; ///< Next record in publication list
void * pOwner; ///< [internal data] Pointer to \ref kernel object that manages the publication list
/// Initializes publication record
/// Returns the value of \p nRequest field
unsigned int op() const
{
- return nRequest.load( CDS_ATOMIC::memory_order_relaxed );
+ return nRequest.load( atomics::memory_order_relaxed );
}
/// Checks if the operation is done
bool is_done() const
{
- return nRequest.load( CDS_ATOMIC::memory_order_relaxed ) == req_Response;
+ return nRequest.load( atomics::memory_order_relaxed ) == req_Response;
}
};
/// Type traits of \ref kernel class
/**
You can define different type traits for \ref kernel
- by specifying your struct based on \p %type_traits
+ by specifying your struct based on \p %traits
or by using \ref make_traits metafunction.
*/
- struct type_traits
+ struct traits
{
- typedef cds::lock::Spin lock_type; ///< Lock type
+ typedef cds::sync::spin lock_type; ///< Lock type
typedef cds::backoff::delay_of<2> back_off; ///< Back-off strategy
typedef CDS_DEFAULT_ALLOCATOR allocator; ///< Allocator used for TLS data (allocating publication_record derivatives)
typedef empty_stat stat; ///< Internal statistics
/// Metafunction converting option list to traits
/**
- This is a wrapper for <tt> cds::opt::make_options< type_traits, Options...> </tt>
\p Options are:
- - \p opt::lock_type - mutex type, default is \p cds::lock::Spin
+ - \p opt::lock_type - mutex type, default is \p cds::sync::spin
- \p opt::back_off - back-off strategy, default is \p cds::backoff::delay_of<2>
- \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR
- \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default)
For the full list of available memory orderings see opt::memory_model.
Default is cds::opt::v::relaxed_ordering
*/
- template <CDS_DECL_OPTIONS6>
+ template <typename... Options>
struct make_traits {
# ifdef CDS_DOXYGEN_INVOKED
typedef implementation_defined type ; ///< Metafunction result
# else
typedef typename cds::opt::make_options<
- typename cds::opt::find_type_traits< type_traits, CDS_OPTIONS6 >::type
- ,CDS_OPTIONS6
+ typename cds::opt::find_type_traits< traits, Options... >::type
+ ,Options...
>::type type;
# endif
};
/**
Template parameters:
- \p PublicationRecord - a type derived from \ref publication_record
- - \p Traits - a type traits of flat combining, default is flat_combining::type_traits.
+ - \p Traits - a type traits of flat combining, default is \p flat_combining::traits.
\ref make_traits metafunction can be used to create type traits
The kernel object should be a member of a container class. The container cooperates with flat combining
*/
template <
typename PublicationRecord
- ,typename Traits = type_traits
+ ,typename Traits = traits
>
class kernel
{
public:
- typedef PublicationRecord publication_record_type; ///< publication record type
- typedef Traits type_traits; ///< Type traits
- typedef typename type_traits::lock_type global_lock_type; ///< Global lock type
- typedef typename type_traits::back_off back_off; ///< back-off strategy type
- typedef typename type_traits::allocator allocator; ///< Allocator type (used for allocating publication_record_type data)
- typedef typename type_traits::stat stat; ///< Internal statistics
- typedef typename type_traits::memory_model memory_model; ///< C++ memory model
+ typedef PublicationRecord publication_record_type; ///< publication record type
+ typedef Traits traits; ///< Type traits
+ typedef typename traits::lock_type global_lock_type; ///< Global lock type
+ typedef typename traits::back_off back_off; ///< back-off strategy type
+ typedef typename traits::allocator allocator; ///< Allocator type (used for allocating publication_record_type data)
+ typedef typename traits::stat stat; ///< Internal statistics
+ typedef typename traits::memory_model memory_model; ///< C++ memory model
protected:
//@cond
typedef cds::details::Allocator< publication_record_type, allocator > cxx11_allocator; ///< internal helper cds::details::Allocator
- typedef cds_std::lock_guard<global_lock_type> lock_guard;
+ typedef std::lock_guard<global_lock_type> lock_guard;
//@endcond
protected:
void release_record( publication_record_type * pRec )
{
assert( pRec->is_done() );
- pRec->nRequest.store( req_EmptyRecord, memory_model::memory_order_relaxed );
+ pRec->nRequest.store( req_EmptyRecord, memory_model::memory_order_release );
m_Stat.onReleasePubRecord();
}
if ( pRec->nState.load(memory_model::memory_order_relaxed) == active && pRec->pOwner ) {
// record is active and kernel is alive
unsigned int nState = active;
- pRec->nState.compare_exchange_strong( nState, removed, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ pRec->nState.compare_exchange_strong( nState, removed, memory_model::memory_order_release, atomics::memory_order_relaxed );
}
else {
// record is not in publication list or kernel already deleted
{
assert( pRec->nState.load( memory_model::memory_order_relaxed ) == inactive );
- pRec->nAge = m_nCount;
+ pRec->nAge.store( m_nCount, memory_model::memory_order_release );
pRec->nState.store( active, memory_model::memory_order_release );
// Insert record to publication list
pRec->pNext = p;
// Failed CAS changes p
} while ( !m_pHead->pNext.compare_exchange_weak( p, static_cast<publication_record *>(pRec),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ memory_model::memory_order_release, atomics::memory_order_relaxed ));
m_Stat.onActivatPubRecord();
}
}
{
if ( m_Mutex.try_lock() ) {
// The thread becomes a combiner
- lock_guard l( m_Mutex, cds_std::adopt_lock_t() );
+ lock_guard l( m_Mutex, std::adopt_lock_t() );
// The record pRec can be excluded from publication list. Re-publish it
republish( pRec );
// There is another combiner, wait while it executes our request
if ( !wait_for_combining( pRec ) ) {
// The thread becomes a combiner
- lock_guard l( m_Mutex, cds_std::adopt_lock_t() );
+ lock_guard l( m_Mutex, std::adopt_lock_t() );
// The record pRec can be excluded from publication list. Re-publish it
republish( pRec );
{
if ( m_Mutex.try_lock() ) {
// The thread becomes a combiner
- lock_guard l( m_Mutex, cds_std::adopt_lock_t() );
+ lock_guard l( m_Mutex, std::adopt_lock_t() );
// The record pRec can be excluded from publication list. Re-publish it
republish( pRec );
// There is another combiner, wait while it executes our request
if ( !wait_for_combining( pRec ) ) {
// The thread becomes a combiner
- lock_guard l( m_Mutex, cds_std::adopt_lock_t() );
+ lock_guard l( m_Mutex, std::adopt_lock_t() );
// The record pRec can be excluded from publication list. Re-publish it
republish( pRec );
switch ( p->nState.load( memory_model::memory_order_acquire )) {
case active:
if ( p->op() >= req_Operation ) {
- p->nAge = nCurAge;
+ p->nAge.store( nCurAge, memory_model::memory_order_release );
owner.fc_apply( static_cast<publication_record_type *>(p) );
operation_done( *p );
bOpDone = true;
// Thinning publication list
publication_record * pPrev = nullptr;
for ( publication_record * p = m_pHead; p; ) {
- if ( p->nState.load( memory_model::memory_order_acquire ) == active && p->nAge + m_nCompactFactor < nCurAge ) {
+ if ( p->nState.load( memory_model::memory_order_acquire ) == active
+ && p->nAge.load( memory_model::memory_order_acquire ) + m_nCompactFactor < nCurAge )
+ {
if ( pPrev ) {
publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire );
if ( pPrev->pNext.compare_exchange_strong( p, pNext,
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
p->nState.store( inactive, memory_model::memory_order_release );
p = pNext;
if ( pPrev ) {
publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire );
if ( pPrev->pNext.compare_exchange_strong( p, pNext,
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
cxx11_allocator().Delete( static_cast<publication_record_type *>( p ));
m_Stat.onDeletePubRecord();
} // namespace flat_combining
}} // namespace cds::algo
-#endif // #ifndef __CDS_ALGO_FLAT_COMBINING_H
+#endif // #ifndef CDSLIB_ALGO_FLAT_COMBINING_H