#include <cds/algo/atomic.h>
#include <cds/details/allocator.h>
#include <cds/algo/backoff_strategy.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#include <cds/opt/options.h>
#include <cds/algo/int_algo.h>
#include <boost/thread/tss.hpp> // thread_specific_ptr
*/
struct traits
{
- typedef cds::lock::Spin lock_type; ///< Lock type
+ typedef cds::sync::spin lock_type; ///< Lock type
typedef cds::backoff::delay_of<2> back_off; ///< Back-off strategy
typedef CDS_DEFAULT_ALLOCATOR allocator; ///< Allocator used for TLS data (allocating publication_record derivatives)
typedef empty_stat stat; ///< Internal statistics
/// Metafunction converting option list to traits
/**
\p Options are:
- - \p opt::lock_type - mutex type, default is \p cds::lock::Spin
+ - \p opt::lock_type - mutex type, default is \p cds::sync::spin
- \p opt::back_off - back-off strategy, defalt is \p cds::backoff::delay_of<2>
- \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR
- \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default)
/// Lock type used to lock modifying items
/**
- Default is cds::lock::Spin
+ Default is cds::sync::spin
*/
- typedef cds::lock::Spin lock_type;
+ typedef cds::sync::spin lock_type;
/// back-off strategy used
typedef cds::backoff::Default back_off;
/// Metafunction converting option list to \p lazy_list::traits
/**
\p Options are:
- - \p opt::lock_type - lock type for node-level locking. Default \p is cds::lock::Spin. Note that <b>each</b> node
+ - \p opt::lock_type - lock type for node-level locking. Default \p is cds::sync::spin. Note that <b>each</b> node
of the list has member of type \p lock_type, therefore, heavy-weighted locking primitive is not
acceptable as candidate for \p lock_type.
- \p opt::compare - key compare functor. No default functor is provided.
/// Metafunction converting option list to traits
/**
\p Options are:
- - \p opt::lock_type - mutex type, default is \p cds::lock::Spin
+ - \p opt::lock_type - mutex type, default is \p cds::sync::spin
- \p opt::back_off - back-off strategy, defalt is \p cds::backoff::delay_of<2>
- \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR
- \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default)
/// Metafunction converting option list to traits
/**
\p Options are:
- - \p opt::lock_type - mutex type, default is \p cds::lock::Spin
+ - \p opt::lock_type - mutex type, default is \p cds::sync::spin
- \p opt::back_off - back-off strategy, defalt is \p cds::backoff::delay_of<2>
- \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR
- \p opt::stat - internal statistics, possible type: \p fcpqueue::stat, \p fcpqueue::empty_stat (the default)
/// Metafunction converting option list to traits
/**
\p Options are:
- - \p opt::lock_type - mutex type, default is \p cds::lock::Spin
+ - \p opt::lock_type - mutex type, default is \p cds::sync::spin
- \p opt::back_off - back-off strategy, defalt is \p cds::backoff::delay_of<2>
- \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR
- \p opt::stat - internal statistics, possible type: \p fcqueue::stat, \p fcqueue::empty_stat (the default)
/// Metafunction converting option list to traits
/**
\p Options are:
- - \p opt::lock_type - mutex type, default is \p cds::lock::Spin
+ - \p opt::lock_type - mutex type, default is \p cds::sync::spin
- \p opt::back_off - back-off strategy, defalt is \p cds::backoff::Default
- \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR
- \p opt::stat - internal statistics, possible type: \p fcstack::stat, \p fcstack::empty_stat (the default)
- \p opt::compare - priority compare functor. No default functor is provided.
If the option is not specified, the \p opt::less is used.
- \p opt::less - specifies binary predicate used for priority compare. Default is \p std::less<T>.
- - \p opt::lock_type - lock type. Default is \p cds::lock::Spin.
+ - \p opt::lock_type - lock type. Default is \p cds::sync::spin.
- \p opt::back_off - back-off strategy. Default is \p cds::backoff::yield
- \p opt::allocator - allocator (like \p std::allocator) for the values of queue's items.
Default is \ref CDS_DEFAULT_ALLOCATOR
#include <mutex> // unique_lock
#include <cds/container/msqueue.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace cds { namespace container {
/// RWQueue related definitions
struct traits
{
/// Lock policy
- typedef cds::lock::Spin lock_type;
+ typedef cds::sync::spin lock_type;
/// Node allocator
typedef CDS_DEFAULT_ALLOCATOR allocator;
/// Metafunction converting option list to \p rwqueue::traits
/**
Supported \p Options are:
- - opt::lock_type - lock policy, default is \p cds::lock::Spin. Any type satisfied \p Mutex C++ concept may be used.
+ - opt::lock_type - lock policy, default is \p cds::sync::spin. Any type satisfied \p Mutex C++ concept may be used.
- opt::allocator - allocator (like \p std::allocator) used for allocating queue nodes. Default is \ref CDS_DEFAULT_ALLOCATOR
- opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled)
To enable item counting use \p cds::atomicity::item_counter.
typedef CDS_DEFAULT_ALLOCATOR allocator;
/// Lock type used to maintain an internal list of allocated segments
- typedef cds::lock::Spin lock_type;
+ typedef cds::sync::spin lock_type;
/// Random \ref cds::opt::permutation_generator "permutation generator" for sequence [0, quasi_factor)
typedef cds::opt::v::random2_permutation<int> permutation_generator;
/// Random engine to generate a random position in elimination array
typedef opt::v::c_rand random_engine;
- /// Lock type used in elimination, default is cds::lock::Spin
- typedef cds::lock::Spin lock_type;
+ /// Lock type used in elimination, default is cds::sync::spin
+ typedef cds::sync::spin lock_type;
///@}
};
- opt::random_engine - a random engine to generate a random position in elimination array.
Default is \p opt::v::c_rand.
- opt::elimination_backoff - back-off strategy to wait for elimination, default is \p cds::backoff::delay<>
- - opt::lock_type - a lock type used in elimination back-off, default is \p cds::lock::Spin.
+ - opt::lock_type - a lock type used in elimination back-off, default is \p cds::sync::spin.
Example: declare %TreiberStack with item counting and internal statistics using \p %make_traits
\code
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#if CDS_COMPILER == CDS_COMPILER_MSVC
# pragma warning(push)
{
cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
- atomics::atomic<guard_data *> m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
- atomics::atomic<guard_data *> m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field)
- SpinLock m_freeListLock ; ///< Access to m_FreeGuardList
+ atomics::atomic<guard_data *> m_GuardList; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
+ atomics::atomic<guard_data *> m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
+ cds::sync::spin m_freeListLock; ///< Access to m_FreeGuardList
/*
Unfortunately, access to the list of free guard is lock-based.
details::guard_data * pGuard;
{
- std::unique_lock<SpinLock> al( m_freeListLock );
+ std::unique_lock<cds::sync::spin> al( m_freeListLock );
pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
if ( pGuard )
m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
{
pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
- std::unique_lock<SpinLock> al( m_freeListLock );
+ std::unique_lock<cds::sync::spin> al( m_freeListLock );
pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
}
pLast = p;
}
- std::unique_lock<SpinLock> al( m_freeListLock );
+ std::unique_lock<cds::sync::spin> al( m_freeListLock );
pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
}
#include <cds/opt/hash.h>
#include <cds/lock/array.h>
#include <cds/os/thread.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace cds { namespace intrusive {
typedef unsigned long long owner_t;
typedef cds::OS::ThreadId threadId_t;
- typedef cds::lock::Spin spinlock_type;
+ typedef cds::sync::spin spinlock_type;
typedef std::unique_lock< spinlock_type > scoped_spinlock;
//@endcond
#include <cds/opt/compare.h>
#include <cds/details/marked_ptr.h>
#include <cds/details/make_const_type.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#include <cds/urcu/options.h>
namespace cds { namespace intrusive {
/**
Template parameters:
- GC - garbage collector
- - Lock - lock type. Default is \p cds::lock::Spin
+ - Lock - lock type. Default is \p cds::sync::spin
- Tag - a \ref cds_intrusive_hook_tag "tag"
*/
template <
class GC
- ,typename Lock = cds::lock::Spin
+ ,typename Lock = cds::sync::spin
,typename Tag = opt::none
>
struct node
struct default_hook {
typedef undefined_gc gc;
typedef opt::none tag;
- typedef lock::Spin lock_type;
+ typedef sync::spin lock_type;
};
//@endcond
/**
\p Options are:
- opt::gc - garbage collector
- - opt::lock_type - lock type used for node locking. Default is lock::Spin
+ - opt::lock_type - lock type used for node locking. Default is sync::spin
- opt::tag - a \ref cds_intrusive_hook_tag "tag"
*/
template < typename... Options >
\p Options are:
- opt::gc - garbage collector
- - opt::lock_type - lock type used for node locking. Default is lock::Spin
+ - opt::lock_type - lock type used for node locking. Default is sync::spin
- opt::tag - a \ref cds_intrusive_hook_tag "tag"
*/
template < size_t MemberOffset, typename... Options >
\p Options are:
- opt::gc - garbage collector used.
- - opt::lock_type - lock type used for node locking. Default is lock::Spin
+ - opt::lock_type - lock type used for node locking. Default is sync::spin
- opt::tag - a \ref cds_intrusive_hook_tag "tag"
*/
template <typename NodeTraits, typename... Options >
/// Metafunction converting option list to traits
/**
\p Options are:
- - \p opt::lock_type - mutex type, default is \p cds::lock::Spin
+ - \p opt::lock_type - mutex type, default is \p cds::sync::spin
- \p opt::back_off - back-off strategy, defalt is \p cds::backoff::Default
- \p opt::disposer - the functor used for dispose removed items. Default is \p opt::intrusive::v::empty_disposer.
This option is used only in \p FCQueue::clear() function.
/// Metafunction converting option list to traits
/**
\p Options are:
- - \p opt::lock_type - mutex type, default is \p cds::lock::Spin
+ - \p opt::lock_type - mutex type, default is \p cds::sync::spin
- \p opt::back_off - back-off strategy, defalt is \p cds::backoff::Default
- \p opt::disposer - the functor used for dispose removed items. Default is \p opt::intrusive::v::empty_disposer.
This option is used only in \p FCStack::clear() function.
/// Lazy list node for \p gc::nogc
/**
Template parameters:
- - Lock - lock type. Default is \p cds::lock::Spin
+ - Lock - lock type. Default is \p cds::sync::spin
- Tag - a \ref cds_intrusive_hook_tag "tag"
*/
template <
#ifdef CDS_DOXYGEN_INVOKED
- typename Lock = cds::lock::Spin,
+ typename Lock = cds::sync::spin,
typename Tag = opt::none
#else
typename Lock,
#include <mutex> // std::unique_lock
#include <cds/intrusive/details/base.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#include <cds/os/thread.h>
#include <cds/details/bit_reverse_counter.h>
#include <cds/intrusive/options.h>
/**
No default functor is provided. If the option is not specified, the \p less is used.
*/
- typedef opt::none compare;
+ typedef opt::none compare;
/// Specifies binary predicate used for priority comparing.
/**
Default is \p std::less<T>.
*/
- typedef opt::none less;
+ typedef opt::none less;
/// Type of mutual-exclusion lock
- typedef lock::Spin lock_type;
+ typedef cds::sync::spin lock_type;
/// Back-off strategy
typedef backoff::yield back_off;
- \p opt::compare - priority compare functor. No default functor is provided.
If the option is not specified, the \p opt::less is used.
- \p opt::less - specifies binary predicate used for priority compare. Default is \p std::less<T>.
- - \p opt::lock_type - lock type. Default is \p cds::lock::Spin.
+ - \p opt::lock_type - lock type. Default is \p cds::sync::spin
- \p opt::back_off - back-off strategy. Default is \p cds::backoff::yield
- \p opt::stat - internal statistics. Available types: \p mspriority_queue::stat, \p mspriority_queue::empty_stat (the default, no overhead)
*/
#include <cds/intrusive/details/base.h>
#include <cds/details/marked_ptr.h>
#include <cds/algo/int_algo.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#include <cds/opt/permutation.h>
#include <boost/intrusive/slist.hpp>
typedef CDS_DEFAULT_ALLOCATOR allocator;
/// Lock type used to maintain an internal list of allocated segments
- typedef cds::lock::Spin lock_type;
+ typedef cds::sync::spin lock_type;
/// Random \ref cds::opt::permutation_generator "permutation generator" for sequence [0, quasi_factor)
typedef cds::opt::v::random2_permutation<int> permutation_generator;
#include <mutex>
#include <cds/lock/array.h>
#include <cds/os/thread.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace cds { namespace intrusive { namespace striped_set {
typedef unsigned long long owner_t;
typedef cds::OS::ThreadId threadId_t;
- typedef cds::lock::Spin spinlock_type;
+ typedef cds::sync::spin spinlock_type;
typedef std::unique_lock< spinlock_type > scoped_spinlock;
//@endcond
#include <cds/intrusive/details/single_link_struct.h>
#include <cds/algo/elimination.h>
#include <cds/opt/buffer.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#include <cds/details/type_padding.h>
namespace cds { namespace intrusive {
/// Random engine to generate a random position in elimination array
typedef opt::v::c_rand random_engine;
- /// Lock type used in elimination, default is cds::lock::Spin
- typedef cds::lock::Spin lock_type;
+ /// Lock type used in elimination, default is cds::sync::spin
+ typedef cds::sync::spin lock_type;
///@}
};
- opt::random_engine - a random engine to generate a random position in elimination array.
Default is \p opt::v::c_rand.
- opt::elimination_backoff - back-off strategy to wait for elimination, default is \p cds::backoff::delay<>
- - opt::lock_type - a lock type used in elimination back-off, default is \p cds::lock::Spin.
+ - opt::lock_type - a lock type used in elimination back-off, default is \p cds::sync::spin
Example: declare \p %TreiberStack with elimination enabled and internal statistics
\code
can be simultaneous.
Template arguments:
- - \p Lock - lock type, for example, \p std::mutex, \p cds::lock::Spinlock
+ - \p Lock - lock type, for example, \p std::mutex, \p cds::sync::spin_lock
- \p SelectPolicy - array cell selection policy, the default is \ref mod_select_policy
Available policies: \ref trivial_select_policy, \ref pow2_select_policy, \ref mod_select_policy.
- \p Alloc - memory allocator for array
#ifndef __CDS_LOCK_SPINLOCK_H
#define __CDS_LOCK_SPINLOCK_H
-/*
- Defines spin-lock primitives
- Editions:
- 2012.01.23 1.1.0 khizmax Refactoring: use C++11 atomics
- 2010.01.22 0.6.0 khizmax Refactoring: use cds::atomic namespace
- Explicit memory ordering specification (atomic::memory_order_xxx)
- 2006 khizmax Created
-*/
+#warning "cds/lock/spinlock.h is deprecated, use cds/sync/spinlock.h instead"
-#include <cds/algo/atomic.h>
-#include <cds/os/thread.h>
-#include <cds/algo/backoff_strategy.h>
+#include <cds/sync/spinlock.h>
+//@cond
namespace cds {
- /// Synchronization primitives
+ /// Synchronization primitives (deprecated namespace, use \p cds::sync namespace instead)
namespace lock {
- /// Spin lock.
- /**
- Simple and light-weight spin-lock critical section
- It is useful to gain access to small (short-timed) code
- Algorithm:
-
- TATAS (test-and-test-and-lock)
- [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.
-
- No serialization performed - any of waiting threads may owns the spin-lock.
- This spin-lock is NOT recursive: the thread owned the lock cannot call lock() method withod deadlock.
- The method unlock() can call any thread
-
- DEBUG version: The spinlock stores owner thead id. Assertion is raised when:
- - double lock attempt encountered by same thread (deadlock)
- - unlock by another thread
-
- If spin-lock is locked the Backoff algorithm is called. Predefined backoff::LockDefault class yields current
- thread and repeats lock attempts later
-
- Template parameters:
- - @p Backoff backoff strategy. Used when spin lock is locked
- */
- template <typename Backoff >
- class Spinlock
- {
- public:
- typedef Backoff backoff_strategy ; ///< back-off strategy type
- private:
- atomics::atomic<bool> m_spin ; ///< Spin
-# ifdef CDS_DEBUG
- typename OS::ThreadId m_dbgOwnerId ; ///< Owner thread id (only for debug mode)
-# endif
-
- public:
- /// Construct free (unlocked) spin-lock
- Spinlock() CDS_NOEXCEPT
-# ifdef CDS_DEBUG
- :m_dbgOwnerId( OS::c_NullThreadId )
-# endif
- {
- m_spin.store( false, atomics::memory_order_relaxed );
- }
-
- /// Construct spin-lock in specified state
- /**
- In debug mode: if \p bLocked = true then spin-lock is made owned by current thread
- */
- Spinlock( bool bLocked ) CDS_NOEXCEPT
-# ifdef CDS_DEBUG
- : m_dbgOwnerId( bLocked ? OS::get_current_thread_id() : OS::c_NullThreadId )
-# endif
- {
- m_spin.store( bLocked, atomics::memory_order_relaxed );
- }
-
- /// Dummy copy constructor
- /**
- In theory, spin-lock cannot be copied. However, it is not practical.
- Therefore, we provide dummy copy constructor that do no copy in fact. The ctor
- initializes the spin to free (unlocked) state like default ctor.
- */
- Spinlock(const Spinlock<Backoff>& ) CDS_NOEXCEPT
- : m_spin( false )
-# ifdef CDS_DEBUG
- , m_dbgOwnerId( OS::c_NullThreadId )
-# endif
- {}
-
- /// Destructor. On debug time it checks whether spin-lock is free
- ~Spinlock()
- {
- assert( !m_spin.load( atomics::memory_order_relaxed ) );
- }
-
- /// Check if the spin is locked
- bool is_locked() const CDS_NOEXCEPT
- {
- return m_spin.load( atomics::memory_order_relaxed );
- }
-
- /// Try to lock the object
- /**
- Returns \p true if locking is succeeded
- otherwise (if the spin is already locked) returns \p false
-
- Debug version: deadlock can be detected
- */
- bool try_lock() CDS_NOEXCEPT
- {
- bool bCurrent = false;
- m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );
-
- CDS_DEBUG_ONLY(
- if ( !bCurrent ) {
- m_dbgOwnerId = OS::get_current_thread_id();
- }
- )
- return !bCurrent;
- }
-
- /// Try to lock the object, repeat @p nTryCount times if failed
- /**
- Returns \p true if locking is succeeded
- otherwise (if the spin is already locked) returns \p false
- */
- bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
- {
- backoff_strategy backoff;
- while ( nTryCount-- ) {
- if ( try_lock() )
- return true;
- backoff();
- }
- return false;
- }
-
- /// Lock the spin-lock. Waits infinitely while spin-lock is locked. Debug version: deadlock may be detected
- void lock() CDS_NOEXCEPT_(noexcept( backoff_strategy()() ))
- {
- backoff_strategy backoff;
-
- // Deadlock detected
- assert( m_dbgOwnerId != OS::get_current_thread_id() );
-
- // TATAS algorithm
- while ( !try_lock() ) {
- while ( m_spin.load( atomics::memory_order_relaxed ) ) {
- backoff();
- }
- }
- assert( m_dbgOwnerId == OS::get_current_thread_id() );
- }
-
- /// Unlock the spin-lock. Debug version: deadlock may be detected
- void unlock() CDS_NOEXCEPT
- {
- assert( m_spin.load( atomics::memory_order_relaxed ) );
-
- assert( m_dbgOwnerId == OS::get_current_thread_id() );
- CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )
-
- m_spin.store( false, atomics::memory_order_release );
- }
- };
+ /// Alias for \p cds::sync::spin_lock for backward compatibility
+ template <typename Backoff>
+ using Spinlock = cds::sync::spin_lock< Backoff >;
/// Spin-lock implementation default for the current platform
- typedef Spinlock<backoff::LockDefault > Spin;
+ typedef cds::sync::spin_lock< backoff::LockDefault> Spin;
- /// Recursive spin lock.
- /**
- Allows recursive calls: the owner thread may recursive enter to critical section guarded by the spin-lock.
-
- Template parameters:
- - @p Integral one of integral atomic type: <tt>unsigned int</tt>, <tt>int</tt>, and others
- - @p Backoff backoff strategy. Used when spin lock is locked
- */
+ /// Alias for \p cds::sync::reentrant_spin_lock for backward compatibility
template <typename Integral, class Backoff>
- class ReentrantSpinT
- {
- typedef OS::ThreadId thread_id ; ///< The type of thread id
-
- public:
- typedef Integral integral_type ; ///< The integral type
- typedef Backoff backoff_strategy ; ///< The backoff type
-
- private:
- atomics::atomic<integral_type> m_spin ; ///< spin-lock atomic
- thread_id m_OwnerId ; ///< Owner thread id. If spin-lock is not locked it usually equals to OS::c_NullThreadId
-
- private:
- //@cond
- void take( thread_id tid ) CDS_NOEXCEPT
- {
- m_OwnerId = tid;
- }
-
- void free() CDS_NOEXCEPT
- {
- m_OwnerId = OS::c_NullThreadId;
- }
-
- bool is_taken( thread_id tid ) const CDS_NOEXCEPT
- {
- return m_OwnerId == tid;
- }
-
- bool try_taken_lock( thread_id tid ) CDS_NOEXCEPT
- {
- if ( is_taken( tid )) {
- m_spin.fetch_add( 1, atomics::memory_order_relaxed );
- return true;
- }
- return false;
- }
-
- bool try_acquire() CDS_NOEXCEPT
- {
- integral_type nCurrent = 0;
- return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
- }
-
- bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
- {
- backoff_strategy bkoff;
-
- while ( nTryCount-- ) {
- if ( try_acquire() )
- return true;
- bkoff();
- }
- return false;
- }
-
- void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
- {
- // TATAS algorithm
- backoff_strategy bkoff;
- while ( !try_acquire() ) {
- while ( m_spin.load( atomics::memory_order_relaxed ) )
- bkoff();
- }
- }
- //@endcond
-
- public:
- /// Default constructor initializes spin to free (unlocked) state
- ReentrantSpinT() CDS_NOEXCEPT
- : m_spin(0)
- , m_OwnerId( OS::c_NullThreadId )
- {}
-
- /// Dummy copy constructor
- /**
- In theory, spin-lock cannot be copied. However, it is not practical.
- Therefore, we provide dummy copy constructor that do no copy in fact. The ctor
- initializes the spin to free (unlocked) state like default ctor.
- */
- ReentrantSpinT(const ReentrantSpinT<Integral, Backoff>& ) CDS_NOEXCEPT
- : m_spin(0)
- , m_OwnerId( OS::c_NullThreadId )
- {}
-
- /// Construct object for specified state
- ReentrantSpinT(bool bLocked) CDS_NOEXCEPT
- : m_spin(0)
- , m_OwnerId( OS::c_NullThreadId )
- {
- if ( bLocked )
- lock();
- }
-
- /// Checks if the spin is locked
- /**
- The spin is locked if lock count > 0 and the current thread is not an owner of the lock.
- Otherwise (i.e. lock count == 0 or the curren thread owns the spin) the spin is unlocked.
- */
- bool is_locked() const CDS_NOEXCEPT
- {
- return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id() ));
- }
-
- /// Try to lock the spin-lock (synonym for \ref try_lock)
- bool try_lock() CDS_NOEXCEPT
- {
- thread_id tid = OS::get_current_thread_id();
- if ( try_taken_lock( tid ) )
- return true;
- if ( try_acquire()) {
- take( tid );
- return true;
- }
- return false;
- }
-
- /// Try to lock the object
- bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( try_acquire( nTryCount ) ) )
- {
- thread_id tid = OS::get_current_thread_id();
- if ( try_taken_lock( tid ) )
- return true;
- if ( try_acquire( nTryCount )) {
- take( tid );
- return true;
- }
- return false;
- }
-
- /// Lock the object waits if it is busy
- void lock() CDS_NOEXCEPT
- {
- thread_id tid = OS::get_current_thread_id();
- if ( !try_taken_lock( tid ) ) {
- acquire();
- take( tid );
- }
- }
-
- /// Unlock the spin-lock. Return @p true if the current thread is owner of spin-lock @p false otherwise
- bool unlock() CDS_NOEXCEPT
- {
- if ( is_taken( OS::get_current_thread_id() ) ) {
- integral_type n = m_spin.load( atomics::memory_order_relaxed );
- if ( n > 1 )
- m_spin.store( n - 1, atomics::memory_order_relaxed );
- else {
- free();
- m_spin.store( 0, atomics::memory_order_release );
- }
- return true;
- }
- return false;
- }
-
- /// Change the owner of locked spin-lock. May be called by thread that is owner of the spin-lock
- bool change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
- {
- if ( is_taken( OS::get_current_thread_id() ) ) {
- assert( newOwnerId != OS::c_NullThreadId );
- m_OwnerId = newOwnerId;
- return true;
- }
- return false;
- }
- };
+ using ReentrantSpinT = cds::sync::reentrant_spin_lock< Integral, Backoff >;
/// Recursive 32bit spin-lock
- typedef ReentrantSpinT<uint32_t, backoff::LockDefault> ReentrantSpin32;
+ typedef cds::sync::reentrant_spin32 ReentrantSpin32;
/// Recursive 64bit spin-lock
- typedef ReentrantSpinT<uint64_t, backoff::LockDefault> ReentrantSpin64;
+ typedef cds::sync::reentrant_spin64 ReentrantSpin64;
/// Default recursive spin-lock type
typedef ReentrantSpin32 ReentrantSpin;
typedef lock::ReentrantSpin64 RecursiveSpinLock64;
} // namespace cds
+//@endcond
#endif // #ifndef __CDS_LOCK_SPINLOCK_H
#include <cds/os/topology.h>
#include <cds/os/alloc_aligned.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#include <cds/details/type_padding.h>
#include <cds/details/marked_ptr.h>
#include <cds/container/vyukov_mpmc_cycle_queue.h>
typedef page_cached_allocator<> page_heap;
typedef aligned_malloc_heap aligned_heap;
typedef default_sizeclass_selector sizeclass_selector;
- typedef free_list_locked<cds::lock::Spin> free_list;
- typedef partial_list_locked<cds::lock::Spin> partial_list;
+ typedef free_list_locked<cds::sync::spin> free_list;
+ typedef partial_list_locked<cds::sync::spin> partial_list;
typedef procheap_empty_stat procheap_stat;
typedef os_allocated_empty os_allocated_stat;
typedef cds::opt::none check_bounds;
Examples:
\code
- // default_spin is cds::lock::Spin
- typedef typename cds::opt::select_default< cds::opt::none, cds::lock::Spin >::type default_spin;
+ // default_spin is cds::sync::spin
+ typedef typename cds::opt::select_default< cds::opt::none, cds::sync::spin >::type default_spin;
- // spin_32bit is cds::lock::Spin32
- typedef typename cds::opt::select_default< cds::lock::Spin32, cds::lock::Spin >::type spin_32bit;
+ // spin_32bit is cds::sync::reentrant_spin32
+ typedef typename cds::opt::select_default< cds::sync::reentrant_spin32, cds::sync::spin >::type spin_32bit;
\endcode
*/
template <typename Option, typename Default, typename Value = Option>
--- /dev/null
+//$$CDS-header$$
+
+#ifndef CDSLIB_LOCK_INJECTED_MONITOR_H
+#define CDSLIB_LOCK_INJECTED_MONITOR_H
+
+namespace cds { namespace lock {
+
+ /// Monitor that injects a lock as a member into a class
+ /**
+ Template arguments:
+ - Lock - lock type like \p std::mutex or \p cds::sync::spin
+ */
+ template <typename Lock>
+ class injected_monitor
+ {
+ public:
+ typedef Lock lock_type;
+
+        /// Wrapper that derives from \p T and injects a \p lock_type member into it
+        template <typename T>
+        struct wrapper : public T
+        {
+            using T::T;
+            mutable lock_type m_Lock;   ///< Injected lock; mutable so const objects can be locked
+
+            /// Acquires the injected lock
+            void lock() const
+            {
+                // Fix: the original 'm_Lock.lock;' omits the call parentheses,
+                // which is ill-formed (a non-static member function must be
+                // invoked, or its address taken with '&Class::member').
+                m_Lock.lock();
+            }
+
+            /// Releases the injected lock
+            void unlock() const
+            {
+                m_Lock.unlock();
+            }
+        };
+
+ template <typename T>
+ void lock( T const& p ) const
+ {
+ p.lock();
+ }
+
+ template <typename T>
+ void unlock( T const& p ) const
+ {
+ p.unlock();
+ }
+
+        /// RAII guard: holds the injected lock of \p p for the guard's lifetime
+        template <typename T>
+        class scoped_lock
+        {
+            T const& m_Locked;  ///< Object whose injected lock is held
+
+        public:
+            /// Locks \p p (the monitor argument is unused, kept for interface compatibility)
+            scoped_lock( injected_monitor const&, T const& p )
+                : m_Locked( p )
+            {
+                m_Locked.lock();
+            }
+
+            /// Unlocks the object locked in the constructor
+            ~scoped_lock()
+            {
+                // Fix: the original called p.unlock(), but 'p' is a constructor
+                // parameter and is not in scope in the destructor; use the
+                // stored reference instead.
+                m_Locked.unlock();
+            }
+        };
+ };
+}} // namespace cds::lock
+
+#endif // #ifndef CDSLIB_LOCK_INJECTED_MONITOR_H
--- /dev/null
+//$$CDS-header$$
+
+#ifndef CDSLIB_SYNC_SPINLOCK_H
+#define CDSLIB_SYNC_SPINLOCK_H
+
+#include <cds/algo/atomic.h>
+#include <cds/os/thread.h>
+#include <cds/algo/backoff_strategy.h>
+
+namespace cds {
+ /// Synchronization primitives
+ namespace sync {
+ /// Spin lock
+ /**
+ Simple and light-weight spin-lock critical section
+ It is useful to gain access to small (short-timed) code
+
+ Algorithm:
+
+ TATAS (test-and-test-and-lock)
+ [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.
+
+            No serialization performed - any of the waiting threads may own the spin-lock.
+            This spin-lock is NOT recursive: the thread that owns the lock cannot call the lock() method again without deadlock.
+            The unlock() method can be called by any thread
+
+            DEBUG version: The spinlock stores the owner thread id. Assertion is raised when:
+ - double lock attempt encountered by same thread (deadlock)
+ - unlock by another thread
+
+ If spin-lock is locked the Backoff algorithm is called. Predefined backoff::LockDefault class yields current
+ thread and repeats lock attempts later
+
+ Template parameters:
+ - @p Backoff backoff strategy. Used when spin lock is locked
+ */
+ template <typename Backoff >
+ class spin_lock
+ {
+ public:
+ typedef Backoff backoff_strategy; ///< back-off strategy type
+ private:
+ atomics::atomic<bool> m_spin; ///< Spin
+# ifdef CDS_DEBUG
+ typename OS::ThreadId m_dbgOwnerId; ///< Owner thread id (only for debug mode)
+# endif
+
+ public:
+ /// Construct free (unlocked) spin-lock
+ spin_lock() CDS_NOEXCEPT
+# ifdef CDS_DEBUG
+ :m_dbgOwnerId( OS::c_NullThreadId )
+# endif
+ {
+ m_spin.store( false, atomics::memory_order_relaxed );
+ }
+
+ /// Construct spin-lock in specified state
+ /**
+ In debug mode: if \p bLocked = true then spin-lock is made owned by current thread
+ */
+ spin_lock( bool bLocked ) CDS_NOEXCEPT
+# ifdef CDS_DEBUG
+ : m_dbgOwnerId( bLocked ? cds::OS::get_current_thread_id() : cds::OS::c_NullThreadId )
+# endif
+ {
+ m_spin.store( bLocked, atomics::memory_order_relaxed );
+ }
+
+ /// Dummy copy constructor
+ /**
+ In theory, spin-lock cannot be copied. However, it is not practical.
+ Therefore, we provide dummy copy constructor that do no copy in fact. The ctor
+ initializes the spin to free (unlocked) state like default ctor.
+ */
+ spin_lock(const spin_lock<Backoff>& ) CDS_NOEXCEPT
+ : m_spin( false )
+# ifdef CDS_DEBUG
+ , m_dbgOwnerId( cds::OS::c_NullThreadId )
+# endif
+ {}
+
+ /// Destructor. On debug time it checks whether spin-lock is free
+ ~spin_lock()
+ {
+ assert( !m_spin.load( atomics::memory_order_relaxed ) );
+ }
+
+ /// Check if the spin is locked
+ bool is_locked() const CDS_NOEXCEPT
+ {
+ return m_spin.load( atomics::memory_order_relaxed );
+ }
+
+ /// Try to lock the object
+ /**
+ Returns \p true if locking is succeeded
+ otherwise (if the spin is already locked) returns \p false
+
+ Debug version: deadlock can be detected
+ */
+ bool try_lock() CDS_NOEXCEPT
+ {
+ bool bCurrent = false;
+ m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );
+
+ CDS_DEBUG_ONLY(
+ if ( !bCurrent ) {
+ m_dbgOwnerId = OS::get_current_thread_id();
+ }
+ )
+ return !bCurrent;
+ }
+
+ /// Try to lock the object, repeat @p nTryCount times if failed
+ /**
+ Returns \p true if locking is succeeded
+ otherwise (if the spin is already locked) returns \p false
+ */
+ bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
+ {
+ backoff_strategy backoff;
+ while ( nTryCount-- ) {
+ if ( try_lock() )
+ return true;
+ backoff();
+ }
+ return false;
+ }
+
+ /// Lock the spin-lock. Waits infinitely while spin-lock is locked. Debug version: deadlock may be detected
+ void lock() CDS_NOEXCEPT_(noexcept( backoff_strategy()() ))
+ {
+ backoff_strategy backoff;
+
+ // Deadlock detected
+ assert( m_dbgOwnerId != OS::get_current_thread_id() );
+
+ // TATAS algorithm
+ while ( !try_lock() ) {
+ while ( m_spin.load( atomics::memory_order_relaxed ) ) {
+ backoff();
+ }
+ }
+ assert( m_dbgOwnerId == OS::get_current_thread_id() );
+ }
+
+ /// Unlock the spin-lock. Debug version: deadlock may be detected
+ void unlock() CDS_NOEXCEPT
+ {
+ assert( m_spin.load( atomics::memory_order_relaxed ) );
+
+ assert( m_dbgOwnerId == OS::get_current_thread_id() );
+ CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )
+
+ m_spin.store( false, atomics::memory_order_release );
+ }
+ };
+
+ /// Spin-lock implementation default for the current platform
+ typedef spin_lock<backoff::LockDefault > spin;
+
+ /// Recursive spin lock.
+ /**
+        Allows recursive calls: the owner thread may recursively enter the critical section guarded by the spin-lock.
+
+ Template parameters:
+ - @p Integral one of integral atomic type: <tt>unsigned int</tt>, <tt>int</tt>, and others
+ - @p Backoff backoff strategy. Used when spin lock is locked
+ */
+ template <typename Integral, class Backoff>
+ class reentrant_spin_lock
+ {
+ typedef OS::ThreadId thread_id ; ///< The type of thread id
+
+ public:
+ typedef Integral integral_type ; ///< The integral type
+ typedef Backoff backoff_strategy ; ///< The backoff type
+
+ private:
+ atomics::atomic<integral_type> m_spin ; ///< spin-lock atomic
+ thread_id m_OwnerId ; ///< Owner thread id. If spin-lock is not locked it usually equals to OS::c_NullThreadId
+
+ private:
+ //@cond
+ void take( thread_id tid ) CDS_NOEXCEPT
+ {
+ m_OwnerId = tid;
+ }
+
+ void free() CDS_NOEXCEPT
+ {
+ m_OwnerId = OS::c_NullThreadId;
+ }
+
+ bool is_taken( thread_id tid ) const CDS_NOEXCEPT
+ {
+ return m_OwnerId == tid;
+ }
+
+ bool try_taken_lock( thread_id tid ) CDS_NOEXCEPT
+ {
+ if ( is_taken( tid )) {
+ m_spin.fetch_add( 1, atomics::memory_order_relaxed );
+ return true;
+ }
+ return false;
+ }
+
+ bool try_acquire() CDS_NOEXCEPT
+ {
+ integral_type nCurrent = 0;
+ return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
+ }
+
+ bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
+ {
+ backoff_strategy bkoff;
+
+ while ( nTryCount-- ) {
+ if ( try_acquire() )
+ return true;
+ bkoff();
+ }
+ return false;
+ }
+
+ void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
+ {
+ // TATAS algorithm
+ backoff_strategy bkoff;
+ while ( !try_acquire() ) {
+ while ( m_spin.load( atomics::memory_order_relaxed ) )
+ bkoff();
+ }
+ }
+ //@endcond
+
+ public:
+ /// Default constructor initializes spin to free (unlocked) state
+ reentrant_spin_lock() CDS_NOEXCEPT
+ : m_spin(0)
+ , m_OwnerId( OS::c_NullThreadId )
+ {}
+
+ /// Dummy copy constructor
+ /**
+            In theory, a spin-lock cannot be copied. However, forbidding copying is not practical.
+            Therefore, we provide a dummy copy constructor that performs no copying at all. The ctor
+            initializes the spin to the free (unlocked) state, just like the default ctor.
+ */
+ reentrant_spin_lock( const reentrant_spin_lock<Integral, Backoff>& ) CDS_NOEXCEPT
+ : m_spin(0)
+ , m_OwnerId( OS::c_NullThreadId )
+ {}
+
+ /// Construct object for specified state
+ reentrant_spin_lock( bool bLocked ) CDS_NOEXCEPT
+ : m_spin(0)
+ , m_OwnerId( OS::c_NullThreadId )
+ {
+ if ( bLocked )
+ lock();
+ }
+
+ /// Checks if the spin is locked
+ /**
+ The spin is locked if lock count > 0 and the current thread is not an owner of the lock.
+            Otherwise (i.e. lock count == 0 or the current thread owns the spin) the spin is unlocked.
+ */
+ bool is_locked() const CDS_NOEXCEPT
+ {
+ return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id() ));
+ }
+
+        /// Try to lock the spin-lock; returns \p false if the lock is held by another thread
+ bool try_lock() CDS_NOEXCEPT
+ {
+ thread_id tid = OS::get_current_thread_id();
+ if ( try_taken_lock( tid ) )
+ return true;
+ if ( try_acquire()) {
+ take( tid );
+ return true;
+ }
+ return false;
+ }
+
+ /// Try to lock the object
+ bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( try_acquire( nTryCount ) ) )
+ {
+ thread_id tid = OS::get_current_thread_id();
+ if ( try_taken_lock( tid ) )
+ return true;
+ if ( try_acquire( nTryCount )) {
+ take( tid );
+ return true;
+ }
+ return false;
+ }
+
+ /// Lock the object waits if it is busy
+ void lock() CDS_NOEXCEPT
+ {
+ thread_id tid = OS::get_current_thread_id();
+ if ( !try_taken_lock( tid ) ) {
+ acquire();
+ take( tid );
+ }
+ }
+
+        /// Unlock the spin-lock. Returns @p true if the current thread owns the spin-lock, @p false otherwise
+ bool unlock() CDS_NOEXCEPT
+ {
+ if ( is_taken( OS::get_current_thread_id() ) ) {
+ integral_type n = m_spin.load( atomics::memory_order_relaxed );
+ if ( n > 1 )
+ m_spin.store( n - 1, atomics::memory_order_relaxed );
+ else {
+ free();
+ m_spin.store( 0, atomics::memory_order_release );
+ }
+ return true;
+ }
+ return false;
+ }
+
+ /// Change the owner of locked spin-lock. May be called by thread that is owner of the spin-lock
+ bool change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
+ {
+ if ( is_taken( OS::get_current_thread_id() ) ) {
+ assert( newOwnerId != OS::c_NullThreadId );
+ m_OwnerId = newOwnerId;
+ return true;
+ }
+ return false;
+ }
+ };
+
+ /// Recursive 32bit spin-lock
+ typedef reentrant_spin_lock<uint32_t, backoff::LockDefault> reentrant_spin32;
+
+ /// Default recursive spin-lock
+ typedef reentrant_spin32 reentrant_spin;
+
+ /// Recursive 64bit spin-lock
+ typedef reentrant_spin_lock<uint64_t, backoff::LockDefault> reentrant_spin64;
+ } // namespace sync
+} // namespace cds
+
+#endif // #ifndef CDSLIB_SYNC_SPINLOCK_H
2.1.0
- TODO
+    - The cds::lock namespace is renamed to cds::sync. All classes defined in the cds::lock namespace
+      are moved to cds::sync with new names (for example, cds::lock::SpinLock is renamed to
+      cds::sync::spin_lock). The cds::lock namespace and its contents are deprecated and are kept
+      only for backward compatibility.
2.0.0 30.12.2014
General release
<ClInclude Include="..\..\..\cds\os\osx\topology.h" />\r
<ClInclude Include="..\..\..\cds\os\posix\fake_topology.h" />\r
<ClInclude Include="..\..\..\cds\os\posix\timer.h" />\r
+ <ClInclude Include="..\..\..\cds\sync\injected_monitor.h" />\r
+ <ClInclude Include="..\..\..\cds\sync\spinlock.h" />\r
<ClInclude Include="..\..\..\cds\threading\details\cxx11.h" />\r
<ClInclude Include="..\..\..\cds\threading\details\cxx11_manager.h" />\r
<ClInclude Include="..\..\..\cds\urcu\details\base.h" />\r
<Filter Include="Header Files\cds\gc\impl">\r
<UniqueIdentifier>{3195cce2-1710-4b79-a1cf-6c7cea085fa3}</UniqueIdentifier>\r
</Filter>\r
+ <Filter Include="Header Files\cds\sync">\r
+ <UniqueIdentifier>{03d212fb-73f8-4f0e-9aff-f22b0783fee8}</UniqueIdentifier>\r
+ </Filter>\r
</ItemGroup>\r
<ItemGroup>\r
<ClCompile Include="..\..\..\src\dllmain.cpp">\r
<ClInclude Include="..\..\..\cds\algo\atomic.h">\r
<Filter>Header Files\cds\algo</Filter>\r
</ClInclude>\r
+ <ClInclude Include="..\..\..\cds\sync\injected_monitor.h">\r
+ <Filter>Header Files\cds\sync</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\..\cds\sync\spinlock.h">\r
+ <Filter>Header Files\cds\sync</Filter>\r
+ </ClInclude>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
#include <cds/container/striped_map/boost_flat_map.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace map {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedMap< map_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::Spin> >
+ ,co::mutex_policy< cc::striped_set::refinable<cds::sync::spin> >
, co::hash< hash_int >
, co::less< less >
> map_spin;
#include <cds/container/striped_map/boost_list.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace map {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedMap< sequence_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::Spin> >
+ ,co::mutex_policy< cc::striped_set::refinable<cds::sync::spin> >
, co::hash< hash_int >
, co::less< less >
> map_spin;
#include <cds/container/striped_map/boost_map.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace map {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedMap< map_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::Spin> >
+ ,co::mutex_policy< cc::striped_set::refinable<cds::sync::spin> >
, co::hash< hash_int >
, co::less< less >
> map_spin;
#include "map/hdr_striped_map.h"
#include <cds/container/striped_map/boost_unordered_map.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace map {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedMap< map_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::Spin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::spin> >
, co::hash< hash_int >
, co::less< less >
> map_spin;
#include "map/hdr_striped_map.h"
#include <cds/container/striped_map/std_hash_map.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#if !((CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS)) && _MSC_VER < 1600)
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedMap< map_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::Spin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::spin> >
, co::hash< hash_int >
, co::less< less >
> map_spin;
#include "map/hdr_striped_map.h"
#include <cds/container/striped_map/std_list.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace map {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedMap< sequence_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::Spin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::spin> >
, co::hash< hash_int >
, co::less< less >
> map_spin;
#include "map/hdr_striped_map.h"
#include <cds/container/striped_map/std_map.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace map {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedMap< map_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::Spin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::spin> >
, co::hash< hash_int >
, co::less< less >
> map_spin;
#include <cds/container/striped_map/boost_slist.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace map {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedMap< sequence_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::Spin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::spin> >
, co::hash< hash_int >
, co::less< less >
> map_spin;
#include <cds/container/striped_map/boost_flat_map.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace map {
typedef cc::StripedMap< map_t
, co::hash< hash_int >
, co::less< less >
- ,co::mutex_policy< cc::striped_set::striping<cds::lock::Spin> >
+ , co::mutex_policy< cc::striped_set::striping<cds::sync::spin> >
> map_spin;
test_striped< map_spin >();
#include <cds/container/striped_map/boost_list.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace map {
typedef cc::StripedMap< sequence_t
, co::hash< hash_int >
, co::less< less >
- ,co::mutex_policy< cc::striped_set::striping<cds::lock::Spin> >
+ , co::mutex_policy< cc::striped_set::striping<cds::sync::spin> >
> map_spin;
test_striped2< map_spin >();
#include <cds/container/striped_map/boost_map.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace map {
typedef cc::StripedMap< map_t
, co::hash< hash_int >
, co::less< less >
- ,co::mutex_policy< cc::striped_set::striping<cds::lock::Spin> >
+ , co::mutex_policy< cc::striped_set::striping<cds::sync::spin> >
> map_spin;
test_striped< map_spin >();
#include "map/hdr_striped_map.h"
#include <cds/container/striped_map/boost_unordered_map.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace map {
typedef cc::StripedMap< map_t
, co::hash< hash_int >
, co::less< less >
- ,co::mutex_policy< cc::striped_set::striping<cds::lock::Spin> >
+ , co::mutex_policy< cc::striped_set::striping<cds::sync::spin> >
> map_spin;
test_striped< map_spin >();
#include "map/hdr_striped_map.h"
#include <cds/container/striped_map/std_hash_map.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#if !((CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS)) && _MSC_VER < 1600)
typedef cc::StripedMap< map_t
, co::hash< hash_int >
, co::less< less >
- ,co::mutex_policy< cc::striped_set::striping<cds::lock::Spin> >
+ , co::mutex_policy< cc::striped_set::striping<cds::sync::spin> >
> map_spin;
test_striped< map_spin >();
#include "map/hdr_striped_map.h"
#include <cds/container/striped_map/std_list.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace map {
typedef cc::StripedMap< sequence_t
, co::hash< hash_int >
, co::less< less >
- ,co::mutex_policy< cc::striped_set::striping<cds::lock::Spin> >
+ , co::mutex_policy< cc::striped_set::striping<cds::sync::spin> >
> map_spin;
test_striped2< map_spin >();
#include "map/hdr_striped_map.h"
#include <cds/container/striped_map/std_map.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace map {
typedef cc::StripedMap< map_t
, co::hash< hash_int >
, co::less< less >
- ,co::mutex_policy< cc::striped_set::striping<cds::lock::Spin> >
+ , co::mutex_policy< cc::striped_set::striping<cds::sync::spin> >
> map_spin;
test_striped< map_spin >();
#include <cds/container/striped_map/boost_slist.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace map {
typedef cc::StripedMap< sequence_t
, co::hash< hash_int >
, co::less< less >
- ,co::mutex_policy< cc::striped_set::striping<cds::lock::Spin> >
+ , co::mutex_policy< cc::striped_set::striping<cds::sync::spin> >
> map_spin;
test_striped2< map_spin >();
#include <cds/container/striped_set/boost_flat_set.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedSet< set_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::ReentrantSpin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::reentrant_spin> >
, co::hash< hash_int >
, co::less< less<item> >
> set_spin;
#include <cds/container/striped_set/boost_list.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedSet< sequence_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::ReentrantSpin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::reentrant_spin> >
, co::hash< hash_int >
, co::less< less<item> >
> set_spin;
#include <cds/container/striped_set/boost_set.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedSet< set_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::ReentrantSpin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::reentrant_spin> >
, co::hash< hash_int >
, co::less< less<item> >
> set_spin;
#include <cds/details/defs.h>
#include <cds/container/striped_set/boost_stable_vector.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedSet< sequence_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::ReentrantSpin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::reentrant_spin> >
, co::hash< hash_int >
, co::less< less<item> >
> set_spin;
#include "set/hdr_striped_set.h"
#include <cds/container/striped_set/boost_unordered_set.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedSet< set_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::ReentrantSpin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::reentrant_spin> >
, co::hash< hash_int >
, co::less< less<item> >
> set_spin;
#include <cds/container/striped_set/boost_vector.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedSet< sequence_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::ReentrantSpin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::reentrant_spin> >
, co::hash< hash_int >
, co::less< less<item> >
> set_spin;
#include "set/hdr_striped_set.h"
#include <cds/container/striped_set/std_hash_set.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#if !((CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS)) && _MSC_VER < 1600)
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedSet< set_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::ReentrantSpin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::reentrant_spin> >
, co::hash< hash_int >
, co::less< less<item> >
> set_spin;
#include "set/hdr_striped_set.h"
#include <cds/container/striped_set/std_list.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedSet< sequence_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::ReentrantSpin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::reentrant_spin> >
, co::hash< hash_int >
, co::less< less<item> >
> set_spin;
#include "set/hdr_striped_set.h"
#include <cds/container/striped_set/std_set.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedSet< set_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::ReentrantSpin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::reentrant_spin> >
, co::hash< hash_int >
, co::less< less<item> >
> set_spin;
#include <cds/container/striped_set/boost_slist.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedSet< sequence_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::ReentrantSpin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::reentrant_spin> >
, co::hash< hash_int >
, co::less< less<item> >
> set_spin;
#include "set/hdr_striped_set.h"
#include <cds/container/striped_set/std_vector.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
// Spinlock as lock policy
CPPUNIT_MESSAGE( "spinlock");
typedef cc::StripedSet< sequence_t
- ,co::mutex_policy< cc::striped_set::refinable<cds::lock::ReentrantSpin> >
+ , co::mutex_policy< cc::striped_set::refinable<cds::sync::reentrant_spin> >
, co::hash< hash_int >
, co::less< less<item> >
> set_spin;
#include <cds/container/striped_set/boost_flat_set.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
typedef cc::StripedSet< set_t
, co::hash< hash_int >
, co::less< less<item> >
- ,co::mutex_policy< cc::striped_set::striping< cds::lock::Spin > >
+ ,co::mutex_policy< cc::striped_set::striping< cds::sync::spin> >
> set_spin;
test_striped< set_spin >();
#include <cds/container/striped_set/boost_list.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
typedef cc::StripedSet< sequence_t
, co::hash< hash_int >
, co::less< less<item> >
- ,co::mutex_policy< cc::striped_set::striping< cds::lock::Spin > >
+ , co::mutex_policy< cc::striped_set::striping< cds::sync::spin> >
> set_spin;
test_striped2< set_spin >();
#include <cds/container/striped_set/boost_set.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
typedef cc::StripedSet< set_t
, co::hash< hash_int >
, co::less< less<item> >
- ,co::mutex_policy< cc::striped_set::striping< cds::lock::Spin > >
+ , co::mutex_policy< cc::striped_set::striping< cds::sync::spin> >
> set_spin;
test_striped< set_spin >();
#include <cds/container/striped_set/boost_stable_vector.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
typedef cc::StripedSet< sequence_t
, co::hash< hash_int >
, co::less< less<item> >
- ,co::mutex_policy< cc::striped_set::striping< cds::lock::Spin > >
+ , co::mutex_policy< cc::striped_set::striping< cds::sync::spin> >
> set_spin;
test_striped2< set_spin >();
#include "set/hdr_striped_set.h"
#include <cds/container/striped_set/boost_unordered_set.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
typedef cc::StripedSet< set_t
, co::hash< hash_int >
, co::less< less<item> >
- ,co::mutex_policy< cc::striped_set::striping< cds::lock::Spin > >
+ , co::mutex_policy< cc::striped_set::striping< cds::sync::spin> >
> set_spin;
test_striped< set_spin >();
#include <cds/container/striped_set/boost_vector.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
typedef cc::StripedSet< sequence_t
, co::hash< hash_int >
, co::less< less<item> >
- ,co::mutex_policy< cc::striped_set::striping< cds::lock::Spin > >
+ , co::mutex_policy< cc::striped_set::striping< cds::sync::spin> >
> set_spin;
test_striped2< set_spin >();
#include "set/hdr_striped_set.h"
#include <cds/container/striped_set/std_hash_set.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#if !((CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS)) && _MSC_VER < 1600)
typedef cc::StripedSet< set_t
, co::hash< hash_int >
, co::less< less<item> >
- ,co::mutex_policy< cc::striped_set::striping< cds::lock::Spin > >
+ , co::mutex_policy< cc::striped_set::striping< cds::sync::spin> >
> set_spin;
test_striped< set_spin >();
#include "set/hdr_striped_set.h"
#include <cds/container/striped_set/std_list.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
typedef cc::StripedSet< sequence_t
, co::hash< hash_int >
, co::less< less<item> >
- ,co::mutex_policy< cc::striped_set::striping< cds::lock::Spin > >
+ , co::mutex_policy< cc::striped_set::striping< cds::sync::spin> >
> set_spin;
test_striped2< set_spin >();
#include "set/hdr_striped_set.h"
#include <cds/container/striped_set/std_set.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
typedef cc::StripedSet< set_t
, co::hash< hash_int >
, co::less< less<item> >
- ,co::mutex_policy< cc::striped_set::striping< cds::lock::Spin > >
+ , co::mutex_policy< cc::striped_set::striping< cds::sync::spin> >
> set_spin;
test_striped< set_spin >();
#include <cds/container/striped_set/boost_slist.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
typedef cc::StripedSet< sequence_t
, co::hash< hash_int >
, co::less< less<item> >
- ,co::mutex_policy< cc::striped_set::striping< cds::lock::Spin > >
+ , co::mutex_policy< cc::striped_set::striping< cds::sync::spin> >
> set_spin;
test_striped2< set_spin >();
#include "set/hdr_striped_set.h"
#include <cds/container/striped_set/std_vector.h>
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set {
typedef cc::StripedSet< sequence_t
, co::hash< hash_int >
, co::less< less<item> >
- ,co::mutex_policy< cc::striped_set::striping< cds::lock::Spin > >
+ , co::mutex_policy< cc::striped_set::striping< cds::sync::spin> >
> set_spin;
test_striped2< set_spin >();
static size_t s_nPassPerThread;
struct Item {
- cds::SpinLock m_access;
+ cds::sync::spin m_access;
char * m_pszBlock;
Item()
}
}
- typedef cds::lock::Spinlock<cds::backoff::exponential<cds::backoff::hint, cds::backoff::yield> > Spinlock_exp;
+ typedef cds::sync::spin_lock<cds::backoff::exponential<cds::backoff::hint, cds::backoff::yield> > Spinlock_exp;
- typedef cds::lock::ReentrantSpinT<unsigned int, cds::backoff::exponential<cds::backoff::hint, cds::backoff::yield> > reentrantSpin_exp;
- typedef cds::lock::ReentrantSpinT<unsigned int, cds::backoff::yield> reentrantSpin_yield;
- typedef cds::lock::ReentrantSpinT<unsigned int, cds::backoff::hint> reentrantSpin_hint;
- typedef cds::lock::ReentrantSpinT<unsigned int, cds::backoff::empty> reentrantSpin_empty;
+ typedef cds::sync::reentrant_spin_lock<unsigned int, cds::backoff::exponential<cds::backoff::hint, cds::backoff::yield> > reentrantSpin_exp;
+ typedef cds::sync::reentrant_spin_lock<unsigned int, cds::backoff::yield> reentrantSpin_yield;
+ typedef cds::sync::reentrant_spin_lock<unsigned int, cds::backoff::hint> reentrantSpin_hint;
+ typedef cds::sync::reentrant_spin_lock<unsigned int, cds::backoff::empty> reentrantSpin_empty;
TEST_CASE(spinLock_exp, Spinlock_exp );
- TEST_CASE(spinLock_yield, cds::lock::Spinlock<cds::backoff::yield> );
- TEST_CASE(spinLock_hint, cds::lock::Spinlock<cds::backoff::hint> );
- TEST_CASE(spinLock_empty, cds::lock::Spinlock<cds::backoff::empty> );
-
- TEST_CASE(reentrantSpinLock, cds::lock::ReentrantSpin );
- TEST_CASE(reentrantSpinLock32, cds::lock::ReentrantSpin32 );
- TEST_CASE(reentrantSpinLock64, cds::lock::ReentrantSpin64 );
+ TEST_CASE(spinLock_yield, cds::sync::spin_lock<cds::backoff::yield> );
+ TEST_CASE(spinLock_hint, cds::sync::spin_lock<cds::backoff::hint> );
+ TEST_CASE(spinLock_empty, cds::sync::spin_lock<cds::backoff::empty> );
+ TEST_CASE( spinLock_yield_lock, cds::lock::Spinlock<cds::backoff::yield> );
+ TEST_CASE( spinLock_hint_lock, cds::lock::Spinlock<cds::backoff::hint> );
+ TEST_CASE( spinLock_empty_lock, cds::lock::Spinlock<cds::backoff::empty> );
+
+ TEST_CASE( reentrantSpinLock32, cds::sync::reentrant_spin32 );
+ TEST_CASE( reentrantSpinLock64, cds::sync::reentrant_spin64 );
+ TEST_CASE( reentrantSpinLock32_lock, cds::lock::ReentrantSpin32 );
+ TEST_CASE( reentrantSpinLock64_lock, cds::lock::ReentrantSpin64 );
TEST_CASE(reentrantSpinlock_exp, reentrantSpin_exp );
TEST_CASE(reentrantSpinlock_yield, reentrantSpin_yield );
CPPUNIT_TEST(spinLock_hint);
CPPUNIT_TEST(spinLock_empty);
- CPPUNIT_TEST(reentrantSpinLock);
CPPUNIT_TEST(reentrantSpinLock32);
CPPUNIT_TEST(reentrantSpinLock64);
#include "map2/map_types.h"
#include "cppunit/thread.h"
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#include <vector>
#include <algorithm> // random_shuffle
atomics::atomic<bool> bInitialized;
cds::OS::ThreadId threadId ; // insert thread id
- typedef cds::lock::Spinlock< cds::backoff::pause > lock_type;
+ typedef cds::sync::spin_lock< cds::backoff::pause > lock_type;
mutable lock_type m_access;
value_type()
#include <cds/container/striped_map/boost_unordered_map.h>
#include <cds/container/striped_map.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#include "cppunit/cppunit_mini.h"
#include "lock/nolock.h"
// ***************************************************************************
// Standard implementations
- typedef StdMap< Key, Value, cds::SpinLock > StdMap_Spin;
- typedef StdMap< Key, Value, lock::NoLock> StdMap_NoLock;
+ typedef StdMap< Key, Value, cds::sync::spin > StdMap_Spin;
+ typedef StdMap< Key, Value, lock::NoLock> StdMap_NoLock;
- typedef StdHashMap< Key, Value, cds::SpinLock > StdHashMap_Spin;
- typedef StdHashMap< Key, Value, lock::NoLock > StdHashMap_NoLock;
+ typedef StdHashMap< Key, Value, cds::sync::spin > StdHashMap_Spin;
+ typedef StdHashMap< Key, Value, lock::NoLock > StdHashMap_NoLock;
};
#include <deque>
#include <boost/container/stable_vector.hpp>
#include <boost/container/deque.hpp>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#include "print_ellenbintree_stat.h"
#include "print_skip_list_stat.h"
> FCPQueue_boost_stable_vector_stat;
/// Standard priority_queue
- typedef StdPQueue< Value, std::vector<Value>, cds::lock::Spin > StdPQueue_vector_spin;
+ typedef StdPQueue< Value, std::vector<Value>, cds::sync::spin> StdPQueue_vector_spin;
typedef StdPQueue< Value, std::vector<Value>, std::mutex > StdPQueue_vector_mutex;
- typedef StdPQueue< Value, std::deque<Value>, cds::lock::Spin > StdPQueue_deque_spin;
+ typedef StdPQueue< Value, std::deque<Value>, cds::sync::spin> StdPQueue_deque_spin;
typedef StdPQueue< Value, std::deque<Value>, std::mutex > StdPQueue_deque_mutex;
};
typedef cds::intrusive::SegmentedQueue< cds::gc::DHP, T, traits_SegmentedQueue_mutex_stat > SegmentedQueue_DHP_mutex_stat;
// Boost SList
- typedef details::BoostSList< T, std::mutex > BoostSList_mutex;
- typedef details::BoostSList< T, cds::lock::Spin > BoostSList_spin;
+ typedef details::BoostSList< T, std::mutex > BoostSList_mutex;
+ typedef details::BoostSList< T, cds::sync::spin > BoostSList_spin;
};
}
#include <mutex> //unique_lock
#include <queue>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace queue {
- template <typename T, class Container, class Lock = cds::lock::Spin >
+ template <typename T, class Container, class Lock = cds::sync::spin >
class StdQueue: public std::queue<T, Container >
{
typedef std::queue<T, Container > base_class;
}
};
- template <typename T, class Lock = cds::lock::Spin >
+ template <typename T, class Lock = cds::sync::spin >
class StdQueue_deque: public StdQueue<T, std::deque<T>, Lock >
{};
- template <typename T, class Lock = cds::lock::Spin >
+ template <typename T, class Lock = cds::sync::spin >
class StdQueue_list: public StdQueue<T, std::list<T>, Lock >
{};
}
#include "set2/set_types.h"
#include "cppunit/thread.h"
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace set2 {
bool volatile bInitialized;
cds::OS::ThreadId threadId ; // insert thread id
- typedef cds::lock::Spinlock< cds::backoff::pause > lock_type;
+ typedef cds::sync::spin_lock< cds::backoff::pause > lock_type;
mutable lock_type m_access;
value_type()
#endif
#include <cds/container/striped_set.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#include <boost/functional/hash/hash.hpp>
#include "cppunit/cppunit_mini.h"
// ***************************************************************************
// Standard implementations
- typedef StdSet< key_val, less, cds::SpinLock > StdSet_Spin;
- typedef StdSet< key_val, less, lock::NoLock> StdSet_NoLock;
+ typedef StdSet< key_val, less, cds::sync::spin > StdSet_Spin;
+ typedef StdSet< key_val, less, lock::NoLock> StdSet_NoLock;
- typedef StdHashSet< key_val, hash, less, equal_to, cds::SpinLock > StdHashSet_Spin;
- typedef StdHashSet< key_val, hash, less, equal_to, lock::NoLock > StdHashSet_NoLock;
+ typedef StdHashSet< key_val, hash, less, equal_to, cds::sync::spin > StdHashSet_Spin;
+ typedef StdHashSet< key_val, hash, less, equal_to, lock::NoLock > StdHashSet_NoLock;
};
#include <cds/gc/dhp.h>
#include <mutex>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#include <stack>
#include <list>
#include <vector>
// std::stack
typedef details::StdStack< T, std::stack< T* >, std::mutex > StdStack_Deque_Mutex;
- typedef details::StdStack< T, std::stack< T* >, cds::lock::Spin > StdStack_Deque_Spin;
+ typedef details::StdStack< T, std::stack< T* >, cds::sync::spin > StdStack_Deque_Spin;
typedef details::StdStack< T, std::stack< T*, std::vector<T*> >, std::mutex > StdStack_Vector_Mutex;
- typedef details::StdStack< T, std::stack< T*, std::vector<T*> >, cds::lock::Spin > StdStack_Vector_Spin;
+ typedef details::StdStack< T, std::stack< T*, std::vector<T*> >, cds::sync::spin > StdStack_Vector_Spin;
typedef details::StdStack< T, std::stack< T*, std::list<T*> >, std::mutex > StdStack_List_Mutex;
- typedef details::StdStack< T, std::stack< T*, std::list<T*> >, cds::lock::Spin > StdStack_List_Spin;
+ typedef details::StdStack< T, std::stack< T*, std::list<T*> >, cds::sync::spin > StdStack_List_Spin;
};
} // namespace istack
#include <cds/gc/dhp.h>
#include <mutex>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#include <stack>
#include <list>
#include <vector>
// std::stack
typedef details::StdStack< T, std::stack< T >, std::mutex > StdStack_Deque_Mutex;
- typedef details::StdStack< T, std::stack< T >, cds::lock::Spin > StdStack_Deque_Spin;
+ typedef details::StdStack< T, std::stack< T >, cds::sync::spin > StdStack_Deque_Spin;
typedef details::StdStack< T, std::stack< T, std::vector<T> >, std::mutex > StdStack_Vector_Mutex;
- typedef details::StdStack< T, std::stack< T, std::vector<T> >, cds::lock::Spin > StdStack_Vector_Spin;
+ typedef details::StdStack< T, std::stack< T, std::vector<T> >, cds::sync::spin > StdStack_Vector_Spin;
typedef details::StdStack< T, std::stack< T, std::list<T> >, std::mutex > StdStack_List_Mutex;
- typedef details::StdStack< T, std::stack< T, std::list<T> >, cds::lock::Spin > StdStack_List_Spin;
+ typedef details::StdStack< T, std::stack< T, std::list<T> >, cds::sync::spin > StdStack_List_Spin;
};
} // namespace stack