Source code repo: http://github.com/khizmax/libcds/
Download: http://sourceforge.net/projects/libcds/files/
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
, typename cds::opt::make_options< traits, Options...>::type
> type;
};
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = iterable_list::select_stat_wrapper< Stat >;
//@endcond
protected:
+ //@cond
typedef atomics::atomic< node_type* > atomic_node_ptr; ///< Atomic node pointer
typedef atomic_node_ptr auxiliary_head; ///< Auxiliary head type (for split-list support)
+ typedef typename node_type::marked_data_ptr marked_data_ptr;
atomic_node_ptr m_pHead; ///< Head pointer
item_counter m_ItemCounter; ///< Item counter
mutable stat m_Stat; ///< Internal statistics
- //@cond
typedef cds::details::Allocator< node_type, node_allocator > cxx_node_allocator;
/// Position pointer for item search
friend class IterableList;
protected:
- node_type* m_pNode;
- value_type* m_pVal;
- typename gc::Guard m_Guard; // for m_pVal
+ node_type* m_pNode;
+ typename gc::Guard m_Guard; // data guard
void next()
{
while ( m_pNode ) {
- m_pNode = m_pNode->next.load( memory_model::memory_order_relaxed );
- if ( !m_pNode )
+ m_pNode = m_pNode->next.load( memory_model::memory_order_acquire );
+ if ( !m_pNode ) {
+ m_Guard.clear();
break;
- m_pVal = m_Guard.protect( m_pNode->data );
- if ( m_pVal )
+ }
+ if ( m_Guard.protect( m_pNode->data, []( marked_data_ptr p ) { return p.ptr(); }).ptr())
break;
}
}
/// Constructs an iterator positioned at the first non-empty node starting from \p pNode.
/**
    Loads the head with acquire semantics, protects the node's data with the
    guard (stripping the mark bit), and, if the first node holds null data
    (logically empty), advances to the next node with data via \p next().
*/
explicit iterator_type( atomic_node_ptr const& pNode )
    : m_pNode( pNode.load( memory_model::memory_order_acquire ))
{
    if ( m_pNode ) {
        if ( !m_Guard.protect( m_pNode->data, []( marked_data_ptr p ) { return p.ptr(); }).ptr())
            next();
    }
}
iterator_type( node_type* pNode, value_type* pVal )
: m_pNode( pNode )
- , m_pVal( pVal )
{
if ( m_pNode ) {
assert( pVal != nullptr );
/// Default constructor: creates an end() iterator (null node, empty guard)
iterator_type()
    : m_pNode( nullptr )
{}
/// Copy constructor
/**
    Copies the node pointer and duplicates the source's guard so that both
    iterators independently protect the same value.
*/
iterator_type( iterator_type const& src )
    : m_pNode( src.m_pNode )
{
    m_Guard.copy( src.m_Guard );
}
/// Returns a pointer to the guarded value (may be \p nullptr for end())
value_ptr operator ->() const
{
    return m_Guard.template get<value_type>();
}
/// Dereferences the iterator; precondition: the guard holds a non-null value
value_ref operator *() const
{
    assert( m_Guard.get_native() != nullptr );
    return *m_Guard.template get<value_type>();
}
/// Pre-increment
/// Copy assignment: copies the node pointer and duplicates the source's guard
iterator_type& operator = (iterator_type const& src)
{
    m_pNode = src.m_pNode;
    m_Guard.copy( src.m_Guard );
    return *this;
}
/// Inequality: defined as the negation of operator== to keep both consistent
template <bool C>
bool operator !=(iterator_type<C> const& i ) const
{
    return !( *this == i );
}
};
//@endcond
For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard"
may be thrown if the limit of guard count per thread is exceeded.
- The iterator cannot be moved across thread boundary since it contains thread-private GC's guard.
- Iterator is thread-safe: even if the element the iterator points to is removed, the iterator stays valid because
it contains the guard that keeps the value from being recycled.
The iterator interface:
return update_at( m_pHead, val, func, bInsert );
}
/// Insert or update
/**
The operation performs inserting or updating data in a lock-free manner.
If the item \p val is not found in the list, then \p val is inserted
iff \p bInsert is \p true.
\p second is \p true if \p val has been added or \p false if the item with that key
already in the list.
*/
- std::pair<bool, bool> update( value_type& val, bool bInsert = true )
+ std::pair<bool, bool> upsert( value_type& val, bool bInsert = true )
{
return update_at( m_pHead, val, []( value_type&, value_type* ) {}, bInsert );
}
ord_list theList;
// ...
{
- ord_list::guarded_ptr gp(theList.extract( 5 ));
+ ord_list::guarded_ptr gp( theList.extract( 5 ));
if ( gp ) {
// Deal with gp
// ...
/// Extracts the item with key \p key and returns it as a guarded pointer
/**
    Delegates to \p extract_at with the default key comparator; the returned
    \p guarded_ptr is empty if \p key was not found.
*/
template <typename Q>
guarded_ptr extract( Q const& key )
{
    return extract_at( m_pHead, key, key_comparator());
}
/// Extracts the item using compare functor \p pred
// NOTE(review): template header reconstructed from the body's use of Q and Less —
// the original declaration line was collapsed out of this view; confirm against the full file.
template <typename Q, typename Less>
guarded_ptr extract_with( Q const& key, Less pred )
{
    CDS_UNUSED( pred );
    // pred is only a type carrier: the Less type is converted to a comparator
    return extract_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
/// Finds \p key in the list
If \p key is not found the function returns \p end().
*/
template <typename Q>
- iterator find( Q& key ) const
- {
- return find_iterator_at( m_pHead, key, key_comparator());
- }
- //@cond
- template <typename Q>
iterator find( Q const& key ) const
{
- return find_iterator_at( m_pHead, key, key_comparator() );
+ return find_iterator_at( m_pHead, key, key_comparator());
}
- //@endcond
/// Finds the \p key using \p pred predicate for searching
/**
If \p key is not found the function returns \p end().
*/
template <typename Q, typename Less>
- iterator find_with( Q& key, Less pred ) const
- {
- CDS_UNUSED( pred );
- return find_iterator_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
- }
- //@cond
- template <typename Q, typename Less>
iterator find_with( Q const& key, Less pred ) const
{
CDS_UNUSED( pred );
return find_iterator_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
- //@endcond
/// Checks whether the list contains \p key
/**
/// Finds \p key and returns the item as a guarded pointer (empty if not found)
template <typename Q>
guarded_ptr get( Q const& key ) const
{
    return get_at( m_pHead, key, key_comparator());
}
/// Finds the \p key and return the item found
// NOTE(review): template header reconstructed from the body's use of Q and Less —
// the original declaration line was collapsed out of this view; confirm against the full file.
template <typename Q, typename Less>
guarded_ptr get_with( Q const& key, Less pred ) const
{
    CDS_UNUSED( pred );
    // pred is only a type carrier: the Less type is converted to a comparator
    return get_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
/// Clears the list (thread safe, not atomic)
position pos;
for ( pos.pCur = m_pHead.load( memory_model::memory_order_relaxed ); pos.pCur; pos.pCur = pos.pCur->next.load( memory_model::memory_order_relaxed )) {
while ( true ) {
- pos.pFound = pos.guard.protect( pos.pCur->data );
+ pos.pFound = pos.guard.protect( pos.pCur->data, []( marked_data_ptr p ) { return p.ptr(); }).ptr();
if ( !pos.pFound )
break;
if ( cds_likely( unlink_node( pos ))) {
return m_ItemCounter.value();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
protected:
//@cond
#if 0
assert( pos.pFound != nullptr );
assert( key_comparator()(*pos.pFound, val) == 0 );
- if ( cds_likely( pos.pCur->data.compare_exchange_strong( pos.pFound, &val, memory_model::memory_order_release, atomics::memory_order_relaxed ))) {
+ marked_data_ptr pFound( pos.pFound );
+ if ( cds_likely( pos.pCur->data.compare_exchange_strong( pFound, marked_data_ptr( &val ),
+ memory_model::memory_order_release, atomics::memory_order_relaxed )))
+ {
if ( pos.pFound != &val ) {
retire_data( pos.pFound );
func( val, pos.pFound );
}
template <typename Q, typename Compare>
- bool extract_at( atomic_node_ptr& refHead, typename guarded_ptr::native_guard& dest, Q const& val, Compare cmp )
+ guarded_ptr extract_at( atomic_node_ptr& refHead, Q const& val, Compare cmp )
{
position pos;
back_off bkoff;
while ( search( refHead, val, pos, cmp )) {
if ( unlink_node( pos )) {
- dest.set( pos.pFound );
--m_ItemCounter;
m_Stat.onEraseSuccess();
- return true;
+ assert( pos.pFound != nullptr );
+ return guarded_ptr( std::move( pos.guard ));
}
else
bkoff();
}
m_Stat.onEraseFailed();
- return false;
+ return guarded_ptr();
}
template <typename Q, typename Compare>
}
template <typename Q, typename Compare>
- bool get_at( atomic_node_ptr const& refHead, typename guarded_ptr::native_guard& guard, Q const& val, Compare cmp ) const
+ guarded_ptr get_at( atomic_node_ptr const& refHead, Q const& val, Compare cmp ) const
{
position pos;
if ( search( refHead, val, pos, cmp )) {
- guard.set( pos.pFound );
m_Stat.onFindSuccess();
- return true;
+ return guarded_ptr( std::move( pos.guard ));
}
m_Stat.onFindFailed();
- return false;
+ return guarded_ptr();
}
//@endcond
return false;
}
- value_type * pVal = pos.guard.protect( pCur->data );
+ value_type * pVal = pos.guard.protect( pCur->data,
+ []( marked_data_ptr p ) -> value_type*
+ {
+ return p.ptr();
+ }).ptr();
if ( pVal ) {
int nCmp = cmp( *pVal, val );
{
node_type * pNode = m_pHead.load( memory_model::memory_order_relaxed );
while ( pNode ) {
- value_type * pVal = pNode->data.load( memory_model::memory_order_relaxed );
+ value_type * pVal = pNode->data.load( memory_model::memory_order_relaxed ).ptr();
if ( pVal )
retire_data( pVal );
node_type * pNext = pNode->next.load( memory_model::memory_order_relaxed );
bool link_node( value_type * pVal, position& pos )
{
if ( pos.pPrev ) {
- if ( pos.pPrev->data.load( memory_model::memory_order_relaxed ) == nullptr ) {
+ if ( pos.pPrev->data.load( memory_model::memory_order_relaxed ) == marked_data_ptr() ) {
// reuse pPrev
- value_type * p = nullptr;
- return pos.pPrev->data.compare_exchange_strong( p, pVal, memory_model::memory_order_release, atomics::memory_order_relaxed );
+
+ // We need pos.pCur data should be unchanged, otherwise ordering violation can be possible
+ // if current thread will be preempted and another thread deletes pos.pCur data
+ // and then set it to another.
+ // To prevent this we mark pos.pCur data as undeletable by setting LSB
+ marked_data_ptr val( pos.pFound );
+ if ( pos.pCur && !pos.pCur->data.compare_exchange_strong( val, val | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
+ // oops, pos.pCur data has been changed or another thread is setting pos.pPrev data
+ m_Stat.onReuseNodeFailed();
+ return false;
+ }
+
+ // Set pos.pPrev data if it is null
+ marked_data_ptr p;
+ bool result = pos.pPrev->data.compare_exchange_strong( p, marked_data_ptr( pVal ),
+ memory_model::memory_order_release, atomics::memory_order_relaxed );
+
+ // Clear pos.pCur data mark
+ if ( pos.pCur )
+ pos.pCur->data.store( val, memory_model::memory_order_relaxed );
+
+ if ( result )
+ m_Stat.onReuseNode();
+ return result;
}
else {
// insert new node between pos.pPrev and pos.pCur
assert( pos.pCur != nullptr );
assert( pos.pFound != nullptr );
- if ( pos.pCur->data.compare_exchange_strong( pos.pFound, nullptr, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+ marked_data_ptr val( pos.pFound );
+ if ( pos.pCur->data.compare_exchange_strong( val, marked_data_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
retire_data( pos.pFound );
return true;
}