/*
This file is a part of libcds - Concurrent Data Structures library
- (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
Source code repo: http://github.com/khizmax/libcds/
Download: http://sourceforge.net/projects/libcds/files/
You should select GC you want and include appropriate .h-file:
- for \p gc::HP: <tt> <cds/intrusive/iterable_list_hp.h> </tt>
- for \p gc::DHP: <tt> <cds/intrusive/iterable_list_dhp.h> </tt>
- - for \ref cds_urcu_gc "RCU type" - see \ref cds_intrusive_IterableList_rcu "RCU-based IterableList"
*/
template <
class GC
typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer
- static CDS_CONSTEXPR const size_t c_nHazardPtrCount = 3; ///< Count of hazard pointer required for the algorithm
+ static CDS_CONSTEXPR const size_t c_nHazardPtrCount = 4; ///< Count of hazard pointer required for the algorithm
//@cond
// Rebind traits (split-list support)
this code
\code
if ( it1 == it2 )
- assert( &(*it1) == &(*it2) );
+ assert( &(*it1) == &(*it2));
\endcode
can fire an assertion. The point is that the iterator stores the value of an element which can be modified later by another thread.
The guard inside the iterator prevents recycling that value so the iterator's value remains valid even after such changing.
insert_position pos;
while ( true ) {
- if ( inserting_search( pHead, *pNode->data.load(memory_model::memory_order_relaxed).ptr(), pos, key_comparator() ) ) {
+ if ( inserting_search( pHead, *pNode->data.load(memory_model::memory_order_relaxed).ptr(), pos, key_comparator())) {
m_Stat.onInsertFailed();
return false;
}
- if ( link_aux_node( pNode, pos ) ) {
+ if ( link_aux_node( pNode, pos, pHead )) {
++m_ItemCounter;
m_Stat.onInsertSuccess();
return true;
insert_position pos;
while ( true ) {
- if ( inserting_search( pHead, val, pos, key_comparator() )) {
+ if ( inserting_search( pHead, val, pos, key_comparator())) {
m_Stat.onInsertFailed();
return false;
}
- if ( link_data( &val, pos ) ) {
+ if ( link_data( &val, pos, pHead )) {
++m_ItemCounter;
m_Stat.onInsertSuccess();
return true;
guard.assign( &val );
while ( true ) {
- if ( inserting_search( pHead, val, pos, key_comparator() ) ) {
+ if ( inserting_search( pHead, val, pos, key_comparator())) {
m_Stat.onInsertFailed();
return false;
}
- if ( link_data( &val, pos ) ) {
+ if ( link_data( &val, pos, pHead )) {
f( val );
++m_ItemCounter;
m_Stat.onInsertSuccess();
guard.assign( &val );
while ( true ) {
- if ( inserting_search( pHead, val, pos, key_comparator() ) ) {
+ if ( inserting_search( pHead, val, pos, key_comparator())) {
// try to replace pCur->data with val
assert( pos.pFound != nullptr );
assert( key_comparator()(*pos.pFound, val) == 0 );
marked_data_ptr pFound( pos.pFound );
if ( cds_likely( pos.pCur->data.compare_exchange_strong( pFound, marked_data_ptr( &val ),
- memory_model::memory_order_release, atomics::memory_order_relaxed )))
+ memory_model::memory_order_release, atomics::memory_order_relaxed )))
{
if ( pos.pFound != &val ) {
retire_data( pos.pFound );
return std::make_pair( false, false );
}
- if ( link_data( &val, pos )) {
+ if ( link_data( &val, pos, pHead )) {
func( val, static_cast<value_type*>( nullptr ));
++m_ItemCounter;
m_Stat.onUpdateNew();
bool find_at( node_type const* pHead, Q const& val, Compare cmp ) const
{
position pos;
- if ( search( pHead, val, pos, cmp ) ) {
+ if ( search( pHead, val, pos, cmp )) {
m_Stat.onFindSuccess();
return true;
}
void destroy( Predicate pred )
{
node_type * pNode = m_Head.next.load( memory_model::memory_order_relaxed );
- while ( pNode != pNode->next.load( memory_model::memory_order_relaxed ) ) {
+ while ( pNode != pNode->next.load( memory_model::memory_order_relaxed )) {
value_type * pVal = pNode->data.load( memory_model::memory_order_relaxed ).ptr();
node_type * pNext = pNode->next.load( memory_model::memory_order_relaxed );
bool const is_regular_node = !pVal || pred( pVal );
}
}
- bool link_data( value_type * pVal, insert_position& pos )
+ bool link_data( value_type* pVal, insert_position& pos, node_type* pHead )
{
assert( pos.pPrev != nullptr );
assert( pos.pCur != nullptr );
// and then set it to another.
// To prevent this we mark pos.pCur data as undeletable by setting LSB
marked_data_ptr valCur( pos.pFound );
- if ( !pos.pCur->data.compare_exchange_strong( valCur, valCur | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+ if ( !pos.pCur->data.compare_exchange_strong( valCur, valCur | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
// oops, pos.pCur data has been changed or another thread is setting pos.pPrev data
m_Stat.onNodeMarkFailed();
return false;
}
marked_data_ptr valPrev( pos.pPrevVal );
- if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+ if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
m_Stat.onNodeMarkFailed();
return false;
}
// sequence pPrev - pCur is broken
pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed );
pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
m_Stat.onNodeSeqBreak();
return false;
}
- if ( pos.pPrev != pos.pHead && pos.pPrevVal == nullptr )
- {
+ if ( pos.pPrevVal == nullptr ) {
+ // Check ABA-problem for prev
+ // There is a possibility that the current thread was preempted
+ // on entry of this function. Other threads can link data to prev
+ // and then remove it. As a result, the order of items may be changed
+ if ( find_prev( pHead, *pVal ) != pos.pPrev ) {
+ pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed );
+ pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
+ m_Stat.onNullPrevABA();
+ return false;
+ }
+ }
+
+ if ( pos.pPrev != pos.pHead && pos.pPrevVal == nullptr ) {
// reuse pPrev
// Set pos.pPrev data if it is null
}
// split-list support
- bool link_aux_node( node_type * pNode, insert_position& pos )
+ bool link_aux_node( node_type * pNode, insert_position& pos, node_type* pHead )
{
assert( pos.pPrev != nullptr );
assert( pos.pCur != nullptr );
// and then set it to another.
// To prevent this we mark pos.pCur data as undeletable by setting LSB
marked_data_ptr valCur( pos.pFound );
- if ( !pos.pCur->data.compare_exchange_strong( valCur, valCur | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+ if ( !pos.pCur->data.compare_exchange_strong( valCur, valCur | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
// oops, pos.pCur data has been changed or another thread is setting pos.pPrev data
m_Stat.onNodeMarkFailed();
return false;
}
marked_data_ptr valPrev( pos.pPrevVal );
- if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+ if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
m_Stat.onNodeMarkFailed();
return false;
return false;
}
+ if ( pos.pPrevVal == nullptr ) {
+ // Check ABA-problem for prev
+ // There is a possibility that the current thread was preempted
+ // on entry of this function. Other threads can insert (link) an item to prev
+ // and then remove it. As a result, the order of items may be changed
+ if ( find_prev( pHead, *pNode->data.load( memory_model::memory_order_relaxed ).ptr()) != pos.pPrev ) {
+ pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed );
+ pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
+ m_Stat.onNullPrevABA();
+ return false;
+ }
+ }
+
// insert new node between pos.pPrev and pos.pCur
pNode->next.store( pos.pCur, memory_model::memory_order_relaxed );
assert( pos.pFound != nullptr );
marked_data_ptr val( pos.pFound );
- if ( pos.pCur->data.compare_exchange_strong( val, marked_data_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+ if ( pos.pCur->data.compare_exchange_strong( val, marked_data_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
retire_data( pos.pFound );
return true;
}
return false;
}
+
+ /// Finds the last node whose data is ordered strictly before \p val.
+ ///
+ /// Walks the list from \p pHead and returns the node preceding the first
+ /// node whose data compares greater-or-equal to \p val (or the last node
+ /// if no such node exists). Used by link_data()/link_aux_node() to
+ /// re-validate \p pos.pPrev and detect the null-prev ABA problem
+ /// (see the onNullPrevABA statistics event).
+ template <typename Q>
+ node_type* find_prev( node_type const* pHead, Q const& val ) const
+ {
+ node_type* pPrev = const_cast<node_type*>(pHead);
+ typename gc::Guard guard;
+ key_comparator cmp;
+
+ while ( true ) {
+ node_type * pCur = pPrev->next.load( memory_model::memory_order_relaxed );
+
+ // A node whose next pointer refers to itself is the list's tail sentinel
+ // (the same convention destroy() relies on).
+ if ( pCur == pCur->next.load( memory_model::memory_order_acquire )) {
+ // end-of-list
+ return pPrev;
+ }
+
+ // Protect pCur's data with a hazard-pointer guard so the value cannot be
+ // recycled while we compare it; the LSB mark is stripped via ptr().
+ value_type * pVal = guard.protect( pCur->data,
+ []( marked_data_ptr p ) -> value_type*
+ {
+ return p.ptr();
+ } ).ptr();
+
+ // Skip empty (null-data) nodes; stop at the first node ordered >= val.
+ if ( pVal && cmp( *pVal, val ) >= 0 )
+ return pPrev;
+
+ pPrev = pCur;
+ }
+ }
//@endcond
};
}} // namespace cds::intrusive