-//$$CDS-header$$
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
#ifndef CDSLIB_INTRUSIVE_IMPL_MICHAEL_LIST_H
#define CDSLIB_INTRUSIVE_IMPL_MICHAEL_LIST_H
typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer
+ static CDS_CONSTEXPR const size_t c_nHazardPtrCount = 4; ///< Count of hazard pointers required for the algorithm
+
//@cond
// Rebind traits (split-list support)
template <typename... Options>
struct clean_disposer {
void operator()( value_type * p )
{
- michael_list::node_cleaner<gc, node_type, memory_model>()( node_traits::to_node_ptr( p ) );
+ michael_list::node_cleaner<gc, node_type, memory_model>()( node_traits::to_node_ptr( p ));
disposer()( p );
}
};
static void retire_node( node_type * pNode )
{
assert( pNode != nullptr );
- gc::template retire<clean_disposer>( node_traits::to_value_ptr( *pNode ) );
+ gc::template retire<clean_disposer>( node_traits::to_value_ptr( *pNode ));
}
static bool link_node( node_type * pNode, position& pos )
marked_node_ptr cur(pos.pCur);
pNode->m_pNext.store( cur, memory_model::memory_order_release );
- return pos.pPrev->compare_exchange_strong( cur, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed );
+ if ( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed ))
+ return true;
+
+ pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ return false;
}
static bool unlink_node( position& pos )
marked_node_ptr pNext;
do {
pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed);
- g.assign( node_traits::to_value_ptr( pNext.ptr() ));
- } while ( pNext != pCur->m_pNext.load(memory_model::memory_order_acquire) );
+ g.assign( node_traits::to_value_ptr( pNext.ptr()));
+ } while ( pNext != pCur->m_pNext.load(memory_model::memory_order_acquire));
- if ( pNext.ptr() ) {
- m_pNode = m_Guard.assign( g.template get<value_type>() );
- }
+ if ( pNext.ptr())
+ m_pNode = m_Guard.assign( g.template get<value_type>());
else {
m_pNode = nullptr;
m_Guard.clear();
{
for (;;) {
marked_node_ptr p = pNode.load(memory_model::memory_order_relaxed);
- if ( p.ptr() ) {
- m_pNode = m_Guard.assign( node_traits::to_value_ptr( p.ptr() ) );
+ if ( p.ptr()) {
+ m_pNode = m_Guard.assign( node_traits::to_value_ptr( p.ptr()));
}
else {
m_pNode = nullptr;
m_Guard.clear();
}
- if ( p == pNode.load(memory_model::memory_order_acquire) )
+ if ( p == pNode.load(memory_model::memory_order_acquire))
break;
}
}
//@endcond
public:
+ ///@name Forward iterators (only for debugging purposes)
+ //@{
/// Forward iterator
/**
The forward iterator for Michael's list has some features:
may be thrown if the limit of guard count per thread is exceeded.
- The iterator cannot be moved across thread boundary since it contains thread-private GC's guard.
- Iterator ensures thread-safety even if you delete the item the iterator points to. However, in case of concurrent
- deleting operations there is no guarantee that you iterate all item in the list.
+ deleting operations there is no guarantee that you iterate all the items in the list.
+ Moreover, a crash is possible when you try to iterate the next element that has been deleted by a concurrent thread.
- Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator on the concurrent container
- for debug purpose only.
+ @warning Use this iterator on the concurrent container for debugging purposes only.
The iterator interface:
\code
{
return const_iterator();
}
+ //@}
public:
/// Default constructor initializes empty list
only if \p val is an item of the list, i.e. the pointer to item found
is equal to <tt> &val </tt>.
+ \p disposer specified in \p Traits is called for deleted item.
+
The function returns \p true if success and \p false otherwise.
*/
bool unlink( value_type& val )
The function searches an item with key equal to \p key in the list,
unlinks it from the list, and returns \p true.
If \p key is not found the function return \p false.
+
+ \p disposer specified in \p Traits is called for deleted item.
*/
template <typename Q>
bool erase( Q const& key )
{
- return erase_at( m_pHead, key, key_comparator() );
+ return erase_at( m_pHead, key, key_comparator());
}
/// Deletes the item from the list using \p pred predicate for searching
but \p pred is used for key comparing.
\p Less functor has the interface like \p std::less.
\p pred must imply the same element order as the comparator used for building the list.
+
+ \p disposer specified in \p Traits is called for deleted item.
*/
template <typename Q, typename Less>
bool erase_with( Q const& key, Less pred )
};
\endcode
If \p key is not found the function return \p false, \p func is not called.
+
+ \p disposer specified in \p Traits is called for deleted item.
*/
template <typename Q, typename Func>
bool erase( Q const& key, Func func )
but \p pred is used for key comparing.
\p Less functor has the interface like \p std::less.
\p pred must imply the same element order as the comparator used for building the list.
+
+ \p disposer specified in \p Traits is called for deleted item.
*/
template <typename Q, typename Less, typename Func>
bool erase_with( Q const& key, Less pred, Func f )
guarded_ptr extract( Q const& key )
{
guarded_ptr gp;
- extract_at( m_pHead, gp.guard(), key, key_comparator() );
+ extract_at( m_pHead, gp.guard(), key, key_comparator());
return gp;
}
{
CDS_UNUSED( pred );
guarded_ptr gp;
- extract_at( m_pHead, gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>() );
+ extract_at( m_pHead, gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>());
return gp;
}
template <typename Q>
bool contains( Q const& key )
{
- return find_at( m_pHead, key, key_comparator() );
+ return find_at( m_pHead, key, key_comparator());
}
//@cond
template <typename Q>
bool contains( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>() );
+ return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
//@cond
template <typename Q, typename Less>
guarded_ptr get( Q const& key )
{
guarded_ptr gp;
- get_at( m_pHead, gp.guard(), key, key_comparator() );
+ get_at( m_pHead, gp.guard(), key, key_comparator());
return gp;
}
{
CDS_UNUSED( pred );
guarded_ptr gp;
- get_at( m_pHead, gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>() );
+ get_at( m_pHead, gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>());
return gp;
}
marked_node_ptr head;
while ( true ) {
head = m_pHead.load(memory_model::memory_order_relaxed);
- if ( head.ptr() )
- guard.assign( node_traits::to_value_ptr( *head.ptr() ));
+ if ( head.ptr())
+ guard.assign( node_traits::to_value_ptr( *head.ptr()));
if ( m_pHead.load(memory_model::memory_order_acquire) == head ) {
if ( head.ptr() == nullptr )
break;
- value_type& val = *node_traits::to_value_ptr( *head.ptr() );
+ value_type& val = *node_traits::to_value_ptr( *head.ptr());
unlink( val );
}
}
// Hack: convert node_type to value_type.
// In principle, auxiliary node can be non-reducible to value_type
// We assume that comparator can correctly distinguish aux and regular node.
- return insert_at( refHead, *node_traits::to_value_ptr( pNode ) );
+ return insert_at( refHead, *node_traits::to_value_ptr( pNode ));
}
bool insert_at( atomic_node_ptr& refHead, value_type& val )
{
node_type * pNode = node_traits::to_node_ptr( val );
- link_checker::is_empty( pNode );
position pos;
while ( true ) {
- if ( search( refHead, val, pos, key_comparator() ) )
+ if ( search( refHead, val, pos, key_comparator()))
return false;
- if ( link_node( pNode, pos ) ) {
+ if ( link_node( pNode, pos )) {
++m_ItemCounter;
return true;
}
bool insert_at( atomic_node_ptr& refHead, value_type& val, Func f )
{
node_type * pNode = node_traits::to_node_ptr( val );
- link_checker::is_empty( pNode );
position pos;
while ( true ) {
- if ( search( refHead, val, pos, key_comparator() ) )
+ if ( search( refHead, val, pos, key_comparator()))
return false;
typename gc::Guard guard;
guard.assign( &val );
- if ( link_node( pNode, pos ) ) {
+ if ( link_node( pNode, pos )) {
f( val );
++m_ItemCounter;
return true;
node_type * pNode = node_traits::to_node_ptr( val );
while ( true ) {
- if ( search( refHead, val, pos, key_comparator() ) ) {
- if ( pos.pCur->m_pNext.load(memory_model::memory_order_acquire).bits() ) {
+ if ( search( refHead, val, pos, key_comparator())) {
+ if ( pos.pCur->m_pNext.load(memory_model::memory_order_acquire).bits()) {
back_off()();
- continue ; // the node found is marked as deleted
+ continue; // the node found is marked as deleted
}
- assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur ) ) == 0 );
+ assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur )) == 0 );
func( false, *node_traits::to_value_ptr( *pos.pCur ) , val );
return std::make_pair( true, false );
typename gc::Guard guard;
guard.assign( &val );
- if ( link_node( pNode, pos ) ) {
+ if ( link_node( pNode, pos )) {
++m_ItemCounter;
func( true, val, val );
return std::make_pair( true, true );
position pos;
back_off bkoff;
- while ( search( refHead, val, pos, key_comparator() ) ) {
+ while ( search( refHead, val, pos, key_comparator())) {
if ( node_traits::to_value_ptr( *pos.pCur ) == &val ) {
- if ( unlink_node( pos ) ) {
+ if ( unlink_node( pos )) {
--m_ItemCounter;
return true;
}
{
back_off bkoff;
while ( search( refHead, val, pos, cmp )) {
- if ( unlink_node( pos ) ) {
- f( *node_traits::to_value_ptr( *pos.pCur ) );
+ if ( unlink_node( pos )) {
+ f( *node_traits::to_value_ptr( *pos.pCur ));
--m_ItemCounter;
return true;
}
position pos;
back_off bkoff;
while ( search( refHead, val, pos, cmp )) {
- if ( unlink_node( pos ) ) {
- dest.set( pos.guards.template get<value_type>( position::guard_current_item ) );
+ if ( unlink_node( pos )) {
+ dest.set( pos.guards.template get<value_type>( position::guard_current_item ));
--m_ItemCounter;
return true;
}
pCur = pos.guards.protect( position::guard_current_item, *pPrev,
[](marked_node_ptr p) -> value_type *
{
- return node_traits::to_value_ptr( p.ptr() );
+ return node_traits::to_value_ptr( p.ptr());
});
while ( true ) {
pNext = pos.guards.protect( position::guard_next_item, pCur->m_pNext,
[](marked_node_ptr p ) -> value_type *
{
- return node_traits::to_value_ptr( p.ptr() );
+ return node_traits::to_value_ptr( p.ptr());
});
- if ( pPrev->load(memory_model::memory_order_acquire).all() != pCur.ptr() ) {
+ if ( pPrev->load(memory_model::memory_order_acquire).all() != pCur.ptr()) {
bkoff();
goto try_again;
}
if ( pNext.bits() == 1 ) {
// pCur marked i.e. logically deleted. Help the erase/unlink function to unlink pCur node
marked_node_ptr cur( pCur.ptr());
- if ( pPrev->compare_exchange_strong( cur, marked_node_ptr( pNext.ptr() ), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
- retire_node( pCur.ptr() );
+ if ( pPrev->compare_exchange_strong( cur, marked_node_ptr( pNext.ptr()), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
+ retire_node( pCur.ptr());
}
else {
bkoff();
}
else {
assert( pCur.ptr() != nullptr );
- int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
+ int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
if ( nCmp >= 0 ) {
pos.pPrev = pPrev;
pos.pCur = pCur.ptr();