Source code repo: http://github.com/khizmax/libcds/
Download: http://sourceforge.net/projects/libcds/files/
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
any hook in \p T to be stored in the list.
Usually, an ordered single-linked list is used as a building block for a hash table implementation.
+    An iterable list is well suited for an almost append-only hash table because the list does not delete
+    its internal nodes when erasing a key; instead, a node is marked as empty so it can be reused in the future.
+    However, a large number of empty nodes degrades performance.
+    The separation of internal nodes and user data implies the need for an allocator for internal nodes,
+    so the iterable list is not fully intrusive. Nevertheless, if you need a thread-safe iterator,
+    the iterable list is a good choice.
+
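+    A minimal usage sketch (illustrative names and values; it assumes the \p gc::HP specialization
+    header and omits libcds initialization and per-thread attaching):
+    \code
+    #include <cds/intrusive/iterable_list_hp.h>
+
+    // User data: no intrusive hook is required, the list node keeps only a pointer to it
+    struct my_item {
+        int key;
+        int value;
+    };
+
+    struct my_item_less {
+        bool operator()( my_item const& lhs, my_item const& rhs ) const
+        { return lhs.key < rhs.key; }
+    };
+
+    // The disposer reclaims user data; internal list nodes are reused, not freed
+    struct my_item_disposer {
+        void operator()( my_item* p ) const { delete p; }
+    };
+
+    struct my_traits: public cds::intrusive::iterable_list::traits {
+        typedef my_item_less     less;
+        typedef my_item_disposer disposer;
+    };
+
+    typedef cds::intrusive::IterableList< cds::gc::HP, my_item, my_traits > item_list;
+
+    item_list theList;
+    my_item* p = new my_item{ 42, 100 };
+    theList.insert( *p );               // allocates an internal node pointing to *p
+    theList.erase( my_item{ 42, 0 } );  // marks that node empty; *p is retired via the disposer
+    \endcode
+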
The complexity of searching is <tt>O(N)</tt>.
Template arguments:
typedef typename traits::item_counter item_counter; ///< Item counting policy used
typedef typename traits::memory_model memory_model; ///< Memory ordering. See \p cds::opt::memory_model option
typedef typename traits::node_allocator node_allocator; ///< Node allocator
+ typedef typename traits::stat stat; ///< Internal statistics
typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer
, typename cds::opt::make_options< traits, Options...>::type
> type;
};
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = iterable_list::select_stat_wrapper< Stat >;
//@endcond
protected:
+ //@cond
typedef atomics::atomic< node_type* > atomic_node_ptr; ///< Atomic node pointer
typedef atomic_node_ptr auxiliary_head; ///< Auxiliary head type (for split-list support)
+ typedef typename node_type::marked_data_ptr marked_data_ptr;
atomic_node_ptr m_pHead; ///< Head pointer
item_counter m_ItemCounter; ///< Item counter
+ mutable stat m_Stat; ///< Internal statistics
- //@cond
typedef cds::details::Allocator< node_type, node_allocator > cxx_node_allocator;
/// Position pointer for item search
friend class IterableList;
protected:
- node_type* m_pNode;
- value_type* m_pVal;
- typename gc::Guard m_Guard; // for m_pVal
+ node_type* m_pNode;
+ typename gc::Guard m_Guard; // data guard
void next()
{
while ( m_pNode ) {
- m_pNode = m_pNode->next.load( memory_model::memory_order_relaxed );
- if ( !m_pNode )
+ m_pNode = m_pNode->next.load( memory_model::memory_order_acquire );
+ if ( !m_pNode ) {
+ m_Guard.clear();
break;
- m_pVal = m_Guard.protect( m_pNode->data );
- if ( m_pVal )
+ }
+ if ( m_Guard.protect( m_pNode->data, []( marked_data_ptr p ) { return p.ptr(); }).ptr())
break;
}
}
explicit iterator_type( atomic_node_ptr const& pNode )
- : m_pNode( pNode.load( memory_model::memory_order_relaxed ))
- , m_pVal( nullptr )
+ : m_pNode( pNode.load( memory_model::memory_order_acquire ))
{
if ( m_pNode ) {
- m_pVal = m_Guard.protect( m_pNode->data );
- if ( !m_pVal )
+ if ( !m_Guard.protect( m_pNode->data, []( marked_data_ptr p ) { return p.ptr(); }).ptr())
next();
}
}
iterator_type( node_type* pNode, value_type* pVal )
: m_pNode( pNode )
- , m_pVal( pVal )
{
if ( m_pNode ) {
assert( pVal != nullptr );
iterator_type()
: m_pNode( nullptr )
- , m_pVal( nullptr )
{}
iterator_type( iterator_type const& src )
: m_pNode( src.m_pNode )
- , m_pVal( src.m_pVal )
{
- m_Guard.assign( m_pVal );
+ m_Guard.copy( src.m_Guard );
}
value_ptr operator ->() const
{
- return m_pVal;
+ return m_Guard.template get<value_type>();
}
value_ref operator *() const
{
- assert( m_pVal != nullptr );
- return *m_pVal;
+ assert( m_Guard.get_native() != nullptr );
+ return *m_Guard.template get<value_type>();
}
/// Pre-increment
iterator_type& operator = (iterator_type const& src)
{
m_pNode = src.m_pNode;
- m_pVal = src.m_pVal;
- m_Guard.assign( m_pVal );
+ m_Guard.copy( src.m_Guard );
return *this;
}
template <bool C>
bool operator !=(iterator_type<C> const& i ) const
{
- return m_pNode != i.m_pNode;
+ return !( *this == i );
}
};
//@endcond
For some GC (such as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard"
may be thrown if the limit of guard count per thread is exceeded.
- The iterator cannot be moved across a thread boundary since it contains a thread-private GC guard.
- - Iterator is thread-safe: event if the element the iterator points to is removed, the iterator stays valid because
+ - Iterator is thread-safe: even if the element the iterator points to is removed, the iterator stays valid because
it contains the guard that keeps the value from being recycled.
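+
+        A minimal traversal sketch (assumes a list object \p theList of an illustrative \p item_list type;
+        \p process() is a user-supplied function):
+        \code
+        for ( auto& item : theList ) {
+            // While the iterator points to \p item, the item is protected by the iterator's
+            // thread-private guard: it may be unlinked concurrently, but it is not passed
+            // to the disposer until the iterator moves on.
+            process( item );
+        }
+        // Note: the iterator (and hence the loop) must stay within one thread because
+        // the guard it holds is thread-private.
+        \endcode
+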
The iterator interface:
: m_pHead( nullptr )
{}
+ //@cond
+    template <typename Stat, typename = typename std::enable_if<std::is_same<stat, iterable_list::wrapped_stat<Stat>>::value >::type>
+ explicit IterableList( Stat& st )
+ : m_pHead( nullptr )
+ , m_Stat( st )
+ {}
+ //@endcond
+
/// Destroys the list object
~IterableList()
{
return update_at( m_pHead, val, func, bInsert );
}
- /// Updates the node
+ /// Insert or update
/**
- The operation performs inserting or changing data with lock-free manner.
+        The operation performs inserting or updating data in a lock-free manner.
If the item \p val is not found in the list, then \p val is inserted
iff \p bInsert is \p true.
- Otherwise, the current element is changed to \p val, the element will be retired later
+ Otherwise, the current element is changed to \p val, the old element will be retired later
by calling \p Traits::disposer.
Returns std::pair<bool, bool> where \p first is \p true if the operation is successful,
\p second is \p true if \p val has been added or \p false if an item with that key
is already in the list.
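+
+        A short sketch (assumes the illustrative \p item_list and \p my_item types from the class
+        description; libcds initialization is omitted):
+        \code
+        item_list theList;
+
+        // Key 5 is not in the list yet, so the item is inserted
+        my_item* p1 = new my_item{ 5, 50 };
+        std::pair<bool, bool> res = theList.upsert( *p1 );
+        assert( res.first );    // the operation succeeded
+        assert( res.second );   // a new item was added
+
+        // Upsert with another object bearing the same key: the list now points to *p2,
+        // and *p1 will be retired later via \p Traits::disposer
+        my_item* p2 = new my_item{ 5, 500 };
+        res = theList.upsert( *p2 );
+        assert( res.first );    // succeeded
+        assert( !res.second );  // an existing element was updated, not inserted
+        \endcode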
*/
- std::pair<bool, bool> update( value_type& val, bool bInsert = true )
+ std::pair<bool, bool> upsert( value_type& val, bool bInsert = true )
{
return update_at( m_pHead, val, []( value_type&, value_type* ) {}, bInsert );
}
ord_list theList;
// ...
{
- ord_list::guarded_ptr gp(theList.extract( 5 ));
+ ord_list::guarded_ptr gp( theList.extract( 5 ));
if ( gp ) {
// Deal with gp
// ...
template <typename Q>
guarded_ptr extract( Q const& key )
{
- guarded_ptr gp;
- extract_at( m_pHead, gp.guard(), key, key_comparator());
- return gp;
+ return extract_at( m_pHead, key, key_comparator());
}
/// Extracts the item using compare functor \p pred
guarded_ptr extract_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- extract_at( m_pHead, gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>());
- return gp;
+ return extract_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
/// Finds \p key in the list
If \p key is not found the function returns \p end().
*/
template <typename Q>
- iterator find( Q& key ) const
- {
- return find_iterator_at( m_pHead, key, key_comparator());
- }
- //@cond
- template <typename Q>
iterator find( Q const& key ) const
{
- return find_iterator_at( m_pHead, key, key_comparator() );
+ return find_iterator_at( m_pHead, key, key_comparator());
}
- //@endcond
/// Finds the \p key using \p pred predicate for searching
/**
If \p key is not found the function returns \p end().
*/
template <typename Q, typename Less>
- iterator find_with( Q& key, Less pred ) const
- {
- CDS_UNUSED( pred );
- return find_iterator_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
- }
- //@cond
- template <typename Q, typename Less>
iterator find_with( Q const& key, Less pred ) const
{
CDS_UNUSED( pred );
return find_iterator_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
- //@endcond
/// Checks whether the list contains \p key
/**
template <typename Q>
guarded_ptr get( Q const& key ) const
{
- guarded_ptr gp;
- get_at( m_pHead, gp.guard(), key, key_comparator());
- return gp;
+ return get_at( m_pHead, key, key_comparator());
}
/// Finds the \p key and returns the item found
guarded_ptr get_with( Q const& key, Less pred ) const
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- get_at( m_pHead, gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>());
- return gp;
+ return get_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
/// Clears the list (thread safe, not atomic)
position pos;
for ( pos.pCur = m_pHead.load( memory_model::memory_order_relaxed ); pos.pCur; pos.pCur = pos.pCur->next.load( memory_model::memory_order_relaxed )) {
while ( true ) {
- pos.pFound = pos.guard.protect( pos.pCur->data );
+ pos.pFound = pos.guard.protect( pos.pCur->data, []( marked_data_ptr p ) { return p.ptr(); }).ptr();
if ( !pos.pFound )
break;
if ( cds_likely( unlink_node( pos ))) {
return m_ItemCounter.value();
}
+        /// Returns a const reference to internal statistics
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
protected:
//@cond
#if 0
position pos;
while ( true ) {
- if ( search( refHead, val, pos, key_comparator() ) )
+ if ( search( refHead, val, pos, key_comparator() )) {
+ m_Stat.onInsertFailed();
return false;
+ }
if ( link_node( &val, pos ) ) {
++m_ItemCounter;
+ m_Stat.onInsertSuccess();
return true;
}
+
+ m_Stat.onInsertRetry();
}
}
guard.assign( &val );
while ( true ) {
- if ( search( refHead, val, pos, key_comparator() ) )
+ if ( search( refHead, val, pos, key_comparator() ) ) {
+ m_Stat.onInsertFailed();
return false;
+ }
if ( link_node( &val, pos ) ) {
f( val );
++m_ItemCounter;
+ m_Stat.onInsertSuccess();
return true;
}
+
+ m_Stat.onInsertRetry();
}
}
assert( pos.pFound != nullptr );
assert( key_comparator()(*pos.pFound, val) == 0 );
- if ( cds_likely( pos.pCur->data.compare_exchange_strong( pos.pFound, &val, memory_model::memory_order_release, atomics::memory_order_relaxed ))) {
+ marked_data_ptr pFound( pos.pFound );
+ if ( cds_likely( pos.pCur->data.compare_exchange_strong( pFound, marked_data_ptr( &val ),
+ memory_model::memory_order_release, atomics::memory_order_relaxed )))
+ {
if ( pos.pFound != &val ) {
retire_data( pos.pFound );
func( val, pos.pFound );
}
+ m_Stat.onUpdateExisting();
return std::make_pair( true, false );
}
}
else {
- if ( !bInsert )
+ if ( !bInsert ) {
+ m_Stat.onUpdateFailed();
return std::make_pair( false, false );
+ }
- if ( link_node( &val, pos ) ) {
+ if ( link_node( &val, pos )) {
func( val, static_cast<value_type*>( nullptr ));
++m_ItemCounter;
+ m_Stat.onUpdateNew();
return std::make_pair( true, true );
}
}
+
+ m_Stat.onUpdateRetry();
}
}
if ( pos.pFound == &val ) {
if ( unlink_node( pos )) {
--m_ItemCounter;
+ m_Stat.onEraseSuccess();
return true;
}
else
}
else
break;
+
+ m_Stat.onEraseRetry();
}
+
+ m_Stat.onEraseFailed();
return false;
}
if ( unlink_node( pos )) {
f( *pos.pFound );
--m_ItemCounter;
+ m_Stat.onEraseSuccess();
return true;
}
else
bkoff();
+
+ m_Stat.onEraseRetry();
}
+
+ m_Stat.onEraseFailed();
return false;
}
}
template <typename Q, typename Compare>
- bool extract_at( atomic_node_ptr& refHead, typename guarded_ptr::native_guard& dest, Q const& val, Compare cmp )
+ guarded_ptr extract_at( atomic_node_ptr& refHead, Q const& val, Compare cmp )
{
position pos;
back_off bkoff;
while ( search( refHead, val, pos, cmp )) {
if ( unlink_node( pos )) {
- dest.set( pos.pFound );
--m_ItemCounter;
- return true;
+ m_Stat.onEraseSuccess();
+ assert( pos.pFound != nullptr );
+ return guarded_ptr( std::move( pos.guard ));
}
else
bkoff();
+
+ m_Stat.onEraseRetry();
}
- return false;
+
+ m_Stat.onEraseFailed();
+ return guarded_ptr();
}
template <typename Q, typename Compare>
bool find_at( atomic_node_ptr const& refHead, Q const& val, Compare cmp ) const
{
position pos;
- return search( refHead, val, pos, cmp );
+ if ( search( refHead, val, pos, cmp ) ) {
+ m_Stat.onFindSuccess();
+ return true;
+ }
+
+ m_Stat.onFindFailed();
+ return false;
}
template <typename Q, typename Compare, typename Func>
if ( search( refHead, val, pos, cmp )) {
assert( pos.pFound != nullptr );
f( *pos.pFound, val );
+ m_Stat.onFindSuccess();
return true;
}
+
+ m_Stat.onFindFailed();
return false;
}
if ( search( refHead, val, pos, cmp )) {
assert( pos.pCur != nullptr );
assert( pos.pFound != nullptr );
+ m_Stat.onFindSuccess();
return iterator( pos.pCur, pos.pFound );
}
+
+ m_Stat.onFindFailed();
return iterator{};
}
template <typename Q, typename Compare>
- bool get_at( atomic_node_ptr const& refHead, typename guarded_ptr::native_guard& guard, Q const& val, Compare cmp ) const
+ guarded_ptr get_at( atomic_node_ptr const& refHead, Q const& val, Compare cmp ) const
{
position pos;
if ( search( refHead, val, pos, cmp )) {
- guard.set( pos.pFound );
- return true;
+ m_Stat.onFindSuccess();
+ return guarded_ptr( std::move( pos.guard ));
}
- return false;
+
+ m_Stat.onFindFailed();
+ return guarded_ptr();
}
//@endcond
return false;
}
- value_type * pVal = pos.guard.protect( pCur->data );
+ value_type * pVal = pos.guard.protect( pCur->data,
+ []( marked_data_ptr p ) -> value_type*
+ {
+ return p.ptr();
+ }).ptr();
if ( pVal ) {
int nCmp = cmp( *pVal, val );
private:
//@cond
- static node_type * alloc_node( value_type * pVal )
+ node_type * alloc_node( value_type * pVal )
{
+ m_Stat.onNodeCreated();
return cxx_node_allocator().New( pVal );
}
- static void delete_node( node_type * pNode )
+ void delete_node( node_type * pNode )
{
+ m_Stat.onNodeRemoved();
cxx_node_allocator().Delete( pNode );
}
{
node_type * pNode = m_pHead.load( memory_model::memory_order_relaxed );
while ( pNode ) {
- value_type * pVal = pNode->data.load( memory_model::memory_order_relaxed );
+ value_type * pVal = pNode->data.load( memory_model::memory_order_relaxed ).ptr();
if ( pVal )
retire_data( pVal );
node_type * pNext = pNode->next.load( memory_model::memory_order_relaxed );
}
}
- static bool link_node( value_type * pVal, position& pos )
+ bool link_node( value_type * pVal, position& pos )
{
if ( pos.pPrev ) {
- if ( pos.pPrev->data.load( memory_model::memory_order_relaxed ) == nullptr ) {
+ if ( pos.pPrev->data.load( memory_model::memory_order_relaxed ) == marked_data_ptr() ) {
// reuse pPrev
- value_type * p = nullptr;
- return pos.pPrev->data.compare_exchange_strong( p, pVal, memory_model::memory_order_release, atomics::memory_order_relaxed );
+
+            // pos.pCur's data must remain unchanged, otherwise an ordering violation is possible
+            // if the current thread is preempted and another thread deletes pos.pCur's data
+            // and then replaces it with different data.
+            // To prevent this we mark pos.pCur's data as undeletable by setting its LSB
+ marked_data_ptr val( pos.pFound );
+ if ( pos.pCur && !pos.pCur->data.compare_exchange_strong( val, val | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
+ // oops, pos.pCur data has been changed or another thread is setting pos.pPrev data
+ m_Stat.onReuseNodeFailed();
+ return false;
+ }
+
+ // Set pos.pPrev data if it is null
+ marked_data_ptr p;
+ bool result = pos.pPrev->data.compare_exchange_strong( p, marked_data_ptr( pVal ),
+ memory_model::memory_order_release, atomics::memory_order_relaxed );
+
+ // Clear pos.pCur data mark
+ if ( pos.pCur )
+ pos.pCur->data.store( val, memory_model::memory_order_relaxed );
+
+ if ( result )
+ m_Stat.onReuseNode();
+ return result;
}
else {
// insert new node between pos.pPrev and pos.pCur
assert( pos.pCur != nullptr );
assert( pos.pFound != nullptr );
- if ( pos.pCur->data.compare_exchange_strong( pos.pFound, nullptr, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+ marked_data_ptr val( pos.pFound );
+ if ( pos.pCur->data.compare_exchange_strong( val, marked_data_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
retire_data( pos.pFound );
return true;
}