/*
This file is a part of libcds - Concurrent Data Structures library
(C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
Source code repo: http://github.com/khizmax/libcds/
Download: http://sourceforge.net/projects/libcds/files/
protected:
//@cond
- typedef split_list::details::rebind_list_traits<OrderedList, traits> wrapped_ordered_list;
+ typedef split_list::details::rebind_list_traits<OrderedList, traits> ordered_list_adapter;
//@endcond
public:
# ifdef CDS_DOXYGEN_INVOKED
typedef OrderedList ordered_list; ///< type of ordered list used as base for split-list
# else
- typedef typename wrapped_ordered_list::result ordered_list;
+ typedef typename ordered_list_adapter::result ordered_list;
# endif
typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list
typedef typename ordered_list::key_comparator key_comparator; ///< key comparison functor
typedef typename ordered_list::disposer disposer; ///< Node disposer functor
+ typedef typename traits::bit_reversal bit_reversal; ///< Bit reversal algorithm, see \p split_list::traits::bit_reversal
typedef typename traits::item_counter item_counter; ///< Item counter type
typedef typename traits::back_off back_off; ///< back-off strategy
typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
"cds::atomicity::empty_item_counter is not allowed as a item counter");
protected:
+ //@cond
typedef typename ordered_list::node_type list_node_type; ///< Node type as declared in ordered list
typedef split_list::node<list_node_type> node_type; ///< split-list node type
- typedef node_type aux_node_type; ///< dummy node type
/// Split-list node traits
/**
This traits is intended for converting between underlying ordered list node type \ref list_node_type
and split-list node type \ref node_type
*/
- typedef split_list::node_traits<typename ordered_list::node_traits> node_traits;
+ typedef typename ordered_list_adapter::node_traits node_traits;
- //@cond
/// Bucket table implementation
typedef typename split_list::details::bucket_table_selector<
traits::dynamic_bucket_table
, gc
- , aux_node_type
+ , typename ordered_list_adapter::aux_node
, opt::allocator< typename traits::allocator >
, opt::memory_model< memory_model >
+ , opt::free_list< typename traits::free_list >
>::type bucket_table;
+ typedef typename bucket_table::aux_node_type aux_node_type; ///< dummy node type
+
typedef typename ordered_list::iterator list_iterator;
typedef typename ordered_list::const_iterator list_const_iterator;
//@endcond
*/
/// Constructs an empty set: bucket-count log2 starts at 1 (two buckets) and the
/// initial max item count is derived from 2 buckets times the table's load factor.
// NOTE(review): the '-'/'+' pair below is patch residue — a whitespace-only change.
SplitListSet()
: m_nBucketCountLog2(1)
- , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
+ , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()))
{
init();
}
)
: m_Buckets( nItemCount, nLoadFactor )
, m_nBucketCountLog2(1)
- , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
+ , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()))
{
init();
}
/// Looks \p key up via find_() and returns the address of the found item,
/// or nullptr when find_() returns end(). (Template header elided from this chunk.)
// NOTE(review): the '-'/'+' pair below is patch residue — a whitespace-only change.
value_type * contains( Q const& key )
{
iterator it = find_( key );
- if ( it == end() )
+ if ( it == end())
return nullptr;
return &*it;
}
/// Same as the single-argument contains() but searches with the ordering induced
/// by \p pred (delegated to find_with_()); returns the item's address or nullptr.
// NOTE(review): the '-'/'+' pair below is patch residue — a whitespace-only change.
value_type * contains( Q const& key, Less pred )
{
iterator it = find_with_( key, pred );
- if ( it == end() )
+ if ( it == end())
return nullptr;
return &*it;
}
/// Finds \p key using \p pred's ordering and applies \p f to the found item;
/// \p pred itself is unused — only its type builds the comparator.
// NOTE(review): patch residue — the only difference between the '-'/'+' lines is
// the rename wrapped_ordered_list -> ordered_list_adapter.
bool find_with( Q& key, Less pred, Func f )
{
CDS_UNUSED( pred );
- return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
+ return find_( key, typename ordered_list_adapter::template make_compare_from_less<Less>(), f );
}
//@cond
// Const-reference overload of find_with(): identical logic, key is not mutable.
// NOTE(review): patch residue — '-'/'+' lines differ only in the
// wrapped_ordered_list -> ordered_list_adapter rename.
template <typename Q, typename Less, typename Func>
bool find_with( Q const& key, Less pred, Func f )
{
CDS_UNUSED( pred );
- return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
+ return find_( key, typename ordered_list_adapter::template make_compare_from_less<Less>(), f );
}
//@endcond
return m_Stat;
}
// NOTE(review): all-'+' patch residue — this accessor is newly added by the diff;
// it forwards to the underlying ordered list's statistics() object.
+ /// Returns internal statistics for \p OrderedList
+ typename OrderedList::stat const& list_statistics() const
+ {
+ return m_List.statistics();
+ }
+
protected:
//@cond
template <bool IsConst>
*/
/// Forward iterator to the first element, bounded by the list's end().
// NOTE(review): '-'/'+' pair is patch residue — whitespace-only change.
iterator begin()
{
- return iterator( m_List.begin(), m_List.end() );
+ return iterator( m_List.begin(), m_List.end());
}
/// Returns an iterator that addresses the location succeeding the last element in a split-list
*/
/// Past-the-end iterator (both bounds are the list's end()).
// NOTE(review): '-'/'+' pair is patch residue — whitespace-only change.
iterator end()
{
- return iterator( m_List.end(), m_List.end() );
+ return iterator( m_List.end(), m_List.end());
}
/// Returns a forward const iterator addressing the first element in a split-list
/// Const forward iterator to the first element.
// NOTE(review): '-'/'+' pair is patch residue — whitespace-only change.
const_iterator begin() const
{
- return const_iterator( m_List.begin(), m_List.end() );
+ return const_iterator( m_List.begin(), m_List.end());
}
/// Returns a forward const iterator addressing the first element in a split-list
/// Explicitly-const forward iterator (uses cbegin()/cend() of the list).
// NOTE(review): '-'/'+' pair is patch residue — whitespace-only change.
const_iterator cbegin() const
{
- return const_iterator( m_List.cbegin(), m_List.cend() );
+ return const_iterator( m_List.cbegin(), m_List.cend());
}
/// Returns an const iterator that addresses the location succeeding the last element in a split-list
/// Const past-the-end iterator.
// NOTE(review): '-'/'+' pair is patch residue — whitespace-only change.
const_iterator end() const
{
- return const_iterator( m_List.end(), m_List.end() );
+ return const_iterator( m_List.end(), m_List.end());
}
/// Returns an const iterator that addresses the location succeeding the last element in a split-list
/// Explicitly-const past-the-end iterator.
// NOTE(review): '-'/'+' pair is patch residue — whitespace-only change.
const_iterator cend() const
{
- return const_iterator( m_List.cend(), m_List.cend() );
+ return const_iterator( m_List.cend(), m_List.cend());
}
//@}
aux_node_type * pHead = get_bucket( nHash );
assert( pHead != nullptr );
- node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
+ node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash<bit_reversal>( nHash );
list_iterator it = m_List.insert_at_( pHead, val );
- if ( it != m_List.end() ) {
+ if ( it != m_List.end()) {
inc_item_count();
m_Stat.onInsertSuccess();
- return iterator( it, m_List.end() );
+ return iterator( it, m_List.end());
}
m_Stat.onInsertFailed();
return end();
aux_node_type * pHead = get_bucket( nHash );
assert( pHead != nullptr );
- node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
+ node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash<bit_reversal>( nHash );
std::pair<list_iterator, bool> ret = m_List.update_at_( pHead, val, func, bAllowInsert );
- if ( ret.first != m_List.end() ) {
+ if ( ret.first != m_List.end()) {
if ( ret.second ) {
inc_item_count();
m_Stat.onUpdateNew();
{
CDS_UNUSED( pred );
size_t nHash = hash_value( val );
- split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
+ split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash<bit_reversal>( nHash ));
aux_node_type * pHead = get_bucket( nHash );
assert( pHead != nullptr );
- auto it = m_List.find_at_( pHead, sv, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
- m_Stat.onFind( it != m_List.end() );
- return iterator( it, m_List.end() );
+ auto it = m_List.find_at_( pHead, sv, typename ordered_list_adapter::template make_compare_from_less<Less>());
+ m_Stat.onFind( it != m_List.end());
+ return iterator( it, m_List.end());
}
/// Hash-then-search: locates the bucket head for \p val's hash and searches the
/// ordered list from that head with the default key_comparator.
// NOTE(review): '-'/'+' lines are patch residue; the substantive change is the
// new bit_reversal template argument to split_list::regular_hash, the rest is
// whitespace-only.
template <typename Q>
iterator find_( Q const& val )
{
size_t nHash = hash_value( val );
// Search key carries the "regular" (bit-reversed) hash alongside the value.
- split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
+ split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash<bit_reversal>( nHash ));
aux_node_type * pHead = get_bucket( nHash );
assert( pHead != nullptr );
- auto it = m_List.find_at_( pHead, sv, key_comparator() );
- m_Stat.onFind( it != m_List.end() );
- return iterator( it, m_List.end() );
+ auto it = m_List.find_at_( pHead, sv, key_comparator());
+ m_Stat.onFind( it != m_List.end());
+ return iterator( it, m_List.end());
}
/// Functor-applying find: searches from the hash bucket's head using \p cmp and
/// invokes \p f on the found item; the boolean result is routed through onFind().
// NOTE(review): '-'/'+' lines are patch residue — besides the bit_reversal hash
// argument, the lambda parameter was renamed val -> v to avoid shadowing the
// outer 'val'.
template <typename Q, typename Compare, typename Func>
bool find_( Q& val, Compare cmp, Func f )
{
size_t nHash = hash_value( val );
- split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
+ split_list::details::search_value_type<Q> sv( val, split_list::regular_hash<bit_reversal>( nHash ));
aux_node_type * pHead = get_bucket( nHash );
assert( pHead != nullptr );
return m_Stat.onFind( m_List.find_at( pHead, sv, cmp,
- [&f](value_type& item, split_list::details::search_value_type<Q>& val){ f(item, val.val ); }));
+ [&f](value_type& item, split_list::details::search_value_type<Q>& v){ f(item, v.val ); }));
}
aux_node_type * alloc_aux_node( size_t nHash )
return nBucket & ~(1 << bitop::MSBnz( nBucket ));
}
/// Lazily initializes bucket \p nBucket: allocates its aux (dummy) node and links
/// it into the ordered list after the parent bucket's node.
/**
NOTE(review): this span is unified-diff residue. '-' lines are the old
implementation (allocate + insert, then on contention spin on a volatile
bucket pointer); '+' lines are the new retry loop, which additionally handles
aux-node exhaustion (m_Stat.onBucketsExhausted() + back-off) when the bucket
table has no free aux node. pParentBucket is declared in lines elided from
this chunk — its initialization from nParent cannot be verified here.
*/
- aux_node_type * init_bucket( size_t nBucket )
+ aux_node_type * init_bucket( size_t const nBucket )
{
assert( nBucket > 0 );
size_t nParent = parent_bucket( nBucket );
assert( pParentBucket != nullptr );
- // Allocate a dummy node for new bucket
- {
- aux_node_type * pBucket = alloc_aux_node( split_list::dummy_hash( nBucket ) );
- if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
- m_Buckets.bucket( nBucket, pBucket );
- m_Stat.onNewBucket();
+ // Allocate an aux node for new bucket
+ aux_node_type * pBucket = m_Buckets.bucket( nBucket );
+
+ back_off bkoff;
+ for ( ;; pBucket = m_Buckets.bucket( nBucket )) {
+ if ( pBucket )
return pBucket;
+
+ pBucket = alloc_aux_node( split_list::dummy_hash<bit_reversal>( nBucket ));
+ if ( pBucket ) {
+ if ( m_List.insert_aux_node( pParentBucket, pBucket )) {
+ m_Buckets.bucket( nBucket, pBucket );
+ m_Stat.onNewBucket();
+ return pBucket;
+ }
+
+ // Another thread set the bucket. Wait while it done
+ free_aux_node( pBucket );
+ m_Stat.onBucketInitContenton();
+ break;
}
- free_aux_node( pBucket );
+
+ // There are no free buckets. It means that the bucket table is full
+ // Wait while another thread set the bucket or a free bucket will be available
+ m_Stat.onBucketsExhausted();
+ bkoff();
}
// Another thread set the bucket. Wait while it done
-
- // In this point, we must wait while nBucket is empty.
- // The compiler can decide that waiting loop can be "optimized" (stripped)
- // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
- //
- m_Stat.onBucketInitContenton();
- back_off bkoff;
- while ( true ) {
- aux_node_type volatile * p = m_Buckets.bucket( nBucket );
- if ( p && p != nullptr )
- return const_cast<aux_node_type *>(p);
+ for ( pBucket = m_Buckets.bucket( nBucket ); pBucket == nullptr; pBucket = m_Buckets.bucket( nBucket )) {
bkoff();
m_Stat.onBusyWaitBucketInit();
}
+
+ return pBucket;
}
aux_node_type * get_bucket( size_t nHash )
if ( pHead == nullptr )
pHead = init_bucket( nBucket );
- assert( pHead->is_dummy() );
+ assert( pHead->is_dummy());
return pHead;
}
/// One-time setup: allocates the aux node for bucket 0, inserts it as the list's
/// head aux node, and records it in the bucket table.
// NOTE(review): the literal 0 stands in for split_list::dummy_hash(0) per the
// inline comment — presumably dummy_hash(0) == 0; confirm against split_list docs.
void init()
{
// Initialize bucket 0
- aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash(0)*/ );
+ aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash<bit_reversal>(0)*/ );
// insert_aux_node cannot return false for empty list
- CDS_VERIFY( m_List.insert_aux_node( pNode ) );
+ CDS_VERIFY( m_List.insert_aux_node( pNode ));
m_Buckets.bucket( 0, pNode );
}
size_t sz = m_nBucketCountLog2.load( memory_model::memory_order_relaxed );
const size_t nBucketCount = static_cast<size_t>(1) << sz;
- if ( nBucketCount < m_Buckets.capacity() ) {
+ if ( nBucketCount < m_Buckets.capacity()) {
// we may grow the bucket table
const size_t nLoadFactor = m_Buckets.load_factor();
- if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor ) )
+ if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor ))
return; // someone already have updated m_nBucketCountLog2, so stop here
m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ),
protected:
//@cond
- typedef typename cds::details::type_padding< bucket_table, traits::padding >::type padded_bucket_table;
+ static unsigned const c_padding = cds::opt::actual_padding< traits::padding >::value;
+
+ typedef typename cds::details::type_padding< bucket_table, c_padding >::type padded_bucket_table;
padded_bucket_table m_Buckets; ///< bucket table
- typedef typename cds::details::type_padding< ordered_list_wrapper, traits::padding>::type padded_ordered_list;
+ typedef typename cds::details::type_padding< ordered_list_wrapper, c_padding >::type padded_ordered_list;
padded_ordered_list m_List; ///< Ordered list containing split-list items
atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
atomics::atomic<size_t> m_nMaxItemCount; ///< number of items container can hold, before we have to resize
- item_counter m_ItemCounter; ///< Item counter
hash m_HashFunctor; ///< Hash functor
+ item_counter m_ItemCounter; ///< Item counter
stat m_Stat; ///< Internal statistics
//@endcond
};