/*
This file is a part of libcds - Concurrent Data Structures library
- (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
Source code repo: http://github.com/khizmax/libcds/
Download: http://sourceforge.net/projects/libcds/files/
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
#include <cds/intrusive/details/base.h>
#include <cds/algo/atomic.h>
+#include <cds/algo/bit_reversal.h>
#include <cds/details/allocator.h>
#include <cds/algo/int_algo.h>
#include <cds/algo/bitop.h>
#include <cds/opt/hash.h>
#include <cds/intrusive/free_list_selector.h>
+#include <cds/details/size_t_cast.h>
namespace cds { namespace intrusive {
/** @ingroup cds_intrusive_helper
*/
namespace split_list {
+ //@cond
+ struct hash_node
+ {
+ size_t m_nHash; ///< Hash value for node
+
+ /// Default constructor
+ hash_node()
+ : m_nHash( 0 )
+ {
+ assert( is_dummy());
+ }
+
+ /// Initializes dummy node with \p nHash value
+ explicit hash_node( size_t nHash )
+ : m_nHash( nHash )
+ {
+ assert( is_dummy());
+ }
+
+        /// Checks if the node is a dummy node
+ bool is_dummy() const
+ {
+ return (m_nHash & 1) == 0;
+ }
+ };
+ //@endcond
+
/// Split-ordered list node
/**
Template parameter:
- \p OrderedListNode - node type for underlying ordered list
*/
template <typename OrderedListNode>
- struct node: public OrderedListNode
+ struct node: public OrderedListNode, public hash_node
{
//@cond
typedef OrderedListNode base_class;
//@endcond
- size_t m_nHash ; ///< Hash value for node
-
/// Default constructor
node()
- : m_nHash(0)
+ : hash_node(0)
{
- assert( is_dummy() );
+ assert( is_dummy());
}
/// Initializes dummy node with \p nHash value
- node( size_t nHash )
- : m_nHash( nHash )
+ explicit node( size_t nHash )
+ : hash_node( nHash )
{
- assert( is_dummy() );
+ assert( is_dummy());
}
        /// Checks if the node is a dummy node
bool is_dummy() const
{
- return (m_nHash & 1) == 0;
+ return hash_node::is_dummy();
+ }
+ };
+
+ //@cond
+ // for IterableList
+ template <>
+ struct node<void>: public hash_node
+ {
+        /// Default constructor
+ node()
+ : hash_node( 0 )
+ {
+ assert( is_dummy());
+ }
+
+ /// Initializes dummy node with \p nHash value
+ explicit node( size_t nHash )
+ : hash_node( nHash )
+ {
+ assert( is_dummy());
+ }
+
+        /// Checks if the node is a dummy node
+ bool is_dummy() const
+ {
+ return hash_node::is_dummy();
}
};
+ //@endcond
/// \p SplitListSet internal statistics. May be used for debugging or profiling
/**
//@endcond
};
+ /// Option to control bit reversal algorithm
+ /**
+ Bit reversal is a significant part of split-list.
+        \p Type can be one of the predefined algorithms in the \p cds::algo::bit_reversal namespace.
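+        For example (an illustrative sketch; \p my_list and \p my_hash stand for a user-defined
+        ordered list and hash functor):
+        \code
+        typedef cds::intrusive::SplitListSet< cds::gc::HP, my_list,
+            typename cds::intrusive::split_list::make_traits<
+                cds::intrusive::split_list::bit_reversal< cds::algo::bit_reversal::lookup >
+                , cds::opt::hash< my_hash >
+            >::type
+        > my_set;
+        \endcode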
+ */
+ template <typename Type>
+ struct bit_reversal {
+ //@cond
+ template <typename Base>
+ struct pack: public Base
+ {
+ typedef Type bit_reversal;
+ };
+ //@endcond
+ };
+
/// SplitListSet traits
struct traits
{
*/
typedef opt::none hash;
+ /// Bit reversal algorithm
+ /**
+ Bit reversal is a significant part of split-list.
+            There are several predefined algorithms in the \p cds::algo::bit_reversal namespace;
+            \p cds::algo::bit_reversal::lookup is the best general-purpose one.
+
+            More efficient bit reversal algorithms exist for particular processor architectures,
+            for example, ones based on the x86 SIMD/AVX instruction set; see <a href="http://stackoverflow.com/questions/746171/best-algorithm-for-bit-reversal-from-msb-lsb-to-lsb-msb-in-c">here</a>.
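+
+            A custom algorithm may also be supplied as a functor; a minimal SWAR sketch
+            (illustrative only, \p my_bit_reversal is not part of the library; the functor is invoked
+            with an unsigned 32/64-bit argument, see \p regular_hash() below):
+            \code
+            struct my_bit_reversal {
+                uint32_t operator()( uint32_t x ) const
+                {
+                    x = (( x & 0x55555555 ) << 1 ) | (( x >> 1 ) & 0x55555555 ); // swap adjacent bits
+                    x = (( x & 0x33333333 ) << 2 ) | (( x >> 2 ) & 0x33333333 ); // swap bit pairs
+                    x = (( x & 0x0F0F0F0F ) << 4 ) | (( x >> 4 ) & 0x0F0F0F0F ); // swap nibbles
+                    // swap bytes
+                    return ( x << 24 ) | (( x & 0x0000FF00 ) << 8 ) | (( x >> 8 ) & 0x0000FF00 ) | ( x >> 24 );
+                }
+                uint64_t operator()( uint64_t x ) const
+                {
+                    // reverse each 32-bit half, then swap the halves
+                    return ( static_cast<uint64_t>( operator()( static_cast<uint32_t>( x ))) << 32 )
+                        | operator()( static_cast<uint32_t>( x >> 32 ));
+                }
+            };
+            \endcode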
+ */
+ typedef cds::algo::bit_reversal::lookup bit_reversal;
+
/// Item counter
/**
The item counting is an important part of \p SplitListSet algorithm:
the <tt>empty()</tt> member function depends on correct item counting.
Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.
- Default is \p cds::atomicity::item_counter.
+            Default is \p cds::atomicity::item_counter; to avoid false sharing, you may use \p atomicity::cache_friendly_item_counter.
*/
typedef cds::atomicity::item_counter item_counter;
typedef cds::backoff::Default back_off;
/// Padding; default is cache-line padding
- enum {
+ enum {
padding = cds::opt::cache_line_padding
};
/**
Available \p Options:
- \p opt::hash - mandatory option, specifies hash functor.
+            - \p split_list::bit_reversal - bit reversal algorithm, see \p traits::bit_reversal for an explanation;
+              default is \p cds::algo::bit_reversal::lookup
- \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter
for default type.
- \p opt::memory_model - C++ memory model for atomic operations.
/// Auxiliary node type
struct aux_node_type: public node_type, public free_list::node
- {};
+ {
+# ifdef CDS_DEBUG
+ atomics::atomic<bool> m_busy;
+
+ aux_node_type()
+ {
+ m_busy.store( false, atomics::memory_order_release );
+ }
+# endif
+ };
typedef atomics::atomic<aux_node_type *> table_entry; ///< Table entry type
typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator
size_t nLoadFactor ///< Load factor
)
: m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 )
- , m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ) )
+ , m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ))
, m_nAuxNodeAllocated( 0 )
{
// m_nCapacity must be power of 2
- assert( cds::beans::is_power2( m_nCapacity ) );
+ assert( cds::beans::is_power2( m_nCapacity ));
allocate_table();
}
/// Returns head node of bucket \p nBucket
aux_node_type * bucket( size_t nBucket ) const
{
- assert( nBucket < capacity() );
+ assert( nBucket < capacity());
return m_Table[ nBucket ].load(memory_model::memory_order_acquire);
}
/// Set \p pNode as a head of bucket \p nBucket
void bucket( size_t nBucket, aux_node_type * pNode )
{
- assert( nBucket < capacity() );
+ assert( nBucket < capacity());
assert( bucket( nBucket ) == nullptr );
m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );
        /// Allocates auxiliary node; can return \p nullptr if the table is exhausted
aux_node_type* alloc_aux_node()
{
- if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity() ) {
+ if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) {
// alloc next free node from m_auxNode
size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
- if ( idx < capacity() )
+                if ( idx < capacity()) {
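+                    // tell thread sanitizer that the storage for the placement new below is freshly allocated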
+                    CDS_TSAN_ANNOTATE_NEW_MEMORY( &m_auxNode[idx], sizeof( aux_node_type ));
return new( &m_auxNode[idx] ) aux_node_type();
+ }
}
// get from free-list
typedef typename options::free_list free_list;
/// Auxiliary node type
- class aux_node_type: public node_type, public free_list::node
- {};
+ struct aux_node_type: public node_type, public free_list::node
+ {
+# ifdef CDS_DEBUG
+ atomics::atomic<bool> m_busy;
+
+ aux_node_type()
+ {
+ m_busy.store( false, atomics::memory_order_release );
+ }
+# endif
+ };
protected:
//@cond
// aux_node_type nodes[];
aux_node_segment()
- : aux_node_count(0)
- , next_segment( nullptr )
- {}
+ : next_segment( nullptr )
+ {
+ aux_node_count.store( 0, atomics::memory_order_release );
+ }
aux_node_type* segment()
{
metrics()
: nSegmentCount( 1024 )
, nSegmentSize( 512 )
- , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ) )
+ , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ))
, nLoadFactor( 1 )
, nCapacity( nSegmentCount * nSegmentSize )
{}
if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
table_entry* pNewSegment = allocate_segment();
table_entry * pNull = nullptr;
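+                // publish the new segment; if the CAS fails, another thread has already installed one
+                // and our freshly allocated segment must be destroyed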
- if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
+ if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed ))
destroy_segment( pNewSegment );
- }
}
+
+ assert( segment.load( atomics::memory_order_relaxed )[nBucket & (m_metrics.nSegmentSize - 1)].load( atomics::memory_order_relaxed ) == nullptr );
segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
}
        /// Allocates auxiliary node; can return \p nullptr if the table is exhausted
aux_node_type* alloc_aux_node()
{
+ aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_acquire );
+
for ( ;; ) {
- aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_relaxed );
assert( aux_segment != nullptr );
// try to allocate from current aux segment
- if ( aux_segment->aux_node_count.load( memory_model::memory_order_relaxed ) < m_metrics.nSegmentSize ) {
+ if ( aux_segment->aux_node_count.load( memory_model::memory_order_acquire ) < m_metrics.nSegmentSize ) {
size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
- if ( idx < m_metrics.nSegmentSize )
+ if ( idx < m_metrics.nSegmentSize ) {
+                    CDS_TSAN_ANNOTATE_NEW_MEMORY( aux_segment->segment() + idx, sizeof( aux_node_type ));
return new( aux_segment->segment() + idx ) aux_node_type();
+ }
}
            // try to allocate from the free-list
            // try to allocate a new aux segment
            // We can allocate more aux segments than we need, but that is not a problem in this context
aux_node_segment* new_aux_segment = allocate_aux_segment();
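+            // link the new segment to the current list head before publishing it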
+ new_aux_segment->next_segment = aux_segment;
new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
- if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ))
+
+            if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire )) {
+                CDS_TSAN_ANNOTATE_NEW_MEMORY( new_aux_segment->segment(), sizeof( aux_node_type ));
return new( new_aux_segment->segment() ) aux_node_type();
+ }
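+            // the CAS failed: another thread has installed a new segment; free ours and retry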
free_aux_segment( new_aux_segment );
}
// Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
- size_t nBucketCount = (size_t)(((float)nItemCount) / m.nLoadFactor);
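+        // integer ceiling of nItemCount / nLoadFactor; avoids floating-point rounding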
+ size_t nBucketCount = ( nItemCount + m.nLoadFactor - 1 ) / m.nLoadFactor;
if ( nBucketCount <= 2 ) {
m.nSegmentCount = 1;
m.nSegmentSize = 2;
aux_node_segment* allocate_aux_segment()
{
char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
+            CDS_TSAN_ANNOTATE_NEW_MEMORY( p, sizeof( aux_node_segment ));
return new(p) aux_node_segment();
}
void init()
{
// m_nSegmentSize must be 2**N
- assert( cds::beans::is_power2( m_metrics.nSegmentSize ) );
+ assert( cds::beans::is_power2( m_metrics.nSegmentSize ));
assert( (((size_t)1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize );
// m_nSegmentCount must be 2**K
- assert( cds::beans::is_power2( m_metrics.nSegmentCount ) );
+ assert( cds::beans::is_power2( m_metrics.nSegmentCount ));
m_Segments = allocate_table();
m_auxNodeList = allocate_aux_segment();
//@endcond
};
- /// Split-list node traits
- /**
- This traits is intended for converting between underlying ordered list node type
- and split-list node type
-
- Template parameter:
- - \p BaseNodeTraits - node traits of base ordered list type
- */
- template <class BaseNodeTraits>
- struct node_traits: private BaseNodeTraits
- {
- typedef BaseNodeTraits base_class; ///< Base ordered list node type
- typedef typename base_class::value_type value_type; ///< Value type
- typedef typename base_class::node_type base_node_type; ///< Ordered list node type
- typedef node<base_node_type> node_type; ///< Spit-list node type
-
- /// Convert value reference to node pointer
- static node_type * to_node_ptr( value_type& v )
- {
- return static_cast<node_type *>( base_class::to_node_ptr( v ) );
- }
-
- /// Convert value pointer to node pointer
- static node_type * to_node_ptr( value_type * v )
- {
- return static_cast<node_type *>( base_class::to_node_ptr( v ) );
- }
-
- /// Convert value reference to node pointer (const version)
- static node_type const * to_node_ptr( value_type const& v )
- {
- return static_cast<node_type const*>( base_class::to_node_ptr( v ) );
- }
-
- /// Convert value pointer to node pointer (const version)
- static node_type const * to_node_ptr( value_type const * v )
- {
- return static_cast<node_type const *>( base_class::to_node_ptr( v ) );
- }
-
- /// Convert node reference to value pointer
- static value_type * to_value_ptr( node_type& n )
- {
- return base_class::to_value_ptr( static_cast<base_node_type &>( n ) );
- }
-
- /// Convert node pointer to value pointer
- static value_type * to_value_ptr( node_type * n )
- {
- return base_class::to_value_ptr( static_cast<base_node_type *>( n ) );
- }
-
- /// Convert node reference to value pointer (const version)
- static const value_type * to_value_ptr( node_type const & n )
- {
- return base_class::to_value_ptr( static_cast<base_node_type const &>( n ) );
- }
-
- /// Convert node pointer to value pointer (const version)
- static const value_type * to_value_ptr( node_type const * n )
- {
- return base_class::to_value_ptr( static_cast<base_node_type const *>( n ) );
- }
- };
//@cond
namespace details {
{}
};
+        template <class OrderedList, class Traits, bool Iterable>
+ class ordered_list_adapter;
+
template <class OrderedList, class Traits>
- class rebind_list_traits
+ class ordered_list_adapter< OrderedList, Traits, false >
{
typedef OrderedList native_ordered_list;
typedef Traits traits;
typedef typename native_ordered_list::key_comparator native_key_comparator;
typedef typename native_ordered_list::node_type node_type;
typedef typename native_ordered_list::value_type value_type;
- typedef typename native_ordered_list::node_traits node_traits;
+ typedef typename native_ordered_list::node_traits native_node_traits;
typedef typename native_ordered_list::disposer native_disposer;
typedef split_list::node<node_type> splitlist_node_type;
struct key_compare {
int operator()( value_type const& v1, value_type const& v2 ) const
{
- splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v1 ));
- splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v2 ));
+ splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v1 ));
+ splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v2 ));
if ( n1->m_nHash != n2->m_nHash )
return n1->m_nHash < n2->m_nHash ? -1 : 1;
- if ( n1->is_dummy() ) {
- assert( n2->is_dummy() );
+ if ( n1->is_dummy()) {
+ assert( n2->is_dummy());
return 0;
}
- assert( !n1->is_dummy() && !n2->is_dummy() );
+ assert( !n1->is_dummy() && !n2->is_dummy());
- return native_key_comparator()( v1, v2 );
+ return native_key_comparator()(v1, v2);
}
template <typename Q>
int operator()( value_type const& v, search_value_type<Q> const& q ) const
{
- splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
+ splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
if ( n->m_nHash != q.nHash )
return n->m_nHash < q.nHash ? -1 : 1;
- assert( !n->is_dummy() );
- return native_key_comparator()( v, q.val );
+ assert( !n->is_dummy());
+ return native_key_comparator()(v, q.val);
}
template <typename Q>
{
void operator()( value_type * v )
{
- splitlist_node_type * p = static_cast<splitlist_node_type *>( node_traits::to_node_ptr( v ));
- if ( !p->is_dummy() )
- native_disposer()( v );
+ splitlist_node_type * p = static_cast<splitlist_node_type *>(native_node_traits::to_node_ptr( v ));
+ if ( !p->is_dummy())
+ native_disposer()(v);
}
};
public:
+ typedef node_type ordered_list_node_type;
+ typedef splitlist_node_type aux_node;
+
+ struct node_traits: private native_node_traits
+ {
+ typedef native_node_traits base_class; ///< Base ordered list node type
+ typedef typename base_class::value_type value_type; ///< Value type
+ typedef typename base_class::node_type base_node_type; ///< Ordered list node type
+ typedef node<base_node_type> node_type; ///< Split-list node type
+
+ /// Convert value reference to node pointer
+ static node_type * to_node_ptr( value_type& v )
+ {
+ return static_cast<node_type *>(base_class::to_node_ptr( v ));
+ }
+
+ /// Convert value pointer to node pointer
+ static node_type * to_node_ptr( value_type * v )
+ {
+ return static_cast<node_type *>(base_class::to_node_ptr( v ));
+ }
+
+ /// Convert value reference to node pointer (const version)
+ static node_type const * to_node_ptr( value_type const& v )
+ {
+ return static_cast<node_type const*>(base_class::to_node_ptr( v ));
+ }
+
+ /// Convert value pointer to node pointer (const version)
+ static node_type const * to_node_ptr( value_type const * v )
+ {
+ return static_cast<node_type const *>(base_class::to_node_ptr( v ));
+ }
+
+ /// Convert node reference to value pointer
+ static value_type * to_value_ptr( node_type& n )
+ {
+ return base_class::to_value_ptr( static_cast<base_node_type &>(n));
+ }
+
+ /// Convert node pointer to value pointer
+ static value_type * to_value_ptr( node_type * n )
+ {
+ return base_class::to_value_ptr( static_cast<base_node_type *>(n));
+ }
+
+ /// Convert node reference to value pointer (const version)
+ static const value_type * to_value_ptr( node_type const & n )
+ {
+ return base_class::to_value_ptr( static_cast<base_node_type const &>(n));
+ }
+
+ /// Convert node pointer to value pointer (const version)
+ static const value_type * to_value_ptr( node_type const * n )
+ {
+ return base_class::to_value_ptr( static_cast<base_node_type const *>(n));
+ }
+ };
+
template <typename Less>
struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
{
template <typename Q>
int operator()( value_type const& v, search_value_type<Q> const& q ) const
{
- splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
+ splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
if ( n->m_nHash != q.nHash )
return n->m_nHash < q.nHash ? -1 : 1;
- assert( !n->is_dummy() );
- return base_class()( v, q.val );
+ assert( !n->is_dummy());
+ return base_class()(v, q.val);
}
template <typename Q>
int operator()( search_value_type<Q> const& q, value_type const& v ) const
{
- splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
+ splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
if ( n->m_nHash != q.nHash )
return q.nHash < n->m_nHash ? -1 : 1;
- assert( !n->is_dummy() );
- return base_class()( q.val, v );
+ assert( !n->is_dummy());
+ return base_class()(q.val, v);
+ }
+
+ int operator()( value_type const& lhs, value_type const& rhs ) const
+ {
+ splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( lhs ));
+ splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( rhs ));
+ if ( n1->m_nHash != n2->m_nHash )
+ return n1->m_nHash < n2->m_nHash ? -1 : 1;
+
+ if ( n1->is_dummy()) {
+ assert( n2->is_dummy());
+ return 0;
+ }
+
+ assert( !n1->is_dummy() && !n2->is_dummy());
+
+ return native_key_comparator()( lhs, rhs );
+ }
+ };
+
+ typedef typename native_ordered_list::template rebind_traits<
+ opt::compare< key_compare >
+ , opt::disposer< wrapped_disposer >
+ , opt::boundary_node_type< splitlist_node_type >
+ >::type result;
+ };
+
+ template <class OrderedList, class Traits>
+ class ordered_list_adapter< OrderedList, Traits, true >
+ {
+ typedef OrderedList native_ordered_list;
+ typedef Traits traits;
+
+ typedef typename native_ordered_list::gc gc;
+ typedef typename native_ordered_list::key_comparator native_key_comparator;
+ typedef typename native_ordered_list::value_type value_type;
+ typedef typename native_ordered_list::disposer native_disposer;
+
+ struct key_compare {
+ int operator()( value_type const& v1, value_type const& v2 ) const
+ {
+ hash_node const& n1 = static_cast<hash_node const&>( v1 );
+ hash_node const& n2 = static_cast<hash_node const&>( v2 );
+ if ( n1.m_nHash != n2.m_nHash )
+ return n1.m_nHash < n2.m_nHash ? -1 : 1;
+
+ if ( n1.is_dummy()) {
+ assert( n2.is_dummy());
+ return 0;
+ }
+
+ assert( !n1.is_dummy() && !n2.is_dummy());
+
+ return native_key_comparator()(v1, v2);
+ }
+
+ template <typename Q>
+ int operator()( value_type const& v, search_value_type<Q> const& q ) const
+ {
+ hash_node const& n = static_cast<hash_node const&>( v );
+ if ( n.m_nHash != q.nHash )
+ return n.m_nHash < q.nHash ? -1 : 1;
+
+ assert( !n.is_dummy());
+ return native_key_comparator()(v, q.val);
+ }
+
+ template <typename Q>
+ int operator()( search_value_type<Q> const& q, value_type const& v ) const
+ {
+ return -operator()( v, q );
+ }
+ };
+
+ struct wrapped_disposer
+ {
+ void operator()( value_type * v )
+ {
+ if ( !static_cast<hash_node*>( v )->is_dummy())
+ native_disposer()( v );
}
+ };
+
+ public:
+ typedef void ordered_list_node_type;
- template <typename Q1, typename Q2>
- int operator()( Q1 const& v1, Q2 const& v2 ) const
+ struct aux_node: public native_ordered_list::node_type, public hash_node
+ {
+ aux_node()
{
- return base_class()( v1, v2 );
+ typedef typename native_ordered_list::node_type list_node_type;
+
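+                    // publish a pointer to our own hash_node part as the node data,
+                    // so that key_compare and wrapped_disposer recognize this aux node as a dummy one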
+ list_node_type::data.store( typename list_node_type::marked_data_ptr(
+ static_cast<value_type*>( static_cast<hash_node *>( this ))),
+ atomics::memory_order_release
+ );
+ }
+ };
+
+ struct node_traits
+ {
+ static hash_node * to_node_ptr( value_type& v )
+ {
+ return static_cast<hash_node *>( &v );
+ }
+
+ static hash_node * to_node_ptr( value_type * v )
+ {
+ return static_cast<hash_node *>( v );
+ }
+
+ static hash_node const * to_node_ptr( value_type const& v )
+ {
+ return static_cast<hash_node const*>( &v );
+ }
+
+ static hash_node const * to_node_ptr( value_type const * v )
+ {
+ return static_cast<hash_node const *>( v );
+ }
+ };
+
+ template <typename Less>
+ struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
+ {
+ typedef cds::opt::details::make_comparator_from_less<Less> base_class;
+
+ template <typename Q>
+ int operator()( value_type const& v, search_value_type<Q> const& q ) const
+ {
+ hash_node const& n = static_cast<hash_node const&>( v );
+ if ( n.m_nHash != q.nHash )
+ return n.m_nHash < q.nHash ? -1 : 1;
+
+ assert( !n.is_dummy());
+ return base_class()(v, q.val);
+ }
+
+ template <typename Q>
+ int operator()( search_value_type<Q> const& q, value_type const& v ) const
+ {
+ hash_node const& n = static_cast<hash_node const&>( v );
+ if ( n.m_nHash != q.nHash )
+ return q.nHash < n.m_nHash ? -1 : 1;
+
+ assert( !n.is_dummy());
+ return base_class()(q.val, v);
+ }
+
+ int operator()( value_type const& lhs, value_type const& rhs ) const
+ {
+ hash_node const& n1 = static_cast<hash_node const&>( lhs );
+ hash_node const& n2 = static_cast<hash_node const&>( rhs );
+ if ( n1.m_nHash != n2.m_nHash )
+ return n1.m_nHash < n2.m_nHash ? -1 : 1;
+
+ if ( n1.is_dummy()) {
+ assert( n2.is_dummy());
+ return 0;
+ }
+
+ assert( !n1.is_dummy() && !n2.is_dummy());
+
+ return base_class()( lhs, rhs );
}
};
typedef typename native_ordered_list::template rebind_traits<
opt::compare< key_compare >
- ,opt::disposer< wrapped_disposer >
- ,opt::boundary_node_type< splitlist_node_type >
+ , opt::disposer< wrapped_disposer >
+ , opt::boundary_node_type< aux_node >
>::type result;
};
+ template <class OrderedList, class Traits>
+ using rebind_list_traits = ordered_list_adapter< OrderedList, Traits, is_iterable_list<OrderedList>::value >;
+
template <typename OrderedList, bool IsConst>
struct select_list_iterator;
, m_itEnd( itEnd )
{
// skip dummy nodes
- while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() )
+ while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy())
++m_itCur;
}
if ( m_itCur != m_itEnd ) {
do {
++m_itCur;
- } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() );
+ } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy());
}
return *this;
}
{
return m_itCur != i.m_itCur;
}
+
+ protected:
+ list_iterator const& underlying_iterator() const
+ {
+ return m_itCur;
+ }
};
} // namespace details
//@endcond
//@cond
// Helper functions
-
- /// Reverses bit order in \p nHash
- static inline size_t reverse_bits( size_t nHash )
- {
- return bitop::RBO( nHash );
- }
-
+ template <typename BitReversalAlgo>
static inline size_t regular_hash( size_t nHash )
{
- return reverse_bits( nHash ) | size_t(1);
+ return static_cast<size_t>( BitReversalAlgo()( cds::details::size_t_cast( nHash ))) | size_t(1);
}
+ template <typename BitReversalAlgo>
static inline size_t dummy_hash( size_t nHash )
{
- return reverse_bits( nHash ) & ~size_t(1);
+ return static_cast<size_t>( BitReversalAlgo()( cds::details::size_t_cast( nHash ))) & ~size_t(1);
}
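+
+    // Illustration (8-bit values for brevity): for hash 0b00000011 the bit-reversed value is 0b11000000;
+    // regular_hash() yields 0b11000001 (LSB set: regular node),
+    // dummy_hash() yields  0b11000000 (LSB clear: dummy node)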
//@endcond