3 #ifndef __CDS_INTRUSIVE_SPLIT_LIST_BASE_H
4 #define __CDS_INTRUSIVE_SPLIT_LIST_BASE_H
6 #include <cds/intrusive/details/base.h>
7 #include <cds/cxx11_atomic.h>
8 #include <cds/details/allocator.h>
9 #include <cds/algo/int_algo.h>
10 #include <cds/algo/bitop.h>
11 #include <cds/details/functor_wrapper.h>
13 namespace cds { namespace intrusive {
15 /// Split-ordered list related definitions
16 /** @ingroup cds_intrusive_helper
18 namespace split_list {
/// Split-ordered list node
/**
    Template parameters:
    - OrderedListNode - node type for underlying ordered list
*/
template <typename OrderedListNode>
struct node: public OrderedListNode
typedef OrderedListNode base_class;  ///< Underlying ordered-list node type
// Split-order (bit-reversed) hash of the item. The least significant bit
// encodes the node kind: 1 - regular node (see regular_hash()),
// 0 - dummy bucket-sentinel node (see dummy_hash()).
size_t m_nHash ; ///< Hash value for node
/// Default constructor
/// Initializes dummy node with \p nHash value
/// Checks if the node is dummy node
// Dummy nodes have the least significant hash bit cleared
return (m_nHash & 1) == 0;
/// Type traits for SplitListSet class
/// Hash functor (mandatory option)
/**
    Hash function converts the key fields of struct \p T stored in the split list
    into value of type \p size_t called hash value that is an index of hash table.
    This is mandatory type and has no predefined one.
*/
typedef opt::none hash;
/// Item counter
/**
    The item counting is an important part of SplitListSet algorithm:
    the <tt>empty()</tt> member function depends on correct item counting.
    Therefore, atomicity::empty_item_counter is not allowed as a type of the option.
    Default is atomicity::item_counter.
*/
typedef atomicity::item_counter item_counter;
/// Bucket table allocator
/**
    Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
*/
typedef CDS_DEFAULT_ALLOCATOR allocator;
/// C++ memory model for atomic operations
/**
    Can be opt::v::relaxed_ordering (relaxed memory model, the default) or opt::v::sequential_consistent (sequentially consistent memory model).
*/
typedef opt::v::relaxed_ordering memory_model;
/// What type of bucket table is used
/**
    \p true - use split_list::expandable_bucket_table that can be expanded
    if the load factor of the set is exhausted
    \p false - use split_list::static_bucket_table that cannot be expanded
*/
static const bool dynamic_bucket_table = true;
/// Back-off strategy used
/**
    If the option is not specified, the cds::backoff::Default is used.
*/
typedef cds::backoff::Default back_off;
/// [value-option] Split-list dynamic bucket table option
/**
    The option is used to select bucket table implementation.
    Possible values of \p Value are:
    - \p true - select \ref expandable_bucket_table implementation
    - \p false - select \ref static_bucket_table implementation
*/
template <bool Value>
struct dynamic_bucket_table
// Option packer: injects the selected value into the traits produced by make_options
template <typename Base> struct pack: public Base
enum { dynamic_bucket_table = Value };
/// Metafunction converting option list to traits struct
/**
    This is a wrapper for <tt> cds::opt::make_options< type_traits, Options...> </tt>

    Available \p Options:
    - opt::hash - mandatory option, specifies hash functor.
    - opt::item_counter - optional, specifies item counting policy. See type_traits::item_counter
    - opt::memory_model - C++ memory model for atomic operations.
      Can be opt::v::relaxed_ordering (relaxed memory model, the default) or opt::v::sequential_consistent (sequentially consistent memory model).
    - opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR.
    - split_list::dynamic_bucket_table - use dynamic or static bucket table implementation.
      Dynamic bucket table expands its size up to maximum bucket count when necessary
    - opt::back_off - back-off strategy used for spinning. If the option is not specified, the cds::backoff::Default is used.

    See \ref MichaelHashSet, \ref type_traits.
*/
template <typename... Options>
typedef typename cds::opt::make_options< type_traits, Options...>::type type ; ///< Result of metafunction
/// Static bucket table
/**
    Non-resizeable bucket table for SplitListSet class.
    The capacity of table (max bucket count) is defined in the constructor call.

    Template parameters:
    - \p GC - garbage collector used
    - \p Node - node type, must be a type based on \ref node template
    - \p Options... - options

    \p Options are:
    - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
    - \p opt::memory_model - memory model used. Possible types are opt::v::sequential_consistent, opt::v::relaxed_ordering
*/
template <typename GC, typename Node, typename... Options>
class static_bucket_table
// Default values for options not supplied by the user
struct default_options
typedef CDS_DEFAULT_ALLOCATOR allocator;
typedef opt::v::relaxed_ordering memory_model;
typedef typename opt::make_options< default_options, Options... >::type options;
typedef GC gc ; ///< Garbage collector
typedef Node node_type ; ///< Bucket node type
typedef atomics::atomic<node_type *> table_entry ; ///< Table entry type
/// Bucket table allocator
typedef cds::details::Allocator< table_entry, typename options::allocator > bucket_table_allocator;
/// Memory model for atomic operations
typedef typename options::memory_model memory_model;
const size_t m_nLoadFactor ; ///< load factor (average count of items per bucket)
const size_t m_nCapacity ; ///< Bucket table capacity
table_entry * m_Table ; ///< Bucket table
// Allocates the bucket table; every entry starts as nullptr (bucket head not yet set)
void allocate_table()
m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr );
// Frees the bucket table allocated by allocate_table()
bucket_table_allocator().Delete( m_Table, m_nCapacity );
/// Constructs bucket table for 512K buckets. Load factor is 1.
static_bucket_table()
, m_nCapacity( 512 * 1024 )
/// Constructs bucket table for \p nItemCount items with load factor \p nLoadFactor
size_t nItemCount, ///< Max expected item count in split-ordered list
size_t nLoadFactor ///< Load factor
: m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 ),
// NOTE(review): integer division — if nItemCount < nLoadFactor this is ceil2(0);
// confirm cds::beans::ceil2 yields a valid power of 2 in that case
m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ) )
// m_nCapacity must be power of 2
assert( cds::beans::is_power2( m_nCapacity ) );
/// Destroy bucket table
~static_bucket_table()
/// Returns head node of bucket \p nBucket
node_type * bucket( size_t nBucket ) const
assert( nBucket < capacity() );
return m_Table[ nBucket ].load(memory_model::memory_order_acquire);
/// Set head node \p pNode of bucket \p nBucket
void bucket( size_t nBucket, node_type * pNode )
assert( nBucket < capacity() );
// Each bucket head is expected to be set at most once
assert( bucket( nBucket ) == nullptr );
// release store pairs with the acquire load in bucket(size_t) const
m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );
/// Returns the capacity of the bucket table
size_t capacity() const
/// Returns the load factor, i.e. average count of items per bucket
size_t load_factor() const
return m_nLoadFactor;
/// Expandable bucket table
/**
    This bucket table can dynamically grow its capacity when necessary
    up to maximum bucket count.

    Template parameters:
    - \p GC - garbage collector used
    - \p Node - node type, must be an instantiation of \ref node template
    - \p Options... - options

    \p Options are:
    - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
    - \p opt::memory_model - memory model used. Possible types are opt::v::sequential_consistent, opt::v::relaxed_ordering
*/
template <typename GC, typename Node, typename... Options>
class expandable_bucket_table
// Default values for options not supplied by the user
struct default_options
typedef CDS_DEFAULT_ALLOCATOR allocator;
typedef opt::v::relaxed_ordering memory_model;
typedef typename opt::make_options< default_options, Options... >::type options;
typedef GC gc ; ///< Garbage collector
typedef Node node_type ; ///< Bucket node type
typedef atomics::atomic<node_type *> table_entry ; ///< Table entry type
/// Memory model for atomic operations
typedef typename options::memory_model memory_model;
// The table is two-level: an array of segment pointers, each segment an array of bucket heads
typedef atomics::atomic<table_entry *> segment_type ; ///< Bucket table segment type
/// Bucket table allocator
typedef cds::details::Allocator< segment_type, typename options::allocator > bucket_table_allocator;
/// Bucket table segment allocator
typedef cds::details::Allocator< table_entry, typename options::allocator > segment_allocator;
/// Bucket table metrics
size_t nSegmentCount ; ///< max count of segments in bucket table
size_t nSegmentSize ; ///< the segment's capacity. The capacity must be power of two.
size_t nSegmentSizeLog2 ; ///< log2( m_nSegmentSize )
size_t nLoadFactor ; ///< load factor
size_t nCapacity ; ///< max capacity of bucket table
// Default metrics; capacity is nSegmentCount * nSegmentSize
: nSegmentCount(1024)
, nSegmentSizeLog2( cds::beans::log2( nSegmentSize ) )
, nCapacity( nSegmentCount * nSegmentSize )
const metrics m_metrics ; ///< Dynamic bucket table metrics
//const size_t m_nLoadFactor; ///< load factor (average count of items per bucket)
//const size_t m_nCapacity ; ///< Bucket table capacity
segment_type * m_Segments ; ///< bucket table - array of segments
// Derives segment count and segment size (both powers of two) from the
// expected item count and the requested load factor
metrics calc_metrics( size_t nItemCount, size_t nLoadFactor )
// Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
size_t nBucketCount = (size_t)( ((float) nItemCount) / m.nLoadFactor );
if ( nBucketCount <= 2 ) {
else if ( nBucketCount <= 1024 ) {
// small table: segment sized to the next power of two
m.nSegmentSize = ((size_t) 1) << beans::log2ceil( nBucketCount );
// large table: split log2(bucket count) between segment count and segment size
nBucketCount = beans::log2ceil( nBucketCount );
m.nSegmentSize = ((size_t) 1) << ( nBucketCount / 2 );
if ( nBucketCount & 1 )
// grow until the table can hold nItemCount at the requested load factor
if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount )
m.nCapacity = m.nSegmentCount * m.nSegmentSize;
m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize );
assert( m.nSegmentSizeLog2 != 0 ) ; //
// Allocates the segment-pointer array; every segment pointer starts as nullptr
segment_type * allocate_table()
return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr );
void destroy_table( segment_type * pTable )
bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount );
// Allocates one segment; every bucket head starts as nullptr
table_entry * allocate_segment()
return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr );
void destroy_segment( table_entry * pSegment )
segment_allocator().Delete( pSegment, m_metrics.nSegmentSize );
// m_nSegmentSize must be 2**N
assert( cds::beans::is_power2( m_metrics.nSegmentSize ));
assert( ( ((size_t) 1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize );
// m_nSegmentCount must be 2**K
assert( cds::beans::is_power2( m_metrics.nSegmentCount ));
m_Segments = allocate_table();
/// Constructs bucket table for 512K buckets. Load factor is 1.
expandable_bucket_table()
: m_metrics( calc_metrics( 512 * 1024, 1 ))
/// Constructs bucket table for \p nItemCount items with load factor \p nLoadFactor
expandable_bucket_table(
size_t nItemCount, ///< Max expected item count in split-ordered list
size_t nLoadFactor ///< Load factor
: m_metrics( calc_metrics( nItemCount, nLoadFactor ))
/// Destroy bucket table
~expandable_bucket_table()
// Frees every allocated segment, then the segment-pointer array itself
segment_type * pSegments = m_Segments;
for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) {
table_entry * pEntry = pSegments[i].load(memory_model::memory_order_relaxed);
if ( pEntry != nullptr )
destroy_segment( pEntry );
destroy_table( pSegments );
/// Returns head node of the bucket \p nBucket
node_type * bucket( size_t nBucket ) const
// high bits select the segment, low bits select the slot within it
size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
assert( nSegment < m_metrics.nSegmentCount );
table_entry * pSegment = m_Segments[ nSegment ].load(memory_model::memory_order_acquire);
if ( pSegment == nullptr )
return nullptr; // uninitialized bucket
return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load(memory_model::memory_order_acquire);
/// Set head node \p pNode of bucket \p nBucket
void bucket( size_t nBucket, node_type * pNode )
size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
assert( nSegment < m_metrics.nSegmentCount );
segment_type& segment = m_Segments[nSegment];
// Lazily allocate the segment on first use; the loser of a concurrent CAS race
// frees its copy and uses the winner's segment
if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
table_entry * pNewSegment = allocate_segment();
table_entry * pNull = nullptr;
if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
destroy_segment( pNewSegment );
// release store pairs with the acquire loads in bucket(size_t) const
segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
/// Returns the capacity of the bucket table
size_t capacity() const
return m_metrics.nCapacity;
/// Returns the load factor, i.e. average count of items per bucket
size_t load_factor() const
return m_metrics.nLoadFactor;
/// Split-list node traits
/**
    This traits is intended for converting between underlying ordered list node type
    and split-list node type

    Template parameters:
    - \p BaseNodeTraits - node traits of base ordered list type
*/
template <class BaseNodeTraits>
struct node_traits: private BaseNodeTraits
typedef BaseNodeTraits base_class ; ///< Node traits of base ordered list type
typedef typename base_class::value_type value_type ; ///< Value type
typedef typename base_class::node_type base_node_type ; ///< Ordered list node type
typedef node<base_node_type> node_type ; ///< Split-list node type
/// Convert value reference to node pointer
static node_type * to_node_ptr( value_type& v )
return static_cast<node_type *>( base_class::to_node_ptr( v ) );
/// Convert value pointer to node pointer
static node_type * to_node_ptr( value_type * v )
return static_cast<node_type *>( base_class::to_node_ptr( v ) );
/// Convert value reference to node pointer (const version)
static node_type const * to_node_ptr( value_type const& v )
return static_cast<node_type const*>( base_class::to_node_ptr( v ) );
/// Convert value pointer to node pointer (const version)
static node_type const * to_node_ptr( value_type const * v )
return static_cast<node_type const *>( base_class::to_node_ptr( v ) );
/// Convert node reference to value pointer
static value_type * to_value_ptr( node_type& n )
return base_class::to_value_ptr( static_cast<base_node_type &>( n ) );
/// Convert node pointer to value pointer
static value_type * to_value_ptr( node_type * n )
return base_class::to_value_ptr( static_cast<base_node_type *>( n ) );
/// Convert node reference to value pointer (const version)
static const value_type * to_value_ptr( node_type const & n )
return base_class::to_value_ptr( static_cast<base_node_type const &>( n ) );
/// Convert node pointer to value pointer (const version)
static const value_type * to_value_ptr( node_type const * n )
return base_class::to_value_ptr( static_cast<base_node_type const *>( n ) );
// Selects the bucket table implementation by the dynamic_bucket_table flag:
// true -> expandable_bucket_table, false -> static_bucket_table
template <bool Value, typename GC, typename Node, typename... Options>
struct bucket_table_selector;
// Dynamic (expandable) bucket table
template <typename GC, typename Node, typename... Options>
struct bucket_table_selector< true, GC, Node, Options...>
typedef expandable_bucket_table<GC, Node, Options...> type;
// Static (fixed-capacity) bucket table
template <typename GC, typename Node, typename... Options>
struct bucket_table_selector< false, GC, Node, Options...>
typedef static_bucket_table<GC, Node, Options...> type;
// Disposer for dummy (bucket sentinel) nodes: deallocates the node
// through the given allocator \p Alloc
template <typename GC, class Alloc >
struct dummy_node_disposer {
template <typename Node>
void operator()( Node * p )
typedef cds::details::Allocator< Node, Alloc > node_deallocator;
node_deallocator().Delete( p );
// Pairs a user-supplied search key with its precomputed split-order hash;
// the comparators in rebind_list_options read its members as q.val and q.nHash
template <typename Q>
struct search_value_type
search_value_type( Q& v, size_t h )
// Rebinds the underlying ordered list with a split-list-aware comparator,
// a disposer that handles dummy nodes, and the split-list node as boundary node type
template <class OrderedList, class Options>
class rebind_list_options
typedef OrderedList native_ordered_list;
typedef Options options;
typedef typename native_ordered_list::gc gc;
typedef typename native_ordered_list::key_comparator native_key_comparator;
typedef typename native_ordered_list::node_type node_type;
typedef typename native_ordered_list::value_type value_type;
typedef typename native_ordered_list::node_traits node_traits;
typedef typename native_ordered_list::disposer native_disposer;
typedef split_list::node<node_type> splitlist_node_type;
// Split-order comparator: bit-reversed hashes are compared first, so dummy
// nodes partition the list into buckets; equal-hash regular nodes fall back
// to the native key comparator
int operator()( value_type const& v1, value_type const& v2 ) const
splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v1 ));
splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v2 ));
if ( n1->m_nHash != n2->m_nHash )
return n1->m_nHash < n2->m_nHash ? -1 : 1;
// equal hashes: a dummy node can only compare equal to another dummy node
if ( n1->is_dummy() ) {
assert( n2->is_dummy() );
assert( !n1->is_dummy() && !n2->is_dummy() );
return native_key_comparator()( v1, v2 );
template <typename Q>
int operator()( value_type const& v, search_value_type<Q> const& q ) const
splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
if ( n->m_nHash != q.nHash )
return n->m_nHash < q.nHash ? -1 : 1;
assert( !n->is_dummy() );
return native_key_comparator()( v, q.val );
template <typename Q>
int operator()( search_value_type<Q> const& q, value_type const& v ) const
// mirror of the (value, search_value) overload
return -operator()( v, q );
// Disposer: dummy nodes are freed via dummy_node_disposer, regular values via
// the native disposer.
// NOTE(review): the branch selecting between the two paths is elided in this
// excerpt — presumably keyed on p->is_dummy(); verify against the full source
struct wrapped_disposer
void operator()( value_type * v )
splitlist_node_type * p = static_cast<splitlist_node_type *>( node_traits::to_node_ptr( v ));
dummy_node_disposer<gc, typename options::allocator>()( p );
native_disposer()( v );
// Builds a three-way comparator from a Less predicate, keeping the same
// hash-first ordering as the key comparator above
template <typename Less>
struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
typedef cds::opt::details::make_comparator_from_less<Less> base_class;
template <typename Q>
int operator()( value_type const& v, search_value_type<Q> const& q ) const
splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
if ( n->m_nHash != q.nHash )
return n->m_nHash < q.nHash ? -1 : 1;
assert( !n->is_dummy() );
return base_class()( v, q.val );
template <typename Q>
int operator()( search_value_type<Q> const& q, value_type const& v ) const
splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
if ( n->m_nHash != q.nHash )
return q.nHash < n->m_nHash ? -1 : 1;
assert( !n->is_dummy() );
return base_class()( q.val, v );
template <typename Q1, typename Q2>
int operator()( Q1 const& v1, Q2 const& v2 ) const
return base_class()( v1, v2 );
// Result: the ordered list rebound with the split-list comparator, the
// wrapping disposer, and splitlist_node_type as boundary (dummy) node type
typedef typename native_ordered_list::template rebind_options<
opt::compare< key_compare >
,opt::disposer< wrapped_disposer >
,opt::boundary_node_type< splitlist_node_type >
// Chooses the ordered list's iterator or const_iterator depending on IsConst
template <typename OrderedList, bool IsConst>
struct select_list_iterator;
// mutable iterator
template <typename OrderedList>
struct select_list_iterator<OrderedList, false>
typedef typename OrderedList::iterator type;
// const iterator
template <typename OrderedList>
struct select_list_iterator<OrderedList, true>
typedef typename OrderedList::const_iterator type;
// Split-list iterator: wraps the underlying ordered-list iterator and
// transparently skips dummy (bucket sentinel) nodes
template <typename NodeTraits, typename OrderedList, bool IsConst>
typedef OrderedList ordered_list_type;
typedef typename select_list_iterator<ordered_list_type, IsConst>::type list_iterator;
typedef NodeTraits node_traits;
list_iterator m_itCur;   // current position in the underlying list
list_iterator m_itEnd;   // end of the underlying list
typedef typename list_iterator::value_ptr value_ptr;
typedef typename list_iterator::value_ref value_ref;
/// Copy constructor
iterator_type( iterator_type const& src )
: m_itCur( src.m_itCur )
, m_itEnd( src.m_itEnd )
// This ctor should be protected...
iterator_type( list_iterator itCur, list_iterator itEnd )
// advance past any leading dummy nodes
while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() )
value_ptr operator ->() const
return m_itCur.operator->();
value_ref operator *() const
return m_itCur.operator*();
/// Pre-increment; skips dummy nodes
iterator_type& operator ++()
if ( m_itCur != m_itEnd ) {
} while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() );
iterator_type& operator = (iterator_type const& src)
m_itCur = src.m_itCur;
m_itEnd = src.m_itEnd;
// Comparison accepts either constness of the other iterator (template parameter C)
bool operator ==(iterator_type<node_traits, ordered_list_type, C> const& i ) const
return m_itCur == i.m_itCur;
bool operator !=(iterator_type<node_traits, ordered_list_type, C> const& i ) const
return m_itCur != i.m_itCur;
773 } // namespace details
/// Reverses bit order in \p nHash
static inline size_t reverse_bits( size_t nHash )
return bitop::RBO( nHash );
/// Split-order hash of a regular node: bit-reversed \p nHash with the least significant bit set
static inline size_t regular_hash( size_t nHash )
return reverse_bits( nHash ) | size_t(1);
/// Split-order hash of a dummy (bucket sentinel) node: bit-reversed \p nHash with the
/// least significant bit cleared, so the dummy node precedes regular nodes with the same hash
static inline size_t dummy_hash( size_t nHash )
return reverse_bits( nHash ) & ~size_t(1);
796 } // namespace split_list
// Forward declaration
// NOTE(review): the declared class name (the split-list container taking GC,
// OrderedList and Traits) is elided in this excerpt — verify against the full source
template <class GC, class OrderedList, class Traits = split_list::type_traits>
804 }} // namespace cds::intrusive
806 #endif // #ifndef __CDS_INTRUSIVE_SPLIT_LIST_BASE_H