/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
#define CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H

#include <cds/intrusive/details/base.h>
#include <cds/algo/atomic.h>
#include <cds/details/allocator.h>
#include <cds/algo/int_algo.h>
#include <cds/algo/bitop.h>
#include <cds/opt/hash.h>
#include <cds/intrusive/free_list_selector.h>

namespace cds { namespace intrusive {
    /// Split-ordered list related definitions
    /** @ingroup cds_intrusive_helper
    */
    namespace split_list {

        /// Split-ordered list node
        /**
            Template parameter:
            - \p OrderedListNode - node type for the underlying ordered list
        */
        template <typename OrderedListNode>
        struct node: public OrderedListNode
        {
            typedef OrderedListNode base_class; ///< Base node class

            size_t m_nHash; ///< Hash value for node

            /// Default constructor
            node()
                : m_nHash( 0 )
            {}

            /// Initializes dummy node with \p nHash value
            explicit node( size_t nHash )
                : m_nHash( nHash )
            {}

            /// Checks if the node is a dummy node
            bool is_dummy() const
            {
                return (m_nHash & 1) == 0;
            }
        };
        /// \p SplitListSet internal statistics. May be used for debugging or profiling
        /**
            Template argument \p Counter defines the type of the counters;
            the default is \p cds::atomicity::event_counter.
        */
        template <typename Counter = cds::atomicity::event_counter >
        struct stat
        {
            typedef Counter counter_type; ///< Counter type

            counter_type m_nInsertSuccess;        ///< Count of successful insertions
            counter_type m_nInsertFailed;         ///< Count of failed insertions
            counter_type m_nUpdateNew;            ///< Count of new items created by the \p update() member function
            counter_type m_nUpdateExist;          ///< Count of \p update() calls for an existing item
            counter_type m_nEraseSuccess;         ///< Count of successfully erased items
            counter_type m_nEraseFailed;          ///< Count of attempts to erase an unknown item
            counter_type m_nExtractSuccess;       ///< Count of successfully extracted items
            counter_type m_nExtractFailed;        ///< Count of attempts to extract an unknown item
            counter_type m_nFindSuccess;          ///< Count of successful finds
            counter_type m_nFindFailed;           ///< Count of failed finds
            counter_type m_nHeadNodeAllocated;    ///< Count of allocated head nodes
            counter_type m_nHeadNodeFreed;        ///< Count of freed head nodes
            counter_type m_nBucketCount;          ///< Current bucket count
            counter_type m_nInitBucketRecursive;  ///< Count of recursive bucket initializations
            counter_type m_nInitBucketContention; ///< Count of contention encountered during bucket initialization
            counter_type m_nBusyWaitBucketInit;   ///< Count of busy-wait cycles while a bucket is being initialized
            counter_type m_nBucketsExhausted;     ///< Count of failed bucket allocations

            void onInsertSuccess()       { ++m_nInsertSuccess; }
            void onInsertFailed()        { ++m_nInsertFailed; }
            void onUpdateNew()           { ++m_nUpdateNew; }
            void onUpdateExist()         { ++m_nUpdateExist; }
            void onEraseSuccess()        { ++m_nEraseSuccess; }
            void onEraseFailed()         { ++m_nEraseFailed; }
            void onExtractSuccess()      { ++m_nExtractSuccess; }
            void onExtractFailed()       { ++m_nExtractFailed; }
            void onFindSuccess()         { ++m_nFindSuccess; }
            void onFindFailed()          { ++m_nFindFailed; }
            bool onFind( bool bSuccess )
            {
                if ( bSuccess )
                    onFindSuccess();
                else
                    onFindFailed();
                return bSuccess;
            }
            void onHeadNodeAllocated()   { ++m_nHeadNodeAllocated; }
            void onHeadNodeFreed()       { ++m_nHeadNodeFreed; }
            void onNewBucket()           { ++m_nBucketCount; }
            void onRecursiveInitBucket() { ++m_nInitBucketRecursive; }
            void onBucketInitContenton() { ++m_nInitBucketContention; }
            void onBusyWaitBucketInit()  { ++m_nBusyWaitBucketInit; }
            void onBucketsExhausted()    { ++m_nBucketsExhausted; }
        };
        /// Dummy internal statistics - no counting is performed, no overhead. Supports the interface of \p split_list::stat
        struct empty_stat
        {
            void onInsertSuccess()       const {}
            void onInsertFailed()        const {}
            void onUpdateNew()           const {}
            void onUpdateExist()         const {}
            void onEraseSuccess()        const {}
            void onEraseFailed()         const {}
            void onExtractSuccess()      const {}
            void onExtractFailed()       const {}
            void onFindSuccess()         const {}
            void onFindFailed()          const {}
            bool onFind( bool bSuccess ) const { return bSuccess; }
            void onHeadNodeAllocated()   const {}
            void onHeadNodeFreed()       const {}
            void onNewBucket()           const {}
            void onRecursiveInitBucket() const {}
            void onBucketInitContenton() const {}
            void onBusyWaitBucketInit()  const {}
            void onBucketsExhausted()    const {}
        };
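        /*
            Example: choosing the statistics policy. A minimal sketch; \p my_traits is
            illustrative, and the way the counters are read back depends on the container
            (libcds containers expose a \p statistics() accessor):

            \code
            struct my_traits: public cds::intrusive::split_list::traits
            {
                typedef cds::intrusive::split_list::stat<> stat; // enable counting
            };
            \endcode

            With \p split_list::empty_stat (the default) every onXxx() call compiles
            to nothing, so disabled statistics have zero overhead.
        */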
        /// SplitListSet traits
        struct traits
        {
            /// Hash functor
            /**
                The hash functor converts the key field of struct \p T stored in the split list
                into a hash value of type \p size_t that is used as an index in the hash table.
                There is no meaningful default: the \p opt::hash option is mandatory.
            */
            typedef opt::none hash;

            /// Item counter
            /**
                Item counting is an important part of the \p SplitListSet algorithm:
                the <tt>empty()</tt> member function depends on correct item counting.
                Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.

                Default is \p cds::atomicity::item_counter.
            */
            typedef cds::atomicity::item_counter item_counter;

            /// Bucket table allocator
            /**
                Allocator for the bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
            */
            typedef CDS_DEFAULT_ALLOCATOR allocator;

            /// Internal statistics (by default, disabled)
            /**
                Possible statistics types are: \p split_list::stat (enable internal statistics),
                \p split_list::empty_stat (the default, internal statistics disabled),
                or a user-provided class that supports the \p %split_list::stat interface.
            */
            typedef split_list::empty_stat stat;

            /// C++ memory ordering model
            /**
                Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
                or \p opt::v::sequential_consistent (sequentially consistent memory model).
            */
            typedef opt::v::relaxed_ordering memory_model;

            /// What type of bucket table is used
            /**
                \p true - use \p split_list::expandable_bucket_table that can be expanded
                    if the load factor of the set is exceeded.
                \p false - use \p split_list::static_bucket_table that cannot be expanded
                    and is allocated in the \p SplitListSet constructor.

                Default is \p true.
            */
            static const bool dynamic_bucket_table = true;

            /// Back-off strategy
            typedef cds::backoff::Default back_off;

            /// Padding; default is cache-line padding
            enum {
                padding = cds::opt::cache_line_padding
            };

            /// Free-list of auxiliary nodes
            /**
                The split-list contains auxiliary nodes marking the start of each bucket.
                To increase performance, there is a pool of preallocated aux nodes;
                a part of that pool is a free-list of aux nodes.

                Default is:
                - \p cds::intrusive::FreeList - if the architecture and/or compiler does not support the double-width CAS primitive
                - \p cds::intrusive::TaggedFreeList - if the architecture and/or compiler supports the double-width CAS primitive
            */
            typedef FreeListImpl free_list;
        };
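        /*
            Example: overriding the free-list implementation. A minimal sketch; \p my_traits
            is illustrative. \p FreeListImpl already selects \p TaggedFreeList or \p FreeList
            automatically, so an explicit override is only needed to force one of them:

            \code
            struct my_traits: public cds::intrusive::split_list::traits
            {
                typedef cds::intrusive::FreeList free_list; // force the single-width CAS free-list
            };
            \endcode
        */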
        /// [value-option] Split-list dynamic bucket table option
        /**
            The option is used to select the bucket table implementation.
            Possible values of \p Value are:
            - \p true - select \p expandable_bucket_table
            - \p false - select \p static_bucket_table
        */
        template <bool Value>
        struct dynamic_bucket_table
        {
            template <typename Base> struct pack: public Base
            {
                enum { dynamic_bucket_table = Value };
            };
        };
        /// Metafunction converting an option list to \p split_list::traits
        /**
            Available \p Options:
            - \p opt::hash - mandatory option, specifies the hash functor.
            - \p opt::item_counter - optional, specifies the item counting policy. See \p traits::item_counter
                for the default type.
            - \p opt::memory_model - C++ memory model for atomic operations.
                Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
                or \p opt::v::sequential_consistent (sequentially consistent memory model).
            - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR.
            - \p split_list::dynamic_bucket_table - use a dynamic or static bucket table implementation.
                The dynamic bucket table expands its size up to the maximum bucket count when necessary.
            - \p opt::back_off - back-off strategy used for spinning; default is \p cds::backoff::Default.
            - \p opt::stat - internal statistics, default is \p split_list::empty_stat (disabled).
                To enable internal statistics use \p split_list::stat.
            - \p opt::padding - padding to solve false-sharing issues; default is cache-line padding.
            - \p opt::free_list - a free-list implementation, see \p traits::free_list
        */
        template <typename... Options>
        struct make_traits {
            typedef typename cds::opt::make_options< traits, Options...>::type type; ///< Result of metafunction
        };
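        /*
            Example: building the traits with \p make_traits. A minimal sketch;
            \p my_item_hash is an illustrative user-provided hash functor:

            \code
            typedef cds::intrusive::split_list::make_traits<
                cds::opt::hash< my_item_hash >
                ,cds::intrusive::split_list::dynamic_bucket_table< false > // static table
                ,cds::opt::stat< cds::intrusive::split_list::stat<> >     // enable statistics
            >::type my_traits;
            \endcode

            The result is equivalent to deriving from \p split_list::traits and
            overriding the corresponding members by hand.
        */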
        /// Static bucket table
        /**
            Non-resizeable bucket table for the \p SplitListSet class.
            The capacity of the table (maximum bucket count) is defined in the constructor call.

            Template parameters:
            - \p GC - garbage collector
            - \p Node - node type, must be a type based on \p split_list::node
            - \p Options... - options

            \p Options are:
            - \p opt::allocator - allocator used to allocate the bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
            - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
            - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-width CAS,
              otherwise \p FreeList.
        */
        template <typename GC, typename Node, typename... Options>
        class static_bucket_table
        {
            struct default_options
            {
                typedef CDS_DEFAULT_ALLOCATOR allocator;
                typedef opt::v::relaxed_ordering memory_model;
                typedef FreeListImpl free_list;
            };
            typedef typename opt::make_options< default_options, Options... >::type options;

        public:
            typedef GC   gc;        ///< Garbage collector
            typedef Node node_type; ///< Bucket node type
            typedef typename options::allocator    allocator;    ///< allocator
            typedef typename options::memory_model memory_model; ///< Memory model for atomic operations
            typedef typename options::free_list    free_list;    ///< Free-list

            /// Auxiliary node type
            struct aux_node_type: public node_type, public free_list::node
            {};

            typedef atomics::atomic<aux_node_type *> table_entry; ///< Table entry type
            typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator

        protected:
            const size_t  m_nLoadFactor; ///< Load factor (average count of items per bucket)
            const size_t  m_nCapacity;   ///< Bucket table capacity
            table_entry * m_Table;       ///< Bucket table

            typedef typename allocator::template rebind< aux_node_type >::other aux_node_allocator;

            aux_node_type*          m_auxNode;           ///< Array of pre-allocated auxiliary nodes
            atomics::atomic<size_t> m_nAuxNodeAllocated; ///< Number of allocated auxiliary nodes
            free_list               m_freeList;          ///< Free list
            void allocate_table()
            {
                m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr );
                m_auxNode = aux_node_allocator().allocate( m_nCapacity );
            }

            void destroy_table()
            {
                m_freeList.clear( []( typename free_list::node* ) {} );
                aux_node_allocator().deallocate( m_auxNode, m_nCapacity );
                bucket_table_allocator().Delete( m_Table, m_nCapacity );
            }

        public:
            /// Constructs bucket table for 512K buckets. Load factor is 1.
            static_bucket_table()
                : m_nLoadFactor( 1 )
                , m_nCapacity( 512 * 1024 )
                , m_nAuxNodeAllocated( 0 )
            {
                allocate_table();
            }

            /// Creates the table with the specified size rounded up to the nearest power of two
            static_bucket_table(
                size_t nItemCount,  ///< Max expected item count in the split-ordered list
                size_t nLoadFactor  ///< Load factor
            )
                : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 )
                , m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ))
                , m_nAuxNodeAllocated( 0 )
            {
                // m_nCapacity must be power of 2
                assert( cds::beans::is_power2( m_nCapacity ));
                allocate_table();
            }
            /// Destroys bucket table
            ~static_bucket_table()
            {
                destroy_table();
            }

            /// Returns head node of bucket \p nBucket
            aux_node_type * bucket( size_t nBucket ) const
            {
                assert( nBucket < capacity());
                return m_Table[ nBucket ].load( memory_model::memory_order_acquire );
            }

            /// Set \p pNode as a head of bucket \p nBucket
            void bucket( size_t nBucket, aux_node_type * pNode )
            {
                assert( nBucket < capacity());
                assert( bucket( nBucket ) == nullptr );

                m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );
            }

            /// Allocates an auxiliary node; can return \p nullptr if the table is exhausted
            aux_node_type* alloc_aux_node()
            {
                if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) {
                    // alloc the next free node from m_auxNode
                    size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
                    if ( idx < capacity())
                        return new( &m_auxNode[idx] ) aux_node_type();
                }

                // the preallocated array is exhausted; get a node from the free-list
                auto pFree = m_freeList.get();
                if ( pFree )
                    return static_cast<aux_node_type*>( pFree );
                return nullptr;
            }

            /// Places an auxiliary node to the free-list
            void free_aux_node( aux_node_type* p )
            {
                m_freeList.put( static_cast<aux_node_type*>( p ));
            }

            /// Returns the capacity of the bucket table
            size_t capacity() const
            {
                return m_nCapacity;
            }

            /// Returns the load factor, i.e. average count of items per bucket
            size_t load_factor() const
            {
                return m_nLoadFactor;
            }
        };
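        /*
            Internal-API usage sketch (illustrative; \p my_node stands for a user node
            type derived from \p split_list::node):

            \code
            typedef static_bucket_table< cds::gc::HP, my_node > table_type;

            table_type tbl( 1000000, 2 );   // capacity = ceil2( 1000000 / 2 ) = 2**19 buckets
            table_type::aux_node_type* head = tbl.alloc_aux_node();
            if ( head )                     // nullptr means the aux-node pool is exhausted
                tbl.bucket( 0, head );      // publish the head node of bucket 0
            \endcode
        */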
        /// Expandable bucket table
        /**
            This bucket table can dynamically grow its capacity when necessary,
            up to the maximum bucket count.

            Template parameters:
            - \p GC - garbage collector
            - \p Node - node type, must be derived from \p split_list::node
            - \p Options... - options

            \p Options are:
            - \p opt::allocator - allocator used to allocate the bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
            - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
            - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-width CAS,
              otherwise \p FreeList.
        */
        template <typename GC, typename Node, typename... Options>
        class expandable_bucket_table
        {
            struct default_options
            {
                typedef CDS_DEFAULT_ALLOCATOR allocator;
                typedef opt::v::relaxed_ordering memory_model;
                typedef FreeListImpl free_list;
            };
            typedef typename opt::make_options< default_options, Options... >::type options;

        public:
            typedef GC   gc;        ///< Garbage collector
            typedef Node node_type; ///< Bucket node type
            typedef typename options::allocator allocator; ///< allocator

            /// Memory model for atomic operations
            typedef typename options::memory_model memory_model;

            /// Free-list
            typedef typename options::free_list free_list;

            /// Auxiliary node type
            class aux_node_type: public node_type, public free_list::node
            {};

            typedef atomics::atomic<aux_node_type *> table_entry;  ///< Table entry type
            typedef atomics::atomic<table_entry *>   segment_type; ///< Bucket table segment type

        protected:
            struct aux_node_segment {
                atomics::atomic< size_t > aux_node_count; // how many aux nodes are allocated from the segment
                aux_node_segment*         next_segment;
                // aux_node_type nodes[];

                aux_node_segment()
                    : aux_node_count( 0 )
                    , next_segment( nullptr )
                {}

                aux_node_type* segment()
                {
                    // the aux node array is laid out immediately after this header
                    return reinterpret_cast<aux_node_type*>( this + 1 );
                }
            };

            /// Bucket table metrics
            struct metrics {
                size_t nSegmentCount;    ///< max count of segments in the bucket table
                size_t nSegmentSize;     ///< the segment's capacity; must be a power of two
                size_t nSegmentSizeLog2; ///< <tt> log2( m_nSegmentSize )</tt>
                size_t nLoadFactor;      ///< load factor
                size_t nCapacity;        ///< max capacity of the bucket table

                metrics()
                    : nSegmentCount( 1024 )
                    , nSegmentSize( 512 )
                    , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ))
                    , nLoadFactor( 1 )
                    , nCapacity( nSegmentCount * nSegmentSize )
                {}
            };

        public:
            /// Bucket table allocator
            typedef cds::details::Allocator< segment_type, allocator > bucket_table_allocator;

            /// Bucket table segment allocator
            typedef cds::details::Allocator< table_entry, allocator > segment_allocator;

            // Aux node segment allocator
            typedef typename allocator::template rebind<char>::other raw_allocator;
        public:
            /// Constructs bucket table for 512K buckets. Load factor is 1.
            expandable_bucket_table()
                : m_metrics( calc_metrics( 512 * 1024, 1 ))
            {
                init();
            }

            /// Creates the table with the specified capacity rounded up to the nearest power of two
            expandable_bucket_table(
                size_t nItemCount, ///< Max expected item count in the split-ordered list
                size_t nLoadFactor ///< Load factor
            )
                : m_metrics( calc_metrics( nItemCount, nLoadFactor ))
            {
                init();
            }

            /// Destroys bucket table
            ~expandable_bucket_table()
            {
                m_freeList.clear( []( typename free_list::node* ) {} );

                for ( auto aux_segment = m_auxNodeList.load( atomics::memory_order_relaxed ); aux_segment; ) {
                    auto next_segment = aux_segment->next_segment;
                    free_aux_segment( aux_segment );
                    aux_segment = next_segment;
                }

                segment_type* pSegments = m_Segments;
                for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) {
                    table_entry* pEntry = pSegments[i].load( memory_model::memory_order_relaxed );
                    if ( pEntry != nullptr )
                        destroy_segment( pEntry );
                }

                destroy_table( pSegments );
            }
            /// Returns head node of bucket \p nBucket
            aux_node_type * bucket( size_t nBucket ) const
            {
                size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
                assert( nSegment < m_metrics.nSegmentCount );

                table_entry* pSegment = m_Segments[ nSegment ].load( memory_model::memory_order_acquire );
                if ( pSegment == nullptr )
                    return nullptr; // uninitialized bucket
                return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load( memory_model::memory_order_acquire );
            }

            /// Set \p pNode as a head of bucket \p nBucket
            void bucket( size_t nBucket, aux_node_type * pNode )
            {
                size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
                assert( nSegment < m_metrics.nSegmentCount );

                segment_type& segment = m_Segments[nSegment];
                if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
                    // segments are allocated lazily, on first use
                    table_entry* pNewSegment = allocate_segment();
                    table_entry* pNull = nullptr;
                    if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
                        // another thread installed the segment first; ours is redundant
                        destroy_segment( pNewSegment );
                    }
                }
                segment.load( memory_model::memory_order_acquire )[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
            }
            /// Allocates an auxiliary node; can return \p nullptr if the table is exhausted
            aux_node_type* alloc_aux_node()
            {
                aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_relaxed );
                assert( aux_segment != nullptr );

                for ( ;; ) {
                    // try to allocate from the current aux segment
                    if ( aux_segment->aux_node_count.load( memory_model::memory_order_relaxed ) < m_metrics.nSegmentSize ) {
                        size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
                        if ( idx < m_metrics.nSegmentSize )
                            return new( aux_segment->segment() + idx ) aux_node_type();
                    }

                    // try to allocate from the free-list
                    auto pFree = m_freeList.get();
                    if ( pFree )
                        return static_cast<aux_node_type*>( pFree );

                    // the free-list is empty and the current segment is full:
                    // try to install a new aux segment.
                    // We can allocate more aux segments than we need but it is not a problem in this context
                    aux_node_segment* new_aux_segment = allocate_aux_segment();
                    new_aux_segment->next_segment = aux_segment; // link for cleanup in the destructor
                    new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
                    if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ))
                        return new( new_aux_segment->segment()) aux_node_type();

                    // lost the race: another thread installed a fresh segment first;
                    // release ours and retry with the winner's segment
                    free_aux_segment( new_aux_segment );
                }
            }

            /// Places an auxiliary node to the free-list
            void free_aux_node( aux_node_type* p )
            {
                m_freeList.put( static_cast<aux_node_type*>( p ));
            }

            /// Returns the capacity of the bucket table
            size_t capacity() const
            {
                return m_metrics.nCapacity;
            }

            /// Returns the load factor, i.e. average count of items per bucket
            size_t load_factor() const
            {
                return m_metrics.nLoadFactor;
            }
        protected:
            metrics calc_metrics( size_t nItemCount, size_t nLoadFactor )
            {
                metrics m;

                // Calculate m_nSegmentSize and m_nSegmentCount from nItemCount
                m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;

                size_t nBucketCount = (size_t)(((float)nItemCount) / m.nLoadFactor );
                if ( nBucketCount <= 2 ) {
                    m.nSegmentCount = 1;
                    m.nSegmentSize = 2;
                }
                else if ( nBucketCount <= 1024 ) {
                    m.nSegmentCount = 1;
                    m.nSegmentSize = ((size_t)1) << beans::log2ceil( nBucketCount );
                }
                else {
                    nBucketCount = beans::log2ceil( nBucketCount );
                    m.nSegmentCount =
                        m.nSegmentSize = ((size_t)1) << (nBucketCount / 2);
                    if ( nBucketCount & 1 )
                        m.nSegmentSize *= 2;
                    if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount )
                        m.nSegmentSize *= 2;
                }
                m.nCapacity = m.nSegmentCount * m.nSegmentSize;
                m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize );
                assert( m.nSegmentSizeLog2 != 0 );
                return m;
            }
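            /*
                Worked example for calc_metrics(): nItemCount = 1000000, nLoadFactor = 1.
                nBucketCount = 1000000 and log2ceil( 1000000 ) = 20, so
                nSegmentCount = nSegmentSize = 2**10 = 1024 and
                nCapacity = 1024 * 1024 = 1048576 buckets, allocated lazily
                segment-by-segment as buckets are first touched.
            */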
            segment_type* allocate_table()
            {
                return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr );
            }

            void destroy_table( segment_type* pTable )
            {
                bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount );
            }

            table_entry* allocate_segment()
            {
                return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr );
            }

            void destroy_segment( table_entry* pSegment )
            {
                segment_allocator().Delete( pSegment, m_metrics.nSegmentSize );
            }

            aux_node_segment* allocate_aux_segment()
            {
                // the aux_node_type array is laid out immediately after the segment header
                char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
                return new(p) aux_node_segment();
            }

            void free_aux_segment( aux_node_segment* p )
            {
                raw_allocator().deallocate( reinterpret_cast<char*>( p ), sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
            }

            void init()
            {
                // m_nSegmentSize must be 2**N
                assert( cds::beans::is_power2( m_metrics.nSegmentSize ));
                assert( (((size_t)1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize );

                // m_nSegmentCount must be 2**K
                assert( cds::beans::is_power2( m_metrics.nSegmentCount ));

                m_Segments = allocate_table();
                m_auxNodeList = allocate_aux_segment();
            }

        protected:
            metrics const m_metrics;  ///< Dynamic bucket table metrics
            segment_type* m_Segments; ///< bucket table - array of segments
            atomics::atomic<aux_node_segment*> m_auxNodeList; ///< segment list of aux nodes
            free_list m_freeList;     ///< List of free aux nodes
        };
        /// Split-list node traits
        /**
            This traits class is intended for converting between the underlying ordered-list
            node type and the split-list node type.

            Template parameter:
            - \p BaseNodeTraits - node traits of the base ordered-list type
        */
        template <class BaseNodeTraits>
        struct node_traits: private BaseNodeTraits
        {
            typedef BaseNodeTraits base_class;                  ///< Base ordered-list node traits
            typedef typename base_class::value_type value_type; ///< Value type
            typedef typename base_class::node_type base_node_type; ///< Ordered-list node type
            typedef node<base_node_type> node_type;             ///< Split-list node type

            /// Convert value reference to node pointer
            static node_type * to_node_ptr( value_type& v )
            {
                return static_cast<node_type *>( base_class::to_node_ptr( v ));
            }

            /// Convert value pointer to node pointer
            static node_type * to_node_ptr( value_type * v )
            {
                return static_cast<node_type *>( base_class::to_node_ptr( v ));
            }

            /// Convert value reference to node pointer (const version)
            static node_type const * to_node_ptr( value_type const& v )
            {
                return static_cast<node_type const*>( base_class::to_node_ptr( v ));
            }

            /// Convert value pointer to node pointer (const version)
            static node_type const * to_node_ptr( value_type const * v )
            {
                return static_cast<node_type const *>( base_class::to_node_ptr( v ));
            }

            /// Convert node reference to value pointer
            static value_type * to_value_ptr( node_type& n )
            {
                return base_class::to_value_ptr( static_cast<base_node_type &>( n ));
            }

            /// Convert node pointer to value pointer
            static value_type * to_value_ptr( node_type * n )
            {
                return base_class::to_value_ptr( static_cast<base_node_type *>( n ));
            }

            /// Convert node reference to value pointer (const version)
            static const value_type * to_value_ptr( node_type const & n )
            {
                return base_class::to_value_ptr( static_cast<base_node_type const &>( n ));
            }

            /// Convert node pointer to value pointer (const version)
            static const value_type * to_value_ptr( node_type const * n )
            {
                return base_class::to_value_ptr( static_cast<base_node_type const *>( n ));
            }
        };
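        /*
            Round-trip sketch (illustrative): the value/node conversions are inverses
            of each other.

            \code
            value_type& v = ...;
            node_type*  n = node_traits::to_node_ptr( v );   // value -> split-list node
            assert( node_traits::to_value_ptr( *n ) == &v ); // node -> value is the inverse
            \endcode
        */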
        namespace details {
            template <bool Value, typename GC, typename Node, typename... Options>
            struct bucket_table_selector;

            template <typename GC, typename Node, typename... Options>
            struct bucket_table_selector< true, GC, Node, Options...>
            {
                typedef expandable_bucket_table<GC, Node, Options...> type;
            };

            template <typename GC, typename Node, typename... Options>
            struct bucket_table_selector< false, GC, Node, Options...>
            {
                typedef static_bucket_table<GC, Node, Options...> type;
            };

            template <typename Q>
            struct search_value_type
            {
                Q&     val;
                size_t nHash;

                search_value_type( Q& v, size_t h )
                    : val( v )
                    , nHash( h )
                {}
            };
            template <class OrderedList, class Traits>
            class rebind_list_traits
            {
                typedef OrderedList native_ordered_list;
                typedef Traits      traits;

                typedef typename native_ordered_list::gc             gc;
                typedef typename native_ordered_list::key_comparator native_key_comparator;
                typedef typename native_ordered_list::node_type      node_type;
                typedef typename native_ordered_list::value_type     value_type;
                typedef typename native_ordered_list::node_traits    node_traits;
                typedef typename native_ordered_list::disposer       native_disposer;

                typedef split_list::node<node_type> splitlist_node_type;

                struct key_compare {
                    int operator()( value_type const& v1, value_type const& v2 ) const
                    {
                        splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v1 ));
                        splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v2 ));
                        if ( n1->m_nHash != n2->m_nHash )
                            return n1->m_nHash < n2->m_nHash ? -1 : 1;

                        if ( n1->is_dummy()) {
                            assert( n2->is_dummy());
                            return 0;
                        }

                        assert( !n1->is_dummy() && !n2->is_dummy());

                        return native_key_comparator()( v1, v2 );
                    }

                    template <typename Q>
                    int operator()( value_type const& v, search_value_type<Q> const& q ) const
                    {
                        splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
                        if ( n->m_nHash != q.nHash )
                            return n->m_nHash < q.nHash ? -1 : 1;

                        assert( !n->is_dummy());
                        return native_key_comparator()( v, q.val );
                    }

                    template <typename Q>
                    int operator()( search_value_type<Q> const& q, value_type const& v ) const
                    {
                        return -operator()( v, q );
                    }
                };
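                /*
                    Ordering sketch: the comparator orders nodes by split-order hash first
                    and consults the user key comparator only when the hashes are equal.
                    For example, with 4-bit hashes a bucket head with dummy hash 0110
                    precedes every regular node with hash 0111 of the same bucket, and
                    both precede a node with hash 1000 belonging to another bucket.
                */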
                struct wrapped_disposer
                {
                    void operator()( value_type * v )
                    {
                        splitlist_node_type * p = static_cast<splitlist_node_type *>( node_traits::to_node_ptr( v ));
                        if ( !p->is_dummy())
                            native_disposer()( v );
                    }
                };
            public:
                template <typename Less>
                struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
                {
                    typedef cds::opt::details::make_comparator_from_less<Less> base_class;

                    template <typename Q>
                    int operator()( value_type const& v, search_value_type<Q> const& q ) const
                    {
                        splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
                        if ( n->m_nHash != q.nHash )
                            return n->m_nHash < q.nHash ? -1 : 1;

                        assert( !n->is_dummy());
                        return base_class()( v, q.val );
                    }

                    template <typename Q>
                    int operator()( search_value_type<Q> const& q, value_type const& v ) const
                    {
                        splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
                        if ( n->m_nHash != q.nHash )
                            return q.nHash < n->m_nHash ? -1 : 1;

                        assert( !n->is_dummy());
                        return base_class()( q.val, v );
                    }

                    template <typename Q1, typename Q2>
                    int operator()( Q1 const& v1, Q2 const& v2 ) const
                    {
                        return base_class()( v1, v2 );
                    }
                };

                typedef typename native_ordered_list::template rebind_traits<
                    opt::compare< key_compare >
                    ,opt::disposer< wrapped_disposer >
                    ,opt::boundary_node_type< splitlist_node_type >
                >::type result;
            };
            template <typename OrderedList, bool IsConst>
            struct select_list_iterator;

            template <typename OrderedList>
            struct select_list_iterator<OrderedList, false>
            {
                typedef typename OrderedList::iterator type;
            };

            template <typename OrderedList>
            struct select_list_iterator<OrderedList, true>
            {
                typedef typename OrderedList::const_iterator type;
            };

            template <typename NodeTraits, typename OrderedList, bool IsConst>
            class iterator_type
            {
                typedef OrderedList ordered_list_type;
                friend class iterator_type <NodeTraits, OrderedList, !IsConst >;

            protected:
                typedef typename select_list_iterator<ordered_list_type, IsConst>::type list_iterator;
                typedef NodeTraits node_traits;

            private:
                list_iterator m_itCur;
                list_iterator m_itEnd;

            public:
                typedef typename list_iterator::value_ptr value_ptr;
                typedef typename list_iterator::value_ref value_ref;
            public:
                iterator_type()
                {}

                iterator_type( iterator_type const& src )
                    : m_itCur( src.m_itCur )
                    , m_itEnd( src.m_itEnd )
                {}

                // This ctor should be protected...
                iterator_type( list_iterator itCur, list_iterator itEnd )
                    : m_itCur( itCur )
                    , m_itEnd( itEnd )
                {
                    // skip dummy nodes
                    while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy())
                        ++m_itCur;
                }

                value_ptr operator ->() const
                {
                    return m_itCur.operator->();
                }

                value_ref operator *() const
                {
                    return m_itCur.operator*();
                }

                /// Pre-increment
                iterator_type& operator ++()
                {
                    if ( m_itCur != m_itEnd ) {
                        do {
                            ++m_itCur;
                        } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy());
                    }
                    return *this;
                }

                iterator_type& operator =( iterator_type const& src )
                {
                    m_itCur = src.m_itCur;
                    m_itEnd = src.m_itEnd;
                    return *this;
                }

                template <bool C>
                bool operator ==( iterator_type<node_traits, ordered_list_type, C> const& i ) const
                {
                    return m_itCur == i.m_itCur;
                }

                template <bool C>
                bool operator !=( iterator_type<node_traits, ordered_list_type, C> const& i ) const
                {
                    return m_itCur != i.m_itCur;
                }
            };
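            /*
                Iteration sketch (illustrative; \p set stands for a split-list based
                container): the iterator walks the underlying ordered list and
                transparently skips dummy bucket-head nodes, so dereferencing always
                yields a user value.

                \code
                for ( auto it = set.begin(); it != set.end(); ++it )
                    process( *it ); // process() is an illustrative user function
                \endcode
            */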
        } // namespace details
        /// Reverses bit order in \p nHash
        static inline size_t reverse_bits( size_t nHash )
        {
            return bitop::RBO( nHash );
        }

        static inline size_t regular_hash( size_t nHash )
        {
            return reverse_bits( nHash ) | size_t(1);
        }

        static inline size_t dummy_hash( size_t nHash )
        {
            return reverse_bits( nHash ) & ~size_t(1);
        }
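        /*
            Example on an 8-bit hash (illustrative; \p size_t is wider in practice):

            \code
            nHash          = 0b00000110
            reverse_bits() = 0b01100000
            regular_hash() = 0b01100001  // LSB set: data node, is_dummy() == false
            dummy_hash()   = 0b01100000  // LSB clear: bucket head, is_dummy() == true
            \endcode

            Reversing the bits places all keys of one bucket in a contiguous run of the
            ordered list, headed by the bucket's dummy node, so a bucket can be split
            without moving items when the bucket count doubles.
        */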
    } // namespace split_list

    // Forward declaration
    template <class GC, class OrderedList, class Traits = split_list::traits>
    class SplitListSet;
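    /*
        Putting it together (a sketch; \p my_item and \p my_traits are illustrative,
        and the ordered-list hook/traits plumbing is omitted for brevity):

        \code
        typedef cds::intrusive::MichaelList< cds::gc::HP, my_item > ordered_list;
        typedef cds::intrusive::SplitListSet< cds::gc::HP, ordered_list, my_traits > set_type;
        \endcode
    */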
}} // namespace cds::intrusive

#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H