2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
32 #define CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
34 #include <cds/intrusive/details/base.h>
35 #include <cds/algo/atomic.h>
36 #include <cds/details/allocator.h>
37 #include <cds/algo/int_algo.h>
38 #include <cds/algo/bitop.h>
39 #include <cds/opt/hash.h>
40 #include <cds/intrusive/free_list_selector.h>
42 namespace cds { namespace intrusive {
44 /// Split-ordered list related definitions
45 /** @ingroup cds_intrusive_helper
47 namespace split_list {
/// Split-ordered list node
/**
    Template parameter:
    - \p OrderedListNode - node type for underlying ordered list
*/
template <typename OrderedListNode>
struct node: public OrderedListNode
{
    //@cond
    typedef OrderedListNode base_class;
    //@endcond

    size_t m_nHash; ///< Hash value for node; LSB == 0 marks a dummy (auxiliary) node, LSB == 1 a regular node

    /// Default constructor; the node is created as a dummy node (zero hash)
    node()
        : m_nHash( 0 )
    {}

    /// Initializes dummy node with \p nHash value
    node( size_t nHash )
        : m_nHash( nHash )
    {}

    /// Checks if the node is dummy node
    bool is_dummy() const
    {
        // Regular hashes always have the least significant bit set (see regular_hash()),
        // dummy hashes always have it cleared (see dummy_hash())
        return ( m_nHash & 1 ) == 0;
    }
};
83 /// \p SplitListSet internal statistics. May be used for debugging or profiling
85 Template argument \p Counter defines type of counter, default is \p cds::atomicity::event_counter.
87 template <typename Counter = cds::atomicity::event_counter >
90 typedef Counter counter_type; ///< Counter type
92 counter_type m_nInsertSuccess; ///< Count of success inserting
93 counter_type m_nInsertFailed; ///< Count of failed inserting
94 counter_type m_nUpdateNew; ///< Count of new item created by \p ensure() member function
95 counter_type m_nUpdateExist; ///< Count of \p ensure() call for existing item
96 counter_type m_nEraseSuccess; ///< Count of success erasing of items
97 counter_type m_nEraseFailed; ///< Count of attempts to erase unknown item
98 counter_type m_nExtractSuccess; ///< Count of success extracting of items
99 counter_type m_nExtractFailed; ///< Count of attempts to extract unknown item
100 counter_type m_nFindSuccess; ///< Count of success finding
101 counter_type m_nFindFailed; ///< Count of failed finding
102 counter_type m_nHeadNodeAllocated; ///< Count of allocated head node
103 counter_type m_nHeadNodeFreed; ///< Count of freed head node
104 counter_type m_nBucketCount; ///< Current bucket count
105 counter_type m_nInitBucketRecursive; ///< Count of recursive bucket initialization
106 counter_type m_nInitBucketContention; ///< Count of bucket init contention encountered
107 counter_type m_nBusyWaitBucketInit; ///< Count of busy wait cycle while a bucket is initialized
108 counter_type m_nBucketsExhausted; ///< Count of failed bucket allocation
111 void onInsertSuccess() { ++m_nInsertSuccess; }
112 void onInsertFailed() { ++m_nInsertFailed; }
113 void onUpdateNew() { ++m_nUpdateNew; }
114 void onUpdateExist() { ++m_nUpdateExist; }
115 void onEraseSuccess() { ++m_nEraseSuccess; }
116 void onEraseFailed() { ++m_nEraseFailed; }
117 void onExtractSuccess() { ++m_nExtractSuccess; }
118 void onExtractFailed() { ++m_nExtractFailed; }
119 void onFindSuccess() { ++m_nFindSuccess; }
120 void onFindFailed() { ++m_nFindFailed; }
121 bool onFind(bool bSuccess)
129 void onHeadNodeAllocated() { ++m_nHeadNodeAllocated; }
130 void onHeadNodeFreed() { ++m_nHeadNodeFreed; }
131 void onNewBucket() { ++m_nBucketCount; }
132 void onRecursiveInitBucket() { ++m_nInitBucketRecursive; }
133 void onBucketInitContenton() { ++m_nInitBucketContention; }
134 void onBusyWaitBucketInit() { ++m_nBusyWaitBucketInit; }
135 void onBucketsExhausted() { ++m_nBucketsExhausted; }
/// Dummy queue statistics - no counting is performed, no overhead. Support interface like \p split_list::stat
struct empty_stat {
    //@cond
    // All event handlers are const no-ops so an empty_stat member adds no state
    // and every call is optimized away entirely.
    void onInsertSuccess()       const {}
    void onInsertFailed()        const {}
    void onUpdateNew()           const {}
    void onUpdateExist()         const {}
    void onEraseSuccess()        const {}
    void onEraseFailed()         const {}
    void onExtractSuccess()      const {}
    void onExtractFailed()       const {}
    void onFindSuccess()         const {}
    void onFindFailed()          const {}
    bool onFind( bool bSuccess ) const { return bSuccess; } // pass-through, no counting
    void onHeadNodeAllocated()   const {}
    void onHeadNodeFreed()       const {}
    void onNewBucket()           const {}
    void onRecursiveInitBucket() const {}
    void onBucketInitContenton() const {}
    void onBusyWaitBucketInit()  const {}
    void onBucketsExhausted()    const {}
    //@endcond
};
163 /// SplitListSet traits
168 Hash function converts the key fields of struct \p T stored in the split list
169 into hash value of type \p size_t that is an index in hash table.
170 By default, \p std::hash is used.
172 typedef opt::none hash;
176 The item counting is an important part of \p SplitListSet algorithm:
177 the <tt>empty()</tt> member function depends on correct item counting.
178 Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.
180 Default is \p cds::atomicity::item_counter.
182 typedef cds::atomicity::item_counter item_counter;
184 /// Bucket table allocator
186 Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
188 typedef CDS_DEFAULT_ALLOCATOR allocator;
190 /// Internal statistics (by default, disabled)
192 Possible statistics types are: \p split_list::stat (enable internal statistics),
193 \p split_list::empty_stat (the default, internal statistics disabled),
194 user-provided class that supports \p %split_list::stat interface.
196 typedef split_list::empty_stat stat;
199 /// C++ memory ordering model
201 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
202 or \p opt::v::sequential_consistent (sequentially consisnent memory model).
204 typedef opt::v::relaxed_ordering memory_model;
206 /// What type of bucket table is used
208 \p true - use \p split_list::expandable_bucket_table that can be expanded
209 if the load factor of the set is exhausted.
210 \p false - use \p split_list::static_bucket_table that cannot be expanded
211 and is allocated in \p SplitListSet constructor.
215 static const bool dynamic_bucket_table = true;
217 /// Back-off strategy
218 typedef cds::backoff::Default back_off;
220 /// Padding; default is cache-line padding
222 padding = cds::opt::cache_line_padding
225 /// Free-list of auxiliary nodes
227 The split-list contains auxiliary nodes marked the start of buckets.
228 To increase performance, there is a pool of preallocated aux nodes. The part of the pool is a free-list
232 - \p cds::intrusive::FreeList - if architecture and/or compiler does not support double-width CAS primitive
233 - \p cds::intrusive::TaggedFreeList - if architecture and/or compiler supports double-width CAS primitive
235 typedef FreeListImpl free_list;
/// [value-option] Split-list dynamic bucket table option
/**
    The option is used to select bucket table implementation.
    Possible values of \p Value are:
    - \p true - select \p expandable_bucket_table
    - \p false - select \p static_bucket_table
*/
template <bool Value>
struct dynamic_bucket_table
{
    //@cond
    template <typename Base> struct pack: public Base
    {
        enum { dynamic_bucket_table = Value };
    };
    //@endcond
};
256 /// Metafunction converting option list to \p split_list::traits
258 Available \p Options:
259 - \p opt::hash - mandatory option, specifies hash functor.
260 - \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter
262 - \p opt::memory_model - C++ memory model for atomic operations.
263 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
264 or \p opt::v::sequential_consistent (sequentially consistent memory model).
265 - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR.
266 - \p split_list::dynamic_bucket_table - use dynamic or static bucket table implementation.
267 Dynamic bucket table expands its size up to maximum bucket count when necessary
268 - \p opt::back_off - back-off strategy used for spinning, default is \p cds::backoff::Default.
269 - \p opt::stat - internal statistics, default is \p split_list::empty_stat (disabled).
270 To enable internal statistics use \p split_list::stat.
271 - \p opt::padding - a padding to solve false-sharing issues; default is cache-line padding
272 - \p opt::free_list - a free-list implementation, see \p traits::free_list
274 template <typename... Options>
276 typedef typename cds::opt::make_options< traits, Options...>::type type ; ///< Result of metafunction
279 /// Static bucket table
281 Non-resizeable bucket table for \p SplitListSet class.
282 The capacity of table (max bucket count) is defined in the constructor call.
285 - \p GC - garbage collector
286 - \p Node - node type, must be a type based on \p split_list::node
287 - \p Options... - options
290 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
291 - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
292 - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-with CAS
293 otherwise \p FreeList.
295 template <typename GC, typename Node, typename... Options>
296 class static_bucket_table
299 struct default_options
301 typedef CDS_DEFAULT_ALLOCATOR allocator;
302 typedef opt::v::relaxed_ordering memory_model;
303 typedef FreeListImpl free_list;
305 typedef typename opt::make_options< default_options, Options... >::type options;
309 typedef GC gc; ///< Garbage collector
310 typedef Node node_type; ///< Bucket node type
311 typedef atomics::atomic<node_type *> table_entry; ///< Table entry type
312 typedef typename options::allocator allocator; ///< allocator
314 /// Bucket table allocator
315 typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator;
317 /// Memory model for atomic operations
318 typedef typename options::memory_model memory_model;
321 typedef typename options::free_list free_list;
325 const size_t m_nLoadFactor; ///< load factor (average count of items per bucket)
326 const size_t m_nCapacity; ///< Bucket table capacity
327 table_entry * m_Table; ///< Bucket table
329 typedef typename free_list::node free_list_node;
330 union internal_node_type {
332 free_list_node free_node;
334 ~internal_node_type() {} // to block compiler warnings
337 typedef typename allocator::template rebind< internal_node_type >::other aux_node_allocator;
339 internal_node_type* m_auxNode; ///< Array of pre-allocated auxiliary nodes
340 atomics::atomic<size_t> m_nAuxNodeAllocated; ///< how many auxiliary node allocated
341 free_list m_freeList; ///< Free list
346 void allocate_table()
348 m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr );
349 m_auxNode = aux_node_allocator().allocate( m_nCapacity );
354 m_freeList.clear( []( typename free_list::node* ) {} );
355 aux_node_allocator().deallocate( m_auxNode, m_nCapacity );
356 bucket_table_allocator().Delete( m_Table, m_nCapacity );
361 /// Constructs bucket table for 512K buckets. Load factor is 1.
362 static_bucket_table()
364 , m_nCapacity( 512 * 1024 )
365 , m_nAuxNodeAllocated( 0 )
370 /// Creates the table with specified size rounded up to nearest power-of-two
372 size_t nItemCount, ///< Max expected item count in split-ordered list
373 size_t nLoadFactor ///< Load factor
375 : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 )
376 , m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ) )
377 , m_nAuxNodeAllocated( 0 )
379 // m_nCapacity must be power of 2
380 assert( cds::beans::is_power2( m_nCapacity ) );
384 /// Destroys bucket table
385 ~static_bucket_table()
390 /// Returns head node of bucket \p nBucket
391 node_type * bucket( size_t nBucket ) const
393 assert( nBucket < capacity() );
394 return m_Table[ nBucket ].load(memory_model::memory_order_acquire);
397 /// Set \p pNode as a head of bucket \p nBucket
398 void bucket( size_t nBucket, node_type * pNode )
400 assert( nBucket < capacity() );
401 assert( bucket( nBucket ) == nullptr );
403 m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );
406 /// Allocates auxiliary node; can return \p nullptr if the table exhausted
407 node_type* alloc_aux_node()
409 if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity() ) {
410 // alloc next free node from m_auxNode
411 size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
412 if ( idx < capacity() )
413 return new( &m_auxNode[idx].node ) node_type;
416 // get from free-list
417 auto pFree = m_freeList.get();
419 return new( pFree ) node_type;
425 /// Places node type to free-list
426 void free_aux_node( node_type* p )
429 m_freeList.put( new(p) free_list_node());
432 /// Returns the capacity of the bucket table
433 size_t capacity() const
438 /// Returns the load factor, i.e. average count of items per bucket
439 size_t load_factor() const
441 return m_nLoadFactor;
445 /// Expandable bucket table
447 This bucket table can dynamically grow its capacity when necessary
448 up to maximum bucket count.
451 - \p GC - garbage collector
452 - \p Node - node type, must be derived from \p split_list::node
453 - \p Options... - options
456 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
457 - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
458 - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-with CAS
459 otherwise \p FreeList.
461 template <typename GC, typename Node, typename... Options>
462 class expandable_bucket_table
465 struct default_options
467 typedef CDS_DEFAULT_ALLOCATOR allocator;
468 typedef opt::v::relaxed_ordering memory_model;
469 typedef FreeListImpl free_list;
471 typedef typename opt::make_options< default_options, Options... >::type options;
475 typedef GC gc; ///< Garbage collector
476 typedef Node node_type; ///< Bucket node type
477 typedef typename options::allocator allocator; ///< allocator
479 /// Memory model for atomic operations
480 typedef typename options::memory_model memory_model;
483 typedef typename options::free_list free_list;
487 typedef atomics::atomic<node_type *> table_entry; ///< Table entry type
488 typedef atomics::atomic<table_entry *> segment_type; ///< Bucket table segment type
490 typedef typename free_list::node free_list_node;
492 union internal_node_type {
494 free_list_node free_node;
496 ~internal_node_type() {} // to block compiler grumble
499 struct aux_node_segment {
500 atomics::atomic< size_t > aux_node_count; // how many aux nodes allocated from the segment
501 aux_node_segment* next_segment;
502 // internal_node_type nodes[];
506 , next_segment( nullptr )
509 internal_node_type* segment()
511 return reinterpret_cast<internal_node_type*>( this + 1 );
515 /// Bucket table metrics
517 size_t nSegmentCount; ///< max count of segments in bucket table
518 size_t nSegmentSize; ///< the segment's capacity. The capacity must be power of two.
519 size_t nSegmentSizeLog2; ///< <tt> log2( m_nSegmentSize )</tt>
520 size_t nLoadFactor; ///< load factor
521 size_t nCapacity; ///< max capacity of bucket table
524 : nSegmentCount( 1024 )
525 , nSegmentSize( 512 )
526 , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ) )
528 , nCapacity( nSegmentCount * nSegmentSize )
532 /// Bucket table allocator
533 typedef cds::details::Allocator< segment_type, allocator > bucket_table_allocator;
535 /// Bucket table segment allocator
536 typedef cds::details::Allocator< table_entry, allocator > segment_allocator;
538 // Aux node segment allocator
539 typedef typename allocator::template rebind<char>::other raw_allocator;
544 /// Constructs bucket table for 512K buckets. Load factor is 1.
545 expandable_bucket_table()
546 : m_metrics( calc_metrics( 512 * 1024, 1 ))
551 /// Creates the table with specified capacity rounded up to nearest power-of-two
552 expandable_bucket_table(
553 size_t nItemCount, ///< Max expected item count in split-ordered list
554 size_t nLoadFactor ///< Load factor
556 : m_metrics( calc_metrics( nItemCount, nLoadFactor ))
561 /// Destroys bucket table
562 ~expandable_bucket_table()
564 m_freeList.clear( []( typename free_list::node* ) {} );
566 for ( auto aux_segment = m_auxNodeList.load( atomics::memory_order_relaxed ); aux_segment; ) {
567 auto next_segment = aux_segment->next_segment;
568 free_aux_segment( aux_segment );
569 aux_segment = next_segment;
572 segment_type * pSegments = m_Segments;
573 for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) {
574 table_entry* pEntry = pSegments[i].load(memory_model::memory_order_relaxed);
575 if ( pEntry != nullptr )
576 destroy_segment( pEntry );
579 destroy_table( pSegments );
582 /// Returns head node of the bucket \p nBucket
583 node_type * bucket( size_t nBucket ) const
585 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
586 assert( nSegment < m_metrics.nSegmentCount );
588 table_entry* pSegment = m_Segments[ nSegment ].load(memory_model::memory_order_acquire);
589 if ( pSegment == nullptr )
590 return nullptr; // uninitialized bucket
591 return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load(memory_model::memory_order_acquire);
594 /// Set \p pNode as a head of bucket \p nBucket
595 void bucket( size_t nBucket, node_type * pNode )
597 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
598 assert( nSegment < m_metrics.nSegmentCount );
600 segment_type& segment = m_Segments[nSegment];
601 if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
602 table_entry* pNewSegment = allocate_segment();
603 table_entry * pNull = nullptr;
604 if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
605 destroy_segment( pNewSegment );
608 segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
611 /// Allocates auxiliary node; can return \p nullptr if the table exhausted
612 node_type* alloc_aux_node()
615 aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_relaxed );
616 assert( aux_segment != nullptr );
618 // try to allocate from current aux segment
619 if ( aux_segment->aux_node_count.load( memory_model::memory_order_relaxed ) < m_metrics.nSegmentSize ) {
620 size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
621 if ( idx < m_metrics.nSegmentSize )
622 return new( aux_segment->segment() + idx ) node_type();
625 // try allocate from free-list
626 auto pFree = m_freeList.get();
628 return new( pFree ) node_type;
630 // free-list is empty, current segment is full
631 // try to allocate new aux segment
632 // We can allocate more aux segments than we need but it is not a problem in this context
633 aux_node_segment* new_aux_segment = allocate_aux_segment();
634 new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
635 if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ))
636 return new( new_aux_segment->segment() ) node_type();
638 free_aux_segment( new_aux_segment );
642 /// Places auxiliary node type to free-list
643 void free_aux_node( node_type* p )
646 m_freeList.put( new(p) free_list_node() );
649 /// Returns the capacity of the bucket table
650 size_t capacity() const
652 return m_metrics.nCapacity;
655 /// Returns the load factor, i.e. average count of items per bucket
656 size_t load_factor() const
658 return m_metrics.nLoadFactor;
663 metrics calc_metrics( size_t nItemCount, size_t nLoadFactor )
667 // Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
668 m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
670 size_t nBucketCount = (size_t)(((float)nItemCount) / m.nLoadFactor);
671 if ( nBucketCount <= 2 ) {
675 else if ( nBucketCount <= 1024 ) {
677 m.nSegmentSize = ((size_t)1) << beans::log2ceil( nBucketCount );
680 nBucketCount = beans::log2ceil( nBucketCount );
682 m.nSegmentSize = ((size_t)1) << (nBucketCount / 2);
683 if ( nBucketCount & 1 )
685 if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount )
688 m.nCapacity = m.nSegmentCount * m.nSegmentSize;
689 m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize );
690 assert( m.nSegmentSizeLog2 != 0 ); //
694 segment_type * allocate_table()
696 return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr );
699 void destroy_table( segment_type * pTable )
701 bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount );
704 table_entry* allocate_segment()
706 return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr );
709 void destroy_segment( table_entry* pSegment )
711 segment_allocator().Delete( pSegment, m_metrics.nSegmentSize );
714 aux_node_segment* allocate_aux_segment()
716 char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( internal_node_type ) * m_metrics.nSegmentSize );
717 return new(p) aux_node_segment();
720 void free_aux_segment( aux_node_segment* p )
722 raw_allocator().deallocate( reinterpret_cast<char*>( p ), sizeof( aux_node_segment ) + sizeof( internal_node_type ) * m_metrics.nSegmentSize );
727 // m_nSegmentSize must be 2**N
728 assert( cds::beans::is_power2( m_metrics.nSegmentSize ) );
729 assert( (((size_t)1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize );
731 // m_nSegmentCount must be 2**K
732 assert( cds::beans::is_power2( m_metrics.nSegmentCount ) );
734 m_Segments = allocate_table();
735 m_auxNodeList = allocate_aux_segment();
741 metrics const m_metrics; ///< Dynamic bucket table metrics
742 segment_type* m_Segments; ///< bucket table - array of segments
743 atomics::atomic<aux_node_segment*> m_auxNodeList; ///< segment list of aux nodes
744 free_list m_freeList; ///< List of free aux nodes
748 /// Split-list node traits
750 This traits is intended for converting between underlying ordered list node type
751 and split-list node type
754 - \p BaseNodeTraits - node traits of base ordered list type
756 template <class BaseNodeTraits>
757 struct node_traits: private BaseNodeTraits
759 typedef BaseNodeTraits base_class; ///< Base ordered list node type
760 typedef typename base_class::value_type value_type; ///< Value type
761 typedef typename base_class::node_type base_node_type; ///< Ordered list node type
762 typedef node<base_node_type> node_type; ///< Spit-list node type
764 /// Convert value reference to node pointer
765 static node_type * to_node_ptr( value_type& v )
767 return static_cast<node_type *>( base_class::to_node_ptr( v ) );
770 /// Convert value pointer to node pointer
771 static node_type * to_node_ptr( value_type * v )
773 return static_cast<node_type *>( base_class::to_node_ptr( v ) );
776 /// Convert value reference to node pointer (const version)
777 static node_type const * to_node_ptr( value_type const& v )
779 return static_cast<node_type const*>( base_class::to_node_ptr( v ) );
782 /// Convert value pointer to node pointer (const version)
783 static node_type const * to_node_ptr( value_type const * v )
785 return static_cast<node_type const *>( base_class::to_node_ptr( v ) );
788 /// Convert node reference to value pointer
789 static value_type * to_value_ptr( node_type& n )
791 return base_class::to_value_ptr( static_cast<base_node_type &>( n ) );
794 /// Convert node pointer to value pointer
795 static value_type * to_value_ptr( node_type * n )
797 return base_class::to_value_ptr( static_cast<base_node_type *>( n ) );
800 /// Convert node reference to value pointer (const version)
801 static const value_type * to_value_ptr( node_type const & n )
803 return base_class::to_value_ptr( static_cast<base_node_type const &>( n ) );
806 /// Convert node pointer to value pointer (const version)
807 static const value_type * to_value_ptr( node_type const * n )
809 return base_class::to_value_ptr( static_cast<base_node_type const *>( n ) );
815 template <bool Value, typename GC, typename Node, typename... Options>
816 struct bucket_table_selector;
818 template <typename GC, typename Node, typename... Options>
819 struct bucket_table_selector< true, GC, Node, Options...>
821 typedef expandable_bucket_table<GC, Node, Options...> type;
824 template <typename GC, typename Node, typename... Options>
825 struct bucket_table_selector< false, GC, Node, Options...>
827 typedef static_bucket_table<GC, Node, Options...> type;
//@cond
// Wraps a search key together with its split-order hash so that the
// hash-aware comparators can compare list items against plain keys.
template <typename Q>
struct search_value_type
{
    Q&     val;   // reference to the search key (not owned)
    size_t nHash; // split-order (bit-reversed, regular) hash of the key

    search_value_type( Q& v, size_t h )
        : val( v )
        , nHash( h )
    {}
};
//@endcond
842 template <class OrderedList, class Traits>
843 class rebind_list_traits
845 typedef OrderedList native_ordered_list;
846 typedef Traits traits;
848 typedef typename native_ordered_list::gc gc;
849 typedef typename native_ordered_list::key_comparator native_key_comparator;
850 typedef typename native_ordered_list::node_type node_type;
851 typedef typename native_ordered_list::value_type value_type;
852 typedef typename native_ordered_list::node_traits node_traits;
853 typedef typename native_ordered_list::disposer native_disposer;
855 typedef split_list::node<node_type> splitlist_node_type;
858 int operator()( value_type const& v1, value_type const& v2 ) const
860 splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v1 ));
861 splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v2 ));
862 if ( n1->m_nHash != n2->m_nHash )
863 return n1->m_nHash < n2->m_nHash ? -1 : 1;
865 if ( n1->is_dummy() ) {
866 assert( n2->is_dummy() );
870 assert( !n1->is_dummy() && !n2->is_dummy() );
872 return native_key_comparator()( v1, v2 );
875 template <typename Q>
876 int operator()( value_type const& v, search_value_type<Q> const& q ) const
878 splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
879 if ( n->m_nHash != q.nHash )
880 return n->m_nHash < q.nHash ? -1 : 1;
882 assert( !n->is_dummy() );
883 return native_key_comparator()( v, q.val );
886 template <typename Q>
887 int operator()( search_value_type<Q> const& q, value_type const& v ) const
889 return -operator()( v, q );
893 struct wrapped_disposer
895 void operator()( value_type * v )
897 splitlist_node_type * p = static_cast<splitlist_node_type *>( node_traits::to_node_ptr( v ));
898 if ( !p->is_dummy() )
899 native_disposer()( v );
904 template <typename Less>
905 struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
907 typedef cds::opt::details::make_comparator_from_less<Less> base_class;
909 template <typename Q>
910 int operator()( value_type const& v, search_value_type<Q> const& q ) const
912 splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
913 if ( n->m_nHash != q.nHash )
914 return n->m_nHash < q.nHash ? -1 : 1;
916 assert( !n->is_dummy() );
917 return base_class()( v, q.val );
920 template <typename Q>
921 int operator()( search_value_type<Q> const& q, value_type const& v ) const
923 splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
924 if ( n->m_nHash != q.nHash )
925 return q.nHash < n->m_nHash ? -1 : 1;
927 assert( !n->is_dummy() );
928 return base_class()( q.val, v );
931 template <typename Q1, typename Q2>
932 int operator()( Q1 const& v1, Q2 const& v2 ) const
934 return base_class()( v1, v2 );
938 typedef typename native_ordered_list::template rebind_traits<
939 opt::compare< key_compare >
940 ,opt::disposer< wrapped_disposer >
941 ,opt::boundary_node_type< splitlist_node_type >
//@cond
// Maps (OrderedList, IsConst) to the matching list iterator type:
// IsConst == false -> OrderedList::iterator, IsConst == true -> OrderedList::const_iterator
template <typename OrderedList, bool IsConst>
struct select_list_iterator;

template <typename OrderedList>
struct select_list_iterator<OrderedList, false>
{
    typedef typename OrderedList::iterator type;
};

template <typename OrderedList>
struct select_list_iterator<OrderedList, true>
{
    typedef typename OrderedList::const_iterator type;
};
//@endcond
960 template <typename NodeTraits, typename OrderedList, bool IsConst>
963 typedef OrderedList ordered_list_type;
964 friend class iterator_type <NodeTraits, OrderedList, !IsConst >;
967 typedef typename select_list_iterator<ordered_list_type, IsConst>::type list_iterator;
968 typedef NodeTraits node_traits;
971 list_iterator m_itCur;
972 list_iterator m_itEnd;
975 typedef typename list_iterator::value_ptr value_ptr;
976 typedef typename list_iterator::value_ref value_ref;
982 iterator_type( iterator_type const& src )
983 : m_itCur( src.m_itCur )
984 , m_itEnd( src.m_itEnd )
987 // This ctor should be protected...
988 iterator_type( list_iterator itCur, list_iterator itEnd )
993 while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() )
997 value_ptr operator ->() const
999 return m_itCur.operator->();
1002 value_ref operator *() const
1004 return m_itCur.operator*();
1008 iterator_type& operator ++()
1010 if ( m_itCur != m_itEnd ) {
1013 } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() );
1018 iterator_type& operator = (iterator_type const& src)
1020 m_itCur = src.m_itCur;
1021 m_itEnd = src.m_itEnd;
1026 bool operator ==(iterator_type<node_traits, ordered_list_type, C> const& i ) const
1028 return m_itCur == i.m_itCur;
1031 bool operator !=(iterator_type<node_traits, ordered_list_type, C> const& i ) const
1033 return m_itCur != i.m_itCur;
1036 } // namespace details
1042 /// Reverses bit order in \p nHash
1043 static inline size_t reverse_bits( size_t nHash )
1045 return bitop::RBO( nHash );
1048 static inline size_t regular_hash( size_t nHash )
1050 return reverse_bits( nHash ) | size_t(1);
1053 static inline size_t dummy_hash( size_t nHash )
1055 return reverse_bits( nHash ) & ~size_t(1);
1059 } // namespace split_list
1062 // Forward declaration
1063 template <class GC, class OrderedList, class Traits = split_list::traits>
1067 }} // namespace cds::intrusive
1069 #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H