/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
#define CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H

#include <cds/intrusive/details/base.h>
#include <cds/algo/atomic.h>
#include <cds/details/allocator.h>
#include <cds/algo/int_algo.h>
#include <cds/algo/bitop.h>
#include <cds/opt/hash.h>
#include <cds/intrusive/free_list_selector.h>
namespace cds { namespace intrusive {

    /// Split-ordered list related definitions
    /** @ingroup cds_intrusive_helper
    */
    namespace split_list {
        /// Split-ordered list node
        /**
            Template parameter:
            - \p OrderedListNode - node type for underlying ordered list
        */
        template <typename OrderedListNode>
        struct node: public OrderedListNode
        {
            typedef OrderedListNode base_class; ///< Base node type

            size_t m_nHash; ///< Hash value for node

            /// Default constructor; initializes a dummy (bucket head) node
            node() : m_nHash( 0 ) {}
            /// Initializes dummy node with \p nHash value
            node( size_t nHash ) : m_nHash( nHash ) {}
            /// Checks if the node is dummy node
            bool is_dummy() const { return (m_nHash & 1) == 0; }
        };
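        // A short illustration of the dummy-bit convention (values are examples only;
        // see regular_hash()/dummy_hash() at the end of this namespace): the low-order
        // bit of m_nHash is 0 for auxiliary (dummy) bucket-head nodes and 1 for nodes
        // that carry a user item.
        //
        //   node<my_list_node> head( 0x40 );  // LSB == 0: dummy bucket head
        //   assert( head.is_dummy() );
        //   node<my_list_node> item( 0x41 );  // LSB == 1: regular item node
        //   assert( !item.is_dummy() );
        //
        // `my_list_node` above is a placeholder for the underlying ordered-list node type.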
        /// \p SplitListSet internal statistics. May be used for debugging or profiling
        /**
            Template argument \p Counter defines the type of counter, default is \p cds::atomicity::event_counter.
        */
        template <typename Counter = cds::atomicity::event_counter >
        struct stat
        {
            typedef Counter counter_type; ///< Counter type

            counter_type    m_nInsertSuccess;        ///< Count of successful \p insert() calls
            counter_type    m_nInsertFailed;         ///< Count of failed \p insert() calls
            counter_type    m_nUpdateNew;            ///< Count of new items created by the \p ensure() member function
            counter_type    m_nUpdateExist;          ///< Count of \p ensure() calls for existing items
            counter_type    m_nEraseSuccess;         ///< Count of successfully erased items
            counter_type    m_nEraseFailed;          ///< Count of attempts to erase unknown items
            counter_type    m_nExtractSuccess;       ///< Count of successfully extracted items
            counter_type    m_nExtractFailed;        ///< Count of attempts to extract unknown items
            counter_type    m_nFindSuccess;          ///< Count of successful \p find() calls
            counter_type    m_nFindFailed;           ///< Count of failed \p find() calls
            counter_type    m_nHeadNodeAllocated;    ///< Count of allocated head nodes
            counter_type    m_nHeadNodeFreed;        ///< Count of freed head nodes
            counter_type    m_nBucketCount;          ///< Current bucket count
            counter_type    m_nInitBucketRecursive;  ///< Count of recursive bucket initializations
            counter_type    m_nInitBucketContention; ///< Count of contentions encountered during bucket initialization
            counter_type    m_nBusyWaitBucketInit;   ///< Count of busy-wait cycles while a bucket is being initialized
            counter_type    m_nBucketsExhausted;     ///< Count of failed bucket allocations

            //@cond
            void onInsertSuccess()       { ++m_nInsertSuccess; }
            void onInsertFailed()        { ++m_nInsertFailed; }
            void onUpdateNew()           { ++m_nUpdateNew; }
            void onUpdateExist()         { ++m_nUpdateExist; }
            void onEraseSuccess()        { ++m_nEraseSuccess; }
            void onEraseFailed()         { ++m_nEraseFailed; }
            void onExtractSuccess()      { ++m_nExtractSuccess; }
            void onExtractFailed()       { ++m_nExtractFailed; }
            void onFindSuccess()         { ++m_nFindSuccess; }
            void onFindFailed()          { ++m_nFindFailed; }
            bool onFind( bool bSuccess )
            {
                if ( bSuccess )
                    onFindSuccess();
                else
                    onFindFailed();
                return bSuccess;
            }
            void onHeadNodeAllocated()   { ++m_nHeadNodeAllocated; }
            void onHeadNodeFreed()       { ++m_nHeadNodeFreed; }
            void onNewBucket()           { ++m_nBucketCount; }
            void onRecursiveInitBucket() { ++m_nInitBucketRecursive; }
            void onBucketInitContenton() { ++m_nInitBucketContention; }
            void onBusyWaitBucketInit()  { ++m_nBusyWaitBucketInit; }
            void onBucketsExhausted()    { ++m_nBucketsExhausted; }
            //@endcond
        };
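        // Example (sketch): enabling the statistics in a set's traits. `my_hash` is a
        // placeholder for a user-provided hash functor; the counters can then be read
        // through the container's statistics accessor after a workload has run.
        //
        //   struct my_traits: public cds::intrusive::split_list::traits {
        //       typedef my_hash hash;                            // hash functor (mandatory)
        //       typedef cds::intrusive::split_list::stat<> stat; // turn internal statistics on
        //   };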
        /// Dummy statistics - no counting is performed, no overhead. Supports the interface of \p split_list::stat
        struct empty_stat {
            //@cond
            void onInsertSuccess()       const {}
            void onInsertFailed()        const {}
            void onUpdateNew()           const {}
            void onUpdateExist()         const {}
            void onEraseSuccess()        const {}
            void onEraseFailed()         const {}
            void onExtractSuccess()      const {}
            void onExtractFailed()       const {}
            void onFindSuccess()         const {}
            void onFindFailed()          const {}
            bool onFind( bool bSuccess ) const { return bSuccess; }
            void onHeadNodeAllocated()   const {}
            void onHeadNodeFreed()       const {}
            void onNewBucket()           const {}
            void onRecursiveInitBucket() const {}
            void onBucketInitContenton() const {}
            void onBusyWaitBucketInit()  const {}
            void onBucketsExhausted()    const {}
            //@endcond
        };
        /// SplitListSet traits
        struct traits
        {
            /// Hash function
            /**
                The hash functor converts the key fields of struct \p T stored in the split list
                into a hash value of type \p size_t that is an index in the hash table.
                By default, \p std::hash is used.
            */
            typedef opt::none hash;

            /// Item counter
            /**
                Item counting is an important part of the \p SplitListSet algorithm:
                the <tt>empty()</tt> member function depends on correct item counting.
                Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.

                Default is \p cds::atomicity::item_counter.
            */
            typedef cds::atomicity::item_counter item_counter;

            /// Bucket table allocator
            /**
                Allocator for the bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
            */
            typedef CDS_DEFAULT_ALLOCATOR allocator;

            /// Internal statistics (by default, disabled)
            /**
                Possible statistics types are: \p split_list::stat (enable internal statistics),
                \p split_list::empty_stat (the default, internal statistics disabled),
                or a user-provided class that supports the \p %split_list::stat interface.
            */
            typedef split_list::empty_stat stat;

            /// C++ memory ordering model
            /**
                Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
                or \p opt::v::sequential_consistent (sequentially consistent memory model).
            */
            typedef opt::v::relaxed_ordering memory_model;

            /// What type of bucket table is used
            /**
                \p true - use \p split_list::expandable_bucket_table that can be expanded
                    when the load factor bound is exceeded.
                \p false - use \p split_list::static_bucket_table that cannot be expanded
                    and is allocated in the \p SplitListSet constructor.

                Default is \p true.
            */
            static const bool dynamic_bucket_table = true;

            /// Back-off strategy
            typedef cds::backoff::Default back_off;

            /// Padding; default is cache-line padding
            enum {
                padding = cds::opt::cache_line_padding
            };

            /// Free-list of auxiliary nodes
            /**
                The split-list contains auxiliary nodes marking the start of buckets.
                To increase performance, there is a pool of preallocated aux nodes; a part of the pool is a free-list of aux nodes.

                Default is:
                - \p cds::intrusive::FreeList - if the architecture and/or compiler does not support the double-width CAS primitive
                - \p cds::intrusive::TaggedFreeList - if the architecture and/or compiler supports the double-width CAS primitive
            */
            typedef FreeListImpl free_list;
        };
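        // Example (sketch): declaring traits by inheriting from split_list::traits and
        // overriding only what differs. `foo_hash` is a placeholder hash functor.
        //
        //   struct foo_traits: public cds::intrusive::split_list::traits {
        //       typedef foo_hash hash;                          // mandatory: the hash functor
        //       static const bool dynamic_bucket_table = false; // use static_bucket_table
        //   };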
        /// [value-option] Split-list dynamic bucket table option
        /**
            The option is used to select the bucket table implementation.
            Possible values of \p Value are:
            - \p true - select \p expandable_bucket_table
            - \p false - select \p static_bucket_table
        */
        template <bool Value>
        struct dynamic_bucket_table
        {
            //@cond
            template <typename Base> struct pack: public Base
            {
                enum { dynamic_bucket_table = Value };
            };
            //@endcond
        };
        /// Metafunction converting option list to \p split_list::traits
        /**
            Available \p Options:
            - \p opt::hash - mandatory option, specifies the hash functor.
            - \p opt::item_counter - optional, specifies the item counting policy. See \p traits::item_counter
                for the default type.
            - \p opt::memory_model - C++ memory model for atomic operations.
                Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
                or \p opt::v::sequential_consistent (sequentially consistent memory model).
            - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR.
            - \p split_list::dynamic_bucket_table - use dynamic or static bucket table implementation.
                The dynamic bucket table expands its size up to the maximum bucket count when necessary.
            - \p opt::back_off - back-off strategy used for spinning, default is \p cds::backoff::Default.
            - \p opt::stat - internal statistics, default is \p split_list::empty_stat (disabled).
                To enable internal statistics use \p split_list::stat.
            - \p opt::padding - padding to solve false-sharing issues; default is cache-line padding.
            - \p opt::free_list - a free-list implementation, see \p traits::free_list
        */
        template <typename... Options>
        struct make_traits {
            typedef typename cds::opt::make_options< traits, Options...>::type type; ///< Result of metafunction
        };
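        // Example (sketch): the equivalent option-list form of the inheritance-based
        // traits shown above; `foo_hash` is again a placeholder hash functor.
        //
        //   typedef cds::intrusive::split_list::make_traits<
        //       cds::opt::hash< foo_hash >
        //       , cds::intrusive::split_list::dynamic_bucket_table< false >
        //   >::type foo_traits;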
        /// Static bucket table
        /**
            Non-resizeable bucket table for \p SplitListSet class.
            The capacity of the table (max bucket count) is defined in the constructor call.

            Template parameters:
            - \p GC - garbage collector
            - \p Node - node type, must be a type based on \p split_list::node
            - \p Options... - options

            \p Options are:
            - \p opt::allocator - allocator used to allocate the bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
            - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
            - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-width CAS,
                otherwise \p FreeList.
        */
        template <typename GC, typename Node, typename... Options>
        class static_bucket_table
        {
            //@cond
            struct default_options
            {
                typedef CDS_DEFAULT_ALLOCATOR    allocator;
                typedef opt::v::relaxed_ordering memory_model;
                typedef FreeListImpl             free_list;
            };
            typedef typename opt::make_options< default_options, Options... >::type options;
            //@endcond
        public:
            typedef GC   gc;        ///< Garbage collector
            typedef Node node_type; ///< Bucket node type
            typedef atomics::atomic<node_type *> table_entry; ///< Table entry type
            typedef typename options::allocator  allocator;   ///< allocator

            /// Bucket table allocator
            typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator;

            /// Memory model for atomic operations
            typedef typename options::memory_model memory_model;

            /// Free-list
            typedef typename options::free_list free_list;

        protected:
            //@cond
            struct aux_node_type: public node_type, public free_list::node
            {};

            const size_t  m_nLoadFactor; ///< Load factor (average count of items per bucket)
            const size_t  m_nCapacity;   ///< Bucket table capacity
            table_entry * m_Table;       ///< Bucket table

            typedef typename allocator::template rebind< aux_node_type >::other aux_node_allocator;

            aux_node_type*          m_auxNode;           ///< Array of preallocated auxiliary nodes
            atomics::atomic<size_t> m_nAuxNodeAllocated; ///< Count of allocated auxiliary nodes
            free_list               m_freeList;          ///< Free list

            void allocate_table()
            {
                m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr );
                m_auxNode = aux_node_allocator().allocate( m_nCapacity );
            }

            void destroy_table()
            {
                m_freeList.clear( []( typename free_list::node* ) {} );
                aux_node_allocator().deallocate( m_auxNode, m_nCapacity );
                bucket_table_allocator().Delete( m_Table, m_nCapacity );
            }
            //@endcond
        public:
            /// Constructs bucket table for 512K buckets. Load factor is 1.
            static_bucket_table()
                : m_nLoadFactor( 1 )
                , m_nCapacity( 512 * 1024 )
                , m_nAuxNodeAllocated( 0 )
            {
                allocate_table();
            }

            /// Creates the table with specified size rounded up to nearest power-of-two
            static_bucket_table(
                size_t nItemCount,  ///< Max expected item count in split-ordered list
                size_t nLoadFactor  ///< Load factor
                )
                : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 )
                , m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ) )
                , m_nAuxNodeAllocated( 0 )
            {
                // m_nCapacity must be power of 2
                assert( cds::beans::is_power2( m_nCapacity ) );
                allocate_table();
            }

            /// Destroys bucket table
            ~static_bucket_table()
            {
                destroy_table();
            }

            /// Returns head node of bucket \p nBucket
            node_type * bucket( size_t nBucket ) const
            {
                assert( nBucket < capacity() );
                return m_Table[ nBucket ].load( memory_model::memory_order_acquire );
            }

            /// Sets \p pNode as the head of bucket \p nBucket
            void bucket( size_t nBucket, node_type * pNode )
            {
                assert( nBucket < capacity() );
                assert( bucket( nBucket ) == nullptr );

                m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );
            }
            /// Allocates an auxiliary node; can return \p nullptr if the table is exhausted
            node_type* alloc_aux_node()
            {
                if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity() ) {
                    // alloc next free node from m_auxNode
                    size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
                    if ( idx < capacity() )
                        return new( &m_auxNode[idx] ) aux_node_type();
                }

                // get from free-list
                auto pFree = m_freeList.get();
                if ( pFree )
                    return static_cast<aux_node_type*>( pFree );

                // the table is exhausted
                return nullptr;
            }

            /// Places the auxiliary node back to the free-list
            void free_aux_node( node_type* p )
            {
                m_freeList.put( static_cast<aux_node_type*>( p ));
            }

            /// Returns the capacity of the bucket table
            size_t capacity() const
            {
                return m_nCapacity;
            }

            /// Returns the load factor, i.e. average count of items per bucket
            size_t load_factor() const
            {
                return m_nLoadFactor;
            }
        };
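        // Worked example: static_bucket_table( /*nItemCount*/ 1000, /*nLoadFactor*/ 2 )
        // computes m_nCapacity = ceil2( 1000 / 2 ) = ceil2( 500 ) = 512, i.e. the
        // requested bucket count rounded up to the nearest power of two; the table
        // then preallocates 512 table entries and 512 auxiliary nodes.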
        /// Expandable bucket table
        /**
            This bucket table can dynamically grow its capacity when necessary,
            up to the maximum bucket count.

            Template parameters:
            - \p GC - garbage collector
            - \p Node - node type, must be derived from \p split_list::node
            - \p Options... - options

            \p Options are:
            - \p opt::allocator - allocator used to allocate the bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
            - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
            - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-width CAS,
                otherwise \p FreeList.
        */
        template <typename GC, typename Node, typename... Options>
        class expandable_bucket_table
        {
            //@cond
            struct default_options
            {
                typedef CDS_DEFAULT_ALLOCATOR    allocator;
                typedef opt::v::relaxed_ordering memory_model;
                typedef FreeListImpl             free_list;
            };
            typedef typename opt::make_options< default_options, Options... >::type options;
            //@endcond
        public:
            typedef GC   gc;        ///< Garbage collector
            typedef Node node_type; ///< Bucket node type
            typedef typename options::allocator allocator; ///< allocator

            /// Memory model for atomic operations
            typedef typename options::memory_model memory_model;

            /// Free-list
            typedef typename options::free_list free_list;

            typedef atomics::atomic<node_type *>   table_entry;  ///< Table entry type
            typedef atomics::atomic<table_entry *> segment_type; ///< Bucket table segment type

        protected:
            //@cond
            class aux_node_type: public node_type, public free_list::node
            {};

            struct aux_node_segment {
                atomics::atomic< size_t > aux_node_count; // how many aux nodes are allocated from the segment
                aux_node_segment*         next_segment;
                // aux_node_type nodes[] - the nodes follow the segment header in the same allocation

                aux_node_segment()
                    : aux_node_count( 0 )
                    , next_segment( nullptr )
                {}

                aux_node_type* segment()
                {
                    return reinterpret_cast<aux_node_type*>( this + 1 );
                }
            };

            /// Bucket table metrics
            struct metrics {
                size_t nSegmentCount;    ///< max count of segments in bucket table
                size_t nSegmentSize;     ///< the segment's capacity; the capacity must be power of two
                size_t nSegmentSizeLog2; ///< <tt> log2( nSegmentSize )</tt>
                size_t nLoadFactor;      ///< load factor
                size_t nCapacity;        ///< max capacity of bucket table

                metrics()
                    : nSegmentCount( 1024 )
                    , nSegmentSize( 512 )
                    , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ) )
                    , nLoadFactor( 1 )
                    , nCapacity( nSegmentCount * nSegmentSize )
                {}
            };

            /// Bucket table allocator
            typedef cds::details::Allocator< segment_type, allocator > bucket_table_allocator;

            /// Bucket table segment allocator
            typedef cds::details::Allocator< table_entry, allocator > segment_allocator;

            // Aux node segment allocator
            typedef typename allocator::template rebind<char>::other raw_allocator;
            //@endcond
        public:
            /// Constructs bucket table for 512K buckets. Load factor is 1.
            expandable_bucket_table()
                : m_metrics( calc_metrics( 512 * 1024, 1 ))
            {
                init();
            }

            /// Creates the table with specified capacity rounded up to nearest power-of-two
            expandable_bucket_table(
                size_t nItemCount,  ///< Max expected item count in split-ordered list
                size_t nLoadFactor  ///< Load factor
                )
                : m_metrics( calc_metrics( nItemCount, nLoadFactor ))
            {
                init();
            }

            /// Destroys bucket table
            ~expandable_bucket_table()
            {
                m_freeList.clear( []( typename free_list::node* ) {} );

                for ( auto aux_segment = m_auxNodeList.load( atomics::memory_order_relaxed ); aux_segment; ) {
                    auto next_segment = aux_segment->next_segment;
                    free_aux_segment( aux_segment );
                    aux_segment = next_segment;
                }

                segment_type* pSegments = m_Segments;
                for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) {
                    table_entry* pEntry = pSegments[i].load( memory_model::memory_order_relaxed );
                    if ( pEntry != nullptr )
                        destroy_segment( pEntry );
                }

                destroy_table( pSegments );
            }
            /// Returns head node of the bucket \p nBucket
            node_type * bucket( size_t nBucket ) const
            {
                size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
                assert( nSegment < m_metrics.nSegmentCount );

                table_entry* pSegment = m_Segments[ nSegment ].load( memory_model::memory_order_acquire );
                if ( pSegment == nullptr )
                    return nullptr; // uninitialized bucket
                return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load( memory_model::memory_order_acquire );
            }

            /// Sets \p pNode as the head of bucket \p nBucket
            void bucket( size_t nBucket, node_type * pNode )
            {
                size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
                assert( nSegment < m_metrics.nSegmentCount );

                segment_type& segment = m_Segments[nSegment];
                if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
                    table_entry* pNewSegment = allocate_segment();
                    table_entry* pNull = nullptr;
                    if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed ))
                        destroy_segment( pNewSegment ); // another thread has already installed the segment
                }
                segment.load( memory_model::memory_order_acquire )[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
            }
            /// Allocates an auxiliary node; can return \p nullptr if the table is exhausted
            node_type* alloc_aux_node()
            {
                for ( ;; ) {
                    aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_relaxed );
                    assert( aux_segment != nullptr );

                    // try to allocate from the current aux segment
                    if ( aux_segment->aux_node_count.load( memory_model::memory_order_relaxed ) < m_metrics.nSegmentSize ) {
                        size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
                        if ( idx < m_metrics.nSegmentSize )
                            return new( aux_segment->segment() + idx ) aux_node_type();
                    }

                    // try to allocate from the free-list
                    auto pFree = m_freeList.get();
                    if ( pFree )
                        return static_cast<aux_node_type*>( pFree );

                    // The free-list is empty and the current segment is full:
                    // try to allocate a new aux segment.
                    // We can allocate more aux segments than we need but it is not a problem in this context.
                    aux_node_segment* new_aux_segment = allocate_aux_segment();
                    new_aux_segment->next_segment = aux_segment; // link so the destructor can release the whole chain
                    new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
                    if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ))
                        return new( new_aux_segment->segment() ) aux_node_type();

                    // another thread has already installed a new segment; retry
                    free_aux_segment( new_aux_segment );
                }
            }

            /// Places the auxiliary node back to the free-list
            void free_aux_node( node_type* p )
            {
                m_freeList.put( static_cast<aux_node_type*>( p ));
            }

            /// Returns the capacity of the bucket table
            size_t capacity() const
            {
                return m_metrics.nCapacity;
            }

            /// Returns the load factor, i.e. average count of items per bucket
            size_t load_factor() const
            {
                return m_metrics.nLoadFactor;
            }
        protected:
            //@cond
            metrics calc_metrics( size_t nItemCount, size_t nLoadFactor )
            {
                metrics m;

                // Calculate nSegmentSize and nSegmentCount by nItemCount
                m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;

                size_t nBucketCount = (size_t)(((float) nItemCount) / m.nLoadFactor );
                if ( nBucketCount <= 2 ) {
                    m.nSegmentCount = 1;
                    m.nSegmentSize = 2;
                }
                else if ( nBucketCount <= 1024 ) {
                    m.nSegmentCount = 1;
                    m.nSegmentSize = ((size_t) 1) << beans::log2ceil( nBucketCount );
                }
                else {
                    nBucketCount = beans::log2ceil( nBucketCount );
                    m.nSegmentCount =
                        m.nSegmentSize = ((size_t) 1) << (nBucketCount / 2);
                    if ( nBucketCount & 1 )
                        m.nSegmentSize *= 2;
                    if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount )
                        m.nSegmentSize *= 2;
                }
                m.nCapacity = m.nSegmentCount * m.nSegmentSize;
                m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize );
                assert( m.nSegmentSizeLog2 != 0 );
                return m;
            }
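            // Worked example: calc_metrics( 1000000, 1 ):
            //   nBucketCount = 1000000; log2ceil( 1000000 ) = 20;
            //   nSegmentCount = nSegmentSize = 1 << (20/2) = 1024 (20 is even, no doubling);
            //   1024 * 1024 * 1 = 1048576 >= 1000000, so nSegmentSize is not doubled again;
            //   nCapacity = 1024 * 1024 = 1048576 buckets, nSegmentSizeLog2 = 10.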
            segment_type* allocate_table()
            {
                return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr );
            }

            void destroy_table( segment_type* pTable )
            {
                bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount );
            }

            table_entry* allocate_segment()
            {
                return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr );
            }

            void destroy_segment( table_entry* pSegment )
            {
                segment_allocator().Delete( pSegment, m_metrics.nSegmentSize );
            }

            aux_node_segment* allocate_aux_segment()
            {
                char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
                return new(p) aux_node_segment();
            }

            void free_aux_segment( aux_node_segment* p )
            {
                raw_allocator().deallocate( reinterpret_cast<char*>( p ), sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
            }

            void init()
            {
                // nSegmentSize must be 2**N
                assert( cds::beans::is_power2( m_metrics.nSegmentSize ) );
                assert( ( ((size_t) 1) << m_metrics.nSegmentSizeLog2 ) == m_metrics.nSegmentSize );

                // nSegmentCount must be 2**K
                assert( cds::beans::is_power2( m_metrics.nSegmentCount ) );

                m_Segments = allocate_table();
                m_auxNodeList = allocate_aux_segment();
            }
            //@endcond
            //@cond
            metrics const m_metrics; ///< Dynamic bucket table metrics
            segment_type* m_Segments; ///< Bucket table - array of segments
            atomics::atomic<aux_node_segment*> m_auxNodeList; ///< Segment list of aux nodes
            free_list     m_freeList; ///< List of free aux nodes
            //@endcond
        };
        /// Split-list node traits
        /**
            These traits are intended for converting between the underlying ordered list node type
            and the split-list node type.

            Template parameter:
            - \p BaseNodeTraits - node traits of the underlying ordered list type
        */
        template <class BaseNodeTraits>
        struct node_traits: private BaseNodeTraits
        {
            typedef BaseNodeTraits base_class;                   ///< Base ordered list node traits
            typedef typename base_class::value_type value_type;  ///< Value type
            typedef typename base_class::node_type base_node_type; ///< Ordered list node type
            typedef node<base_node_type> node_type;              ///< Split-list node type

            /// Convert value reference to node pointer
            static node_type * to_node_ptr( value_type& v )
            {
                return static_cast<node_type *>( base_class::to_node_ptr( v ) );
            }

            /// Convert value pointer to node pointer
            static node_type * to_node_ptr( value_type * v )
            {
                return static_cast<node_type *>( base_class::to_node_ptr( v ) );
            }

            /// Convert value reference to node pointer (const version)
            static node_type const * to_node_ptr( value_type const& v )
            {
                return static_cast<node_type const*>( base_class::to_node_ptr( v ) );
            }

            /// Convert value pointer to node pointer (const version)
            static node_type const * to_node_ptr( value_type const * v )
            {
                return static_cast<node_type const *>( base_class::to_node_ptr( v ) );
            }

            /// Convert node reference to value pointer
            static value_type * to_value_ptr( node_type& n )
            {
                return base_class::to_value_ptr( static_cast<base_node_type &>( n ) );
            }

            /// Convert node pointer to value pointer
            static value_type * to_value_ptr( node_type * n )
            {
                return base_class::to_value_ptr( static_cast<base_node_type *>( n ) );
            }

            /// Convert node reference to value pointer (const version)
            static const value_type * to_value_ptr( node_type const & n )
            {
                return base_class::to_value_ptr( static_cast<base_node_type const &>( n ) );
            }

            /// Convert node pointer to value pointer (const version)
            static const value_type * to_value_ptr( node_type const * n )
            {
                return base_class::to_value_ptr( static_cast<base_node_type const *>( n ) );
            }
        };
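        // Example (sketch): node_traits round-trips a stored value and its node;
        // `my_node_traits` stands for a node_traits instantiation and `v` for a value
        // of its value_type:
        //
        //   my_node_traits::node_type* n = my_node_traits::to_node_ptr( v );
        //   assert( my_node_traits::to_value_ptr( *n ) == &v );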
        //@cond
        namespace details {
            template <bool Value, typename GC, typename Node, typename... Options>
            struct bucket_table_selector;

            template <typename GC, typename Node, typename... Options>
            struct bucket_table_selector< true, GC, Node, Options...>
            {
                typedef expandable_bucket_table<GC, Node, Options...> type;
            };

            template <typename GC, typename Node, typename... Options>
            struct bucket_table_selector< false, GC, Node, Options...>
            {
                typedef static_bucket_table<GC, Node, Options...> type;
            };

            template <typename Q>
            struct search_value_type
            {
                Q&     val;
                size_t nHash;

                search_value_type( Q& v, size_t h )
                    : val( v ), nHash( h )
                {}
            };

            template <class OrderedList, class Traits>
            class rebind_list_traits
            {
                typedef OrderedList native_ordered_list;
                typedef Traits      traits;

                typedef typename native_ordered_list::gc             gc;
                typedef typename native_ordered_list::key_comparator native_key_comparator;
                typedef typename native_ordered_list::node_type      node_type;
                typedef typename native_ordered_list::value_type     value_type;
                typedef typename native_ordered_list::node_traits    node_traits;
                typedef typename native_ordered_list::disposer       native_disposer;

                typedef split_list::node<node_type> splitlist_node_type;

                struct key_compare {
                    int operator()( value_type const& v1, value_type const& v2 ) const
                    {
                        splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v1 ));
                        splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v2 ));
                        if ( n1->m_nHash != n2->m_nHash )
                            return n1->m_nHash < n2->m_nHash ? -1 : 1;

                        if ( n1->is_dummy() ) {
                            assert( n2->is_dummy() );
                            return 0;
                        }

                        assert( !n1->is_dummy() && !n2->is_dummy() );
                        return native_key_comparator()( v1, v2 );
                    }

                    template <typename Q>
                    int operator()( value_type const& v, search_value_type<Q> const& q ) const
                    {
                        splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
                        if ( n->m_nHash != q.nHash )
                            return n->m_nHash < q.nHash ? -1 : 1;

                        assert( !n->is_dummy() );
                        return native_key_comparator()( v, q.val );
                    }

                    template <typename Q>
                    int operator()( search_value_type<Q> const& q, value_type const& v ) const
                    {
                        return -operator()( v, q );
                    }
                };

                struct wrapped_disposer
                {
                    void operator()( value_type * v )
                    {
                        splitlist_node_type * p = static_cast<splitlist_node_type *>( node_traits::to_node_ptr( v ));
                        if ( !p->is_dummy() )
                            native_disposer()( v );
                    }
                };

            public:
                template <typename Less>
                struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
                {
                    typedef cds::opt::details::make_comparator_from_less<Less> base_class;

                    template <typename Q>
                    int operator()( value_type const& v, search_value_type<Q> const& q ) const
                    {
                        splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
                        if ( n->m_nHash != q.nHash )
                            return n->m_nHash < q.nHash ? -1 : 1;

                        assert( !n->is_dummy() );
                        return base_class()( v, q.val );
                    }

                    template <typename Q>
                    int operator()( search_value_type<Q> const& q, value_type const& v ) const
                    {
                        splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
                        if ( n->m_nHash != q.nHash )
                            return q.nHash < n->m_nHash ? -1 : 1;

                        assert( !n->is_dummy() );
                        return base_class()( q.val, v );
                    }

                    template <typename Q1, typename Q2>
                    int operator()( Q1 const& v1, Q2 const& v2 ) const
                    {
                        return base_class()( v1, v2 );
                    }
                };

                typedef typename native_ordered_list::template rebind_traits<
                    opt::compare< key_compare >
                    , opt::disposer< wrapped_disposer >
                    , opt::boundary_node_type< splitlist_node_type >
                >::type result;
            };
            template <typename OrderedList, bool IsConst>
            struct select_list_iterator;

            template <typename OrderedList>
            struct select_list_iterator<OrderedList, false>
            {
                typedef typename OrderedList::iterator type;
            };

            template <typename OrderedList>
            struct select_list_iterator<OrderedList, true>
            {
                typedef typename OrderedList::const_iterator type;
            };

            template <typename NodeTraits, typename OrderedList, bool IsConst>
            class iterator_type
            {
                typedef OrderedList ordered_list_type;
                friend class iterator_type<NodeTraits, OrderedList, !IsConst>;

            protected:
                typedef typename select_list_iterator<ordered_list_type, IsConst>::type list_iterator;
                typedef NodeTraits node_traits;

            private:
                list_iterator m_itCur;
                list_iterator m_itEnd;

            public:
                typedef typename list_iterator::value_ptr value_ptr;
                typedef typename list_iterator::value_ref value_ref;

            public:
                iterator_type()
                {}

                iterator_type( iterator_type const& src )
                    : m_itCur( src.m_itCur )
                    , m_itEnd( src.m_itEnd )
                {}

                // This ctor should be protected...
                iterator_type( list_iterator itCur, list_iterator itEnd )
                    : m_itCur( itCur )
                    , m_itEnd( itEnd )
                {
                    // skip dummy nodes
                    while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() )
                        ++m_itCur;
                }

                value_ptr operator ->() const { return m_itCur.operator->(); }
                value_ref operator *() const  { return m_itCur.operator*(); }

                /// Pre-increment; skips dummy nodes
                iterator_type& operator ++()
                {
                    if ( m_itCur != m_itEnd ) {
                        do {
                            ++m_itCur;
                        } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() );
                    }
                    return *this;
                }

                iterator_type& operator = (iterator_type const& src)
                {
                    m_itCur = src.m_itCur;
                    m_itEnd = src.m_itEnd;
                    return *this;
                }

                template <bool C>
                bool operator ==( iterator_type<node_traits, ordered_list_type, C> const& i ) const { return m_itCur == i.m_itCur; }
                template <bool C>
                bool operator !=( iterator_type<node_traits, ordered_list_type, C> const& i ) const { return m_itCur != i.m_itCur; }
            };
        } // namespace details
        //@endcond

        /// Reverses bit order in \p nHash
        static inline size_t reverse_bits( size_t nHash )
        {
            return bitop::RBO( nHash );
        }

        //@cond
        static inline size_t regular_hash( size_t nHash ) { return reverse_bits( nHash ) | size_t(1); }
        static inline size_t dummy_hash( size_t nHash )   { return reverse_bits( nHash ) & ~size_t(1); }
        //@endcond
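        // Example (sketch, 8-bit words for brevity; size_t is wider in practice):
        // for an item whose hash is 3 (00000011), reverse_bits gives 11000000, so
        //   regular_hash( 3 ) = 11000001  (LSB set - a node carrying a user item)
        //   dummy_hash( 3 )   = 11000000  (LSB clear - the bucket's auxiliary head node)
        // Bit reversal places every item right after its bucket's dummy node in the
        // single split-ordered list, so a bucket can be split without moving items.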
    } // namespace split_list

    // Forward declaration
    template <class GC, class OrderedList, class Traits = split_list::traits>
    class SplitListSet;

}} // namespace cds::intrusive

#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H