2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
32 #define CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
34 #include <cds/intrusive/details/base.h>
35 #include <cds/algo/atomic.h>
36 #include <cds/details/allocator.h>
37 #include <cds/algo/int_algo.h>
38 #include <cds/algo/bitop.h>
39 #include <cds/opt/hash.h>
40 #include <cds/intrusive/free_list_selector.h>
42 namespace cds { namespace intrusive {
44 /// Split-ordered list related definitions
45 /** @ingroup cds_intrusive_helper
47 namespace split_list {
/// Hash-value holder mixed into every split-list node (both dummy and regular)
/**
    The low bit of \p m_nHash encodes the node kind: even hash = dummy
    (bucket-head) node, odd hash = regular data node (see \p is_dummy()).
*/
struct hash_node
{
    size_t m_nHash; ///< Hash value for node

    /// Default constructor; initializes the node as dummy (hash 0)
    hash_node()
        : m_nHash( 0 )
    {
        assert( is_dummy());
    }

    /// Initializes dummy node with \p nHash value
    hash_node( size_t nHash )
        : m_nHash( nHash )
    {
        assert( is_dummy());
    }

    /// Checks if the node is dummy node
    bool is_dummy() const
    {
        // dummy (bucket-head) nodes carry an even hash, regular nodes odd
        return (m_nHash & 1) == 0;
    }
};
75 /// Split-ordered list node
78 - \p OrderedListNode - node type for underlying ordered list
80 template <typename OrderedListNode>
81 struct node: public OrderedListNode, public hash_node
84 typedef OrderedListNode base_class;
87 /// Default constructor
94 /// Initializes dummy node with \p nHash value
101 /// Checks if the node is dummy node
102 bool is_dummy() const
104 return hash_node::is_dummy();
111 struct node<void>: public hash_node
120 /// Initializes dummy node with \p nHash value
127 /// Checks if the node is dummy node
128 bool is_dummy() const
130 return hash_node::is_dummy();
135 /// \p SplitListSet internal statistics. May be used for debugging or profiling
137 Template argument \p Counter defines type of counter, default is \p cds::atomicity::event_counter.
139 template <typename Counter = cds::atomicity::event_counter >
142 typedef Counter counter_type; ///< Counter type
144 counter_type m_nInsertSuccess; ///< Count of success inserting
145 counter_type m_nInsertFailed; ///< Count of failed inserting
146 counter_type m_nUpdateNew; ///< Count of new item created by \p ensure() member function
147 counter_type m_nUpdateExist; ///< Count of \p ensure() call for existing item
148 counter_type m_nEraseSuccess; ///< Count of success erasing of items
149 counter_type m_nEraseFailed; ///< Count of attempts to erase unknown item
150 counter_type m_nExtractSuccess; ///< Count of success extracting of items
151 counter_type m_nExtractFailed; ///< Count of attempts to extract unknown item
152 counter_type m_nFindSuccess; ///< Count of success finding
153 counter_type m_nFindFailed; ///< Count of failed finding
154 counter_type m_nHeadNodeAllocated; ///< Count of allocated head node
155 counter_type m_nHeadNodeFreed; ///< Count of freed head node
156 counter_type m_nBucketCount; ///< Current bucket count
157 counter_type m_nInitBucketRecursive; ///< Count of recursive bucket initialization
158 counter_type m_nInitBucketContention; ///< Count of bucket init contention encountered
159 counter_type m_nBusyWaitBucketInit; ///< Count of busy wait cycle while a bucket is initialized
160 counter_type m_nBucketsExhausted; ///< Count of failed bucket allocation
163 void onInsertSuccess() { ++m_nInsertSuccess; }
164 void onInsertFailed() { ++m_nInsertFailed; }
165 void onUpdateNew() { ++m_nUpdateNew; }
166 void onUpdateExist() { ++m_nUpdateExist; }
167 void onEraseSuccess() { ++m_nEraseSuccess; }
168 void onEraseFailed() { ++m_nEraseFailed; }
169 void onExtractSuccess() { ++m_nExtractSuccess; }
170 void onExtractFailed() { ++m_nExtractFailed; }
171 void onFindSuccess() { ++m_nFindSuccess; }
172 void onFindFailed() { ++m_nFindFailed; }
173 bool onFind(bool bSuccess)
181 void onHeadNodeAllocated() { ++m_nHeadNodeAllocated; }
182 void onHeadNodeFreed() { ++m_nHeadNodeFreed; }
183 void onNewBucket() { ++m_nBucketCount; }
184 void onRecursiveInitBucket() { ++m_nInitBucketRecursive; }
185 void onBucketInitContenton() { ++m_nInitBucketContention; }
186 void onBusyWaitBucketInit() { ++m_nBusyWaitBucketInit; }
187 void onBucketsExhausted() { ++m_nBucketsExhausted; }
/// Dummy statistics - no counting is performed, no overhead. Supports the same interface as \p split_list::stat
struct empty_stat
{
    //@cond
    void onInsertSuccess() const {}
    void onInsertFailed() const {}
    void onUpdateNew() const {}
    void onUpdateExist() const {}
    void onEraseSuccess() const {}
    void onEraseFailed() const {}
    void onExtractSuccess() const {}
    void onExtractFailed() const {}
    void onFindSuccess() const {}
    void onFindFailed() const {}
    bool onFind( bool bSuccess ) const { return bSuccess; }
    void onHeadNodeAllocated() const {}
    void onHeadNodeFreed() const {}
    void onNewBucket() const {}
    void onRecursiveInitBucket() const {}
    void onBucketInitContenton() const {}
    void onBusyWaitBucketInit() const {}
    void onBucketsExhausted() const {}
    //@endcond
};
215 /// SplitListSet traits
220 Hash function converts the key fields of struct \p T stored in the split list
221 into hash value of type \p size_t that is an index in hash table.
222 By default, \p std::hash is used.
224 typedef opt::none hash;
228 The item counting is an important part of \p SplitListSet algorithm:
229 the <tt>empty()</tt> member function depends on correct item counting.
230 Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.
232 Default is \p cds::atomicity::item_counter.
234 typedef cds::atomicity::item_counter item_counter;
236 /// Bucket table allocator
238 Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
240 typedef CDS_DEFAULT_ALLOCATOR allocator;
242 /// Internal statistics (by default, disabled)
244 Possible statistics types are: \p split_list::stat (enable internal statistics),
245 \p split_list::empty_stat (the default, internal statistics disabled),
246 user-provided class that supports \p %split_list::stat interface.
248 typedef split_list::empty_stat stat;
251 /// C++ memory ordering model
253 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
254 or \p opt::v::sequential_consistent (sequentially consisnent memory model).
256 typedef opt::v::relaxed_ordering memory_model;
258 /// What type of bucket table is used
260 \p true - use \p split_list::expandable_bucket_table that can be expanded
261 if the load factor of the set is exhausted.
262 \p false - use \p split_list::static_bucket_table that cannot be expanded
263 and is allocated in \p SplitListSet constructor.
267 static const bool dynamic_bucket_table = true;
269 /// Back-off strategy
270 typedef cds::backoff::Default back_off;
272 /// Padding; default is cache-line padding
274 padding = cds::opt::cache_line_padding
277 /// Free-list of auxiliary nodes
279 The split-list contains auxiliary nodes marked the start of buckets.
280 To increase performance, there is a pool of preallocated aux nodes. The part of the pool is a free-list
284 - \p cds::intrusive::FreeList - if architecture and/or compiler does not support double-width CAS primitive
285 - \p cds::intrusive::TaggedFreeList - if architecture and/or compiler supports double-width CAS primitive
287 typedef FreeListImpl free_list;
/// [value-option] Split-list dynamic bucket table option
/**
    The option is used to select bucket table implementation.
    Possible values of \p Value are:
    - \p true - select \p expandable_bucket_table
    - \p false - select \p static_bucket_table
*/
template <bool Value>
struct dynamic_bucket_table
{
    //@cond
    template <typename Base> struct pack: public Base
    {
        enum { dynamic_bucket_table = Value };
    };
    //@endcond
};
308 /// Metafunction converting option list to \p split_list::traits
310 Available \p Options:
311 - \p opt::hash - mandatory option, specifies hash functor.
312 - \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter
314 - \p opt::memory_model - C++ memory model for atomic operations.
315 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
316 or \p opt::v::sequential_consistent (sequentially consistent memory model).
317 - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR.
318 - \p split_list::dynamic_bucket_table - use dynamic or static bucket table implementation.
319 Dynamic bucket table expands its size up to maximum bucket count when necessary
320 - \p opt::back_off - back-off strategy used for spinning, default is \p cds::backoff::Default.
321 - \p opt::stat - internal statistics, default is \p split_list::empty_stat (disabled).
322 To enable internal statistics use \p split_list::stat.
323 - \p opt::padding - a padding to solve false-sharing issues; default is cache-line padding
324 - \p opt::free_list - a free-list implementation, see \p traits::free_list
326 template <typename... Options>
328 typedef typename cds::opt::make_options< traits, Options...>::type type ; ///< Result of metafunction
331 /// Static bucket table
333 Non-resizeable bucket table for \p SplitListSet class.
334 The capacity of table (max bucket count) is defined in the constructor call.
337 - \p GC - garbage collector
338 - \p Node - node type, must be a type based on \p split_list::node
339 - \p Options... - options
342 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
343 - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
344 - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-with CAS
345 otherwise \p FreeList.
347 template <typename GC, typename Node, typename... Options>
348 class static_bucket_table
// NOTE(review): several interior lines of this class (brace lines, ctor/dtor
// bodies, a member-function signature around the teardown code) appear to have
// been elided by extraction; the surviving code is left byte-identical and only
// comments are added. Confirm against upstream before rebuilding.
// Defaults for options the user did not supply.
351 struct default_options
353 typedef CDS_DEFAULT_ALLOCATOR allocator;
354 typedef opt::v::relaxed_ordering memory_model;
355 typedef FreeListImpl free_list;
357 typedef typename opt::make_options< default_options, Options... >::type options;
361 typedef GC gc; ///< Garbage collector
362 typedef Node node_type; ///< Bucket node type
363 typedef typename options::allocator allocator; ///< allocator
364 typedef typename options::memory_model memory_model; ///< Memory model for atomic operations
365 typedef typename options::free_list free_list; ///< Free-list
367 /// Auxiliary node type
368 struct aux_node_type: public node_type, public free_list::node
371 typedef atomics::atomic<aux_node_type *> table_entry; ///< Table entry type
372 typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator
376 const size_t m_nLoadFactor; ///< load factor (average count of items per bucket)
377 const size_t m_nCapacity; ///< Bucket table capacity
378 table_entry * m_Table; ///< Bucket table
380 typedef typename allocator::template rebind< aux_node_type >::other aux_node_allocator;
382 aux_node_type* m_auxNode; ///< Array of pre-allocated auxiliary nodes
383 atomics::atomic<size_t> m_nAuxNodeAllocated; ///< how many auxiliary node allocated
384 free_list m_freeList; ///< Free list
// Allocates the array of atomic bucket-head pointers (nullptr-filled) and the
// raw array of pre-allocated aux nodes; both are sized m_nCapacity.
389 void allocate_table()
391 m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr );
392 m_auxNode = aux_node_allocator().allocate( m_nCapacity );
// Teardown: drain the free-list first, then release the aux-node array and the
// bucket table. (Presumably the body of a destroy_table()-style helper whose
// signature was elided — confirm.)
397 m_freeList.clear( []( typename free_list::node* ) {} );
398 aux_node_allocator().deallocate( m_auxNode, m_nCapacity );
399 bucket_table_allocator().Delete( m_Table, m_nCapacity );
404 /// Constructs bucket table for 512K buckets. Load factor is 1.
405 static_bucket_table()
407 , m_nCapacity( 512 * 1024 )
408 , m_nAuxNodeAllocated( 0 )
413 /// Creates the table with specified size rounded up to nearest power-of-two
415 size_t nItemCount, ///< Max expected item count in split-ordered list
416 size_t nLoadFactor ///< Load factor
// A zero load factor is coerced to 1, avoiding division by zero below.
418 : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 )
419 , m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ))
420 , m_nAuxNodeAllocated( 0 )
422 // m_nCapacity must be power of 2
423 assert( cds::beans::is_power2( m_nCapacity ));
427 /// Destroys bucket table
428 ~static_bucket_table()
433 /// Returns head node of bucket \p nBucket
434 aux_node_type * bucket( size_t nBucket ) const
436 assert( nBucket < capacity());
437 return m_Table[ nBucket ].load(memory_model::memory_order_acquire);
440 /// Set \p pNode as a head of bucket \p nBucket
// Precondition: the bucket must not yet be initialized (its head is nullptr).
441 void bucket( size_t nBucket, aux_node_type * pNode )
443 assert( nBucket < capacity());
444 assert( bucket( nBucket ) == nullptr );
446 m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );
449 /// Allocates auxiliary node; can return \p nullptr if the table exhausted
450 aux_node_type* alloc_aux_node()
// Fast path: bump-allocate from the pre-allocated array. The fetch_add may
// overshoot capacity under contention, hence the second bound check on idx.
452 if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) {
453 // alloc next free node from m_auxNode
454 size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
455 if ( idx < capacity())
456 return new( &m_auxNode[idx] ) aux_node_type();
459 // get from free-list
460 auto pFree = m_freeList.get();
462 return static_cast<aux_node_type*>( pFree );
468 /// Places node type to free-list
469 void free_aux_node( aux_node_type* p )
471 m_freeList.put( static_cast<aux_node_type*>( p ));
474 /// Returns the capacity of the bucket table
475 size_t capacity() const
480 /// Returns the load factor, i.e. average count of items per bucket
481 size_t load_factor() const
483 return m_nLoadFactor;
487 /// Expandable bucket table
489 This bucket table can dynamically grow its capacity when necessary
490 up to maximum bucket count.
493 - \p GC - garbage collector
494 - \p Node - node type, must be derived from \p split_list::node
495 - \p Options... - options
498 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
499 - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
500 - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-with CAS
501 otherwise \p FreeList.
503 template <typename GC, typename Node, typename... Options>
504 class expandable_bucket_table
// NOTE(review): interior lines (braces, ctor bodies, some branches of
// calc_metrics) appear elided by extraction; code is kept byte-identical,
// comments only added. Confirm against upstream before rebuilding.
507 struct default_options
509 typedef CDS_DEFAULT_ALLOCATOR allocator;
510 typedef opt::v::relaxed_ordering memory_model;
511 typedef FreeListImpl free_list;
513 typedef typename opt::make_options< default_options, Options... >::type options;
517 typedef GC gc; ///< Garbage collector
518 typedef Node node_type; ///< Bucket node type
519 typedef typename options::allocator allocator; ///< allocator
521 /// Memory model for atomic operations
522 typedef typename options::memory_model memory_model;
525 typedef typename options::free_list free_list;
527 /// Auxiliary node type
528 class aux_node_type: public node_type, public free_list::node
533 typedef atomics::atomic<aux_node_type *> table_entry; ///< Table entry type
534 typedef atomics::atomic<table_entry *> segment_type; ///< Bucket table segment type
// Header of one aux-node segment; the aux_node_type array is placed
// immediately after this header in the same raw allocation (see segment()).
536 struct aux_node_segment {
537 atomics::atomic< size_t > aux_node_count; // how many aux nodes allocated from the segment
538 aux_node_segment* next_segment;
539 // aux_node_type nodes[];
543 , next_segment( nullptr )
// Returns the start of the aux-node array that follows this header in memory.
546 aux_node_type* segment()
548 return reinterpret_cast<aux_node_type*>( this + 1 );
552 /// Bucket table metrics
554 size_t nSegmentCount; ///< max count of segments in bucket table
555 size_t nSegmentSize; ///< the segment's capacity. The capacity must be power of two.
556 size_t nSegmentSizeLog2; ///< <tt> log2( m_nSegmentSize )</tt>
557 size_t nLoadFactor; ///< load factor
558 size_t nCapacity; ///< max capacity of bucket table
561 : nSegmentCount( 1024 )
562 , nSegmentSize( 512 )
563 , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ))
565 , nCapacity( nSegmentCount * nSegmentSize )
569 /// Bucket table allocator
570 typedef cds::details::Allocator< segment_type, allocator > bucket_table_allocator;
572 /// Bucket table segment allocator
573 typedef cds::details::Allocator< table_entry, allocator > segment_allocator;
575 // Aux node segment allocator
576 typedef typename allocator::template rebind<char>::other raw_allocator;
581 /// Constructs bucket table for 512K buckets. Load factor is 1.
582 expandable_bucket_table()
583 : m_metrics( calc_metrics( 512 * 1024, 1 ))
588 /// Creates the table with specified capacity rounded up to nearest power-of-two
589 expandable_bucket_table(
590 size_t nItemCount, ///< Max expected item count in split-ordered list
591 size_t nLoadFactor ///< Load factor
593 : m_metrics( calc_metrics( nItemCount, nLoadFactor ))
598 /// Destroys bucket table
599 ~expandable_bucket_table()
601 m_freeList.clear( []( typename free_list::node* ) {} );
// Walk and release the linked list of aux-node segments.
603 for ( auto aux_segment = m_auxNodeList.load( atomics::memory_order_relaxed ); aux_segment; ) {
604 auto next_segment = aux_segment->next_segment;
605 free_aux_segment( aux_segment );
606 aux_segment = next_segment;
// Release every lazily-allocated bucket segment, then the segment array.
609 segment_type * pSegments = m_Segments;
610 for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) {
611 table_entry* pEntry = pSegments[i].load(memory_model::memory_order_relaxed);
612 if ( pEntry != nullptr )
613 destroy_segment( pEntry );
616 destroy_table( pSegments );
619 /// Returns head node of the bucket \p nBucket
620 aux_node_type * bucket( size_t nBucket ) const
622 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
623 assert( nSegment < m_metrics.nSegmentCount );
625 table_entry* pSegment = m_Segments[ nSegment ].load(memory_model::memory_order_acquire);
626 if ( pSegment == nullptr )
627 return nullptr; // uninitialized bucket
628 return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load(memory_model::memory_order_acquire);
631 /// Set \p pNode as a head of bucket \p nBucket
632 void bucket( size_t nBucket, aux_node_type * pNode )
634 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
635 assert( nSegment < m_metrics.nSegmentCount );
637 segment_type& segment = m_Segments[nSegment];
// Lazy segment initialization: CAS the new segment in; on a lost race the
// freshly allocated segment is destroyed and the winner's segment is used.
638 if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
639 table_entry* pNewSegment = allocate_segment();
640 table_entry * pNull = nullptr;
641 if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
642 destroy_segment( pNewSegment );
645 segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
648 /// Allocates auxiliary node; can return \p nullptr if the table exhausted
649 aux_node_type* alloc_aux_node()
652 aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_relaxed );
653 assert( aux_segment != nullptr );
655 // try to allocate from current aux segment
656 if ( aux_segment->aux_node_count.load( memory_model::memory_order_relaxed ) < m_metrics.nSegmentSize ) {
657 size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
658 if ( idx < m_metrics.nSegmentSize )
659 return new( aux_segment->segment() + idx ) aux_node_type();
662 // try allocate from free-list
663 auto pFree = m_freeList.get();
665 return static_cast<aux_node_type*>( pFree );
667 // free-list is empty, current segment is full
668 // try to allocate new aux segment
669 // We can allocate more aux segments than we need but it is not a problem in this context
670 aux_node_segment* new_aux_segment = allocate_aux_segment();
671 new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
// Publish the new segment; the loser of the race frees its segment.
672 if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ))
673 return new( new_aux_segment->segment()) aux_node_type();
675 free_aux_segment( new_aux_segment );
679 /// Places auxiliary node type to free-list
680 void free_aux_node( aux_node_type* p )
682 m_freeList.put( static_cast<aux_node_type*>( p ));
685 /// Returns the capacity of the bucket table
686 size_t capacity() const
688 return m_metrics.nCapacity;
691 /// Returns the load factor, i.e. average count of items per bucket
692 size_t load_factor() const
694 return m_metrics.nLoadFactor;
// Derives segment count/size (both powers of two) from the expected item
// count and load factor.
699 metrics calc_metrics( size_t nItemCount, size_t nLoadFactor )
703 // Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
704 m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
706 size_t nBucketCount = (size_t)(((float)nItemCount) / m.nLoadFactor);
707 if ( nBucketCount <= 2 ) {
711 else if ( nBucketCount <= 1024 ) {
713 m.nSegmentSize = ((size_t)1) << beans::log2ceil( nBucketCount );
716 nBucketCount = beans::log2ceil( nBucketCount );
718 m.nSegmentSize = ((size_t)1) << (nBucketCount / 2);
719 if ( nBucketCount & 1 )
721 if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount )
724 m.nCapacity = m.nSegmentCount * m.nSegmentSize;
725 m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize );
726 assert( m.nSegmentSizeLog2 != 0 ); //
730 segment_type * allocate_table()
732 return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr );
735 void destroy_table( segment_type * pTable )
737 bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount );
740 table_entry* allocate_segment()
742 return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr );
745 void destroy_segment( table_entry* pSegment )
747 segment_allocator().Delete( pSegment, m_metrics.nSegmentSize );
// One raw allocation holds the segment header plus its aux-node array.
750 aux_node_segment* allocate_aux_segment()
752 char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
753 return new(p) aux_node_segment();
756 void free_aux_segment( aux_node_segment* p )
758 raw_allocator().deallocate( reinterpret_cast<char*>( p ), sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
763 // m_nSegmentSize must be 2**N
764 assert( cds::beans::is_power2( m_metrics.nSegmentSize ));
765 assert( (((size_t)1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize );
767 // m_nSegmentCount must be 2**K
768 assert( cds::beans::is_power2( m_metrics.nSegmentCount ));
770 m_Segments = allocate_table();
771 m_auxNodeList = allocate_aux_segment();
777 metrics const m_metrics; ///< Dynamic bucket table metrics
778 segment_type* m_Segments; ///< bucket table - array of segments
779 atomics::atomic<aux_node_segment*> m_auxNodeList; ///< segment list of aux nodes
780 free_list m_freeList; ///< List of free aux nodes
787 template <bool Value, typename GC, typename Node, typename... Options>
788 struct bucket_table_selector;
790 template <typename GC, typename Node, typename... Options>
791 struct bucket_table_selector< true, GC, Node, Options...>
793 typedef expandable_bucket_table<GC, Node, Options...> type;
796 template <typename GC, typename Node, typename... Options>
797 struct bucket_table_selector< false, GC, Node, Options...>
799 typedef static_bucket_table<GC, Node, Options...> type;
/// Pairs a search value with the hash computed for it by the split-list;
/// comparators read \p val and \p nHash (see the adapter key_compare below)
template <typename Q>
struct search_value_type
{
    Q&      val;    ///< reference to the user-supplied search value
    size_t  nHash;  ///< split-ordered hash of the value

    /// Binds value \p v with its hash \p h
    search_value_type( Q& v, size_t h )
        : val( v )
        , nHash( h )
    {}
};
// Adapts an ordered list to the split-list: Iterable selects the
// specialization (false = node-based list, true = iterable list).
814 template <class OrderedList, class Traits, bool Iterable >
815 class ordered_list_adapter;
// Specialization for node-based ordered lists: split-list hash data lives in
// a node derived from the list's node type.
// NOTE(review): class/struct opening braces and some branch lines appear
// elided by extraction; code kept byte-identical, comments only added.
817 template <class OrderedList, class Traits>
818 class ordered_list_adapter< OrderedList, Traits, false >
820 typedef OrderedList native_ordered_list;
821 typedef Traits traits;
823 typedef typename native_ordered_list::gc gc;
824 typedef typename native_ordered_list::key_comparator native_key_comparator;
825 typedef typename native_ordered_list::node_type node_type;
826 typedef typename native_ordered_list::value_type value_type;
827 typedef typename native_ordered_list::node_traits native_node_traits;
828 typedef typename native_ordered_list::disposer native_disposer;
830 typedef split_list::node<node_type> splitlist_node_type;
// Three-way comparator: orders by split-ordered hash first, then by the
// native key comparator; dummy nodes are ordered by hash only.
833 int operator()( value_type const& v1, value_type const& v2 ) const
835 splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v1 ));
836 splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v2 ));
837 if ( n1->m_nHash != n2->m_nHash )
838 return n1->m_nHash < n2->m_nHash ? -1 : 1;
840 if ( n1->is_dummy()) {
841 assert( n2->is_dummy());
845 assert( !n1->is_dummy() && !n2->is_dummy());
847 return native_key_comparator()(v1, v2);
850 template <typename Q>
851 int operator()( value_type const& v, search_value_type<Q> const& q ) const
853 splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
854 if ( n->m_nHash != q.nHash )
855 return n->m_nHash < q.nHash ? -1 : 1;
857 assert( !n->is_dummy());
858 return native_key_comparator()(v, q.val);
861 template <typename Q>
862 int operator()( search_value_type<Q> const& q, value_type const& v ) const
864 return -operator()( v, q );
// Disposer wrapper: resolves the split-list node before delegating to the
// native disposer.
868 struct wrapped_disposer
870 void operator()( value_type * v )
872 splitlist_node_type * p = static_cast<splitlist_node_type *>(native_node_traits::to_node_ptr( v ));
874 native_disposer()(v);
879 typedef node_type ordered_list_node_type;
880 typedef splitlist_node_type aux_node;
// Node-traits facade converting between values and split-list nodes via the
// native ordered-list node traits.
882 struct node_traits: private native_node_traits
884 typedef native_node_traits base_class; ///< Base ordered list node type
885 typedef typename base_class::value_type value_type; ///< Value type
886 typedef typename base_class::node_type base_node_type; ///< Ordered list node type
887 typedef node<base_node_type> node_type; ///< Split-list node type
889 /// Convert value reference to node pointer
890 static node_type * to_node_ptr( value_type& v )
892 return static_cast<node_type *>(base_class::to_node_ptr( v ));
895 /// Convert value pointer to node pointer
896 static node_type * to_node_ptr( value_type * v )
898 return static_cast<node_type *>(base_class::to_node_ptr( v ));
901 /// Convert value reference to node pointer (const version)
902 static node_type const * to_node_ptr( value_type const& v )
904 return static_cast<node_type const*>(base_class::to_node_ptr( v ));
907 /// Convert value pointer to node pointer (const version)
908 static node_type const * to_node_ptr( value_type const * v )
910 return static_cast<node_type const *>(base_class::to_node_ptr( v ));
913 /// Convert node reference to value pointer
914 static value_type * to_value_ptr( node_type& n )
916 return base_class::to_value_ptr( static_cast<base_node_type &>(n));
919 /// Convert node pointer to value pointer
920 static value_type * to_value_ptr( node_type * n )
922 return base_class::to_value_ptr( static_cast<base_node_type *>(n));
925 /// Convert node reference to value pointer (const version)
926 static const value_type * to_value_ptr( node_type const & n )
928 return base_class::to_value_ptr( static_cast<base_node_type const &>(n));
931 /// Convert node pointer to value pointer (const version)
932 static const value_type * to_value_ptr( node_type const * n )
934 return base_class::to_value_ptr( static_cast<base_node_type const *>(n));
// Builds a hash-aware three-way comparator from a user-supplied Less functor.
938 template <typename Less>
939 struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
941 typedef cds::opt::details::make_comparator_from_less<Less> base_class;
943 template <typename Q>
944 int operator()( value_type const& v, search_value_type<Q> const& q ) const
946 splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
947 if ( n->m_nHash != q.nHash )
948 return n->m_nHash < q.nHash ? -1 : 1;
950 assert( !n->is_dummy());
951 return base_class()(v, q.val);
954 template <typename Q>
955 int operator()( search_value_type<Q> const& q, value_type const& v ) const
957 splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
958 if ( n->m_nHash != q.nHash )
959 return q.nHash < n->m_nHash ? -1 : 1;
961 assert( !n->is_dummy());
962 return base_class()(q.val, v);
965 template <typename Q1, typename Q2>
966 int operator()( Q1 const& v1, Q2 const& v2 ) const
968 return base_class()(v1, v2);
// Rebinds the native list traits with the split-list comparator, disposer
// and boundary (aux) node type.
972 typedef typename native_ordered_list::template rebind_traits<
973 opt::compare< key_compare >
974 , opt::disposer< wrapped_disposer >
975 , opt::boundary_node_type< splitlist_node_type >
// Specialization for iterable ordered lists: values themselves derive from
// hash_node, so no separate node type is needed.
// NOTE(review): class/struct opening braces and some interior lines appear
// elided by extraction; code kept byte-identical, comments only added.
979 template <class OrderedList, class Traits>
980 class ordered_list_adapter< OrderedList, Traits, true >
982 typedef OrderedList native_ordered_list;
983 typedef Traits traits;
985 typedef typename native_ordered_list::gc gc;
986 typedef typename native_ordered_list::key_comparator native_key_comparator;
987 typedef typename native_ordered_list::value_type value_type;
988 typedef typename native_ordered_list::disposer native_disposer;
// Three-way comparator: split-ordered hash first, then the native comparator;
// dummy nodes compare by hash only.
991 int operator()( value_type const& v1, value_type const& v2 ) const
993 hash_node const& n1 = static_cast<hash_node const&>( v1 );
994 hash_node const& n2 = static_cast<hash_node const&>( v2 );
995 if ( n1.m_nHash != n2.m_nHash )
996 return n1.m_nHash < n2.m_nHash ? -1 : 1;
998 if ( n1.is_dummy()) {
999 assert( n2.is_dummy());
1003 assert( !n1.is_dummy() && !n2.is_dummy());
1005 return native_key_comparator()(v1, v2);
1008 template <typename Q>
1009 int operator()( value_type const& v, search_value_type<Q> const& q ) const
1011 hash_node const& n = static_cast<hash_node const&>( v );
1012 if ( n.m_nHash != q.nHash )
1013 return n.m_nHash < q.nHash ? -1 : 1;
1015 assert( !n.is_dummy());
1016 return native_key_comparator()(v, q.val);
1019 template <typename Q>
1020 int operator()( search_value_type<Q> const& q, value_type const& v ) const
1022 return -operator()( v, q );
// Disposer wrapper: dummy (bucket-head) nodes are never passed to the native
// disposer.
1026 struct wrapped_disposer
1028 void operator()( value_type * v )
1030 hash_node* p = static_cast<hash_node*>( v );
1031 if ( !p->is_dummy())
1032 native_disposer()(v);
1037 typedef void ordered_list_node_type;
// Auxiliary (bucket-head) node: a list node that stores itself as its data
// pointer so the iterable list can traverse it uniformly.
1039 struct aux_node: public native_ordered_list::node_type, public hash_node
1043 typedef typename native_ordered_list::node_type list_node_type;
1044 list_node_type::data.store( typename list_node_type::marked_data_ptr(
1045 static_cast<value_type*>( static_cast<hash_node *>( this ))),
1046 atomics::memory_order_release
// Value/node conversions are identity casts: values derive from hash_node.
1053 static hash_node * to_node_ptr( value_type& v )
1055 return static_cast<hash_node *>( &v );
1058 static hash_node * to_node_ptr( value_type * v )
1060 return static_cast<hash_node *>( v );
1063 static hash_node const * to_node_ptr( value_type const& v )
1065 return static_cast<hash_node const*>( &v );
1068 static hash_node const * to_node_ptr( value_type const * v )
1070 return static_cast<hash_node const *>( v );
// Builds a hash-aware three-way comparator from a user-supplied Less functor.
1074 template <typename Less>
1075 struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
1077 typedef cds::opt::details::make_comparator_from_less<Less> base_class;
1079 template <typename Q>
1080 int operator()( value_type const& v, search_value_type<Q> const& q ) const
1082 hash_node const& n = static_cast<hash_node const&>( v );
1083 if ( n.m_nHash != q.nHash )
1084 return n.m_nHash < q.nHash ? -1 : 1;
1086 assert( !n.is_dummy());
1087 return base_class()(v, q.val);
1090 template <typename Q>
1091 int operator()( search_value_type<Q> const& q, value_type const& v ) const
1093 hash_node const& n = static_cast<hash_node const&>( v );
1094 if ( n.m_nHash != q.nHash )
1095 return q.nHash < n.m_nHash ? -1 : 1;
1097 assert( !n.is_dummy());
1098 return base_class()(q.val, v);
1101 template <typename Q1, typename Q2>
1102 int operator()( Q1 const& v1, Q2 const& v2 ) const
1104 return base_class()(v1, v2);
// Rebinds the native list traits with the split-list comparator, disposer
// and boundary (aux) node type.
1108 typedef typename native_ordered_list::template rebind_traits<
1109 opt::compare< key_compare >
1110 , opt::disposer< wrapped_disposer >
1111 , opt::boundary_node_type< aux_node >
// Selects the proper adapter specialization for the ordered list:
// iterable lists (is_iterable_list == true) vs node-based lists.
1115 template <class OrderedList, class Traits>
1116 using rebind_list_traits = ordered_list_adapter< OrderedList, Traits, is_iterable_list<OrderedList>::value >;
/// Selects the underlying list's iterator type:
/// \p IsConst == \p false yields \p OrderedList::iterator,
/// \p IsConst == \p true yields \p OrderedList::const_iterator
template <typename OrderedList, bool IsConst>
struct select_list_iterator;

template <typename OrderedList>
struct select_list_iterator<OrderedList, false>
{
    typedef typename OrderedList::iterator type;
};

template <typename OrderedList>
struct select_list_iterator<OrderedList, true>
{
    typedef typename OrderedList::const_iterator type;
};
// Split-list iterator: wraps the underlying ordered-list iterator and skips
// dummy (bucket-head) nodes so only user values are visited.
// NOTE(review): the class-head line, ctor bodies and parts of operator++
// appear elided by extraction; code kept byte-identical, comments only added.
1133 template <typename NodeTraits, typename OrderedList, bool IsConst>
1136 typedef OrderedList ordered_list_type;
1137 friend class iterator_type <NodeTraits, OrderedList, !IsConst >;
1140 typedef typename select_list_iterator<ordered_list_type, IsConst>::type list_iterator;
1141 typedef NodeTraits node_traits;
1144 list_iterator m_itCur;
1145 list_iterator m_itEnd;
1148 typedef typename list_iterator::value_ptr value_ptr;
1149 typedef typename list_iterator::value_ref value_ref;
// Copy constructor
1155 iterator_type( iterator_type const& src )
1156 : m_itCur( src.m_itCur )
1157 , m_itEnd( src.m_itEnd )
1160 // This ctor should be protected...
1161 iterator_type( list_iterator itCur, list_iterator itEnd )
// Skip any leading dummy nodes so the iterator starts at a user value.
1166 while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy())
1170 value_ptr operator ->() const
1172 return m_itCur.operator->();
1175 value_ref operator *() const
1177 return m_itCur.operator*();
// Pre-increment: advances the underlying iterator, skipping dummy nodes.
1181 iterator_type& operator ++()
1183 if ( m_itCur != m_itEnd ) {
1186 } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy());
1191 iterator_type& operator = (iterator_type const& src)
1193 m_itCur = src.m_itCur;
1194 m_itEnd = src.m_itEnd;
// Comparison against either const or non-const iterator flavor; only the
// current position is compared.
1199 bool operator ==(iterator_type<node_traits, ordered_list_type, C> const& i ) const
1201 return m_itCur == i.m_itCur;
1204 bool operator !=(iterator_type<node_traits, ordered_list_type, C> const& i ) const
1206 return m_itCur != i.m_itCur;
1209 } // namespace details
1215 /// Reverses bit order in \p nHash
1216 static inline size_t reverse_bits( size_t nHash )
1218 return bitop::RBO( nHash );
1221 static inline size_t regular_hash( size_t nHash )
1223 return reverse_bits( nHash ) | size_t(1);
1226 static inline size_t dummy_hash( size_t nHash )
1228 return reverse_bits( nHash ) & ~size_t(1);
1232 } // namespace split_list
1235 // Forward declaration
1236 template <class GC, class OrderedList, class Traits = split_list::traits>
1240 }} // namespace cds::intrusive
1242 #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H