2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
32 #define CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
34 #include <cds/intrusive/details/base.h>
35 #include <cds/algo/atomic.h>
36 #include <cds/details/allocator.h>
37 #include <cds/algo/int_algo.h>
38 #include <cds/algo/bitop.h>
39 #include <cds/opt/hash.h>
40 #include <cds/intrusive/free_list_selector.h>
42 namespace cds { namespace intrusive {
44 /// Split-ordered list related definitions
45 /** @ingroup cds_intrusive_helper
47 namespace split_list {
// NOTE(review): source extraction dropped many physical lines in this region
// (struct headers and constructor bodies are missing); the comments below are
// anchored only to the lines that survive.
// hash_node stores the node's hash value; presumably the split-order
// (bit-reversed) hash -- TODO confirm against the full libcds source.
51 size_t m_nHash; ///< Hash value for node
53 /// Default constructor
60 /// Initializes dummy node with \p nHash value
61 hash_node( size_t nHash )
67 /// Checks if the node is dummy node
// A dummy (bucket-head) node carries an even hash (LSB == 0); by negation,
// any node for which this returns false has an odd hash.
70 return (m_nHash & 1) == 0;
75 /// Split-ordered list node
78 - \p OrderedListNode - node type for underlying ordered list
80 template <typename OrderedListNode>
81 struct node: public OrderedListNode, public hash_node
84 typedef OrderedListNode base_class;
87 /// Default constructor
94 /// Initializes dummy node with \p nHash value
101 /// Checks if the node is dummy node
102 bool is_dummy() const
// Delegates to hash_node::is_dummy() (even hash => dummy bucket head).
104 return hash_node::is_dummy();
// Specialization for ordered lists that need no extra node base (void).
111 struct node<void>: public hash_node
120 /// Initializes dummy node with \p nHash value
127 /// Checks if the node is dummy node
128 bool is_dummy() const
130 return hash_node::is_dummy();
135 /// \p SplitListSet internal statistics. May be used for debugging or profiling
137 Template argument \p Counter defines type of counter, default is \p cds::atomicity::event_counter.
139 template <typename Counter = cds::atomicity::event_counter >
142 typedef Counter counter_type; ///< Counter type
144 counter_type m_nInsertSuccess; ///< Count of success inserting
145 counter_type m_nInsertFailed; ///< Count of failed inserting
146 counter_type m_nUpdateNew; ///< Count of new item created by \p ensure() member function
147 counter_type m_nUpdateExist; ///< Count of \p ensure() call for existing item
148 counter_type m_nEraseSuccess; ///< Count of success erasing of items
149 counter_type m_nEraseFailed; ///< Count of attempts to erase unknown item
150 counter_type m_nExtractSuccess; ///< Count of success extracting of items
151 counter_type m_nExtractFailed; ///< Count of attempts to extract unknown item
152 counter_type m_nFindSuccess; ///< Count of success finding
153 counter_type m_nFindFailed; ///< Count of failed finding
154 counter_type m_nHeadNodeAllocated; ///< Count of allocated head node
155 counter_type m_nHeadNodeFreed; ///< Count of freed head node
156 counter_type m_nBucketCount; ///< Current bucket count
157 counter_type m_nInitBucketRecursive; ///< Count of recursive bucket initialization
158 counter_type m_nInitBucketContention; ///< Count of bucket init contention encountered
159 counter_type m_nBusyWaitBucketInit; ///< Count of busy wait cycle while a bucket is initialized
160 counter_type m_nBucketsExhausted; ///< Count of failed bucket allocation
163 void onInsertSuccess() { ++m_nInsertSuccess; }
164 void onInsertFailed() { ++m_nInsertFailed; }
165 void onUpdateNew() { ++m_nUpdateNew; }
166 void onUpdateExist() { ++m_nUpdateExist; }
167 void onEraseSuccess() { ++m_nEraseSuccess; }
168 void onEraseFailed() { ++m_nEraseFailed; }
169 void onExtractSuccess() { ++m_nExtractSuccess; }
170 void onExtractFailed() { ++m_nExtractFailed; }
171 void onFindSuccess() { ++m_nFindSuccess; }
172 void onFindFailed() { ++m_nFindFailed; }
173 bool onFind(bool bSuccess)
181 void onHeadNodeAllocated() { ++m_nHeadNodeAllocated; }
182 void onHeadNodeFreed() { ++m_nHeadNodeFreed; }
183 void onNewBucket() { ++m_nBucketCount; }
184 void onRecursiveInitBucket() { ++m_nInitBucketRecursive; }
185 void onBucketInitContenton() { ++m_nInitBucketContention; }
186 void onBusyWaitBucketInit() { ++m_nBusyWaitBucketInit; }
187 void onBucketsExhausted() { ++m_nBucketsExhausted; }
/// Dummy queue statistics - no counting is performed, no overhead. Supports the same interface as \p split_list::stat
struct empty_stat
{
    //@cond
    void onInsertSuccess()       const {}
    void onInsertFailed()        const {}
    void onUpdateNew()           const {}
    void onUpdateExist()         const {}
    void onEraseSuccess()        const {}
    void onEraseFailed()         const {}
    void onExtractSuccess()      const {}
    void onExtractFailed()       const {}
    void onFindSuccess()         const {}
    void onFindFailed()          const {}
    // Pass-through: counts nothing, just returns the search result.
    bool onFind( bool bSuccess ) const { return bSuccess; }
    void onHeadNodeAllocated()   const {}
    void onHeadNodeFreed()       const {}
    void onNewBucket()           const {}
    void onRecursiveInitBucket() const {}
    void onBucketInitContenton() const {}
    void onBusyWaitBucketInit()  const {}
    void onBucketsExhausted()    const {}
    //@endcond
};
215 /// SplitListSet traits
220 Hash function converts the key fields of struct \p T stored in the split list
221 into hash value of type \p size_t that is an index in hash table.
222 By default, \p std::hash is used.
224 typedef opt::none hash;
228 The item counting is an important part of \p SplitListSet algorithm:
229 the <tt>empty()</tt> member function depends on correct item counting.
230 Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.
232 Default is \p cds::atomicity::item_counter.
234 typedef cds::atomicity::item_counter item_counter;
236 /// Bucket table allocator
238 Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
240 typedef CDS_DEFAULT_ALLOCATOR allocator;
242 /// Internal statistics (by default, disabled)
244 Possible statistics types are: \p split_list::stat (enable internal statistics),
245 \p split_list::empty_stat (the default, internal statistics disabled),
246 user-provided class that supports \p %split_list::stat interface.
248 typedef split_list::empty_stat stat;
251 /// C++ memory ordering model
253 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
254 or \p opt::v::sequential_consistent (sequentially consisnent memory model).
256 typedef opt::v::relaxed_ordering memory_model;
258 /// What type of bucket table is used
260 \p true - use \p split_list::expandable_bucket_table that can be expanded
261 if the load factor of the set is exhausted.
262 \p false - use \p split_list::static_bucket_table that cannot be expanded
263 and is allocated in \p SplitListSet constructor.
267 static const bool dynamic_bucket_table = true;
269 /// Back-off strategy
270 typedef cds::backoff::Default back_off;
272 /// Padding; default is cache-line padding
274 padding = cds::opt::cache_line_padding
277 /// Free-list of auxiliary nodes
279 The split-list contains auxiliary nodes marked the start of buckets.
280 To increase performance, there is a pool of preallocated aux nodes. The part of the pool is a free-list
284 - \p cds::intrusive::FreeList - if architecture and/or compiler does not support double-width CAS primitive
285 - \p cds::intrusive::TaggedFreeList - if architecture and/or compiler supports double-width CAS primitive
287 typedef FreeListImpl free_list;
/// [value-option] Split-list dynamic bucket table option
/**
    The option is used to select bucket table implementation.
    Possible values of \p Value are:
    - \p true - select \p expandable_bucket_table
    - \p false - select \p static_bucket_table
*/
template <bool Value>
struct dynamic_bucket_table
{
    //@cond
    template <typename Base> struct pack: public Base
    {
        enum { dynamic_bucket_table = Value };
    };
    //@endcond
};
308 /// Metafunction converting option list to \p split_list::traits
310 Available \p Options:
311 - \p opt::hash - mandatory option, specifies hash functor.
312 - \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter
314 - \p opt::memory_model - C++ memory model for atomic operations.
315 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
316 or \p opt::v::sequential_consistent (sequentially consistent memory model).
317 - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR.
318 - \p split_list::dynamic_bucket_table - use dynamic or static bucket table implementation.
319 Dynamic bucket table expands its size up to maximum bucket count when necessary
320 - \p opt::back_off - back-off strategy used for spinning, default is \p cds::backoff::Default.
321 - \p opt::stat - internal statistics, default is \p split_list::empty_stat (disabled).
322 To enable internal statistics use \p split_list::stat.
323 - \p opt::padding - a padding to solve false-sharing issues; default is cache-line padding
324 - \p opt::free_list - a free-list implementation, see \p traits::free_list
326 template <typename... Options>
328 typedef typename cds::opt::make_options< traits, Options...>::type type ; ///< Result of metafunction
331 /// Static bucket table
// NOTE(review): the extraction that produced this file dropped many physical
// lines here (function signatures, bodies, closing braces). Comments below
// annotate only the surviving lines; do not assume adjacent lines were
// contiguous in the original source.
333 Non-resizeable bucket table for \p SplitListSet class.
334 The capacity of table (max bucket count) is defined in the constructor call.
337 - \p GC - garbage collector
338 - \p Node - node type, must be a type based on \p split_list::node
339 - \p Options... - options
342 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
343 - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
344 - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-with CAS
345 otherwise \p FreeList.
347 template <typename GC, typename Node, typename... Options>
348 class static_bucket_table
351 struct default_options
353 typedef CDS_DEFAULT_ALLOCATOR allocator;
354 typedef opt::v::relaxed_ordering memory_model;
355 typedef FreeListImpl free_list;
357 typedef typename opt::make_options< default_options, Options... >::type options;
361 typedef GC gc; ///< Garbage collector
362 typedef Node node_type; ///< Bucket node type
363 typedef typename options::allocator allocator; ///< allocator
364 typedef typename options::memory_model memory_model; ///< Memory model for atomic operations
365 typedef typename options::free_list free_list; ///< Free-list
367 /// Auxiliary node type
// The aux node also inherits free_list::node so freed bucket heads can be
// recycled through m_freeList.
368 struct aux_node_type: public node_type, public free_list::node
371 typedef atomics::atomic<aux_node_type *> table_entry; ///< Table entry type
372 typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator
376 const size_t m_nLoadFactor; ///< load factor (average count of items per bucket)
377 const size_t m_nCapacity; ///< Bucket table capacity
378 table_entry * m_Table; ///< Bucket table
380 typedef typename allocator::template rebind< aux_node_type >::other aux_node_allocator;
382 aux_node_type* m_auxNode; ///< Array of pre-allocated auxiliary nodes
383 atomics::atomic<size_t> m_nAuxNodeAllocated; ///< how many auxiliary node allocated
384 free_list m_freeList; ///< Free list
// Allocates both the bucket-entry array (null-initialized) and the raw
// aux-node array; aux nodes are constructed lazily in alloc_aux_node().
389 void allocate_table()
391 m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr );
392 m_auxNode = aux_node_allocator().allocate( m_nCapacity );
// NOTE(review): the three lines below appear to be the body of the matching
// destroy_table() helper -- its signature line was lost in extraction.
397 m_freeList.clear( []( typename free_list::node* ) {} );
398 aux_node_allocator().deallocate( m_auxNode, m_nCapacity );
399 bucket_table_allocator().Delete( m_Table, m_nCapacity );
404 /// Constructs bucket table for 512K buckets. Load factor is 1.
405 static_bucket_table()
407 , m_nCapacity( 512 * 1024 )
408 , m_nAuxNodeAllocated( 0 )
413 /// Creates the table with specified size rounded up to nearest power-of-two
415 size_t nItemCount, ///< Max expected item count in split-ordered list
416 size_t nLoadFactor ///< Load factor
// A zero load factor is clamped to 1 to avoid division by zero below.
418 : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 )
419 , m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ))
420 , m_nAuxNodeAllocated( 0 )
422 // m_nCapacity must be power of 2
423 assert( cds::beans::is_power2( m_nCapacity ));
427 /// Destroys bucket table
428 ~static_bucket_table()
433 /// Returns head node of bucket \p nBucket
434 aux_node_type * bucket( size_t nBucket ) const
436 assert( nBucket < capacity());
437 return m_Table[ nBucket ].load(memory_model::memory_order_acquire);
440 /// Set \p pNode as a head of bucket \p nBucket
// Precondition (asserted): each bucket head is published exactly once,
// i.e. the slot must still be null.
441 void bucket( size_t nBucket, aux_node_type * pNode )
443 assert( nBucket < capacity());
444 assert( bucket( nBucket ) == nullptr );
446 m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );
449 /// Allocates auxiliary node; can return \p nullptr if the table exhausted
450 aux_node_type* alloc_aux_node()
// Fast path: relaxed pre-check, then fetch_add claims a slot; a racer that
// draws idx >= capacity() falls through to the free-list path below.
452 if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) {
453 // alloc next free node from m_auxNode
454 size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
455 if ( idx < capacity())
456 return new( &m_auxNode[idx] ) aux_node_type();
459 // get from free-list
460 auto pFree = m_freeList.get();
// NOTE(review): the null-check on pFree and the function's final
// "return nullptr" appear to have been lost in extraction.
462 return static_cast<aux_node_type*>( pFree );
468 /// Places node type to free-list
469 void free_aux_node( aux_node_type* p )
471 m_freeList.put( static_cast<aux_node_type*>( p ));
474 /// Returns the capacity of the bucket table
475 size_t capacity() const
480 /// Returns the load factor, i.e. average count of items per bucket
481 size_t load_factor() const
483 return m_nLoadFactor;
487 /// Expandable bucket table
// NOTE(review): many physical lines of this class were dropped by the source
// extraction (struct headers, branch bodies, closing braces). Comments below
// annotate only the surviving lines.
489 This bucket table can dynamically grow its capacity when necessary
490 up to maximum bucket count.
493 - \p GC - garbage collector
494 - \p Node - node type, must be derived from \p split_list::node
495 - \p Options... - options
498 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
499 - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
500 - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-with CAS
501 otherwise \p FreeList.
503 template <typename GC, typename Node, typename... Options>
504 class expandable_bucket_table
507 struct default_options
509 typedef CDS_DEFAULT_ALLOCATOR allocator;
510 typedef opt::v::relaxed_ordering memory_model;
511 typedef FreeListImpl free_list;
513 typedef typename opt::make_options< default_options, Options... >::type options;
517 typedef GC gc; ///< Garbage collector
518 typedef Node node_type; ///< Bucket node type
519 typedef typename options::allocator allocator; ///< allocator
521 /// Memory model for atomic operations
522 typedef typename options::memory_model memory_model;
525 typedef typename options::free_list free_list;
527 /// Auxiliary node type
528 class aux_node_type: public node_type, public free_list::node
533 typedef atomics::atomic<aux_node_type *> table_entry; ///< Table entry type
534 typedef atomics::atomic<table_entry *> segment_type; ///< Bucket table segment type
// Header of an aux-node segment; the aux_node_type array is laid out
// immediately after this header in the same raw allocation (see segment()).
536 struct aux_node_segment {
537 atomics::atomic< size_t > aux_node_count; // how many aux nodes allocated from the segment
538 aux_node_segment* next_segment;
539 // aux_node_type nodes[];
543 , next_segment( nullptr )
// "this + 1" = first byte past the header, i.e. the start of the node array
// co-allocated in allocate_aux_segment().
546 aux_node_type* segment()
548 return reinterpret_cast<aux_node_type*>( this + 1 );
552 /// Bucket table metrics
554 size_t nSegmentCount; ///< max count of segments in bucket table
555 size_t nSegmentSize; ///< the segment's capacity. The capacity must be power of two.
556 size_t nSegmentSizeLog2; ///< <tt> log2( m_nSegmentSize )</tt>
557 size_t nLoadFactor; ///< load factor
558 size_t nCapacity; ///< max capacity of bucket table
561 : nSegmentCount( 1024 )
562 , nSegmentSize( 512 )
563 , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ))
565 , nCapacity( nSegmentCount * nSegmentSize )
569 /// Bucket table allocator
570 typedef cds::details::Allocator< segment_type, allocator > bucket_table_allocator;
572 /// Bucket table segment allocator
573 typedef cds::details::Allocator< table_entry, allocator > segment_allocator;
575 // Aux node segment allocator
576 typedef typename allocator::template rebind<char>::other raw_allocator;
581 /// Constructs bucket table for 512K buckets. Load factor is 1.
582 expandable_bucket_table()
583 : m_metrics( calc_metrics( 512 * 1024, 1 ))
588 /// Creates the table with specified capacity rounded up to nearest power-of-two
589 expandable_bucket_table(
590 size_t nItemCount, ///< Max expected item count in split-ordered list
591 size_t nLoadFactor ///< Load factor
593 : m_metrics( calc_metrics( nItemCount, nLoadFactor ))
598 /// Destroys bucket table
// Teardown order: drain the free-list, release the aux-segment chain, then
// destroy every allocated table segment and finally the segment array itself.
599 ~expandable_bucket_table()
601 m_freeList.clear( []( typename free_list::node* ) {} );
603 for ( auto aux_segment = m_auxNodeList.load( atomics::memory_order_relaxed ); aux_segment; ) {
604 auto next_segment = aux_segment->next_segment;
605 free_aux_segment( aux_segment );
606 aux_segment = next_segment;
609 segment_type * pSegments = m_Segments;
610 for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) {
611 table_entry* pEntry = pSegments[i].load(memory_model::memory_order_relaxed);
612 if ( pEntry != nullptr )
613 destroy_segment( pEntry );
616 destroy_table( pSegments );
619 /// Returns head node of the bucket \p nBucket
// Bucket index splits into (segment number, offset within segment) because
// nSegmentSize is a power of two.
620 aux_node_type * bucket( size_t nBucket ) const
622 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
623 assert( nSegment < m_metrics.nSegmentCount );
625 table_entry* pSegment = m_Segments[ nSegment ].load(memory_model::memory_order_acquire);
626 if ( pSegment == nullptr )
627 return nullptr; // uninitialized bucket
628 return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load(memory_model::memory_order_acquire);
631 /// Set \p pNode as a head of bucket \p nBucket
// Lazily installs the segment with a CAS; the loser destroys its redundant
// segment, so at most one segment is ever published per slot.
632 void bucket( size_t nBucket, aux_node_type * pNode )
634 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
635 assert( nSegment < m_metrics.nSegmentCount );
637 segment_type& segment = m_Segments[nSegment];
638 if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
639 table_entry* pNewSegment = allocate_segment();
640 table_entry * pNull = nullptr;
641 if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed ))
642 destroy_segment( pNewSegment );
645 assert( segment.load( atomics::memory_order_relaxed )[nBucket & (m_metrics.nSegmentSize - 1)].load( atomics::memory_order_relaxed ) == nullptr );
646 segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
649 /// Allocates auxiliary node; can return \p nullptr if the table exhausted
// Allocation order: current aux segment -> free-list -> new aux segment
// (CAS-published; the losing thread recycles its segment and retries).
650 aux_node_type* alloc_aux_node()
653 aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_relaxed );
654 assert( aux_segment != nullptr );
656 // try to allocate from current aux segment
657 if ( aux_segment->aux_node_count.load( memory_model::memory_order_relaxed ) < m_metrics.nSegmentSize ) {
658 size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
659 if ( idx < m_metrics.nSegmentSize )
660 return new( aux_segment->segment() + idx ) aux_node_type();
663 // try allocate from free-list
664 auto pFree = m_freeList.get();
666 return static_cast<aux_node_type*>( pFree );
668 // free-list is empty, current segment is full
669 // try to allocate new aux segment
670 // We can allocate more aux segments than we need but it is not a problem in this context
671 aux_node_segment* new_aux_segment = allocate_aux_segment();
672 new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
673 if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ))
674 return new( new_aux_segment->segment()) aux_node_type();
676 free_aux_segment( new_aux_segment );
680 /// Places auxiliary node type to free-list
681 void free_aux_node( aux_node_type* p )
683 m_freeList.put( static_cast<aux_node_type*>( p ));
686 /// Returns the capacity of the bucket table
687 size_t capacity() const
689 return m_metrics.nCapacity;
692 /// Returns the load factor, i.e. average count of items per bucket
693 size_t load_factor() const
695 return m_metrics.nLoadFactor;
// Derives segment size/count from the expected item count; note several
// branch bodies of this function were lost in extraction.
700 metrics calc_metrics( size_t nItemCount, size_t nLoadFactor )
704 // Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
705 m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
707 size_t nBucketCount = (size_t)(((float)nItemCount) / m.nLoadFactor);
708 if ( nBucketCount <= 2 ) {
712 else if ( nBucketCount <= 1024 ) {
714 m.nSegmentSize = ((size_t)1) << beans::log2ceil( nBucketCount );
717 nBucketCount = beans::log2ceil( nBucketCount );
719 m.nSegmentSize = ((size_t)1) << (nBucketCount / 2);
720 if ( nBucketCount & 1 )
722 if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount )
725 m.nCapacity = m.nSegmentCount * m.nSegmentSize;
726 m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize );
727 assert( m.nSegmentSizeLog2 != 0 ); //
731 segment_type * allocate_table()
733 return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr );
736 void destroy_table( segment_type * pTable )
738 bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount );
741 table_entry* allocate_segment()
743 return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr );
746 void destroy_segment( table_entry* pSegment )
748 segment_allocator().Delete( pSegment, m_metrics.nSegmentSize );
// Header and node array are carved from one raw char allocation; the header
// is placement-constructed at the front.
751 aux_node_segment* allocate_aux_segment()
753 char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
754 return new(p) aux_node_segment();
757 void free_aux_segment( aux_node_segment* p )
759 raw_allocator().deallocate( reinterpret_cast<char*>( p ), sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
764 // m_nSegmentSize must be 2**N
765 assert( cds::beans::is_power2( m_metrics.nSegmentSize ));
766 assert( (((size_t)1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize );
768 // m_nSegmentCount must be 2**K
769 assert( cds::beans::is_power2( m_metrics.nSegmentCount ));
771 m_Segments = allocate_table();
772 m_auxNodeList = allocate_aux_segment();
778 metrics const m_metrics; ///< Dynamic bucket table metrics
779 segment_type* m_Segments; ///< bucket table - array of segments
780 atomics::atomic<aux_node_segment*> m_auxNodeList; ///< segment list of aux nodes
781 free_list m_freeList; ///< List of free aux nodes
788 template <bool Value, typename GC, typename Node, typename... Options>
789 struct bucket_table_selector;
791 template <typename GC, typename Node, typename... Options>
792 struct bucket_table_selector< true, GC, Node, Options...>
794 typedef expandable_bucket_table<GC, Node, Options...> type;
797 template <typename GC, typename Node, typename... Options>
798 struct bucket_table_selector< false, GC, Node, Options...>
800 typedef static_bucket_table<GC, Node, Options...> type;
803 template <typename Q>
804 struct search_value_type
809 search_value_type( Q& v, size_t h )
// Adapts an ordered list for use inside the split list; the bool parameter
// selects the iterable-list specialization.
815 template <class OrderedList, class Traits, bool Iterable >
816 class ordered_list_adapter;
// Specialization for NON-iterable ordered lists (node-traits based).
// NOTE(review): many physical lines are missing in this class (e.g. the
// "struct key_compare {" header before the first operator(), and the
// "return 0;" for the equal-hash dummy case) -- lost in extraction.
818 template <class OrderedList, class Traits>
819 class ordered_list_adapter< OrderedList, Traits, false >
821 typedef OrderedList native_ordered_list;
822 typedef Traits traits;
824 typedef typename native_ordered_list::gc gc;
825 typedef typename native_ordered_list::key_comparator native_key_comparator;
826 typedef typename native_ordered_list::node_type node_type;
827 typedef typename native_ordered_list::value_type value_type;
828 typedef typename native_ordered_list::node_traits native_node_traits;
829 typedef typename native_ordered_list::disposer native_disposer;
831 typedef split_list::node<node_type> splitlist_node_type;
// Compare by hash first; only same-hash regular (non-dummy) nodes fall
// through to the underlying list's key comparator.
834 int operator()( value_type const& v1, value_type const& v2 ) const
836 splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v1 ));
837 splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v2 ));
838 if ( n1->m_nHash != n2->m_nHash )
839 return n1->m_nHash < n2->m_nHash ? -1 : 1;
841 if ( n1->is_dummy()) {
842 assert( n2->is_dummy());
846 assert( !n1->is_dummy() && !n2->is_dummy());
848 return native_key_comparator()(v1, v2);
851 template <typename Q>
852 int operator()( value_type const& v, search_value_type<Q> const& q ) const
854 splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
855 if ( n->m_nHash != q.nHash )
856 return n->m_nHash < q.nHash ? -1 : 1;
858 assert( !n->is_dummy());
859 return native_key_comparator()(v, q.val);
// Symmetric overload expressed via negation of the overload above.
862 template <typename Q>
863 int operator()( search_value_type<Q> const& q, value_type const& v ) const
865 return -operator()( v, q );
869 struct wrapped_disposer
871 void operator()( value_type * v )
873 splitlist_node_type * p = static_cast<splitlist_node_type *>(native_node_traits::to_node_ptr( v ));
875 native_disposer()(v);
880 typedef node_type ordered_list_node_type;
881 typedef splitlist_node_type aux_node;
// Node-traits facade that re-types the native list node as the split-list
// node in every conversion direction.
883 struct node_traits: private native_node_traits
885 typedef native_node_traits base_class; ///< Base ordered list node type
886 typedef typename base_class::value_type value_type; ///< Value type
887 typedef typename base_class::node_type base_node_type; ///< Ordered list node type
888 typedef node<base_node_type> node_type; ///< Split-list node type
890 /// Convert value reference to node pointer
891 static node_type * to_node_ptr( value_type& v )
893 return static_cast<node_type *>(base_class::to_node_ptr( v ));
896 /// Convert value pointer to node pointer
897 static node_type * to_node_ptr( value_type * v )
899 return static_cast<node_type *>(base_class::to_node_ptr( v ));
902 /// Convert value reference to node pointer (const version)
903 static node_type const * to_node_ptr( value_type const& v )
905 return static_cast<node_type const*>(base_class::to_node_ptr( v ));
908 /// Convert value pointer to node pointer (const version)
909 static node_type const * to_node_ptr( value_type const * v )
911 return static_cast<node_type const *>(base_class::to_node_ptr( v ));
914 /// Convert node reference to value pointer
915 static value_type * to_value_ptr( node_type& n )
917 return base_class::to_value_ptr( static_cast<base_node_type &>(n));
920 /// Convert node pointer to value pointer
921 static value_type * to_value_ptr( node_type * n )
923 return base_class::to_value_ptr( static_cast<base_node_type *>(n));
926 /// Convert node reference to value pointer (const version)
927 static const value_type * to_value_ptr( node_type const & n )
929 return base_class::to_value_ptr( static_cast<base_node_type const &>(n));
932 /// Convert node pointer to value pointer (const version)
933 static const value_type * to_value_ptr( node_type const * n )
935 return base_class::to_value_ptr( static_cast<base_node_type const *>(n));
// Wraps a user-supplied Less into the hash-first three-way comparator shape
// expected by the split list.
939 template <typename Less>
940 struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
942 typedef cds::opt::details::make_comparator_from_less<Less> base_class;
944 template <typename Q>
945 int operator()( value_type const& v, search_value_type<Q> const& q ) const
947 splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
948 if ( n->m_nHash != q.nHash )
949 return n->m_nHash < q.nHash ? -1 : 1;
951 assert( !n->is_dummy());
952 return base_class()(v, q.val);
955 template <typename Q>
956 int operator()( search_value_type<Q> const& q, value_type const& v ) const
958 splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
959 if ( n->m_nHash != q.nHash )
960 return q.nHash < n->m_nHash ? -1 : 1;
962 assert( !n->is_dummy());
963 return base_class()(q.val, v);
966 int operator()( value_type const& lhs, value_type const& rhs ) const
968 splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( lhs ));
969 splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( rhs ));
970 if ( n1->m_nHash != n2->m_nHash )
971 return n1->m_nHash < n2->m_nHash ? -1 : 1;
973 if ( n1->is_dummy()) {
974 assert( n2->is_dummy());
978 assert( !n1->is_dummy() && !n2->is_dummy());
980 return native_key_comparator()( lhs, rhs );
// Rebinds the native list traits with the split-list comparator, disposer
// and boundary (aux) node type.
984 typedef typename native_ordered_list::template rebind_traits<
985 opt::compare< key_compare >
986 , opt::disposer< wrapped_disposer >
987 , opt::boundary_node_type< splitlist_node_type >
// Specialization for ITERABLE ordered lists: values themselves derive from
// hash_node, so no node-traits indirection is needed.
// NOTE(review): physical lines are missing throughout (closing braces,
// "return 0;" in equal-hash dummy branches) -- lost in extraction.
991 template <class OrderedList, class Traits>
992 class ordered_list_adapter< OrderedList, Traits, true >
994 typedef OrderedList native_ordered_list;
995 typedef Traits traits;
997 typedef typename native_ordered_list::gc gc;
998 typedef typename native_ordered_list::key_comparator native_key_comparator;
999 typedef typename native_ordered_list::value_type value_type;
1000 typedef typename native_ordered_list::disposer native_disposer;
// Hash-first three-way comparator; equal-hash regular nodes defer to the
// native key comparator.
1002 struct key_compare {
1003 int operator()( value_type const& v1, value_type const& v2 ) const
1005 hash_node const& n1 = static_cast<hash_node const&>( v1 );
1006 hash_node const& n2 = static_cast<hash_node const&>( v2 );
1007 if ( n1.m_nHash != n2.m_nHash )
1008 return n1.m_nHash < n2.m_nHash ? -1 : 1;
1010 if ( n1.is_dummy()) {
1011 assert( n2.is_dummy());
1015 assert( !n1.is_dummy() && !n2.is_dummy());
1017 return native_key_comparator()(v1, v2);
1020 template <typename Q>
1021 int operator()( value_type const& v, search_value_type<Q> const& q ) const
1023 hash_node const& n = static_cast<hash_node const&>( v );
1024 if ( n.m_nHash != q.nHash )
1025 return n.m_nHash < q.nHash ? -1 : 1;
1027 assert( !n.is_dummy());
1028 return native_key_comparator()(v, q.val);
1031 template <typename Q>
1032 int operator()( search_value_type<Q> const& q, value_type const& v ) const
1034 return -operator()( v, q );
// The disposer is only applied to real values; dummy bucket heads are
// managed by the bucket table, not by the user disposer.
1038 struct wrapped_disposer
1040 void operator()( value_type * v )
1042 if ( !static_cast<hash_node*>( v )->is_dummy())
1043 native_disposer()( v );
1048 typedef void ordered_list_node_type;
1050 struct aux_node: public native_ordered_list::node_type, public hash_node
1054 typedef typename native_ordered_list::node_type list_node_type;
// Publishes this aux node as the list node's data pointer (release order so
// concurrent readers see a fully-constructed node).
1056 list_node_type::data.store( typename list_node_type::marked_data_ptr(
1057 static_cast<value_type*>( static_cast<hash_node *>( this ))),
1058 atomics::memory_order_release
// For iterable lists the "node" IS the value: conversions are plain casts.
1065 static hash_node * to_node_ptr( value_type& v )
1067 return static_cast<hash_node *>( &v );
1070 static hash_node * to_node_ptr( value_type * v )
1072 return static_cast<hash_node *>( v );
1075 static hash_node const * to_node_ptr( value_type const& v )
1077 return static_cast<hash_node const*>( &v );
1080 static hash_node const * to_node_ptr( value_type const * v )
1082 return static_cast<hash_node const *>( v );
1086 template <typename Less>
1087 struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
1089 typedef cds::opt::details::make_comparator_from_less<Less> base_class;
1091 template <typename Q>
1092 int operator()( value_type const& v, search_value_type<Q> const& q ) const
1094 hash_node const& n = static_cast<hash_node const&>( v );
1095 if ( n.m_nHash != q.nHash )
1096 return n.m_nHash < q.nHash ? -1 : 1;
1098 assert( !n.is_dummy());
1099 return base_class()(v, q.val);
1102 template <typename Q>
1103 int operator()( search_value_type<Q> const& q, value_type const& v ) const
1105 hash_node const& n = static_cast<hash_node const&>( v );
1106 if ( n.m_nHash != q.nHash )
1107 return q.nHash < n.m_nHash ? -1 : 1;
1109 assert( !n.is_dummy());
1110 return base_class()(q.val, v);
1113 int operator()( value_type const& lhs, value_type const& rhs ) const
1115 hash_node const& n1 = static_cast<hash_node const&>( lhs );
1116 hash_node const& n2 = static_cast<hash_node const&>( rhs );
1117 if ( n1.m_nHash != n2.m_nHash )
1118 return n1.m_nHash < n2.m_nHash ? -1 : 1;
1120 if ( n1.is_dummy()) {
1121 assert( n2.is_dummy());
1125 assert( !n1.is_dummy() && !n2.is_dummy());
1127 return base_class()( lhs, rhs );
// Rebinds native list traits; the boundary node type here is aux_node.
1131 typedef typename native_ordered_list::template rebind_traits<
1132 opt::compare< key_compare >
1133 , opt::disposer< wrapped_disposer >
1134 , opt::boundary_node_type< aux_node >
1138 template <class OrderedList, class Traits>
1139 using rebind_list_traits = ordered_list_adapter< OrderedList, Traits, is_iterable_list<OrderedList>::value >;
/// Metafunction yielding the list's mutable or const iterator type,
/// depending on the \p IsConst flag
template <typename OrderedList, bool IsConst>
struct select_list_iterator;

// Mutable iterator
template <typename OrderedList>
struct select_list_iterator<OrderedList, false>
{
    typedef typename OrderedList::iterator type;
};

// Const iterator
template <typename OrderedList>
struct select_list_iterator<OrderedList, true>
{
    typedef typename OrderedList::const_iterator type;
};
1156 template <typename NodeTraits, typename OrderedList, bool IsConst>
1159 typedef OrderedList ordered_list_type;
1160 friend class iterator_type <NodeTraits, OrderedList, !IsConst >;
1163 typedef typename select_list_iterator<ordered_list_type, IsConst>::type list_iterator;
1164 typedef NodeTraits node_traits;
1167 list_iterator m_itCur;
1168 list_iterator m_itEnd;
1171 typedef typename list_iterator::value_ptr value_ptr;
1172 typedef typename list_iterator::value_ref value_ref;
1178 iterator_type( iterator_type const& src )
1179 : m_itCur( src.m_itCur )
1180 , m_itEnd( src.m_itEnd )
1183 // This ctor should be protected...
1184 iterator_type( list_iterator itCur, list_iterator itEnd )
1189 while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy())
1193 value_ptr operator ->() const
1195 return m_itCur.operator->();
1198 value_ref operator *() const
1200 return m_itCur.operator*();
1204 iterator_type& operator ++()
1206 if ( m_itCur != m_itEnd ) {
1209 } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy());
1214 iterator_type& operator = (iterator_type const& src)
1216 m_itCur = src.m_itCur;
1217 m_itEnd = src.m_itEnd;
1222 bool operator ==(iterator_type<node_traits, ordered_list_type, C> const& i ) const
1224 return m_itCur == i.m_itCur;
1227 bool operator !=(iterator_type<node_traits, ordered_list_type, C> const& i ) const
1229 return m_itCur != i.m_itCur;
1232 } // namespace details
1238 /// Reverses bit order in \p nHash
1239 static inline size_t reverse_bits( size_t nHash )
1241 return bitop::RBO( nHash );
1244 static inline size_t regular_hash( size_t nHash )
1246 return reverse_bits( nHash ) | size_t(1);
1249 static inline size_t dummy_hash( size_t nHash )
1251 return reverse_bits( nHash ) & ~size_t(1);
1255 } // namespace split_list
1258 // Forward declaration
1259 template <class GC, class OrderedList, class Traits = split_list::traits>
1263 }} // namespace cds::intrusive
1265 #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H