+ void free_aux_node( aux_node_type * p )
+ {
+ m_Buckets.free_aux_node( p );
+ m_Stat.onHeadNodeFreed();
+ }
+
+ /// Calculates hash value of \p key
+ template <typename Q>
+ size_t hash_value( Q const& key ) const
+ {
+ return m_HashFunctor( key );
+ }
+
+ size_t bucket_no( size_t nHash ) const
+ {
+ return nHash & ((1 << m_nBucketCountLog2.load( memory_model::memory_order_relaxed )) - 1);
+ }
+
+ static size_t parent_bucket( size_t nBucket )
+ {
+ assert( nBucket > 0 );
+ return nBucket & ~(1 << bitop::MSBnz( nBucket ));
+ }
+
/// Lazily initializes bucket \p nBucket, returning its aux (dummy) node
/**
    Ensures the parent bucket is initialized first (recursively; depth is
    bounded by the bit width of the bucket index), then races with other
    threads to allocate an aux node and link it into the ordered list
    after the parent's aux node. Never returns \p nullptr.
*/
aux_node_type * init_bucket( size_t const nBucket )
{
    assert( nBucket > 0 );
    size_t nParent = parent_bucket( nBucket );

    // Make sure the parent bucket's dummy node exists before linking ours
    aux_node_type * pParentBucket = m_Buckets.bucket( nParent );
    if ( pParentBucket == nullptr ) {
        pParentBucket = init_bucket( nParent );
        m_Stat.onRecursiveInitBucket();
    }

    assert( pParentBucket != nullptr );

    // Re-read: another thread may have initialized this bucket meanwhile
    aux_node_type * pBucket = m_Buckets.bucket( nBucket );

    back_off bkoff;
    for ( ;; pBucket = m_Buckets.bucket( nBucket )) {
        if ( pBucket )
            return pBucket;

        pBucket = alloc_aux_node( split_list::dummy_hash<bit_reversal>( nBucket ));
        if ( pBucket ) {
            if ( m_List.insert_aux_node( pParentBucket, pBucket )) {
                // We won the race: publish the aux node in the bucket table
                m_Buckets.bucket( nBucket, pBucket );
                m_Stat.onNewBucket();
                return pBucket;
            }

            // Another thread linked its own aux node for this bucket first.
            // Release ours, then wait below until the winner publishes its
            // node in the bucket table.
            free_aux_node( pBucket );
            m_Stat.onBucketInitContenton();
            break;
        }

        // No free aux node is available - the aux-node pool is exhausted.
        // Back off and retry: either another thread initializes the bucket,
        // or a freed aux node becomes available.
        m_Stat.onBucketsExhausted();
        bkoff();
    }

    // Busy-wait until the winning thread stores its aux node into the table
    for ( pBucket = m_Buckets.bucket( nBucket ); pBucket == nullptr; pBucket = m_Buckets.bucket( nBucket )) {
        bkoff();
        m_Stat.onBusyWaitBucketInit();
    }

    return pBucket;
}
+
+ aux_node_type * get_bucket( size_t nHash )
+ {
+ size_t nBucket = bucket_no( nHash );
+
+ aux_node_type * pHead = m_Buckets.bucket( nBucket );
+ if ( pHead == nullptr )
+ pHead = init_bucket( nBucket );
+
+ assert( pHead->is_dummy());
+
+ return pHead;
+ }
+
+ void init()
+ {
+ // Initialize bucket 0
+ aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash<bit_reversal>(0)*/ );
+
+ // insert_aux_node cannot return false for empty list
+ CDS_VERIFY( m_List.insert_aux_node( pNode ));
+
+ m_Buckets.bucket( 0, pNode );
+ }
+
/// Upper bound of items the table may hold before it must grow:
/// bucket count multiplied by the load factor.
static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor )
{
    size_t const nLimit = nLoadFactor * nBucketCount;
    return nLimit;
}
+
/// Increments the item counter and grows the bucket table when the
/// load-factor threshold is exceeded (doubles the bucket count, up to
/// the table capacity).
void inc_item_count()
{
    size_t nMaxCount = m_nMaxItemCount.load( memory_model::memory_order_relaxed );
    if ( ++m_ItemCounter <= nMaxCount )
        return;

    size_t sz = m_nBucketCountLog2.load( memory_model::memory_order_relaxed );
    const size_t nBucketCount = static_cast<size_t>(1) << sz;
    if ( nBucketCount < m_Buckets.capacity()) {
        // we may grow the bucket table
        const size_t nLoadFactor = m_Buckets.load_factor();
        if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor ))
            return; // someone has already updated m_nBucketCountLog2, so stop here

        // CAS failures here are benign and deliberately not retried:
        // a failure means a concurrent thread performed the same resize.
        m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ),
            memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
        m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
    }
    else
        // Table is at capacity: saturate the threshold so this slow path
        // is never entered again.
        m_nMaxItemCount.store( std::numeric_limits<size_t>::max(), memory_model::memory_order_relaxed );
}
+ //@endcond
+
protected:
    //@cond
    // Padding between hot members, per traits, to reduce false sharing
    static unsigned const c_padding = cds::opt::actual_padding< traits::padding >::value;

    typedef typename cds::details::type_padding< bucket_table, c_padding >::type padded_bucket_table;
    padded_bucket_table     m_Buckets;          ///< bucket table (maps bucket index to aux node)

    typedef typename cds::details::type_padding< ordered_list_wrapper, c_padding >::type padded_ordered_list;
    padded_ordered_list     m_List;             ///< ordered list containing split-list items

    atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
    atomics::atomic<size_t> m_nMaxItemCount;    ///< number of items the container can hold before it has to resize
    hash                    m_HashFunctor;      ///< hash functor
    item_counter            m_ItemCounter;      ///< item counter
    stat                    m_Stat;             ///< internal statistics