X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=cds%2Fintrusive%2Fdetails%2Fsplit_list_base.h;h=ce7c299ef57b9cbba1777676a9ee03cc1ee9f8fe;hb=fb39f01da6fceb1d350bc0cae8ff2ed22b3c128b;hp=d24ddb3fce52e8379326d5f2c2fc06466508df33;hpb=2402fb1beb25ec532cea91c8dfbb9425eb5bdf48;p=libcds.git

diff --git a/cds/intrusive/details/split_list_base.h b/cds/intrusive/details/split_list_base.h
index d24ddb3f..ce7c299e 100644
--- a/cds/intrusive/details/split_list_base.h
+++ b/cds/intrusive/details/split_list_base.h
@@ -1,7 +1,7 @@
 /*
     This file is a part of libcds - Concurrent Data Structures library
 
-    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
 
     Source code repo: http://github.com/khizmax/libcds/
     Download: http://sourceforge.net/projects/libcds/files/
@@ -33,11 +33,13 @@
 #include <cds/intrusive/details/base.h>
 #include <cds/algo/atomic.h>
+#include <cds/algo/bit_reversal.h>
 #include <cds/details/allocator.h>
 #include <cds/algo/int_algo.h>
 #include <cds/algo/bitop.h>
 #include <cds/opt/hash.h>
 #include <cds/intrusive/free_list_selector.h>
+#include <cds/details/size_t_cast.h>
 
 namespace cds { namespace intrusive {
 
@@ -58,7 +60,7 @@ namespace cds { namespace intrusive {
             }
 
             /// Initializes dummy node with \p nHash value
-            hash_node( size_t nHash )
+            explicit hash_node( size_t nHash )
                 : m_nHash( nHash )
             {
                 assert( is_dummy());
@@ -92,7 +94,7 @@ namespace cds { namespace intrusive {
             }
 
             /// Initializes dummy node with \p nHash value
-            node( size_t nHash )
+            explicit node( size_t nHash )
                 : hash_node( nHash )
             {
                 assert( is_dummy());
@@ -118,7 +120,7 @@ namespace cds { namespace intrusive {
             }
 
             /// Initializes dummy node with \p nHash value
-            node( size_t nHash )
+            explicit node( size_t nHash )
                 : hash_node( nHash )
             {
                 assert( is_dummy());
@@ -212,6 +214,22 @@ namespace cds { namespace intrusive {
             //@endcond
         };
 
+        /// Option to control the bit reversal algorithm
+        /**
+            Bit reversal is a significant part of split-list.
+            \p Type can be one of the predefined algorithms in the \p cds::algo::bit_reversal namespace.
+        */
+        template <typename Type>
+        struct bit_reversal {
+            //@cond
+            template <typename Base>
+            struct pack: public Base
+            {
+                typedef Type bit_reversal;
+            };
+            //@endcond
+        };
+
         /// SplitListSet traits
         struct traits
         {
@@ -223,13 +241,24 @@ namespace cds { namespace intrusive {
             */
             typedef opt::none hash;
 
+            /// Bit reversal algorithm
+            /**
+                Bit reversal is a significant part of split-list.
+                There are several predefined algorithms in the \p cds::algo::bit_reversal namespace;
+                \p cds::algo::bit_reversal::lookup is the best general-purpose one.
+
+                More efficient bit reversal algorithms exist for particular processor architectures,
+                for example, ones based on the x86 SIMD/AVX instruction set.
+            */
+            typedef cds::algo::bit_reversal::lookup bit_reversal;
+
             /// Item counter
             /**
                 The item counting is an important part of the \p SplitListSet algorithm:
                 the \p empty() member function depends on correct item counting.
                 Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.
 
-                Default is \p cds::atomicity::item_counter.
+                Default is \p cds::atomicity::item_counter; to avoid false sharing you may use \p atomicity::cache_friendly_item_counter.
             */
             typedef cds::atomicity::item_counter item_counter;
@@ -309,6 +338,8 @@ namespace cds { namespace intrusive {
         /**
             Available \p Options:
             - \p opt::hash - mandatory option, specifies hash functor.
+            - \p split_list::bit_reversal - bit reversal algorithm, see \p traits::bit_reversal for explanation;
+                default is \p cds::algo::bit_reversal::lookup
             - \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter
                 for default type.
             - \p opt::memory_model - C++ memory model for atomic operations.
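Review note: for context, this is how the new option is meant to be consumed on the user side. A minimal sketch; the trait and option names come from the hunks above, while my_hash and the choice of cds::algo::bit_reversal::swar as one of the predefined algorithms are assumptions for illustration:

    #include <functional>
    #include <cds/algo/bit_reversal.h>
    #include <cds/intrusive/details/split_list_base.h>

    // Hypothetical user-side hash functor (not part of this patch):
    struct my_hash {
        size_t operator()( int v ) const { return std::hash<int>()( v ); }
    };

    // Traits selecting a non-default bit reversal algorithm via the new typedef:
    struct my_traits: public cds::intrusive::split_list::traits {
        typedef my_hash hash;
        typedef cds::algo::bit_reversal::swar bit_reversal;  // instead of the default lookup
    };

Presumably the same choice can be expressed through the new option wrapper, i.e. passing cds::intrusive::split_list::bit_reversal< cds::algo::bit_reversal::swar > to make_traits.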
@@ -366,7 +397,16 @@ namespace cds { namespace intrusive {
 
             /// Auxiliary node type
             struct aux_node_type: public node_type, public free_list::node
-            {};
+            {
+#           ifdef CDS_DEBUG
+                atomics::atomic<bool> m_busy;
+
+                aux_node_type()
+                {
+                    m_busy.store( false, atomics::memory_order_release );
+                }
+#           endif
+            };
 
             typedef atomics::atomic<aux_node_type*> table_entry;  ///< Table entry type
             typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator
@@ -452,8 +492,10 @@ namespace cds { namespace intrusive {
                 if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) {
                     // alloc next free node from m_auxNode
                     size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
-                    if ( idx < capacity())
+                    if ( idx < capacity() ) {
+                        CDS_TSAN_ANNOTATE_NEW_MEMORY( &m_auxNode[idx], sizeof( aux_node_type ) );
                         return new( &m_auxNode[idx] ) aux_node_type();
+                    }
                 }
 
                 // get from free-list
@@ -525,8 +567,17 @@ namespace cds { namespace intrusive {
             typedef typename options::free_list free_list;
 
             /// Auxiliary node type
-            class aux_node_type: public node_type, public free_list::node
-            {};
+            struct aux_node_type: public node_type, public free_list::node
+            {
+#           ifdef CDS_DEBUG
+                atomics::atomic<bool> m_busy;
+
+                aux_node_type()
+                {
+                    m_busy.store( false, atomics::memory_order_release );
+                }
+#           endif
+            };
 
         protected:
             //@cond
@@ -539,9 +590,10 @@ namespace cds { namespace intrusive {
                 // aux_node_type nodes[];
 
                 aux_node_segment()
-                    : aux_node_count(0)
-                    , next_segment( nullptr )
-                {}
+                    : next_segment( nullptr )
+                {
+                    aux_node_count.store( 0, atomics::memory_order_release );
+                }
 
                 aux_node_type* segment()
                 {
@@ -638,25 +690,29 @@ namespace cds { namespace intrusive {
                 if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
                     table_entry* pNewSegment = allocate_segment();
                     table_entry * pNull = nullptr;
-                    if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
+                    if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed ))
                         destroy_segment( pNewSegment );
-                    }
                 }
+
+                assert( segment.load( atomics::memory_order_relaxed )[nBucket & (m_metrics.nSegmentSize - 1)].load( atomics::memory_order_relaxed ) == nullptr );
                 segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
             }
 
             /// Allocates auxiliary node; can return \p nullptr if the table exhausted
             aux_node_type* alloc_aux_node()
             {
+                aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_acquire );
+
                 for ( ;; ) {
-                    aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_relaxed );
                     assert( aux_segment != nullptr );
 
                     // try to allocate from current aux segment
-                    if ( aux_segment->aux_node_count.load( memory_model::memory_order_relaxed ) < m_metrics.nSegmentSize ) {
+                    if ( aux_segment->aux_node_count.load( memory_model::memory_order_acquire ) < m_metrics.nSegmentSize ) {
                         size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
-                        if ( idx < m_metrics.nSegmentSize )
+                        if ( idx < m_metrics.nSegmentSize ) {
+                            CDS_TSAN_ANNOTATE_NEW_MEMORY( aux_segment->segment() + idx, sizeof( aux_node_type ) );
                             return new( aux_segment->segment() + idx ) aux_node_type();
+                        }
                    }
 
                    // try allocate from free-list
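Review note: both allocation paths above use the same lock-free "bump counter" idiom that the TSAN annotations instrument: fetch_add reserves an index, and indices past the capacity are simply rejected, so the counter may overshoot but never hands out a slot twice. A self-contained sketch of that idiom, simplified from the patch (std::atomic instead of the library's memory_model wrappers, fixed-size pool, no free-list fallback; all names illustrative):

    #include <atomic>
    #include <cstddef>
    #include <new>

    template <typename Node, std::size_t Capacity>
    class bump_pool {
        alignas( Node ) unsigned char storage_[ Capacity * sizeof( Node ) ];
        std::atomic<std::size_t> allocated_{ 0 };
    public:
        // Reserve a slot with fetch_add; each index below Capacity is
        // handed out exactly once, even under concurrent callers.
        Node* alloc() {
            if ( allocated_.load( std::memory_order_relaxed ) < Capacity ) {
                std::size_t idx = allocated_.fetch_add( 1, std::memory_order_relaxed );
                if ( idx < Capacity )
                    return new( storage_ + idx * sizeof( Node ) ) Node();
            }
            return nullptr;  // exhausted; the patch falls back to a free-list here
        }
    };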
@@ -668,9 +724,13 @@ namespace cds { namespace intrusive {
                    // try to allocate new aux segment
                    // We can allocate more aux segments than we need but it is not a problem in this context
                    aux_node_segment* new_aux_segment = allocate_aux_segment();
+
                    new_aux_segment->next_segment = aux_segment;
                    new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
-                   if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ))
-                       return new( new_aux_segment->segment()) aux_node_type();
+
+                   if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire ) ) {
+                       CDS_TSAN_ANNOTATE_NEW_MEMORY( new_aux_segment->segment(), sizeof( aux_node_type ) );
+                       return new( new_aux_segment->segment() ) aux_node_type();
+                   }
 
                    free_aux_segment( new_aux_segment );
                }
@@ -703,7 +763,7 @@ namespace cds { namespace intrusive {
                // Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
                m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
 
-               size_t nBucketCount = (size_t)(((float)nItemCount) / m.nLoadFactor);
+               size_t nBucketCount = ( nItemCount + m.nLoadFactor - 1 ) / m.nLoadFactor;
                if ( nBucketCount <= 2 ) {
                    m.nSegmentCount = 1;
                    m.nSegmentSize = 2;
@@ -750,6 +810,7 @@ namespace cds { namespace intrusive {
            aux_node_segment* allocate_aux_segment()
            {
                char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
+               CDS_TSAN_ANNOTATE_NEW_MEMORY( p, sizeof( aux_node_segment ) );
                return new(p) aux_node_segment();
            }
@@ -962,10 +1023,21 @@ namespace cds { namespace intrusive {
                    return base_class()(q.val, v);
                }
 
-               template <typename Q1, typename Q2>
-               int operator()( Q1 const& v1, Q2 const& v2 ) const
+               int operator()( value_type const& lhs, value_type const& rhs ) const
                {
-                   return base_class()(v1, v2);
+                   splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>( native_node_traits::to_node_ptr( lhs ));
+                   splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>( native_node_traits::to_node_ptr( rhs ));
+                   if ( n1->m_nHash != n2->m_nHash )
+                       return n1->m_nHash < n2->m_nHash ? -1 : 1;
+
+                   if ( n1->is_dummy()) {
+                       assert( n2->is_dummy());
+                       return 0;
+                   }
+
+                   assert( !n1->is_dummy() && !n2->is_dummy());
+
+                   return native_key_comparator()( lhs, rhs );
                }
            };
@@ -1027,9 +1099,8 @@ namespace cds { namespace intrusive {
                {
                    void operator()( value_type * v )
                    {
-                       hash_node* p = static_cast<hash_node*>( v );
-                       if ( !p->is_dummy())
-                           native_disposer()(v);
+                       if ( !static_cast<hash_node*>( v )->is_dummy())
+                           native_disposer()( v );
                    }
                };
@@ -1041,6 +1112,7 @@ namespace cds { namespace intrusive {
                aux_node()
                {
                    typedef typename native_ordered_list::node_type list_node_type;
+
                    list_node_type::data.store( typename list_node_type::marked_data_ptr(
                        static_cast<value_type*>( static_cast<node_type*>( this ))),
                        atomics::memory_order_release );
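Review note: the aux-segment publication above is the classic "allocate, CAS-install, discard on failure" pattern, the same shape bucket() uses for table segments; on CAS failure the loop retries with the winner's segment, which the failed compare_exchange has already loaded into aux_segment. A minimal standalone sketch of the pattern (std::atomic, illustrative names, not the patch's types):

    #include <atomic>

    // Publish-or-discard: many threads may race to install the first value;
    // exactly one CAS succeeds, the losers free their private copy and reuse
    // the winner's pointer, which the failed CAS left in 'expected'.
    template <typename T>
    T* get_or_install( std::atomic<T*>& slot ) {
        T* cur = slot.load( std::memory_order_acquire );
        if ( cur )
            return cur;
        T* fresh = new T();
        T* expected = nullptr;
        if ( slot.compare_exchange_strong( expected, fresh,
                 std::memory_order_release, std::memory_order_acquire ))
            return fresh;
        delete fresh;       // lost the race
        return expected;    // the winner's pointer
    }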
@@ -1098,10 +1170,21 @@ namespace cds { namespace intrusive {
                    return base_class()(q.val, v);
                }
 
-               template <typename Q1, typename Q2>
-               int operator()( Q1 const& v1, Q2 const& v2 ) const
+               int operator()( value_type const& lhs, value_type const& rhs ) const
                {
-                   return base_class()(v1, v2);
+                   hash_node const& n1 = static_cast<hash_node const&>( lhs );
+                   hash_node const& n2 = static_cast<hash_node const&>( rhs );
+                   if ( n1.m_nHash != n2.m_nHash )
+                       return n1.m_nHash < n2.m_nHash ? -1 : 1;
+
+                   if ( n1.is_dummy()) {
+                       assert( n2.is_dummy());
+                       return 0;
+                   }
+
+                   assert( !n1.is_dummy() && !n2.is_dummy());
+
+                   return base_class()( lhs, rhs );
                }
            };
@@ -1205,27 +1288,28 @@ namespace cds { namespace intrusive {
                {
                    return m_itCur != i.m_itCur;
                }
+
+           protected:
+               list_iterator const& underlying_iterator() const
+               {
+                   return m_itCur;
+               }
            };
        } // namespace details
        //@endcond
 
        //@cond
        // Helper functions
-
-       /// Reverses bit order in \p nHash
-       static inline size_t reverse_bits( size_t nHash )
-       {
-           return bitop::RBO( nHash );
-       }
-
+       template <typename BitReversalAlgo>
        static inline size_t regular_hash( size_t nHash )
        {
-           return reverse_bits( nHash ) | size_t(1);
+           return static_cast<size_t>( BitReversalAlgo()( cds::details::size_t_cast( nHash ))) | size_t(1);
        }
 
+       template <typename BitReversalAlgo>
        static inline size_t dummy_hash( size_t nHash )
        {
-           return reverse_bits( nHash ) & ~size_t(1);
+           return static_cast<size_t>( BitReversalAlgo()( cds::details::size_t_cast( nHash ))) & ~size_t(1);
        }
        //@endcond
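Review note: the low-bit tagging above is why the new comparators can order nodes by m_nHash first. Split-ordered lists key regular nodes on reverse(hash) | 1 and dummy (bucket) nodes on reverse(hash) & ~1, so a bucket's dummy always sorts immediately before that bucket's regular nodes. A tiny self-contained sketch of the same keying scheme, using a naive bit reversal as a stand-in for the pluggable cds::algo::bit_reversal algorithms this patch introduces (names illustrative):

    #include <cstddef>

    // Naive bit reversal: stand-in for the BitReversalAlgo template parameter
    // (lookup table, SWAR, ...) selected via the new trait.
    inline std::size_t reverse_bits( std::size_t v ) {
        std::size_t r = 0;
        for ( unsigned i = 0; i < sizeof( v ) * 8; ++i, v >>= 1 )
            r = ( r << 1 ) | ( v & 1 );
        return r;
    }

    // Regular nodes get the low bit set, dummy (bucket) nodes get it cleared,
    // so in split-order: dummy_key(h) < regular_key(h) for any hash h.
    inline std::size_t regular_key( std::size_t h ) { return reverse_bits( h ) | std::size_t(1); }
    inline std::size_t dummy_key( std::size_t h )   { return reverse_bits( h ) & ~std::size_t(1); }

Making the algorithm a template parameter here (rather than the old fixed bitop::RBO helper) is what lets traits::bit_reversal reach all the way down into these key computations.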