Fixed ambiguity in casting size_t to an unsigned type (found on AIX 32-bit target)
[libcds.git] / cds / intrusive / details / split_list_base.h
index 76b769132a5cf7a3a114aeecaf5d3465af751607..ce7c299ef57b9cbba1777676a9ee03cc1ee9f8fe 100644 (file)
 
 #include <cds/intrusive/details/base.h>
 #include <cds/algo/atomic.h>
+#include <cds/algo/bit_reversal.h>
 #include <cds/details/allocator.h>
 #include <cds/algo/int_algo.h>
 #include <cds/algo/bitop.h>
 #include <cds/opt/hash.h>
 #include <cds/intrusive/free_list_selector.h>
+#include <cds/details/size_t_cast.h>
 
 namespace cds { namespace intrusive {
 
@@ -58,7 +60,7 @@ namespace cds { namespace intrusive {
             }
 
             /// Initializes dummy node with \p nHash value
-            hash_node( size_t nHash )
+            explicit hash_node( size_t nHash )
                 : m_nHash( nHash )
             {
                 assert( is_dummy());
@@ -92,7 +94,7 @@ namespace cds { namespace intrusive {
             }
 
             /// Initializes dummy node with \p nHash value
-            node( size_t nHash )
+            explicit node( size_t nHash )
                 : hash_node( nHash )
             {
                 assert( is_dummy());
@@ -118,7 +120,7 @@ namespace cds { namespace intrusive {
             }
 
             /// Initializes dummy node with \p nHash value
-            node( size_t nHash )
+            explicit node( size_t nHash )
                 : hash_node( nHash )
             {
                 assert( is_dummy());
@@ -212,6 +214,22 @@ namespace cds { namespace intrusive {
             //@endcond
         };
 
+        /// Option to control bit reversal algorithm
+        /**
+            Bit reversal is a significant part of split-list.
+            \p Type can be one of the predefined algorithms in the \p cds::algo::bit_reversal namespace.
+        */
+        template <typename Type>
+        struct bit_reversal {
+            //@cond
+            template <typename Base>
+            struct pack: public Base
+            {
+                typedef Type bit_reversal;
+            };
+            //@endcond
+        };
+
         /// SplitListSet traits
         struct traits
         {
@@ -223,13 +241,24 @@ namespace cds { namespace intrusive {
             */
             typedef opt::none       hash;
 
+            /// Bit reversal algorithm
+            /**
+                Bit reversal is a significant part of split-list.
+                There are several predefined algorithms in the \p cds::algo::bit_reversal namespace;
+                \p cds::algo::bit_reversal::lookup is the best general-purpose one.
+
+                There are more efficient bit reversal algorithms for particular processor architectures,
+                for example, those based on the x86 SIMD/AVX instruction set; see <a href="http://stackoverflow.com/questions/746171/best-algorithm-for-bit-reversal-from-msb-lsb-to-lsb-msb-in-c">here</a>
+            */
+            typedef cds::algo::bit_reversal::lookup bit_reversal;
+
             /// Item counter
             /**
                 The item counting is an important part of \p SplitListSet algorithm:
                 the <tt>empty()</tt> member function depends on correct item counting.
                 Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.
 
-                Default is \p cds::atomicity::item_counter.
+                Default is \p cds::atomicity::item_counter; to avoid false sharing you may use \p atomicity::cache_friendly_item_counter
             */
             typedef cds::atomicity::item_counter item_counter;
 
@@ -309,6 +338,8 @@ namespace cds { namespace intrusive {
         /**
             Available \p Options:
             - \p opt::hash - mandatory option, specifies hash functor.
+            - \p split_list::bit_reversal - bit reversal algorithm, see \p traits::bit_reversal for explanation;
+                the default is \p cds::algo::bit_reversal::lookup
             - \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter
                 for default type.
             - \p opt::memory_model - C++ memory model for atomic operations.
@@ -366,7 +397,16 @@ namespace cds { namespace intrusive {
 
             /// Auxiliary node type
             struct aux_node_type: public node_type, public free_list::node
-            {};
+            {
+#           ifdef CDS_DEBUG
+                atomics::atomic<bool> m_busy;
+
+                aux_node_type()
+                {
+                    m_busy.store( false, atomics::memory_order_release );
+                }
+#           endif
+            };
 
             typedef atomics::atomic<aux_node_type *> table_entry;  ///< Table entry type
             typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator
@@ -452,8 +492,10 @@ namespace cds { namespace intrusive {
                 if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) {
                     // alloc next free node from m_auxNode
                     size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
-                    if ( idx < capacity())
+                    if ( idx < capacity() ) {
+                        CDS_TSAN_ANNOTATE_NEW_MEMORY( &m_auxNode[idx], sizeof( aux_node_type ) );
                         return new( &m_auxNode[idx] ) aux_node_type();
+                    }
                 }
 
                 // get from free-list
@@ -525,8 +567,17 @@ namespace cds { namespace intrusive {
             typedef typename options::free_list free_list;
 
             /// Auxiliary node type
-            class aux_node_type: public node_type, public free_list::node
-            {};
+            struct aux_node_type: public node_type, public free_list::node
+            {
+#           ifdef CDS_DEBUG
+                atomics::atomic<bool> m_busy;
+
+                aux_node_type()
+                {
+                    m_busy.store( false, atomics::memory_order_release );
+                }
+#           endif
+            };
 
         protected:
             //@cond
@@ -539,9 +590,10 @@ namespace cds { namespace intrusive {
                 // aux_node_type     nodes[];
 
                 aux_node_segment()
-                    : aux_node_count(0)
-                    , next_segment( nullptr )
-                {}
+                    : next_segment( nullptr )
+                {
+                    aux_node_count.store( 0, atomics::memory_order_release );
+                }
 
                 aux_node_type* segment()
                 {
@@ -655,10 +707,12 @@ namespace cds { namespace intrusive {
                     assert( aux_segment != nullptr );
 
                     // try to allocate from current aux segment
-                    if ( aux_segment->aux_node_count.load( memory_model::memory_order_relaxed ) < m_metrics.nSegmentSize ) {
+                    if ( aux_segment->aux_node_count.load( memory_model::memory_order_acquire ) < m_metrics.nSegmentSize ) {
                         size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
-                        if ( idx < m_metrics.nSegmentSize )
+                        if ( idx < m_metrics.nSegmentSize ) {
+                            CDS_TSAN_ANNOTATE_NEW_MEMORY( aux_segment->segment() + idx, sizeof( aux_node_type ) );
                             return new( aux_segment->segment() + idx ) aux_node_type();
+                        }
                     }
 
                     // try allocate from free-list
@@ -672,10 +726,11 @@ namespace cds { namespace intrusive {
                     aux_node_segment* new_aux_segment = allocate_aux_segment();
                     new_aux_segment->next_segment = aux_segment;
                     new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
-                    CDS_COMPILER_RW_BARRIER;
 
-                    if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire ))
-                        return new( new_aux_segment->segment()) aux_node_type();
+                    if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire ) ) {
+                        CDS_TSAN_ANNOTATE_NEW_MEMORY( new_aux_segment->segment(), sizeof( aux_node_type ) );
+                        return new( new_aux_segment->segment() ) aux_node_type();
+                    }
 
                     free_aux_segment( new_aux_segment );
                 }
@@ -708,7 +763,7 @@ namespace cds { namespace intrusive {
                 // Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
                 m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
 
-                size_t nBucketCount = (size_t)(((float)nItemCount) / m.nLoadFactor);
+                size_t nBucketCount = ( nItemCount + m.nLoadFactor - 1 ) / m.nLoadFactor;
                 if ( nBucketCount <= 2 ) {
                     m.nSegmentCount = 1;
                     m.nSegmentSize = 2;
@@ -755,6 +810,7 @@ namespace cds { namespace intrusive {
             aux_node_segment* allocate_aux_segment()
             {
                 char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
+                CDS_TSAN_ANNOTATE_NEW_MEMORY( p, sizeof( aux_node_segment ) );
                 return new(p) aux_node_segment();
             }
 
@@ -1232,27 +1288,28 @@ namespace cds { namespace intrusive {
                 {
                     return m_itCur != i.m_itCur;
                 }
+
+            protected:
+                list_iterator const& underlying_iterator() const
+                {
+                    return m_itCur;
+                }
             };
         }   // namespace details
         //@endcond
 
         //@cond
         // Helper functions
-
-        /// Reverses bit order in \p nHash
-        static inline size_t reverse_bits( size_t nHash )
-        {
-            return bitop::RBO( nHash );
-        }
-
+        template <typename BitReversalAlgo>
         static inline size_t regular_hash( size_t nHash )
         {
-            return reverse_bits( nHash ) | size_t(1);
+            return static_cast<size_t>( BitReversalAlgo()( cds::details::size_t_cast( nHash ))) | size_t(1);
         }
 
+        template <typename BitReversalAlgo>
         static inline size_t dummy_hash( size_t nHash )
         {
-            return reverse_bits( nHash ) & ~size_t(1);
+            return static_cast<size_t>( BitReversalAlgo()( cds::details::size_t_cast( nHash ))) & ~size_t(1);
         }
         //@endcond