Improved management of SkipList auxiliary nodes: now aux nodes are allocated from...
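
    The change below replaces per-node heap allocation of auxiliary (dummy)
    nodes with allocation from the bucket table itself: alloc_aux_node()
    now calls m_Buckets.alloc_aux_node(), and the hot members m_Buckets and
    m_List are padded via cds::details::type_padding so they do not share a
    cache line. A minimal sketch of the pooling idea follows; the names
    aux_node and aux_node_pool are illustrative only, not libcds API:

        #include <atomic>
        #include <cstddef>
        #include <memory>

        struct aux_node {
            std::atomic<aux_node*> next{ nullptr };
            std::size_t hash = 0;
        };

        // A pool owned by the bucket table. Aux nodes normally live as long
        // as the table itself, so free() just pushes a node onto a free list
        // for reuse (e.g. when an insert loses a race).
        class aux_node_pool {
            std::unique_ptr<aux_node[]> storage_;
            std::size_t                 capacity_;
            std::atomic<std::size_t>    top_{ 0 };
            std::atomic<aux_node*>      free_list_{ nullptr };
        public:
            explicit aux_node_pool( std::size_t capacity )
                : storage_( new aux_node[capacity] ), capacity_( capacity )
            {}

            aux_node* alloc()
            {
                // Try to pop a recycled node first (illustration only: a
                // production lock-free pop must also handle the ABA problem)
                aux_node* p = free_list_.load( std::memory_order_acquire );
                while ( p && !free_list_.compare_exchange_weak( p,
                            p->next.load( std::memory_order_relaxed ),
                            std::memory_order_acquire, std::memory_order_acquire ))
                {}
                if ( p )
                    return p;

                // Otherwise bump-allocate from the preallocated block;
                // returns nullptr when the pool is exhausted
                std::size_t idx = top_.fetch_add( 1, std::memory_order_relaxed );
                return idx < capacity_ ? &storage_[idx] : nullptr;
            }

            void free( aux_node* p )
            {
                aux_node* head = free_list_.load( std::memory_order_relaxed );
                do {
                    p->next.store( head, std::memory_order_relaxed );
                } while ( !free_list_.compare_exchange_weak( head, p,
                            std::memory_order_release, std::memory_order_relaxed ));
            }
        };

    Because the nodes come from one contiguous block owned by the table, no
    per-node delete is needed at destruction, and allocation on the hot path
    is a fetch_add instead of a heap call.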
diff --git a/cds/intrusive/split_list_rcu.h b/cds/intrusive/split_list_rcu.h
index 6f264cceb5c8f2bcd052ee058de96df6ecaa7cbd..8ea2078d97e53085d4f3061f758f623ee66f98f9 100644
--- a/cds/intrusive/split_list_rcu.h
+++ b/cds/intrusive/split_list_rcu.h
@@ -5,7 +5,7 @@
 
     Source code repo: http://github.com/khizmax/libcds/
     Download: http://sourceforge.net/projects/libcds/files/
-    
+
     Redistribution and use in source and binary forms, with or without
     modification, are permitted provided that the following conditions are met:
 
@@ -25,7 +25,7 @@
     SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.     
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
 #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
@@ -35,6 +35,7 @@
 
 #include <cds/intrusive/details/split_list_base.h>
 #include <cds/details/binary_functor_wrapper.h>
+#include <cds/details/type_padding.h>
 
 namespace cds { namespace intrusive {
 
@@ -123,6 +124,13 @@ namespace cds { namespace intrusive {
         typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
         typedef typename traits::stat         stat;         ///< Internal statistics
 
+        // GC and OrderedList::gc must be the same
+        static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
+
+        // atomicity::empty_item_counter is not allowed as an item counter
+        static_assert( !std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
+                        "cds::atomicity::empty_item_counter is not allowed as an item counter");
+
     protected:
         typedef typename ordered_list::node_type    list_node_type;  ///< Node type as declared in ordered list
         typedef split_list::node<list_node_type>    node_type;       ///< split-list node type
@@ -264,244 +272,6 @@ namespace cds { namespace intrusive {
         };
         //@endcond
 
-    protected:
-        ordered_list_wrapper    m_List;             ///< Ordered list containing split-list items
-        bucket_table            m_Buckets;          ///< bucket table
-        atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
-        atomics::atomic<size_t> m_nMaxItemCount;    ///< number of items container can hold, before we have to resize
-        item_counter            m_ItemCounter;      ///< Item counter
-        hash                    m_HashFunctor;      ///< Hash functor
-        stat                    m_Stat;             ///< Internal statistics accumulator
-
-    protected:
-        //@cond
-        typedef cds::details::Allocator< aux_node_type, typename traits::allocator >   aux_node_allocator;
-
-        aux_node_type * alloc_aux_node( size_t nHash )
-        {
-            m_Stat.onHeadNodeAllocated();
-            return aux_node_allocator().New( nHash );
-        }
-        void free_aux_node( aux_node_type * p )
-        {
-            aux_node_allocator().Delete( p );
-            m_Stat.onHeadNodeFreed();
-        }
-
-        /// Calculates hash value of \p key
-        template <typename Q>
-        size_t hash_value( Q const& key ) const
-        {
-            return m_HashFunctor( key );
-        }
-
-        size_t bucket_no( size_t nHash ) const
-        {
-            return nHash & ( (1 << m_nBucketCountLog2.load(memory_model::memory_order_relaxed)) - 1 );
-        }
-
-        static size_t parent_bucket( size_t nBucket )
-        {
-            assert( nBucket > 0 );
-            return nBucket & ~( 1 << bitop::MSBnz( nBucket ) );
-        }
-
-        aux_node_type * init_bucket( size_t nBucket )
-        {
-            assert( nBucket > 0 );
-            size_t nParent = parent_bucket( nBucket );
-
-            aux_node_type * pParentBucket = m_Buckets.bucket( nParent );
-            if ( pParentBucket == nullptr ) {
-                pParentBucket = init_bucket( nParent );
-                m_Stat.onRecursiveInitBucket();
-            }
-
-            assert( pParentBucket != nullptr );
-
-            // Allocate a dummy node for new bucket
-            {
-                aux_node_type * pBucket = alloc_aux_node( split_list::dummy_hash( nBucket ) );
-                if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
-                    m_Buckets.bucket( nBucket, pBucket );
-                    m_Stat.onNewBucket();
-                    return pBucket;
-                }
-                free_aux_node( pBucket );
-            }
-
-            // Another thread set the bucket. Wait while it done
-
-            // In this point, we must wait while nBucket is empty.
-            // The compiler can decide that waiting loop can be "optimized" (stripped)
-            // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
-            //
-            m_Stat.onBucketInitContenton();
-            back_off bkoff;
-            while ( true ) {
-                aux_node_type volatile * p = m_Buckets.bucket( nBucket );
-                if ( p != nullptr )
-                    return const_cast<aux_node_type *>( p );
-                bkoff();
-                m_Stat.onBusyWaitBucketInit();
-            }
-        }
-
-        aux_node_type * get_bucket( size_t nHash )
-        {
-            size_t nBucket = bucket_no( nHash );
-
-            aux_node_type * pHead = m_Buckets.bucket( nBucket );
-            if ( pHead == nullptr )
-                pHead = init_bucket( nBucket );
-
-            assert( pHead->is_dummy() );
-
-            return pHead;
-        }
-
-        void init()
-        {
-            // GC and OrderedList::gc must be the same
-            static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
-
-            // atomicity::empty_item_counter is not allowed as a item counter
-            static_assert( !std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
-                           "cds::atomicity::empty_item_counter is not allowed as a item counter");
-
-            // Initialize bucket 0
-            aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash(0)*/ );
-
-            // insert_aux_node cannot return false for empty list
-            CDS_VERIFY( m_List.insert_aux_node( pNode ));
-
-            m_Buckets.bucket( 0, pNode );
-        }
-
-        static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor )
-        {
-            return nBucketCount * nLoadFactor;
-        }
-
-        void inc_item_count()
-        {
-            size_t nMaxCount = m_nMaxItemCount.load(memory_model::memory_order_relaxed);
-            if ( ++m_ItemCounter <= nMaxCount )
-                return;
-
-            size_t sz = m_nBucketCountLog2.load(memory_model::memory_order_relaxed);
-            const size_t nBucketCount = static_cast<size_t>(1) << sz;
-            if ( nBucketCount < m_Buckets.capacity() ) {
-                // we may grow the bucket table
-                const size_t nLoadFactor = m_Buckets.load_factor();
-                if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor ))
-                    return; // someone already have updated m_nBucketCountLog2, so stop here
-
-                m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ),
-                                                         memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
-                m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
-            }
-            else
-                m_nMaxItemCount.store( std::numeric_limits<size_t>::max(), memory_model::memory_order_relaxed );
-        }
-
-        template <typename Q, typename Compare, typename Func>
-        bool find_( Q& val, Compare cmp, Func f )
-        {
-            size_t nHash = hash_value( val );
-            split_list::details::search_value_type<Q>  sv( val, split_list::regular_hash( nHash ));
-            aux_node_type * pHead = get_bucket( nHash );
-            assert( pHead != nullptr );
-
-            return m_Stat.onFind( m_List.find_at( pHead, sv, cmp,
-                [&f](value_type& item, split_list::details::search_value_type<Q>& val){ f(item, val.val ); }));
-        }
-
-        template <typename Q, typename Compare>
-        bool find_value( Q const& val, Compare cmp )
-        {
-            size_t nHash = hash_value( val );
-            split_list::details::search_value_type<Q const>  sv( val, split_list::regular_hash( nHash ));
-            aux_node_type * pHead = get_bucket( nHash );
-            assert( pHead != nullptr );
-
-            return m_Stat.onFind( m_List.find_at( pHead, sv, cmp ));
-        }
-
-        template <typename Q, typename Compare>
-        raw_ptr get_( Q const& val, Compare cmp )
-        {
-            size_t nHash = hash_value( val );
-            split_list::details::search_value_type<Q const>  sv( val, split_list::regular_hash( nHash ));
-            aux_node_type * pHead = get_bucket( nHash );
-            assert( pHead != nullptr );
-
-            raw_ptr p = m_List.get_at( pHead, sv, cmp );
-            m_Stat.onFind( !!p );
-            return p;
-        }
-
-        template <typename Q, typename Compare>
-        value_type * extract_( Q const& val, Compare cmp )
-        {
-            size_t nHash = hash_value( val );
-            split_list::details::search_value_type<Q const>  sv( val, split_list::regular_hash( nHash ));
-            aux_node_type * pHead = get_bucket( nHash );
-            assert( pHead != nullptr );
-
-            value_type * pNode = m_List.extract_at( pHead, sv, cmp );
-            if ( pNode ) {
-                --m_ItemCounter;
-                m_Stat.onExtractSuccess();
-            }
-            else
-                m_Stat.onExtractFailed();
-            return pNode;
-        }
-
-        template <typename Q, typename Less>
-        value_type * extract_with_( Q const& val, Less pred )
-        {
-            CDS_UNUSED( pred );
-            return extract_( val, typename wrapped_ordered_list::template make_compare_from_less<Less>());
-        }
-
-        template <typename Q, typename Compare>
-        bool erase_( const Q& val, Compare cmp )
-        {
-            size_t nHash = hash_value( val );
-            split_list::details::search_value_type<Q const>  sv( val, split_list::regular_hash( nHash ));
-            aux_node_type * pHead = get_bucket( nHash );
-            assert( pHead != nullptr );
-
-            if ( m_List.erase_at( pHead, sv, cmp ) ) {
-                --m_ItemCounter;
-                m_Stat.onEraseSuccess();
-                return true;
-            }
-            m_Stat.onEraseFailed();
-            return false;
-        }
-
-        template <typename Q, typename Compare, typename Func>
-        bool erase_( Q const& val, Compare cmp, Func f )
-        {
-            size_t nHash = hash_value( val );
-            split_list::details::search_value_type<Q const>  sv( val, split_list::regular_hash( nHash ));
-            aux_node_type * pHead = get_bucket( nHash );
-            assert( pHead != nullptr );
-
-            if ( m_List.erase_at( pHead, sv, cmp, f )) {
-                --m_ItemCounter;
-                m_Stat.onEraseSuccess();
-                return true;
-            }
-            m_Stat.onEraseFailed();
-            return false;
-        }
-
-        //@endcond
-
     public:
         /// Initialize split-ordered list of default capacity
         /**
@@ -1087,6 +857,244 @@ namespace cds { namespace intrusive {
             return const_iterator( m_List.cend(), m_List.cend() );
         }
     //@}
+
+    protected:
+        //@cond
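+        // Aux (dummy) nodes are now allocated from the bucket table's internal
+        // storage instead of a separate heap allocator; m_Buckets.alloc_aux_node()
+        // may return nullptr if that storage is exhausted.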
+        aux_node_type * alloc_aux_node( size_t nHash )
+        {
+            m_Stat.onHeadNodeAllocated();
+            aux_node_type* p = m_Buckets.alloc_aux_node();
+            if ( p )
+                p->m_nHash = nHash;
+            return p;
+        }
+
+        void free_aux_node( aux_node_type * p )
+        {
+            m_Buckets.free_aux_node( p );
+            m_Stat.onHeadNodeFreed();
+        }
+
+        /// Calculates hash value of \p key
+        template <typename Q>
+        size_t hash_value( Q const& key ) const
+        {
+            return m_HashFunctor( key );
+        }
+
+        size_t bucket_no( size_t nHash ) const
+        {
+            return nHash & ( (1 << m_nBucketCountLog2.load(memory_model::memory_order_relaxed)) - 1 );
+        }
+
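+        // In a split-ordered list the parent of bucket b is b with its most
+        // significant set bit cleared, e.g. parent_bucket(6) == 6 & ~4 == 2,
+        // parent_bucket(5) == 5 & ~4 == 1, parent_bucket(1) == 0.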
+        static size_t parent_bucket( size_t nBucket )
+        {
+            assert( nBucket > 0 );
+            return nBucket & ~( 1 << bitop::MSBnz( nBucket ) );
+        }
+
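+        // Buckets are initialized lazily: before bucket nBucket can be used, its
+        // parent bucket is initialized (recursively, up to bucket 0), and then a
+        // new dummy node for nBucket is linked into the ordered list starting
+        // from the parent's dummy node.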
+        aux_node_type * init_bucket( size_t nBucket )
+        {
+            assert( nBucket > 0 );
+            size_t nParent = parent_bucket( nBucket );
+
+            aux_node_type * pParentBucket = m_Buckets.bucket( nParent );
+            if ( pParentBucket == nullptr ) {
+                pParentBucket = init_bucket( nParent );
+                m_Stat.onRecursiveInitBucket();
+            }
+
+            assert( pParentBucket != nullptr );
+
+            // Allocate a dummy node for the new bucket
+            {
+                aux_node_type * pBucket = alloc_aux_node( split_list::dummy_hash( nBucket ) );
+                if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
+                    m_Buckets.bucket( nBucket, pBucket );
+                    m_Stat.onNewBucket();
+                    return pBucket;
+                }
+                free_aux_node( pBucket );
+            }
+
+            // Another thread has already set the bucket; wait until it has finished.
+
+            // At this point we must wait until the nBucket entry becomes non-null.
+            // The compiler could decide that the waiting loop can be "optimized"
+            // away (stripped); to prevent that, we spin on a volatile pointer to
+            // the bucket head.
+            //
+            m_Stat.onBucketInitContenton();
+            back_off bkoff;
+            while ( true ) {
+                aux_node_type volatile * p = m_Buckets.bucket( nBucket );
+                if ( p != nullptr )
+                    return const_cast<aux_node_type *>( p );
+                bkoff();
+                m_Stat.onBusyWaitBucketInit();
+            }
+        }
+
+        aux_node_type * get_bucket( size_t nHash )
+        {
+            size_t nBucket = bucket_no( nHash );
+
+            aux_node_type * pHead = m_Buckets.bucket( nBucket );
+            if ( pHead == nullptr )
+                pHead = init_bucket( nBucket );
+
+            assert( pHead->is_dummy() );
+
+            return pHead;
+        }
+
+        void init()
+        {
+            // Initialize bucket 0
+            aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash(0)*/ );
+
+            // insert_aux_node cannot return false for empty list
+            CDS_VERIFY( m_List.insert_aux_node( pNode ));
+
+            m_Buckets.bucket( 0, pNode );
+        }
+
+        static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor )
+        {
+            return nBucketCount * nLoadFactor;
+        }
+
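+        // Growth policy: the bucket count doubles (up to m_Buckets.capacity())
+        // whenever the item count exceeds bucket_count * load_factor; e.g. with
+        // 8 buckets and load factor 2, inserting the 17th item doubles the
+        // table to 16 buckets and raises the threshold to 32.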
+        void inc_item_count()
+        {
+            size_t nMaxCount = m_nMaxItemCount.load(memory_model::memory_order_relaxed);
+            if ( ++m_ItemCounter <= nMaxCount )
+                return;
+
+            size_t sz = m_nBucketCountLog2.load(memory_model::memory_order_relaxed);
+            const size_t nBucketCount = static_cast<size_t>(1) << sz;
+            if ( nBucketCount < m_Buckets.capacity() ) {
+                // we may grow the bucket table
+                const size_t nLoadFactor = m_Buckets.load_factor();
+                if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor ))
+                    return; // someone has already updated m_nBucketCountLog2, so stop here
+
+                m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ),
+                                                         memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
+                m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
+            }
+            else
+                m_nMaxItemCount.store( std::numeric_limits<size_t>::max(), memory_model::memory_order_relaxed );
+        }
+
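+        // In the split-ordered-list scheme, split_list::regular_hash() produces
+        // the bit-reversed hash with the least significant bit set (a regular
+        // item key), and split_list::dummy_hash() the bit-reversed bucket number
+        // with that bit clear, so a bucket's dummy node sorts before all regular
+        // items belonging to the bucket.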
+        template <typename Q, typename Compare, typename Func>
+        bool find_( Q& val, Compare cmp, Func f )
+        {
+            size_t nHash = hash_value( val );
+            split_list::details::search_value_type<Q>  sv( val, split_list::regular_hash( nHash ));
+            aux_node_type * pHead = get_bucket( nHash );
+            assert( pHead != nullptr );
+
+            return m_Stat.onFind( m_List.find_at( pHead, sv, cmp,
+                [&f](value_type& item, split_list::details::search_value_type<Q>& val){ f(item, val.val ); }));
+        }
+
+        template <typename Q, typename Compare>
+        bool find_value( Q const& val, Compare cmp )
+        {
+            size_t nHash = hash_value( val );
+            split_list::details::search_value_type<Q const>  sv( val, split_list::regular_hash( nHash ));
+            aux_node_type * pHead = get_bucket( nHash );
+            assert( pHead != nullptr );
+
+            return m_Stat.onFind( m_List.find_at( pHead, sv, cmp ));
+        }
+
+        template <typename Q, typename Compare>
+        raw_ptr get_( Q const& val, Compare cmp )
+        {
+            size_t nHash = hash_value( val );
+            split_list::details::search_value_type<Q const>  sv( val, split_list::regular_hash( nHash ));
+            aux_node_type * pHead = get_bucket( nHash );
+            assert( pHead != nullptr );
+
+            raw_ptr p = m_List.get_at( pHead, sv, cmp );
+            m_Stat.onFind( !!p );
+            return p;
+        }
+
+        template <typename Q, typename Compare>
+        value_type * extract_( Q const& val, Compare cmp )
+        {
+            size_t nHash = hash_value( val );
+            split_list::details::search_value_type<Q const>  sv( val, split_list::regular_hash( nHash ));
+            aux_node_type * pHead = get_bucket( nHash );
+            assert( pHead != nullptr );
+
+            value_type * pNode = m_List.extract_at( pHead, sv, cmp );
+            if ( pNode ) {
+                --m_ItemCounter;
+                m_Stat.onExtractSuccess();
+            }
+            else
+                m_Stat.onExtractFailed();
+            return pNode;
+        }
+
+        template <typename Q, typename Less>
+        value_type * extract_with_( Q const& val, Less pred )
+        {
+            CDS_UNUSED( pred );
+            return extract_( val, typename wrapped_ordered_list::template make_compare_from_less<Less>());
+        }
+
+        template <typename Q, typename Compare>
+        bool erase_( const Q& val, Compare cmp )
+        {
+            size_t nHash = hash_value( val );
+            split_list::details::search_value_type<Q const>  sv( val, split_list::regular_hash( nHash ));
+            aux_node_type * pHead = get_bucket( nHash );
+            assert( pHead != nullptr );
+
+            if ( m_List.erase_at( pHead, sv, cmp ) ) {
+                --m_ItemCounter;
+                m_Stat.onEraseSuccess();
+                return true;
+            }
+            m_Stat.onEraseFailed();
+            return false;
+        }
+
+        template <typename Q, typename Compare, typename Func>
+        bool erase_( Q const& val, Compare cmp, Func f )
+        {
+            size_t nHash = hash_value( val );
+            split_list::details::search_value_type<Q const>  sv( val, split_list::regular_hash( nHash ));
+            aux_node_type * pHead = get_bucket( nHash );
+            assert( pHead != nullptr );
+
+            if ( m_List.erase_at( pHead, sv, cmp, f )) {
+                --m_ItemCounter;
+                m_Stat.onEraseSuccess();
+                return true;
+            }
+            m_Stat.onEraseFailed();
+            return false;
+        }
+        //@endcond
+
+    protected:
+        //@cond
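+        // m_Buckets and m_List are the most heavily accessed members; padding
+        // them via cds::details::type_padding (to traits::padding bytes) keeps
+        // them from sharing a cache line with each other or with the counters
+        // below, reducing false sharing.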
+        typedef typename cds::details::type_padding< bucket_table, traits::padding >::type padded_bucket_table;
+        padded_bucket_table     m_Buckets;          ///< bucket table
+
+        typedef typename cds::details::type_padding< ordered_list_wrapper, traits::padding>::type padded_ordered_list;
+        padded_ordered_list     m_List;             ///< Ordered list containing split-list items
+
+        atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
+        atomics::atomic<size_t> m_nMaxItemCount;    ///< number of items the container can hold before the bucket table must grow
+        item_counter            m_ItemCounter;      ///< Item counter
+        hash                    m_HashFunctor;      ///< Hash functor
+        stat                    m_Stat;             ///< Internal statistics accumulator
+        //@endcond
     };
 
 }}  // namespace cds::intrusive