Uses different pass count for different parallel queue test cases
[libcds.git] / cds / urcu / details / gpb.h
index 89bae00a3ef999eae6f27a4bd1fb8ee18356499a..5ad75d130d202372af6d13f5847a20a658259fa5 100644 (file)
@@ -1,14 +1,42 @@
-//$$CDS-header$$
+/*
+    This file is a part of libcds - Concurrent Data Structures library
 
-#ifndef _CDS_URCU_DETAILS_GPB_H
-#define _CDS_URCU_DETAILS_GPB_H
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
 
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_URCU_DETAILS_GPB_H
+#define CDSLIB_URCU_DETAILS_GPB_H
+
+#include <mutex>
+#include <limits>
 #include <cds/urcu/details/gp.h>
-#include <cds/backoff_strategy.h>
+#include <cds/algo/backoff_strategy.h>
 #include <cds/container/vyukov_mpmc_cycle_queue.h>
 
-#include <cds/details/std/mutex.h>
-
 namespace cds { namespace urcu {
 
     /// User-space general-purpose RCU with deferred (buffered) reclamation
@@ -21,7 +49,7 @@ namespace cds { namespace urcu {
         i.e. until the RCU quiescent state will come. After that the buffer and all retired objects are freed.
         This synchronization cycle may be called in any thread that calls \p retire_ptr function.
 
-        The \p Buffer contains items of \ref cds_urcu_retired_ptr "retired_ptr" type and it should support a queue interface with
+        The \p Buffer contains items of \ref cds_urcu_retired_ptr "epoch_retired_ptr" type and it should support a queue interface with
         three functions:
         - <tt> bool push( retired_ptr& p ) </tt> - places the retired pointer \p p into queue. If the function
             returns \p false it means that the buffer is full and RCU synchronization cycle must be processed.
@@ -29,22 +57,19 @@ namespace cds { namespace urcu {
             this function must return \p false
         - <tt>size_t size()</tt> - returns queue's item count.
 
-        The buffer is considered as full if \p push returns \p false or the buffer size reaches the RCU threshold.
+        The buffer is considered as full if \p push() returns \p false or the buffer size reaches the RCU threshold.
 
         There is a wrapper \ref cds_urcu_general_buffered_gc "gc<general_buffered>" for \p %general_buffered class
         that provides unified RCU interface. You should use this wrapper class instead of \p %general_buffered
 
         Template arguments:
-        - \p Buffer - buffer type. Default is cds::container::VyukovMPMCCycleQueue
+        - \p Buffer - buffer type. Default is \p cds::container::VyukovMPMCCycleQueue
         - \p Lock - mutex type, default is \p std::mutex
         - \p Backoff - back-off schema, default is cds::backoff::Default
     */
     template <
-        class Buffer = cds::container::VyukovMPMCCycleQueue<
-            epoch_retired_ptr
-            ,cds::opt::buffer< cds::opt::v::dynamic_buffer< epoch_retired_ptr > >
-        >
-        ,class Lock = cds_std::mutex
+        class Buffer = cds::container::VyukovMPMCCycleQueue< epoch_retired_ptr >
+        ,class Lock = std::mutex
         ,class Backoff = cds::backoff::Default
     >
     class general_buffered: public details::gp_singleton< general_buffered_tag >
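
A minimal sketch of what a user-supplied Buffer type has to provide to satisfy the push/pop/size contract described in the doc comment above. The class name IllustrativeRetiredBuffer, the std::deque backing store and the fixed capacity are assumptions for illustration only; a real buffer must be multi-thread safe, as the default cds::container::VyukovMPMCCycleQueue is.

// Sketch only: models the three-function queue interface expected by
// general_buffered. Not thread-safe, so it is NOT suitable for real use.
#include <cstddef>
#include <deque>
#include <cds/urcu/general_buffered.h>   // brings in cds::urcu::epoch_retired_ptr

struct IllustrativeRetiredBuffer {
    std::deque< cds::urcu::epoch_retired_ptr > q_;
    static size_t const c_nAssumedCapacity = 256;   // illustrative bound

    // Returns false when full; the RCU core then runs a synchronization cycle.
    bool push( cds::urcu::epoch_retired_ptr& p )
    {
        if ( q_.size() >= c_nAssumedCapacity )
            return false;
        q_.push_back( p );
        return true;
    }

    // Returns false when empty; used by clear_buffer() to drain retired pointers.
    bool pop( cds::urcu::epoch_retired_ptr& p )
    {
        if ( q_.empty())
            return false;
        p = q_.front();
        q_.pop_front();
        return true;
    }

    size_t size() const { return q_.size(); }
};
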
@@ -61,7 +86,9 @@ namespace cds { namespace urcu {
         typedef base_class::thread_gc thread_gc ;   ///< Thread-side RCU part
         typedef typename thread_gc::scoped_lock scoped_lock ; ///< Access lock class
 
-        static bool const c_bBuffered = true ; ///< This RCU buffers disposed elements
+        //@cond
+        static bool const c_bBuffered = true ; ///< Bufferized RCU
+        //@endcond
 
     protected:
         //@cond
@@ -70,22 +97,22 @@ namespace cds { namespace urcu {
 
     protected:
         //@cond
-        buffer_type                     m_Buffer;
-        CDS_ATOMIC::atomic<uint64_t>    m_nCurEpoch;
-        lock_type                       m_Lock;
-        size_t const                    m_nCapacity;
+        buffer_type                m_Buffer;
+        atomics::atomic<uint64_t>  m_nCurEpoch;
+        lock_type                  m_Lock;
+        size_t const               m_nCapacity;
         //@endcond
 
     public:
         /// Returns singleton instance
         static general_buffered * instance()
         {
-            return static_cast<general_buffered *>( base_class::instance() );
+            return static_cast<general_buffered *>( base_class::instance());
         }
         /// Checks if the singleton is created and ready to use
         static bool isUsed()
         {
-            return singleton_ptr::s_pRCU != null_ptr<singleton_vtbl *>();
+            return singleton_ptr::s_pRCU != nullptr;
         }
 
     protected:
@@ -98,7 +125,7 @@ namespace cds { namespace urcu {
 
         ~general_buffered()
         {
-            clear_buffer( (uint64_t) -1 );
+            clear_buffer( std::numeric_limits< uint64_t >::max());
         }
 
         void flip_and_wait()
@@ -111,23 +138,25 @@ namespace cds { namespace urcu {
         {
             epoch_retired_ptr p;
             while ( m_Buffer.pop( p )) {
-                if ( p.m_nEpoch <= nEpoch )
+                if ( p.m_nEpoch <= nEpoch ) {
                     p.free();
+                }
                 else {
-                    push_buffer( p );
+                    push_buffer( std::move(p));
                     break;
                 }
             }
         }
 
-        // Return: true - synchronize has been called, false - otherwise
-        bool push_buffer( epoch_retired_ptr& ep )
+        // Return: \p true - synchronize has been called, \p false - otherwise
+        bool push_buffer( epoch_retired_ptr&& ep )
         {
             bool bPushed = m_Buffer.push( ep );
-            if ( !bPushed || m_Buffer.size() >= capacity() ) {
+            if ( !bPushed || m_Buffer.size() >= capacity()) {
                 synchronize();
-                if ( !bPushed )
+                if ( !bPushed ) {
                     ep.free();
+                }
                 return true;
             }
             return false;
@@ -148,12 +177,12 @@ namespace cds { namespace urcu {
         /// Destroys singleton object
         static void Destruct( bool bDetachAll = false )
         {
-            if ( isUsed() ) {
-                instance()->clear_buffer( (uint64_t) -1 );
+            if ( isUsed()) {
+                instance()->clear_buffer( std::numeric_limits< uint64_t >::max());
                 if ( bDetachAll )
                     instance()->m_ThreadList.detach_all();
                 delete instance();
-                singleton_ptr::s_pRCU = null_ptr<singleton_vtbl *>();
+                singleton_ptr::s_pRCU = nullptr;
             }
         }
 
@@ -164,30 +193,40 @@ namespace cds { namespace urcu {
             When the buffer becomes full \ref synchronize function is called
             to wait for the end of grace period and then to free all pointers from the buffer.
         */
-        virtual void retire_ptr( retired_ptr& p )
+        virtual void retire_ptr( retired_ptr& p ) override
         {
-            if ( p.m_p ) {
-                epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ));
-                push_buffer( ep );
-            }
+            if ( p.m_p )
+                push_buffer( epoch_retired_ptr( p, m_nCurEpoch.load( atomics::memory_order_relaxed )));
         }
 
         /// Retires the pointer chain [\p itFirst, \p itLast)
         template <typename ForwardIterator>
         void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
         {
-            uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed );
+            uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
             while ( itFirst != itLast ) {
                 epoch_retired_ptr ep( *itFirst, nEpoch );
                 ++itFirst;
-                push_buffer( ep );
+                push_buffer( std::move(ep));
+            }
+        }
+
+        /// Retires a pointer chain: \p Func is called repeatedly until it returns a \p retired_ptr whose \p m_p is \p nullptr
+        template <typename Func>
+        void batch_retire( Func e )
+        {
+            uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
+            for ( retired_ptr p{ e() }; p.m_p; ) {
+                epoch_retired_ptr ep( p, nEpoch );
+                p = e();
+                push_buffer( std::move(ep));
             }
         }
 
         /// Wait to finish a grace period and then clear the buffer
         void synchronize()
         {
-            epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ));
+            epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( atomics::memory_order_relaxed ));
             synchronize( ep );
         }
 
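
A hedged usage sketch for the Func overload of batch_retire() added in the hunk above: the functor is called repeatedly and signals the end of the chain by returning a retired_ptr whose m_p is nullptr. The node type, free_node() and the pending vector are hypothetical names, and the call through instance() assumes the singleton has already been constructed.

// Sketch only: draining a hypothetical list of nodes through batch_retire( Func ).
#include <vector>
#include <cds/urcu/general_buffered.h>

struct node { int value; };
static void free_node( void* p ) { delete static_cast<node*>( p ); }

static void retire_all( std::vector<node*>& pending )
{
    size_t i = 0;
    cds::urcu::general_buffered<>::instance()->batch_retire(
        [&]() -> cds::urcu::retired_ptr {
            if ( i == pending.size())
                return cds::urcu::retired_ptr();          // empty m_p ends the chain
            return cds::urcu::retired_ptr( pending[i++], free_node );
        });
    pending.clear();
}
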
@@ -195,17 +234,15 @@ namespace cds { namespace urcu {
         bool synchronize( epoch_retired_ptr& ep )
         {
             uint64_t nEpoch;
-            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
             {
-                cds::lock::scoped_lock<lock_type> sl( m_Lock );
-                if ( ep.m_p && m_Buffer.push( ep ) )
+                std::unique_lock<lock_type> sl( m_Lock );
+                if ( ep.m_p && m_Buffer.push( ep ))
                     return false;
-                nEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+                nEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_relaxed );
                 flip_and_wait();
                 flip_and_wait();
             }
             clear_buffer( nEpoch );
-            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
             return true;
         }
         //@endcond
@@ -217,6 +254,17 @@ namespace cds { namespace urcu {
         }
     };
 
+    /// User-space general-purpose RCU with deferred (buffered) reclamation (stripped version)
+    /**
+        @headerfile cds/urcu/general_buffered.h
+
+        This short version of \p general_buffered is intended for stripping debug info.
+        If you use \p %general_buffered with default template arguments you may use
+        this stripped version. All functionality of both classes is identical.
+    */
+    class general_buffered_stripped: public general_buffered<>
+    {};
+
 }} // namespace cds::urcu
 
-#endif // #ifndef _CDS_URCU_DETAILS_GPB_H
+#endif // #ifndef CDSLIB_URCU_DETAILS_GPB_H
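
A brief, non-authoritative sketch of how this class is normally reached through the gc<general_buffered> wrapper recommended in the documentation rather than instantiated directly. The typedef name rcu_gpb is an assumed convention; the Initialize()/attachThread() calls reflect the usual libcds setup pattern and are not defined in this header.

// Sketch only: selecting buffered general-purpose RCU via the unified gc<> wrapper.
#include <cds/init.h>
#include <cds/threading/model.h>
#include <cds/urcu/general_buffered.h>

typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_gpb;   // assumed typedef name

int main()
{
    cds::Initialize();
    {
        rcu_gpb gpbRCU;                            // constructs the general_buffered singleton
        cds::threading::Manager::attachThread();   // attach this thread to libcds

        // ... RCU-based containers parameterized by rcu_gpb would be used here ...

        cds::threading::Manager::detachThread();
    }                                              // wrapper destructor tears the singleton down
    cds::Terminate();
    return 0;
}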