/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
31 #ifndef CDSLIB_URCU_DETAILS_GPB_H
32 #define CDSLIB_URCU_DETAILS_GPB_H
36 #include <cds/urcu/details/gp.h>
37 #include <cds/algo/backoff_strategy.h>
38 #include <cds/container/vyukov_mpmc_cycle_queue.h>
40 namespace cds { namespace urcu {
/// User-space general-purpose RCU with deferred (buffered) reclamation
/**
    @headerfile cds/urcu/general_buffered.h

    This URCU implementation contains an internal buffer where retired objects are
    accumulated. When the buffer becomes full, the RCU \p synchronize function is called
    that waits until all reader/updater threads end up their read-side critical sections,
    i.e. until the RCU quiescent state will come. After that the buffer and all retired objects are freed.
    This synchronization cycle may be called in any thread that calls the \p retire_ptr function.

    The \p Buffer contains items of \ref cds_urcu_retired_ptr "epoch_retired_ptr" type and it should support a queue interface with:
    - <tt> bool push( retired_ptr& p ) </tt> - places the retired pointer \p p into the queue. If the function
      returns \p false it means that the buffer is full and an RCU synchronization cycle must be processed.
    - <tt>bool pop( retired_ptr& p ) </tt> - pops the queue's head item into the \p p parameter; if the queue is empty
      this function must return \p false.
    - <tt>size_t size()</tt> - returns the queue's item count.

    The buffer is considered full if \p push() returns \p false or the buffer size reaches the RCU threshold.

    There is a wrapper \ref cds_urcu_general_buffered_gc "gc<general_buffered>" for the \p %general_buffered class
    that provides a unified RCU interface. You should use this wrapper class instead of \p %general_buffered directly.

    Template arguments:
    - \p Buffer - buffer type. Default is \p cds::container::VyukovMPMCCycleQueue
    - \p Lock - mutex type, default is \p std::mutex
    - \p Backoff - back-off schema, default is cds::backoff::Default
*/
// NOTE(review): this region is a corrupted extraction of the general_buffered<>
// class template.  The original file's line numbers are fused onto the start of
// every surviving line, and many whole lines are missing (the "template <" /
// ">" lines, braces, access specifiers, and several statements).  The code
// below is therefore NOT compilable as-is and must be restored from the
// upstream libcds header; the comments added here only annotate what the
// surviving lines show.
//
// Template parameters (the three default arguments survive below):
71 class Buffer = cds::container::VyukovMPMCCycleQueue< epoch_retired_ptr >
72 ,class Lock = std::mutex
73 ,class Backoff = cds::backoff::Default
// general_buffered: a gp_singleton-based URCU flavor that accumulates retired
// pointers in an internal buffer and reclaims them in batches.
75 class general_buffered: public details::gp_singleton< general_buffered_tag >
78 typedef details::gp_singleton< general_buffered_tag > base_class;
// Public type aliases re-exporting the template parameters.
81 typedef general_buffered_tag rcu_tag ; ///< RCU tag
82 typedef Buffer buffer_type ; ///< Buffer type
83 typedef Lock lock_type ; ///< Lock type
84 typedef Backoff back_off ; ///< Back-off type
86 typedef base_class::thread_gc thread_gc ; ///< Thread-side RCU part
87 typedef typename thread_gc::scoped_lock scoped_lock ; ///< Access lock class
89 static bool const c_bBuffered = true ; ///< This RCU buffers disposed elements
// Singleton holder type shared with the other gp_singleton-based RCU flavors.
93 typedef details::gp_singleton_instance< rcu_tag > singleton_ptr;
// Data members (declarations of m_Buffer and m_Lock are among the lines lost
// from this extraction; both are referenced by the methods below).
99 atomics::atomic<uint64_t> m_nCurEpoch;
101 size_t const m_nCapacity;
105 /// Returns singleton instance
106 static general_buffered * instance()
108 return static_cast<general_buffered *>( base_class::instance() );
110 /// Checks if the singleton is created and ready to use
113 return singleton_ptr::s_pRCU != nullptr;
// Constructor: sizes the buffer and records the capacity threshold.
// (The m_nCurEpoch initializer line appears to be missing here - confirm
// against upstream.)
118 general_buffered( size_t nBufferCapacity )
119 : m_Buffer( nBufferCapacity )
121 , m_nCapacity( nBufferCapacity )
// Destructor body: flush every buffered pointer regardless of epoch by
// clearing up to the maximum possible epoch value.
126 clear_buffer( std::numeric_limits< uint64_t >::max());
// flip_and_wait wrapper: delegates to the base class with a back_off instance
// (the surrounding signature/declaration lines are missing from this view).
132 base_class::flip_and_wait( bkoff );
/// Frees buffered retired pointers whose epoch is not newer than \p nEpoch.
135 void clear_buffer( uint64_t nEpoch )
138 while ( m_Buffer.pop( p )) {
139 if ( p.m_nEpoch <= nEpoch ) {
// A popped item newer than nEpoch is returned to the buffer.  (The branch
// that frees items with p.m_nEpoch <= nEpoch is among the missing lines.)
143 push_buffer( std::move(p) );
149 // Return: true - synchronize has been called, false - otherwise
150 bool push_buffer( epoch_retired_ptr&& ep )
152 bool bPushed = m_Buffer.push( ep );
// Buffer is "full" when push() fails or size reaches the capacity threshold;
// per the comment above, that path invokes synchronize() (the call itself is
// among the missing lines).
153 if ( !bPushed || m_Buffer.size() >= capacity() ) {
165 /// Creates singleton object
// Doxygen body (its /** ... */ delimiters were lost in extraction):
167 The \p nBufferCapacity parameter defines RCU threshold.
169 static void Construct( size_t nBufferCapacity = 256 )
// Idempotent: only creates the singleton if it does not already exist.
// NOTE(review): no synchronization is visible here - presumably Construct is
// called once at startup before threads are attached; confirm upstream.
171 if ( !singleton_ptr::s_pRCU )
172 singleton_ptr::s_pRCU = new general_buffered( nBufferCapacity );
175 /// Destroys singleton object
176 static void Destruct( bool bDetachAll = false )
// Flush all remaining retired pointers before tearing down the singleton.
179 instance()->clear_buffer( std::numeric_limits< uint64_t >::max());
// When bDetachAll is set, detach every thread still registered with the RCU.
// (The guarding "if ( bDetachAll )" line and the delete of the instance are
// among the missing lines.)
181 instance()->m_ThreadList.detach_all();
183 singleton_ptr::s_pRCU = nullptr;
188 /// Retire \p p pointer
// Doxygen body (its /** ... */ delimiters were lost in extraction):
190 The method pushes \p p pointer to internal buffer.
191 When the buffer becomes full \ref synchronize function is called
192 to wait for the end of grace period and then to free all pointers from the buffer.
194 virtual void retire_ptr( retired_ptr& p )
// Stamp the retired pointer with the current epoch and buffer it; a relaxed
// load is sufficient here since ordering is enforced inside synchronize().
197 push_buffer( epoch_retired_ptr( p, m_nCurEpoch.load( atomics::memory_order_relaxed )));
200 /// Retires the pointer chain [\p itFirst, \p itLast)
201 template <typename ForwardIterator>
202 void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
// The epoch is sampled once for the whole batch.
204 uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
205 while ( itFirst != itLast ) {
206 epoch_retired_ptr ep( *itFirst, nEpoch );
// (The iterator increment line is missing from this extraction.)
208 push_buffer( std::move(ep) );
212 /// Retires the pointer chain until \p Func returns \p nullptr retired pointer
213 template <typename Func>
214 void batch_retire( Func e )
216 uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
// Generator loop: e() yields retired pointers until one with a null m_p.
// (The "p = e();" advancement line is missing from this extraction.)
217 for ( retired_ptr p{ e() }; p.m_p; ) {
218 epoch_retired_ptr ep( p, nEpoch );
220 push_buffer( std::move(ep));
224 /// Wait to finish a grace period and then clear the buffer
// Parameterless overload: builds a dummy (null) epoch_retired_ptr and
// delegates to synchronize( epoch_retired_ptr& ) below.
227 epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( atomics::memory_order_relaxed ));
232 bool synchronize( epoch_retired_ptr& ep )
// Acquire fence pairs with the release fence at the end of this function.
235 atomics::atomic_thread_fence( atomics::memory_order_acquire );
// Grace-period processing is serialized by m_Lock.
237 std::unique_lock<lock_type> sl( m_Lock );
// If the retired pointer still fits into the buffer there is nothing to do
// (the early "return false" line is among the missing lines).
238 if ( ep.m_p && m_Buffer.push( ep ) )
// Advance the global epoch; items stamped with epochs <= nEpoch become
// reclaimable once the grace period below has elapsed.
240 nEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_relaxed );
// (The flip_and_wait() grace-period calls are among the missing lines.)
244 clear_buffer( nEpoch );
245 atomics::atomic_thread_fence( atomics::memory_order_release );
250 /// Returns internal buffer capacity
251 size_t capacity() const
121 /// User-space general-purpose RCU with deferred (buffered) reclamation (stripped version)
// Doxygen body (its /** ... */ delimiters were lost in extraction):
122 @headerfile cds/urcu/general_buffered.h
123 This short version of \p general_buffered is intended for stripping debug info.
124 If you use \p %general_buffered with default template arguments you may use
125 this stripped version. All functionality of both classes are identical.
// NOTE(review): the class body (braces and any forwarding constructors) is
// missing from this extraction; restore from the upstream header.  The line
// below also carries a fused original line number ("265") like the rest of
// this corrupted region.
126 class general_buffered_stripped: public general_buffered<>
268 }} // namespace cds::urcu
270 #endif // #ifndef CDSLIB_URCU_DETAILS_GPB_H