3 #ifndef CDSLIB_URCU_DETAILS_SIG_BUFFERED_H
4 #define CDSLIB_URCU_DETAILS_SIG_BUFFERED_H
6 #include <cds/urcu/details/sh.h>
7 #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
11 #include <cds/algo/backoff_strategy.h>
12 #include <cds/container/vyukov_mpmc_cycle_queue.h>
14 namespace cds { namespace urcu {
16 /// User-space signal-handled RCU with deferred (buffered) reclamation
18 @headerfile cds/urcu/signal_buffered.h
20 This URCU implementation contains an internal buffer where retired objects are
21 accumulated. When the buffer becomes full, the RCU \p synchronize function is called
22 that waits until all reader/updater threads have left their read-side critical sections,
23 i.e. until the RCU quiescent state is reached. After that, the buffer and all retired objects are freed.
24 This synchronization cycle may be called in any thread that calls \p retire_ptr function.
26 The \p Buffer contains items of \ref cds_urcu_retired_ptr "retired_ptr" type and it should support a queue interface with
28 - <tt> bool push( retired_ptr& p ) </tt> - places the retired pointer \p p into queue. If the function
29 returns \p false it means that the buffer is full and RCU synchronization cycle must be processed.
30 - <tt>bool pop( retired_ptr& p ) </tt> - pops queue's head item into \p p parameter; if the queue is empty
31 this function must return \p false
32 - <tt>size_t size()</tt> - returns queue's item count.
34 The buffer is considered as full if \p push returns \p false or the buffer size reaches the RCU threshold.
36 There is a wrapper \ref cds_urcu_signal_buffered_gc "gc<signal_buffered>" for \p %signal_buffered class
37 that provides a unified RCU interface. You should use this wrapper class instead of \p %signal_buffered
40 - \p Buffer - buffer type. Default is cds::container::VyukovMPMCCycleQueue
41 - \p Lock - mutex type, default is \p std::mutex
42 - \p Backoff - back-off schema, default is cds::backoff::Default
// Template parameters (defaults shown):
//   Buffer  - MPMC cyclic queue of epoch_retired_ptr used as the retire buffer
//   Lock    - mutex type guarding the synchronization cycle (see synchronize())
//   Backoff - back-off strategy used while waiting for the quiescent state
45 class Buffer = cds::container::VyukovMPMCCycleQueue< epoch_retired_ptr >
46 ,class Lock = std::mutex
47 ,class Backoff = cds::backoff::Default
// Singleton: one RCU instance per rcu_tag, managed via the sh_singleton base
49 class signal_buffered: public details::sh_singleton< signal_buffered_tag >
52 typedef details::sh_singleton< signal_buffered_tag > base_class;
55 typedef signal_buffered_tag rcu_tag ; ///< RCU tag
56 typedef Buffer buffer_type ; ///< Buffer type
57 typedef Lock lock_type ; ///< Lock type
58 typedef Backoff back_off ; ///< Back-off type
60 typedef base_class::thread_gc thread_gc ; ///< Thread-side RCU part
61 typedef typename thread_gc::scoped_lock scoped_lock ; ///< Access lock class
63 static bool const c_bBuffered = true ; ///< This RCU buffers disposed elements
// Access to the raw singleton pointer (s_pRCU) shared with Construct()/Destruct()
67 typedef details::sh_singleton_instance< rcu_tag > singleton_ptr;
// Current epoch: read (relaxed) when tagging retired pointers, incremented in synchronize()
73 atomics::atomic<uint64_t> m_nCurEpoch;
// Buffer threshold fixed at construction; reaching it triggers a synchronization cycle
75 size_t const m_nCapacity;
79 /// Returns singleton instance
80 static signal_buffered * instance()
// Downcast is safe: Construct() only ever stores a signal_buffered object in s_pRCU
82 return static_cast<signal_buffered *>( base_class::instance() );
84 /// Checks if the singleton is created and ready to use
// True after Construct() has run and before Destruct() resets the pointer
87 return singleton_ptr::s_pRCU != nullptr;
// Constructor: forwards the RCU signal number (default SIGUSR1) to the base
// singleton and sizes the retire buffer; m_nCapacity keeps the threshold.
92 signal_buffered( size_t nBufferCapacity, int nSignal = SIGUSR1 )
93 : base_class( nSignal )
94 , m_Buffer( nBufferCapacity )
96 , m_nCapacity( nBufferCapacity )
// Destructor body (enclosing dtor elided from this view): flush everything —
// the max() epoch satisfies the p.m_nEpoch <= nEpoch test in clear_buffer()
// for every buffered item.
101 clear_buffer( std::numeric_limits< uint64_t >::max() );
// Drains the buffer: items whose epoch is <= nEpoch have finished their grace
// period and are disposed (disposal call elided from this view); newer items
// are put back through the common buffering path.
104 void clear_buffer( uint64_t nEpoch )
107 while ( m_Buffer.pop( p )) {
108 if ( p.m_nEpoch <= nEpoch ) {
// Item belongs to a later epoch — its grace period has not elapsed; requeue it
112 push_buffer( std::move(p) );
// Buffers one retired pointer. Per the class doc, the buffer counts as full
// when push() fails OR the size reaches the capacity() threshold — that case
// starts a synchronization cycle (branch body elided from this view).
118 bool push_buffer( epoch_retired_ptr&& ep )
120 bool bPushed = m_Buffer.push( ep );
121 if ( !bPushed || m_Buffer.size() >= capacity() ) {
133 /// Creates singleton object
135 The \p nBufferCapacity parameter defines RCU threshold.
137 The \p nSignal parameter defines a signal number stated for RCU, default is \p SIGUSR1
139 static void Construct( size_t nBufferCapacity = 256, int nSignal = SIGUSR1 )
// Idempotent: a repeat call while the singleton exists is a no-op.
// NOTE(review): check-then-create is not atomic — concurrent first calls would
// race; presumably Construct() is invoked once during startup. Confirm.
141 if ( !singleton_ptr::s_pRCU )
142 singleton_ptr::s_pRCU = new signal_buffered( nBufferCapacity, nSignal );
145 /// Destroys singleton object
146 static void Destruct( bool bDetachAll = false )
// Dispose everything still buffered, regardless of epoch
149 instance()->clear_buffer( std::numeric_limits< uint64_t >::max());
// presumably executed only when bDetachAll is true (guard elided from this view) — verify
151 instance()->m_ThreadList.detach_all();
// Drop the singleton pointer; deletion of the instance is elided from this view
153 singleton_ptr::s_pRCU = nullptr;
158 /// Retire \p p pointer
160 The method pushes \p p pointer to internal buffer.
161 When the buffer becomes full \ref synchronize function is called
162 to wait for the end of grace period and then to free all pointers from the buffer.
// Tag the pointer with the current epoch before buffering it; the relaxed
// load is presumably sufficient because synchronize() provides the ordering — confirm.
164 virtual void retire_ptr( retired_ptr& p )
167 push_buffer( epoch_retired_ptr( p, m_nCurEpoch.load( atomics::memory_order_relaxed )));
170 /// Retires the pointer chain [\p itFirst, \p itLast)
171 template <typename ForwardIterator>
172 void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
// Read the epoch once for the whole batch — all items share one grace period
174 uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
175 while ( itFirst != itLast ) {
176 epoch_retired_ptr ep( *itFirst, nEpoch );
// Each item goes through the common buffering path (iterator increment elided from this view)
178 push_buffer( std::move(ep));
182 /// Retires the pointer chain until \p Func returns \p nullptr retired pointer
183 template <typename Func>
184 void batch_retire( Func e )
// Single epoch for the whole generated chain, as in the iterator overload
186 uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
// Loop while the generator yields a retired_ptr with a non-null payload
// (re-invocation of e() is elided from this view)
187 for ( retired_ptr p{ e() }; p.m_p; ) {
188 epoch_retired_ptr ep( p, nEpoch );
190 push_buffer( std::move(ep));
194 /// Wait to finish a grace period and then clear the buffer
// Builds a dummy item (null retired_ptr) tagged with the current epoch;
// presumably forwarded to synchronize(epoch_retired_ptr&) below — confirm,
// the forwarding call is elided from this view.
197 epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( atomics::memory_order_relaxed ));
// Runs one RCU synchronization cycle for the given buffered item.
// A null ep.m_p marks the dummy item produced by the no-argument synchronize().
202 bool synchronize( epoch_retired_ptr& ep )
// NOTE(review): acquire fence — presumably pairs with releases on the retire
// path before the buffer/epoch state is inspected; confirm.
205 atomics::atomic_thread_fence( atomics::memory_order_acquire );
// One synchronization cycle at a time; the lock also covers the buffer flush below
207 std::unique_lock<lock_type> sl( m_Lock );
// Fast path: a real pointer that still fits under the threshold is merely
// buffered (the early-out branch body is elided from this view)
208 if ( ep.m_p && m_Buffer.push( ep ) && m_Buffer.size() < capacity())
// Open a new epoch; pointers retired from now on belong to the next grace period
210 nEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_relaxed );
// Grace-period protocol: broadcast memory barriers to all attached threads,
// then two switch/wait rounds over the reader epochs before reclaiming
213 base_class::force_membar_all_threads( bkOff );
214 base_class::switch_next_epoch();
216 base_class::wait_for_quiescent_state( bkOff );
217 base_class::switch_next_epoch();
219 base_class::wait_for_quiescent_state( bkOff );
220 base_class::force_membar_all_threads( bkOff );
// Grace period over: everything retired up to and including nEpoch can be freed
223 clear_buffer( nEpoch );
228 /// Returns the threshold of internal buffer
// Body elided from this view — presumably returns m_nCapacity (set in the ctor); confirm.
229 size_t capacity() const
234 /// Returns the signal number stated for RCU
// Delegates to the sh_singleton base, which received the signal in the ctor
235 int signal_no() const
237 return base_class::signal_no();
241 }} // namespace cds::urcu
243 #endif // #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
244 #endif // #ifndef CDSLIB_URCU_DETAILS_SIG_BUFFERED_H