#ifndef CDSLIB_URCU_DETAILS_SIG_BUFFERED_H
#define CDSLIB_URCU_DETAILS_SIG_BUFFERED_H

#include <cds/urcu/details/sh.h>
#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED

#include <mutex>
#include <cds/algo/backoff_strategy.h>
#include <cds/container/vyukov_mpmc_cycle_queue.h>
namespace cds { namespace urcu {
    /// User-space signal-handled RCU with deferred (buffered) reclamation
    /**
        @headerfile cds/urcu/signal_buffered.h

        This URCU implementation contains an internal buffer where retired objects are
        accumulated. When the buffer becomes full, the RCU \p synchronize function is called
        that waits until all reader/updater threads leave their read-side critical sections,
        i.e. until the RCU quiescent state comes. After that, the buffer and all retired objects are freed.
        This synchronization cycle may be processed in any thread that calls the \p retire_ptr function.

        The \p Buffer contains items of \ref cds_urcu_retired_ptr "epoch_retired_ptr" type and it should support a queue interface with
        three functions:
        - <tt> bool push( epoch_retired_ptr& p ) </tt> - places the retired pointer \p p into the queue. If the function
            returns \p false, the buffer is full and an RCU synchronization cycle must be processed.
        - <tt>bool pop( epoch_retired_ptr& p ) </tt> - pops the queue's head item into \p p; if the queue is empty,
            this function must return \p false.
        - <tt>size_t size()</tt> - returns the queue's item count.

        The buffer is considered full if \p push returns \p false or the buffer size reaches the RCU threshold.

        There is a wrapper \ref cds_urcu_signal_buffered_gc "gc<signal_buffered>" for the \p %signal_buffered class
        that provides the unified RCU interface. You should use this wrapper class instead of \p %signal_buffered.

        Template arguments:
        - \p Buffer - buffer type. Default is \p cds::container::VyukovMPMCCycleQueue
        - \p Lock - mutex type, default is \p std::mutex
        - \p Backoff - back-off schema, default is \p cds::backoff::Default
    */
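    /*
        A minimal sketch of a conforming Buffer type (illustrative only; the default
        cds::container::VyukovMPMCCycleQueue already provides this interface, and
        the name my_retired_buffer is hypothetical):

            struct my_retired_buffer {
                bool   push( cds::urcu::epoch_retired_ptr& p ); // false - buffer is full
                bool   pop( cds::urcu::epoch_retired_ptr& p );  // false - buffer is empty
                size_t size() const;                            // current item count
            };
    */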
    template <
        class Buffer = cds::container::VyukovMPMCCycleQueue< epoch_retired_ptr >
        ,class Lock = std::mutex
        ,class Backoff = cds::backoff::Default
    >
    class signal_buffered: public details::sh_singleton< signal_buffered_tag >
    {
        //@cond
        typedef details::sh_singleton< signal_buffered_tag > base_class;
        //@endcond
    public:
        typedef signal_buffered_tag rcu_tag ;   ///< RCU tag
        typedef Buffer  buffer_type ;   ///< Buffer type
        typedef Lock    lock_type   ;   ///< Lock type
        typedef Backoff back_off    ;   ///< Back-off type

        typedef base_class::thread_gc thread_gc ;   ///< Thread-side RCU part
        typedef typename thread_gc::scoped_lock scoped_lock ; ///< Access lock class

        static bool const c_bBuffered = true ; ///< This RCU buffers disposed elements
    protected:
        //@cond
        typedef details::sh_singleton_instance< rcu_tag > singleton_ptr;

        buffer_type               m_Buffer;
        atomics::atomic<uint64_t> m_nCurEpoch;
        lock_type                 m_Lock;
        size_t const              m_nCapacity;
        //@endcond
    public:
        /// Returns singleton instance
        static signal_buffered * instance()
        {
            return static_cast<signal_buffered *>( base_class::instance() );
        }

        /// Checks if the singleton is created and ready to use
        static bool isUsed()
        {
            return singleton_ptr::s_pRCU != nullptr;
        }
    protected:
        //@cond
        signal_buffered( size_t nBufferCapacity, int nSignal = SIGUSR1 )
            : base_class( nSignal )
            , m_Buffer( nBufferCapacity )
            , m_nCurEpoch( 0 )
            , m_nCapacity( nBufferCapacity )
        {}

        ~signal_buffered()
        {
            clear_buffer( (uint64_t) -1 );
        }
        void clear_buffer( uint64_t nEpoch )
        {
            epoch_retired_ptr p;
            while ( m_Buffer.pop( p )) {
                if ( p.m_nEpoch <= nEpoch ) {
                    // The pointer was retired in epoch nEpoch or earlier - free it
                    CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
                    p.free();
                    CDS_TSAN_ANNOTATE_IGNORE_RW_END;
                }
                else {
                    // The pointer belongs to a newer epoch - return it to the buffer
                    push_buffer( p );
                    break;
                }
            }
        }
        bool push_buffer( epoch_retired_ptr& ep )
        {
            bool bPushed = m_Buffer.push( ep );
            if ( !bPushed || m_Buffer.size() >= capacity() ) {
                synchronize();
                if ( !bPushed ) {
                    // The buffer was full - free the pointer that could not be queued
                    CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
                    ep.free();
                    CDS_TSAN_ANNOTATE_IGNORE_RW_END;
                }
                return true;
            }
            return false;
        }
        //@endcond
    public:
        /// Creates singleton object
        /**
            The \p nBufferCapacity parameter defines the RCU threshold.

            The \p nSignal parameter defines the signal number reserved for RCU, default is \p SIGUSR1.
        */
        static void Construct( size_t nBufferCapacity = 256, int nSignal = SIGUSR1 )
        {
            if ( !singleton_ptr::s_pRCU )
                singleton_ptr::s_pRCU = new signal_buffered( nBufferCapacity, nSignal );
        }
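        /*
            A minimal lifecycle sketch for direct use of the singleton (most applications
            use the gc<signal_buffered> wrapper instead, which performs these calls
            automatically; see the example after the class):

                cds::urcu::signal_buffered<>::Construct( 256, SIGUSR1 );
                // ... attach threads, retire pointers ...
                cds::urcu::signal_buffered<>::Destruct( true );
        */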
        /// Destroys singleton object
        static void Destruct( bool bDetachAll = false )
        {
            if ( isUsed() ) {
                instance()->clear_buffer( (uint64_t) -1 );
                if ( bDetachAll )
                    instance()->m_ThreadList.detach_all();
                delete instance();
                singleton_ptr::s_pRCU = nullptr;
            }
        }
    public:
        /// Retires \p p pointer
        /**
            The method pushes \p p pointer to the internal buffer.
            When the buffer becomes full, the \ref synchronize function is called
            to wait for the end of the grace period and then free all pointers from the buffer.
        */
        virtual void retire_ptr( retired_ptr& p )
        {
            if ( p.m_p ) {
                epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_relaxed ));
                push_buffer( ep );
            }
        }
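        /*
            Sketch: retiring a single node; the node pointer pNode and the dispose
            function free_node are hypothetical:

                cds::urcu::retired_ptr p( pNode, free_node );
                cds::urcu::signal_buffered<>::instance()->retire_ptr( p );
        */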
        /// Retires the pointer chain [\p itFirst, \p itLast)
        template <typename ForwardIterator>
        void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
        {
            uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
            while ( itFirst != itLast ) {
                epoch_retired_ptr ep( *itFirst, nEpoch );
                ++itFirst;
                push_buffer( ep );
            }
        }
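        /*
            Sketch: retiring several pointers at once from a plain array; n1, n2
            and free_node are hypothetical:

                cds::urcu::retired_ptr arr[] = {
                    cds::urcu::retired_ptr( n1, free_node ),
                    cds::urcu::retired_ptr( n2, free_node )
                };
                cds::urcu::signal_buffered<>::instance()->batch_retire( arr, arr + 2 );
        */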
        /// Waits to finish a grace period and then clears the buffer
        void synchronize()
        {
            epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( atomics::memory_order_relaxed ));
            synchronize( ep );
        }
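        /*
            Sketch: forcing a grace period explicitly, e.g. before tearing down a
            structure that readers may still be traversing:

                cds::urcu::signal_buffered<>::instance()->synchronize();
                // all read-side critical sections that started before this call
                // have finished; the buffered retired pointers have been freed
        */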
        //@cond
        bool synchronize( epoch_retired_ptr& ep )
        {
            uint64_t nEpoch;
            atomics::atomic_thread_fence( atomics::memory_order_acquire );
            {
                std::unique_lock<lock_type> sl( m_Lock );
                if ( ep.m_p && m_Buffer.push( ep ) && m_Buffer.size() < capacity())
                    return false;
                nEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_relaxed );

                // Grace-period detection: flip the epoch twice, waiting for all
                // attached threads to pass a quiescent state after each flip
                back_off bkOff;
                base_class::force_membar_all_threads( bkOff );
                base_class::switch_next_epoch();
                bkOff.reset();
                base_class::wait_for_quiescent_state( bkOff );
                base_class::switch_next_epoch();
                bkOff.reset();
                base_class::wait_for_quiescent_state( bkOff );
                base_class::force_membar_all_threads( bkOff );
            }

            clear_buffer( nEpoch );
            return true;
        }
        //@endcond
        /// Returns the threshold of the internal buffer
        size_t capacity() const
        {
            return m_nCapacity;
        }

        /// Returns the signal number reserved for RCU
        int signal_no() const
        {
            return base_class::signal_no();
        }
    };
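    /*
        A typical application-level sketch, assuming the gc<signal_buffered> wrapper
        whose constructor/destructor call Construct()/Destruct():

            #include <cds/init.h>
            #include <cds/urcu/signal_buffered.h>

            int main() {
                cds::Initialize();
                {
                    typedef cds::urcu::gc< cds::urcu::signal_buffered<> > rcu_type;
                    rcu_type theRCU;    // calls Construct( 256, SIGUSR1 )

                    // attach threads and use RCU-based containers here
                }                       // ~gc() calls Destruct()
                cds::Terminate();
            }
    */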
}} // namespace cds::urcu

#endif // #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#endif // #ifndef CDSLIB_URCU_DETAILS_SIG_BUFFERED_H