//$$CDS-header$$

#ifndef CDSLIB_URCU_DETAILS_SIG_BUFFERED_H
#define CDSLIB_URCU_DETAILS_SIG_BUFFERED_H

#include <cds/urcu/details/sh.h>
#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED

#include <mutex>
#include <cds/algo/backoff_strategy.h>
#include <cds/container/vyukov_mpmc_cycle_queue.h>

namespace cds { namespace urcu {

    /// User-space signal-handled RCU with deferred (buffered) reclamation
    /**
        @headerfile cds/urcu/signal_buffered.h

        This URCU implementation contains an internal buffer where retired objects are
        accumulated. When the buffer becomes full, the RCU \p synchronize function is called;
        it waits until all reader/updater threads have left their read-side critical sections,
        i.e. until the RCU quiescent state is reached. After that, the buffer and all retired objects are freed.
        This synchronization cycle may be performed in any thread that calls the \p retire_ptr function.

        The \p Buffer contains items of \ref cds_urcu_retired_ptr "retired_ptr" type and it should support a queue interface with
        three functions:
        - <tt> bool push( retired_ptr& p ) </tt> - places the retired pointer \p p into the queue. If the function
            returns \p false, the buffer is full and the RCU synchronization cycle must be performed.
        - <tt>bool pop( retired_ptr& p ) </tt> - pops the queue's head item into \p p; if the queue is empty
            this function must return \p false.
        - <tt>size_t size()</tt> - returns the queue's item count.

        The buffer is considered full if \p push returns \p false or the buffer size reaches the RCU threshold.

        There is a wrapper \ref cds_urcu_signal_buffered_gc "gc<signal_buffered>" for the \p %signal_buffered class
        that provides the unified RCU interface. You should use this wrapper class instead of \p %signal_buffered.

        Template arguments:
        - \p Buffer - buffer type. Default is \p cds::container::VyukovMPMCCycleQueue
        - \p Lock - mutex type, default is \p std::mutex
        - \p Backoff - back-off strategy, default is \p cds::backoff::Default
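
        The following is a minimal usage sketch via the \p gc wrapper; \p foo, \p dispose_foo
        and the thread attach/detach calls are illustrative assumptions, not part of this header:
        \code
        #include <cds/init.h>
        #include <cds/threading/model.h>
        #include <cds/urcu/signal_buffered.h>

        typedef cds::urcu::gc< cds::urcu::signal_buffered<> > rcu_type;

        struct foo { int value; };
        void dispose_foo( foo* p ) { delete p; }   // hypothetical disposer

        int main()
        {
            cds::Initialize();
            {
                rcu_type theRCU;    // creates the signal_buffered singleton
                cds::threading::Manager::attachThread();

                foo* p = new foo;
                {
                    rcu_type::scoped_lock sl;   // read-side critical section
                    // ... access RCU-protected data here ...
                }
                rcu_type::retire_ptr( p, dispose_foo );    // freed after a grace period

                cds::threading::Manager::detachThread();
            }
            cds::Terminate();
            return 0;
        }
        \endcode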
    */
    template <
        class Buffer = cds::container::VyukovMPMCCycleQueue< epoch_retired_ptr >
        ,class Lock = std::mutex
        ,class Backoff = cds::backoff::Default
    >
    class signal_buffered: public details::sh_singleton< signal_buffered_tag >
    {
        //@cond
        typedef details::sh_singleton< signal_buffered_tag > base_class;
        //@endcond
    public:
        typedef signal_buffered_tag rcu_tag ;  ///< RCU tag
        typedef Buffer  buffer_type ;   ///< Buffer type
        typedef Lock    lock_type   ;   ///< Lock type
        typedef Backoff back_off    ;   ///< Back-off type

        typedef base_class::thread_gc thread_gc ;   ///< Thread-side RCU part
        typedef typename thread_gc::scoped_lock scoped_lock ; ///< Access lock class

        static bool const c_bBuffered = true ; ///< This RCU buffers disposed elements

    protected:
        //@cond
        typedef details::sh_singleton_instance< rcu_tag >    singleton_ptr;
        //@endcond

    protected:
        //@cond
        buffer_type               m_Buffer;       // buffer of retired pointers
        atomics::atomic<uint64_t> m_nCurEpoch;    // current epoch number
        lock_type                 m_Lock;         // guards the synchronization cycle
        size_t const              m_nCapacity;    // buffer capacity (the RCU threshold)
        //@endcond

    public:
        /// Returns singleton instance
        static signal_buffered * instance()
        {
            return static_cast<signal_buffered *>( base_class::instance() );
        }
        /// Checks if the singleton is created and ready to use
        static bool isUsed()
        {
            return singleton_ptr::s_pRCU != nullptr;
        }

    protected:
        //@cond
        signal_buffered( size_t nBufferCapacity, int nSignal = SIGUSR1 )
            : base_class( nSignal )
            , m_Buffer( nBufferCapacity )
            , m_nCurEpoch(0)
            , m_nCapacity( nBufferCapacity )
        {}

        ~signal_buffered()
        {
            // Free all remaining retired objects regardless of their epoch
            clear_buffer( (uint64_t) -1 );
        }

        // Frees all buffered retired objects whose epoch is not later than nEpoch
        void clear_buffer( uint64_t nEpoch )
        {
            epoch_retired_ptr p;
            while ( m_Buffer.pop( p )) {
                if ( p.m_nEpoch <= nEpoch ) {
                    CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
                    p.free();
                    CDS_TSAN_ANNOTATE_IGNORE_RW_END;
                }
                else {
                    // The item belongs to a newer epoch - return it to the buffer and stop
                    push_buffer( p );
                    break;
                }
            }
        }

        // Pushes ep to the buffer; returns true if a synchronization cycle was performed
        bool push_buffer( epoch_retired_ptr& ep )
        {
            bool bPushed = m_Buffer.push( ep );
            if ( !bPushed || m_Buffer.size() >= capacity() ) {
                synchronize();
                if ( !bPushed ) {
                    // The buffer was full: the grace period has elapsed in synchronize(),
                    // so the pointer can be freed right away
                    CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
                    ep.free();
                    CDS_TSAN_ANNOTATE_IGNORE_RW_END;
                }
                return true;
            }
            return false;
        }
        //@endcond

    public:
        /// Creates singleton object
        /**
            The \p nBufferCapacity parameter defines the RCU threshold.

            The \p nSignal parameter defines the signal number used by RCU; the default is \p SIGUSR1.
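
            A hedged sketch of direct singleton management (normally this is done by the
            \ref cds_urcu_signal_buffered_gc "gc<signal_buffered>" wrapper); the capacity
            and signal values below are illustrative:
            \code
            cds::urcu::signal_buffered<>::Construct( 1024, SIGUSR2 );
            assert( cds::urcu::signal_buffered<>::isUsed());
            // ... attach threads and work with RCU ...
            cds::urcu::signal_buffered<>::Destruct( true );
            \endcode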
        */
        static void Construct( size_t nBufferCapacity = 256, int nSignal = SIGUSR1 )
        {
            if ( !singleton_ptr::s_pRCU )
                singleton_ptr::s_pRCU = new signal_buffered( nBufferCapacity, nSignal );
        }

        /// Destroys singleton object
        /**
            If \p bDetachAll is \p true, all attached threads are detached from RCU
            before the singleton is destroyed.
        */
        static void Destruct( bool bDetachAll = false )
        {
            if ( isUsed() ) {
                instance()->clear_buffer( (uint64_t) -1 );
                if ( bDetachAll )
                    instance()->m_ThreadList.detach_all();
                delete instance();
                singleton_ptr::s_pRCU = nullptr;
            }
        }

    public:
        /// Retires the pointer \p p
        /**
            The method pushes the pointer \p p to the internal buffer.
            When the buffer becomes full, the \ref synchronize function is called
            to wait for the end of the grace period and then free all pointers from the buffer.
        */
        virtual void retire_ptr( retired_ptr& p )
        {
            if ( p.m_p ) {
                epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_relaxed ));
                push_buffer( ep );
            }
        }

        /// Retires the pointer chain [\p itFirst, \p itLast)
        /**
            Each item of the range is stamped with the current epoch and pushed to the
            internal buffer; pushing any item may trigger a \p synchronize cycle.
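
            A minimal sketch of batch retirement; \p dispose and the pointers \p p1, \p p2
            are illustrative assumptions:
            \code
            void dispose( void* p );   // hypothetical disposer

            cds::urcu::retired_ptr chain[2] = {
                cds::urcu::retired_ptr( p1, dispose ),
                cds::urcu::retired_ptr( p2, dispose )
            };
            cds::urcu::signal_buffered<>::instance()->batch_retire( chain, chain + 2 );
            \endcode
        */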
        template <typename ForwardIterator>
        void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
        {
            uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
            while ( itFirst != itLast ) {
                epoch_retired_ptr ep( *itFirst, nEpoch );
                ++itFirst;
                push_buffer( ep );
            }
        }

        /// Waits until a grace period finishes and then clears the buffer
        /**
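            A hedged sketch of forcing an explicit flush; it assumes the singleton has been
            constructed via \p Construct or the \p gc wrapper:
            \code
            typedef cds::urcu::signal_buffered<> sig_buffered_rcu;
            if ( sig_buffered_rcu::isUsed())
                sig_buffered_rcu::instance()->synchronize();   // wait for a grace period, free the buffer
            \endcode
        */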
        void synchronize()
        {
            epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( atomics::memory_order_relaxed ));
            synchronize( ep );
        }

        //@cond
        bool synchronize( epoch_retired_ptr& ep )
        {
            uint64_t nEpoch;
            atomics::atomic_thread_fence( atomics::memory_order_acquire );
            {
                std::unique_lock<lock_type> sl( m_Lock );
                // If ep fits into the buffer and the threshold is not reached,
                // the grace-period wait can be skipped
                if ( ep.m_p && m_Buffer.push( ep ) && m_Buffer.size() < capacity())
                    return false;
                nEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_relaxed );

                // Double epoch flip: after two switch/wait rounds every reader that could
                // observe the retired objects has left its read-side critical section
                back_off bkOff;
                base_class::force_membar_all_threads( bkOff );
                base_class::switch_next_epoch();
                bkOff.reset();
                base_class::wait_for_quiescent_state( bkOff );
                base_class::switch_next_epoch();
                bkOff.reset();
                base_class::wait_for_quiescent_state( bkOff );
                base_class::force_membar_all_threads( bkOff );
            }

            clear_buffer( nEpoch );
            return true;
        }
        //@endcond

        /// Returns the threshold (capacity) of the internal buffer
        size_t capacity() const
        {
            return m_nCapacity;
        }

        /// Returns the signal number used by RCU
        int signal_no() const
        {
            return base_class::signal_no();
        }
    };

}} // namespace cds::urcu

#endif // #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#endif // #ifndef CDSLIB_URCU_DETAILS_SIG_BUFFERED_H