//$$CDS-header$$

#ifndef _CDS_URCU_DETAILS_GPB_H
#define _CDS_URCU_DETAILS_GPB_H

#include <cds/urcu/details/gp.h>
#include <cds/algo/backoff_strategy.h>
#include <cds/container/vyukov_mpmc_cycle_queue.h>

#include <cds/details/std/mutex.h>

namespace cds { namespace urcu {

    /// User-space general-purpose RCU with deferred (buffered) reclamation
    /**
        @headerfile cds/urcu/general_buffered.h

        This URCU implementation accumulates retired objects in an internal buffer.
        When the buffer becomes full, the RCU \p synchronize function is called:
        it waits until all reader/updater threads have left their read-side critical sections,
        i.e. until the RCU quiescent state is reached, and then frees the buffer and all retired objects.
        This synchronization cycle may run in any thread that calls the \p retire_ptr function.

        The \p Buffer stores items of \p epoch_retired_ptr type - a \ref cds_urcu_retired_ptr "retired_ptr"
        stamped with the epoch in which it was retired - and should support a queue interface with
        three functions (see the adapter sketch after this list):
        - <tt>bool push( epoch_retired_ptr& p )</tt> - places the retired pointer \p p into the queue. If the function
            returns \p false, the buffer is full and an RCU synchronization cycle must be performed.
        - <tt>bool pop( epoch_retired_ptr& p )</tt> - pops the queue's head item into \p p; if the queue is empty,
            the function must return \p false.
        - <tt>size_t size()</tt> - returns the queue's item count.

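        For illustration, a minimal adapter that satisfies this interface might look like
        the sketch below. \p RetiredBuffer is a hypothetical name used only here; the default
        \p VyukovMPMCCycleQueue already provides all three members:
        \code
        struct RetiredBuffer    // hypothetical, for illustration only
        {
            explicit RetiredBuffer( size_t nCapacity );

            // Returns false when the buffer is full, which triggers a synchronization cycle
            bool push( cds::urcu::epoch_retired_ptr& p );

            // Returns false when the buffer is empty
            bool pop( cds::urcu::epoch_retired_ptr& p );

            // Returns the current number of buffered items
            size_t size() const;
        };
        \endcode
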
        The buffer is considered full if \p push returns \p false or if the buffer size reaches the RCU threshold.

        There is a wrapper \ref cds_urcu_general_buffered_gc "gc<general_buffered>" for the \p %general_buffered class
        that provides the unified RCU interface. You should use this wrapper class instead of \p %general_buffered directly.

        Template arguments:
        - \p Buffer - buffer type. Default is \p cds::container::VyukovMPMCCycleQueue
        - \p Lock - mutex type, default is \p std::mutex
        - \p Backoff - back-off strategy, default is \p cds::backoff::Default
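
        A minimal usage sketch (assuming the standard libcds initialization sequence;
        thread attach/detach is shown explicitly for clarity):
        \code
        #include <cds/urcu/general_buffered.h>

        typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_gpb;

        int main()
        {
            cds::Initialize();
            {
                rcu_gpb gpbRCU;     // creates the general_buffered singleton

                cds::threading::Manager::attachThread();
                {
                    rcu_gpb::scoped_lock sl;    // read-side critical section
                    // ... access RCU-protected data here ...
                }
                cds::threading::Manager::detachThread();
            }
            cds::Terminate();
            return 0;
        }
        \endcode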
    */
    template <
        class Buffer = cds::container::VyukovMPMCCycleQueue<
            epoch_retired_ptr
            ,cds::opt::buffer< cds::opt::v::dynamic_buffer< epoch_retired_ptr > >
        >
        ,class Lock = cds_std::mutex
        ,class Backoff = cds::backoff::Default
    >
    class general_buffered: public details::gp_singleton< general_buffered_tag >
    {
        //@cond
        typedef details::gp_singleton< general_buffered_tag > base_class;
        //@endcond
    public:
        typedef general_buffered_tag rcu_tag ;  ///< RCU tag
        typedef Buffer  buffer_type ;   ///< Buffer type
        typedef Lock    lock_type   ;   ///< Lock type
        typedef Backoff back_off    ;   ///< Back-off type

        typedef base_class::thread_gc thread_gc ;   ///< Thread-side RCU part
        typedef typename thread_gc::scoped_lock scoped_lock ; ///< Access lock class

        static bool const c_bBuffered = true ; ///< This RCU buffers disposed elements

    protected:
        //@cond
        typedef details::gp_singleton_instance< rcu_tag >    singleton_ptr;
        //@endcond

    protected:
        //@cond
        buffer_type                     m_Buffer;
        CDS_ATOMIC::atomic<uint64_t>    m_nCurEpoch;
        lock_type                       m_Lock;
        size_t const                    m_nCapacity;
        //@endcond

    public:
        /// Returns singleton instance
        static general_buffered * instance()
        {
            return static_cast<general_buffered *>( base_class::instance() );
        }
        /// Checks if the singleton is created and ready to use
        static bool isUsed()
        {
            return singleton_ptr::s_pRCU != nullptr;
        }

    protected:
        //@cond
        general_buffered( size_t nBufferCapacity )
            : m_Buffer( nBufferCapacity )
            , m_nCurEpoch(0)
            , m_nCapacity( nBufferCapacity )
        {}

        ~general_buffered()
        {
            // Free everything that is still buffered, regardless of its epoch
            clear_buffer( (uint64_t) -1 );
        }

        // One grace-period phase: flip the global control word and wait for readers
        void flip_and_wait()
        {
            back_off bkoff;
            base_class::flip_and_wait( bkoff );
        }

        // Frees all buffered items retired in epoch nEpoch or earlier
        void clear_buffer( uint64_t nEpoch )
        {
            epoch_retired_ptr p;
            while ( m_Buffer.pop( p )) {
                if ( p.m_nEpoch <= nEpoch )
                    p.free();
                else {
                    // The item was retired in a later epoch - return it
                    // to the buffer and stop
                    push_buffer( p );
                    break;
                }
            }
        }

        // Returns true if a synchronization cycle has been executed, false otherwise
        bool push_buffer( epoch_retired_ptr& ep )
        {
            bool bPushed = m_Buffer.push( ep );
            if ( !bPushed || m_Buffer.size() >= capacity() ) {
                synchronize();
                if ( !bPushed ) {
                    // ep could not be placed into the full buffer; the grace
                    // period has already elapsed, so free it immediately
                    ep.free();
                }
                return true;
            }
            return false;
        }
        //@endcond

    public:
        /// Creates the singleton object
        /**
            The \p nBufferCapacity parameter defines the RCU threshold: when the number of
            buffered retired objects reaches this value, a synchronization cycle is started.
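
            A lifecycle sketch; \p Construct / \p Destruct are usually invoked indirectly
            through the \ref cds_urcu_general_buffered_gc "gc<general_buffered>" wrapper:
            \code
            cds::urcu::general_buffered<>::Construct( 256 );
            // ... create containers, attach threads, run the workload ...
            cds::urcu::general_buffered<>::Destruct( true );
            \endcode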
        */
        static void Construct( size_t nBufferCapacity = 256 )
        {
            if ( !singleton_ptr::s_pRCU )
                singleton_ptr::s_pRCU = new general_buffered( nBufferCapacity );
        }

        /// Destroys the singleton object; if \p bDetachAll is \p true, all attached threads are detached first
        static void Destruct( bool bDetachAll = false )
        {
            if ( isUsed() ) {
                instance()->clear_buffer( (uint64_t) -1 );
                if ( bDetachAll )
                    instance()->m_ThreadList.detach_all();
                delete instance();
                singleton_ptr::s_pRCU = nullptr;
            }
        }

    public:
        /// Retires the pointer \p p
        /**
            The method pushes the pointer \p p into the internal buffer.
            When the buffer becomes full, the \ref synchronize function is called
            to wait for the end of the grace period and then free all pointers from the buffer.
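
            A sketch; \p Foo, \p fooDisposer and \p pFoo are hypothetical:
            \code
            struct Foo { /* ... */ };
            void fooDisposer( void * p ) { delete static_cast<Foo *>( p ); }

            // pFoo has already been unlinked from its container
            cds::urcu::retired_ptr rp( pFoo, fooDisposer );
            cds::urcu::general_buffered<>::instance()->retire_ptr( rp );
            \endcode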
        */
        virtual void retire_ptr( retired_ptr& p )
        {
            if ( p.m_p ) {
                // Stamp the pointer with the current epoch and buffer it
                epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ));
                push_buffer( ep );
            }
        }

        /// Retires the pointer chain [\p itFirst, \p itLast)
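        /**
            Each pointer in the chain is stamped with the current epoch and pushed into the buffer.
            A sketch (\p fooDisposer is the hypothetical disposer from the example above):
            \code
            std::vector<cds::urcu::retired_ptr> chain;
            chain.push_back( cds::urcu::retired_ptr( pFoo1, fooDisposer ));
            chain.push_back( cds::urcu::retired_ptr( pFoo2, fooDisposer ));
            cds::urcu::general_buffered<>::instance()->batch_retire( chain.begin(), chain.end() );
            \endcode
        */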
        template <typename ForwardIterator>
        void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
        {
            uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed );
            while ( itFirst != itLast ) {
                epoch_retired_ptr ep( *itFirst, nEpoch );
                ++itFirst;
                push_buffer( ep );
            }
        }

        /// Waits for a grace period to finish and then clears the buffer
        void synchronize()
        {
            epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ));
            synchronize( ep );
        }

        //@cond
        bool synchronize( epoch_retired_ptr& ep )
        {
            uint64_t nEpoch;
            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
            {
                cds::lock::scoped_lock<lock_type> sl( m_Lock );
                if ( ep.m_p && m_Buffer.push( ep ) )
                    return false;   // the buffer accepted the item - no synchronization needed yet
                nEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
                // Two grace-period phases are required: a reader may fetch the old value
                // of the control word just before the first flip, so a single phase cannot
                // guarantee that all pre-existing read-side critical sections have finished.
                flip_and_wait();
                flip_and_wait();
            }
            clear_buffer( nEpoch );
            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
            return true;
        }
        //@endcond

        /// Returns internal buffer capacity
        size_t capacity() const
        {
            return m_nCapacity;
        }
    };

}} // namespace cds::urcu

#endif // #ifndef _CDS_URCU_DETAILS_GPB_H