Adds writer test case for RCU
folly/stress-test/stress-parallel-folly-sync.cpp
#include "sync_test.h"

namespace folly_test {

class FollySyncTest_Parallel: public cds_test::stress_fixture {
 protected:
  static size_t s_nThreadCount;
  // Data protected by the locks under test.
  static size_t locked_data;
  static std::atomic<RcuData*> rcu_data;
  // For RCU we benchmark two scenarios:
  // (1) Readers: a writer thread runs nonstop until every reader thread has
  // completed a fixed number of reads.
  // (2) Writers: several reader threads run nonstop until the writer thread
  // has completed a fixed number of writes.
  static std::atomic_uint rcu_readers_num;
  static std::atomic_uint rcu_writers_num;
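  // The counters double as termination signals: threads running a fixed pass
  // count decrement them when finished, while the "nonstop" threads poll them
  // to decide when to stop.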
  // MicroLock
  static size_t s_nMicroLockPassCount;
  // MicroSpinLock
  static size_t s_nMicroSpinLockPassCount;
  // PicoSpinLock
  static size_t s_nPicoSpinLockPassCount;
  // SharedMutex
  static size_t s_nSharedMutexPassCount;
  // RWSpinLock
  static size_t s_nRWSpinLockPassCount;
  // RWTicketSpinLock
  static size_t s_nRWTicketSpinLockPassCount;
  // RCU
  static size_t s_nRcuReaderPassCount;
  static size_t s_nRcuWriterPassCount;
  static size_t s_nRcuWriterFrequency;

  static unsigned s_nSharedMutexWritePercentage;
  static unsigned s_nRWSpinLockWritePercentage;
  static unsigned s_nRWTicketSpinLockWritePercentage;

  static void SetUpTestCase() {
    const cds_test::config& cfg = get_config("ParallelFollySync");
    GetConfigNonZeroExpected(ThreadCount, 4);
    GetConfigNonZeroExpected(MicroLockPassCount, 2000000000);
    GetConfigNonZeroExpected(MicroSpinLockPassCount, 1500000000);
    GetConfigNonZeroExpected(PicoSpinLockPassCount, 2700000000);
    GetConfigNonZeroExpected(SharedMutexPassCount, 5000000);
    GetConfigNonZeroExpected(RWSpinLockPassCount, 5000000);
    GetConfigNonZeroExpected(RWTicketSpinLockPassCount, 5000000);
    GetConfigNonZeroExpected(RcuReaderPassCount, 10000);
    // By default the writer performs one update every 100 ms.
    GetConfigNonZeroExpected(RcuWriterFrequency, 100);
    GetConfigNonZeroExpected(RcuWriterPassCount, 500);

    GetConfigNonZeroExpected(SharedMutexWritePercentage, 5);
    GetConfigNonZeroExpected(RWSpinLockWritePercentage, 5);
    GetConfigNonZeroExpected(RWTicketSpinLockWritePercentage, 5);

    rcu_data.store(new RcuData(), std::memory_order_relaxed);
  }

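  // Writer-side update, copy-on-write style: copy the current RcuData, bump
  // its counters and publish the new pointer. rcu_write_sync() then calls
  // folly::synchronize_rcu(), which blocks until all pre-existing RCU read
  // regions have exited, so the old object can be deleted immediately.
  // rcu_write_retire() instead passes the old object to folly::rcu_retire(),
  // which reclaims it asynchronously once readers are done.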
  static void rcu_write_sync() {
    auto *old_data = rcu_data.load(std::memory_order_consume);
    auto *new_data = new RcuData(*old_data);
    new_data->d1++;
    new_data->d2++;
    rcu_data.store(new_data, std::memory_order_release);
    folly::synchronize_rcu();
    delete old_data;
  }

  static void rcu_write_retire() {
    auto *old_data = rcu_data.load(std::memory_order_consume);
    auto *new_data = new RcuData(*old_data);
    new_data->d1++;
    new_data->d2++;
    rcu_data.store(new_data, std::memory_order_release);
    folly::rcu_retire(old_data);
  }

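  // The "nonstop" writers ignore their pass count: they keep publishing
  // updates, one every s_nRcuWriterFrequency milliseconds, until every reader
  // thread has finished and rcu_readers_num has dropped to zero.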
  static void run_rcu_writer_sync_nonstop(size_t /* pass_count */) {
    while (rcu_readers_num.load(std::memory_order_acquire) > 0) {
      rcu_write_sync();
      std::this_thread::sleep_for(
          std::chrono::milliseconds(s_nRcuWriterFrequency));
    }
  }

  static void run_rcu_writer_retire_nonstop(size_t /* pass_count */) {
    while (rcu_readers_num.load(std::memory_order_acquire) > 0) {
      rcu_write_retire();
      std::this_thread::sleep_for(
          std::chrono::milliseconds(s_nRcuWriterFrequency));
    }
  }

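  // Fixed-pass-count reader: each iteration enters an RCU read region via the
  // scoped folly::rcu_reader guard, loads the current RcuData and accumulates
  // its fields so the reads cannot be optimized away.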
  static void run_rcu_reader_pass_count(size_t pass_count) {
    size_t sum = 0;
    for (size_t count = 0; count < pass_count; count++) {
      folly::rcu_reader g;
      auto *data = rcu_data.load(std::memory_order_consume);
      sum += (data->d1 + data->d2);
    }
    rcu_readers_num.fetch_sub(1, std::memory_order_release);
    // Use the accumulated sum so the reads are not optimized away.
    EXPECT_GT(sum, 0);
  }

  static void run_rcu_writer_sync_pass_count(size_t pass_count) {
    for (size_t count = 0; count < pass_count; count++) {
      rcu_write_sync();
    }
    rcu_writers_num.fetch_sub(1, std::memory_order_release);
  }

  static void run_rcu_writer_retire_pass_count(size_t pass_count) {
    for (size_t count = 0; count < pass_count; count++) {
      rcu_write_retire();
    }
    rcu_writers_num.fetch_sub(1, std::memory_order_release);
  }

  static void run_rcu_reader_nonstop(size_t /* pass_count */) {
    size_t sum = 0;
    while (rcu_writers_num.load(std::memory_order_acquire) > 0) {
      folly::rcu_reader g;
      auto *data = rcu_data.load(std::memory_order_consume);
      sum += (data->d1 + data->d2);
    }
    EXPECT_GT(sum, 0);
  }

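  // Shared/exclusive lock benchmark: roughly write_percentage percent of the
  // iterations take the lock exclusively and increment locked_data; the rest
  // take it in shared mode and read it.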
  template <typename Lock>
  static void run_rw_lock(Lock *l, size_t pass_count,
                          unsigned write_percentage) {
    size_t sum = 0;
    for (size_t count = 0; count < pass_count; count++) {
      if (rand(100) < write_percentage) {
        l->lock();
        locked_data++;
        l->unlock();
      } else {
        l->lock_shared();
        sum = locked_data;
        l->unlock_shared();
      }
    }
    EXPECT_GE(sum, pass_count * write_percentage / 100);
  }

  template <typename Lock>
  static void run_small_lock(Lock* l, size_t pass_count) {
    for (size_t count = 0; count < pass_count; count++) {
      l->lock();
      locked_data++;
      l->unlock();
    }
  }

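  // Spawns s_nThreadCount threads, all running the same callable with the
  // same arguments, and joins them.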
  template <typename... Args>
  static void FollySyncThreading(Args... args) {
    std::unique_ptr<std::thread[]> threads(new std::thread[s_nThreadCount]);
    for (size_t i = 0; i < s_nThreadCount; i++) {
      threads[i] = std::thread(args...);
    }
    for (size_t i = 0; i < s_nThreadCount; i++) {
      threads[i].join();
    }
  }

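  // RCU test driver: thread 0 runs writer_func with the writer pass count,
  // the remaining s_nThreadCount - 1 threads run reader_func with the reader
  // pass count. The counters initialized here are what the "nonstop" variants
  // poll for termination.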
  template <typename WriterFunc, typename ReaderFunc>
  static void FollyRcuThreading(WriterFunc writer_func,
                                ReaderFunc reader_func) {
    rcu_readers_num.store(s_nThreadCount - 1, std::memory_order_release);
    rcu_writers_num.store(1, std::memory_order_release);

    std::unique_ptr<std::thread[]> threads(new std::thread[s_nThreadCount]);
    // One of the threads is a writer.
    threads[0] = std::thread(writer_func, s_nRcuWriterPassCount);
    for (size_t i = 1; i < s_nThreadCount; i++) {
      threads[i] = std::thread(reader_func, s_nRcuReaderPassCount);
    }
    for (size_t i = 0; i < s_nThreadCount; i++) {
      threads[i].join();
    }
  }

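  // The small locks are created and explicitly init()-ed before use; after
  // all threads join, locked_data must equal the total number of increments.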
  template <typename SmallLockType>
  static void FollySmallLockThreading(size_t pass_count) {
    std::unique_ptr<SmallLockType> l(new SmallLockType());
    l->init();
    locked_data = 0;
    FollySyncThreading(run_small_lock<SmallLockType>, l.get(), pass_count);
    EXPECT_EQ(locked_data, pass_count * s_nThreadCount);
  }

  template <typename RWLockType>
  static void FollyRWLockThreading(size_t pass_count, unsigned write_percentage) {
    std::unique_ptr<RWLockType> l(new RWLockType());
    locked_data = 0;
    FollySyncThreading(run_rw_lock<RWLockType>, l.get(), pass_count,
                       write_percentage);
  }
};

size_t FollySyncTest_Parallel::locked_data;
std::atomic<RcuData*> FollySyncTest_Parallel::rcu_data;
std::atomic_uint FollySyncTest_Parallel::rcu_readers_num;
std::atomic_uint FollySyncTest_Parallel::rcu_writers_num;
size_t FollySyncTest_Parallel::s_nThreadCount;
size_t FollySyncTest_Parallel::s_nMicroLockPassCount;
size_t FollySyncTest_Parallel::s_nMicroSpinLockPassCount;
size_t FollySyncTest_Parallel::s_nPicoSpinLockPassCount;
size_t FollySyncTest_Parallel::s_nSharedMutexPassCount;
size_t FollySyncTest_Parallel::s_nRWSpinLockPassCount;
size_t FollySyncTest_Parallel::s_nRWTicketSpinLockPassCount;

size_t FollySyncTest_Parallel::s_nRcuReaderPassCount;
size_t FollySyncTest_Parallel::s_nRcuWriterPassCount;
size_t FollySyncTest_Parallel::s_nRcuWriterFrequency;

unsigned FollySyncTest_Parallel::s_nSharedMutexWritePercentage;
unsigned FollySyncTest_Parallel::s_nRWSpinLockWritePercentage;
unsigned FollySyncTest_Parallel::s_nRWTicketSpinLockWritePercentage;

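// Test naming: "NonstopWriter*" pairs a writer that runs until the readers
// complete their pass count with fixed-pass-count readers; "NonstopReader*"
// pairs a fixed-pass-count writer with readers that run until the writer is
// done. "Sync" reclaims with folly::synchronize_rcu(), "NoSync" with
// folly::rcu_retire().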
TEST_F(FollySyncTest_Parallel, FollyRCU_NonstopWriterSync) {
  FollyRcuThreading(run_rcu_writer_sync_nonstop, run_rcu_reader_pass_count);
}

TEST_F(FollySyncTest_Parallel, FollyRCU_NonstopWriterNoSync) {
  FollyRcuThreading(run_rcu_writer_retire_nonstop, run_rcu_reader_pass_count);
}

//TEST_F(FollySyncTest_Parallel, FollyRCU_NonstopReaderSync) {
//  FollyRcuThreading(run_rcu_writer_sync_pass_count, run_rcu_reader_nonstop);
//}

TEST_F(FollySyncTest_Parallel, FollyRCU_NonstopReaderNoSync) {
  FollyRcuThreading(run_rcu_writer_retire_pass_count, run_rcu_reader_nonstop);
}

TEST_F(FollySyncTest_Parallel, FollyRWTicketSpinLock_32) {
  FollyRWLockThreading<RWTicketSpinLock32>(s_nRWTicketSpinLockPassCount,
                                           s_nRWTicketSpinLockWritePercentage);
}

TEST_F(FollySyncTest_Parallel, FollyRWTicketSpinLock_64) {
  FollyRWLockThreading<RWTicketSpinLock64>(s_nRWTicketSpinLockPassCount,
                                           s_nRWTicketSpinLockWritePercentage);
}

TEST_F(FollySyncTest_Parallel, FollyRWSpinLock) {
  FollyRWLockThreading<RWSpinLock>(s_nRWSpinLockPassCount,
                                   s_nRWSpinLockWritePercentage);
}

TEST_F(FollySyncTest_Parallel, FollySharedMutex_ReadPriority) {
  FollyRWLockThreading<SharedMutexReadPriority>(s_nSharedMutexPassCount,
                                                s_nSharedMutexWritePercentage);
}

TEST_F(FollySyncTest_Parallel, FollySharedMutex_WritePriority) {
  FollyRWLockThreading<SharedMutexWritePriority>(s_nSharedMutexPassCount,
                                                 s_nSharedMutexWritePercentage);
}

TEST_F(FollySyncTest_Parallel, FollyMicroSpinLock) {
  FollySmallLockThreading<MicroSpinLock>(s_nMicroSpinLockPassCount);
}

TEST_F(FollySyncTest_Parallel, FollyPicoSpinLock) {
  FollySmallLockThreading<PicoSpinLock>(s_nPicoSpinLockPassCount);
}

TEST_F(FollySyncTest_Parallel, FollyMicroLock) {
  FollySmallLockThreading<MicroLock>(s_nMicroLockPassCount);
}

} // namespace folly_test