36266e27f697e18d9e9f970b8fe07091dc85a087
[folly.git] / folly / stress-test / stress-parallel-folly-sync.cpp
1 #include "sync_test.h"
2
3 namespace folly_test {
4
// Parallel stress tests for folly synchronization primitives (small locks,
// reader/writer locks, and RCU), driven by the libcds stress-test harness.
// Each test spawns s_nThreadCount threads that hammer one primitive for a
// configured number of passes.
class FollySyncTest_Parallel: public cds_test::stress_fixture {
protected:
  // Number of worker threads each test spawns.
  static size_t s_nThreadCount;
  // Simulate as the data protected by the lock.
  static size_t locked_data;
  // Current RCU-protected object: writers swap the pointer, readers
  // dereference it under an rcu_reader guard. Installed in SetUpTestCase.
  static std::atomic<RcuData*> rcu_data;
  // MicroLock
  static size_t s_nMicroLockPassCount;
  // MicroSpinLock
  static size_t s_nMicroSpinLockPassCount;
  // PicoSpinLock
  static size_t s_nPicoSpinLockPassCount;
  // SharedMutex
  static size_t s_nSharedMutexPassCount;
  // RWSpinLock
  static size_t s_nRWSpinLockPassCount;
  // RWTicketSpinLock
  static size_t s_nRWTicketSpinLockPassCount;
  // RCU
  static size_t s_nRcuReaderPassCount;
  static size_t s_nRcuWriterPassCount;
  static size_t s_nRcuWriterFrequency;

  // Fraction (0-100) of iterations that take the exclusive path in the
  // reader/writer lock tests.
  static unsigned s_nSharedMutexWritePercentage;
  static unsigned s_nRWSpinLockWritePercentage;
  static unsigned s_nRWTicketSpinLockWritePercentage;

  // Reads every tunable from the "ParallelFollySync" config section
  // (GetConfigNonZeroExpected is a harness macro that reads `cfg` and fills
  // the matching s_n* member, defaulting to the second argument), then
  // installs the initial RCU-protected object.
  // NOTE(review): the RcuData installed here (and the final one left by the
  // writer helpers) is never deleted — an intentional leak at process exit,
  // presumably acceptable for a stress binary; confirm if run under LSAN.
  static void SetUpTestCase() {
    const cds_test::config& cfg = get_config("ParallelFollySync");
    GetConfigNonZeroExpected(ThreadCount, 4);
    GetConfigNonZeroExpected(MicroLockPassCount, 2000000000);
    GetConfigNonZeroExpected(MicroSpinLockPassCount, 1500000000);
    GetConfigNonZeroExpected(PicoSpinLockPassCount, 2700000000);
    GetConfigNonZeroExpected(SharedMutexPassCount, 5000000);
    GetConfigNonZeroExpected(RWSpinLockPassCount, 5000000);
    GetConfigNonZeroExpected(RWTicketSpinLockPassCount, 5000000);
    GetConfigNonZeroExpected(RcuReaderPassCount, 10000);
    GetConfigNonZeroExpected(RcuWriterPassCount, 500);
    // Every 100 ms by default there will be a writer.
    GetConfigNonZeroExpected(RcuWriterFrequency, 100);

    GetConfigNonZeroExpected(SharedMutexWritePercentage, 5);
    GetConfigNonZeroExpected(RWSpinLockWritePercentage, 5);
    GetConfigNonZeroExpected(RWTicketSpinLockWritePercentage, 5);

    rcu_data.store(new RcuData(), std::memory_order_relaxed);
  }

  // Mixed RCU workload: with probability write_percentage/100 replace the
  // shared object and retire the old one, otherwise just enter/exit a read
  // critical section. `rand(100)` is the harness's per-thread RNG helper.
  // NOTE(review): unused by the tests below. If several threads ran this
  // concurrently, two of them could load the same old pointer and both
  // rcu_retire() it (double free) — only safe with a single writer thread;
  // confirm before reusing.
  static void run_rcu_sync(size_t pass_count, unsigned write_percentage) {
    for (size_t count = 0; count < pass_count; count++) {
      if (rand(100) < write_percentage) {
        auto *old_data = rcu_data.load(std::memory_order_relaxed);
        auto *new_data = new RcuData();
        rcu_data.store(new_data, std::memory_order_relaxed);
        folly::rcu_retire(old_data);
      } else {
        folly::rcu_reader g;
      }
    }
  }

  // Single-writer loop that reclaims synchronously: publish the new object,
  // block in synchronize_rcu() until all pre-existing readers have exited,
  // then delete the old object directly.
  // writer_freq is the milliseconds a writer should wait before another writer
  // happens.
  static void run_rcu_writer_sync(size_t pass_count, unsigned writer_freq) {
    for (size_t count = 0; count < pass_count; count++) {
      auto *old_data = rcu_data.load(std::memory_order_relaxed);
      auto *new_data = new RcuData(*old_data);
      new_data->d1++;
      new_data->d2++;
      rcu_data.store(new_data, std::memory_order_relaxed);
      folly::synchronize_rcu();
      delete old_data;
      std::this_thread::sleep_for(std::chrono::milliseconds(writer_freq));
    }
  }

  // Single-writer loop that reclaims asynchronously: publish the new object
  // and hand the old one to rcu_retire(), which deletes it later once a
  // grace period has elapsed (no blocking in the writer).
  // writer_freq is the milliseconds a writer should wait before another writer
  // happens.
  static void run_rcu_writer_no_sync(size_t pass_count, unsigned writer_freq) {
    for (size_t count = 0; count < pass_count; count++) {
      auto *old_data = rcu_data.load(std::memory_order_relaxed);
      auto *new_data = new RcuData(*old_data);
      new_data->d1++;
      new_data->d2++;
      rcu_data.store(new_data, std::memory_order_relaxed);
      folly::rcu_retire(old_data);
      std::this_thread::sleep_for(std::chrono::milliseconds(writer_freq));
    }
  }

  // Reader loop: each pass enters an RCU read-side critical section, loads
  // the current object and accumulates its fields so the reads are not
  // optimized away.
  static void run_rcu_reader(size_t pass_count) {
    size_t sum = 0;
    for (size_t count = 0; count < pass_count; count++) {
      folly::rcu_reader g;
      auto *data = rcu_data.load(std::memory_order_relaxed);
      sum += (data->d1 + data->d2);
    }
    // Just want to simulate the reading.
    EXPECT_GT(sum, 0);
  }

  // Reader/writer-lock worker: each pass takes the exclusive lock to bump
  // locked_data with probability write_percentage/100, otherwise takes the
  // shared lock and snapshots the counter.
  // NOTE(review): the final EXPECT_GE compares the LAST snapshot read by this
  // thread against this thread's expected write count — a heuristic sanity
  // check (total writes across all threads make it very likely to hold), not
  // an exact invariant.
  template <typename Lock>
  static void run_rw_lock(Lock *l, size_t pass_count,
                          unsigned write_percentage) {
    size_t sum = 0;
    for (size_t count = 0; count < pass_count; count++) {
      if (rand(100) < write_percentage) {
        l->lock();
        locked_data++;
        l->unlock();
      } else {
        l->lock_shared();
        sum = locked_data;
        l->unlock_shared();
      }
    }
    EXPECT_GE(sum, pass_count * write_percentage / 100);
  }

  // Mutual-exclusion worker: increment the shared counter pass_count times
  // under the lock; the caller verifies the total afterwards.
  template <typename Lock>
  static void run_small_lock(Lock* l, size_t pass_count) {
    for (size_t count = 0; count < pass_count; count++) {
      l->lock();
      locked_data++;
      l->unlock();
    }
  }

  // Launches s_nThreadCount threads all running the same callable (args are
  // copied into each std::thread) and joins them all before returning.
  template <typename... Args>
  static void FollySyncThreading(Args... args) {
    std::unique_ptr<std::thread[]> threads(new std::thread[s_nThreadCount]);
    for (size_t i = 0; i < s_nThreadCount; i++) {
      threads[i] = std::thread(args...);
    }
    for (size_t i = 0; i < s_nThreadCount; i++) {
      threads[i].join();
    }
  }

  // RCU topology: exactly one writer thread running writer_func plus
  // s_nThreadCount - 1 reader threads (single-writer assumption keeps the
  // load/store/retire sequence in the writer helpers race-free).
  // NOTE(review): assumes s_nThreadCount >= 1; with the config default of 4
  // that holds, but 0 would underflow reader_thrd_cnt.
  template <typename WriterFunc>
  static void FollyRcuThreading(WriterFunc writer_func) {
    // One of the threads is a writer.
    size_t reader_thrd_cnt = s_nThreadCount - 1;
    std::unique_ptr<std::thread[]> reader_threads(
        new std::thread[reader_thrd_cnt]);
    std::thread writer_thread(writer_func, s_nRcuWriterPassCount,
                              s_nRcuWriterFrequency);
    for (size_t i = 0; i < reader_thrd_cnt; i++) {
      reader_threads[i] = std::thread(run_rcu_reader, s_nRcuReaderPassCount);
    }
    for (size_t i = 0; i < reader_thrd_cnt; i++) {
      reader_threads[i].join();
    }
    writer_thread.join();
  }

  // Drives run_small_lock over a freshly init()-ed small lock (folly's
  // MicroLock/MicroSpinLock/PicoSpinLock require init() before first use)
  // and checks that no increments were lost.
  template <typename SmallLockType>
  static void FollySmallLockThreading(size_t pass_count) {
    std::unique_ptr<SmallLockType> l(new SmallLockType());
    l->init();
    locked_data = 0;
    FollySyncThreading(run_small_lock<SmallLockType>, l.get(), pass_count);
    EXPECT_EQ(locked_data, pass_count * s_nThreadCount);
  }

  // Drives run_rw_lock over a default-constructed reader/writer lock type;
  // per-thread EXPECT_GE inside run_rw_lock does the checking.
  template <typename RWLockType>
  static void FollyRWLockThreading(size_t pass_count, unsigned write_percentage) {
    std::unique_ptr<RWLockType> l(new RWLockType());
    locked_data = 0;
    FollySyncThreading(run_rw_lock<RWLockType>, l.get(), pass_count,
                       write_percentage);
  }
};
178
// Out-of-class definitions for the fixture's static members. All are
// zero-initialized here and given their real values in SetUpTestCase().
size_t FollySyncTest_Parallel::locked_data;
std::atomic<RcuData*> FollySyncTest_Parallel::rcu_data;
size_t FollySyncTest_Parallel::s_nThreadCount;
size_t FollySyncTest_Parallel::s_nMicroLockPassCount;
size_t FollySyncTest_Parallel::s_nMicroSpinLockPassCount;
size_t FollySyncTest_Parallel::s_nPicoSpinLockPassCount;
size_t FollySyncTest_Parallel::s_nSharedMutexPassCount;
size_t FollySyncTest_Parallel::s_nRWSpinLockPassCount;
size_t FollySyncTest_Parallel::s_nRWTicketSpinLockPassCount;

size_t FollySyncTest_Parallel::s_nRcuReaderPassCount;
size_t FollySyncTest_Parallel::s_nRcuWriterPassCount;
size_t FollySyncTest_Parallel::s_nRcuWriterFrequency;

unsigned FollySyncTest_Parallel::s_nSharedMutexWritePercentage;
unsigned FollySyncTest_Parallel::s_nRWSpinLockWritePercentage;
unsigned FollySyncTest_Parallel::s_nRWTicketSpinLockWritePercentage;
196
// RCU with synchronous reclamation: the writer blocks in synchronize_rcu()
// and deletes retired objects itself.
TEST_F(FollySyncTest_Parallel, FollyRCU_Sync) {
  FollyRcuThreading(run_rcu_writer_sync);
}
200
// RCU with asynchronous reclamation: the writer hands old objects to
// rcu_retire() and never blocks for a grace period.
TEST_F(FollySyncTest_Parallel, FollyRCU_NoSync) {
  FollyRcuThreading(run_rcu_writer_no_sync);
}
204
// Mixed read/write stress on the 32-bit ticket-based reader/writer spinlock.
TEST_F(FollySyncTest_Parallel, FollyRWTicketSpinLock_32) {
  FollyRWLockThreading<RWTicketSpinLock32>(s_nRWTicketSpinLockPassCount,
                                           s_nRWTicketSpinLockWritePercentage);
}
209
// Mixed read/write stress on the 64-bit ticket-based reader/writer spinlock
// (same pass count and write ratio as the 32-bit variant).
TEST_F(FollySyncTest_Parallel, FollyRWTicketSpinLock_64) {
  FollyRWLockThreading<RWTicketSpinLock64>(s_nRWTicketSpinLockPassCount,
                                           s_nRWTicketSpinLockWritePercentage);
}
214
// Mixed read/write stress on folly::RWSpinLock.
TEST_F(FollySyncTest_Parallel, FollyRWSpinLock) {
  FollyRWLockThreading<RWSpinLock>(s_nRWSpinLockPassCount,
                                   s_nRWSpinLockWritePercentage);
}
219
// Mixed read/write stress on SharedMutex in its read-priority configuration.
TEST_F(FollySyncTest_Parallel, FollySharedMutex_ReadPriority) {
  FollyRWLockThreading<SharedMutexReadPriority>(s_nSharedMutexPassCount,
                                                s_nSharedMutexWritePercentage);
}
224
// Mixed read/write stress on SharedMutex in its write-priority configuration.
TEST_F(FollySyncTest_Parallel, FollySharedMutex_WritePriority) {
  FollyRWLockThreading<SharedMutexWritePriority>(s_nSharedMutexPassCount,
                                                 s_nSharedMutexWritePercentage);
}
229
// Exclusive-increment stress on the one-byte MicroSpinLock; verifies no lost
// updates via the counter check in FollySmallLockThreading.
TEST_F(FollySyncTest_Parallel, FollyMicroSpinLock) {
  FollySmallLockThreading<MicroSpinLock>(s_nMicroSpinLockPassCount);
}
233
// Exclusive-increment stress on PicoSpinLock (lock bit packed into an
// integer); verifies no lost updates.
TEST_F(FollySyncTest_Parallel, FollyPicoSpinLock) {
  FollySmallLockThreading<PicoSpinLock>(s_nPicoSpinLockPassCount);
}
237
// Exclusive-increment stress on MicroLock (futex-based slow path); verifies
// no lost updates.
TEST_F(FollySyncTest_Parallel, FollyMicroLock) {
  FollySmallLockThreading<MicroLock>(s_nMicroLockPassCount);
}
241
242 } // namespace folly_test