#include "sync_test.h"

namespace folly_test {

class FollySyncTest_Parallel: public cds_test::stress_fixture {
protected:
  static size_t s_nThreadCount;
  // Simulates the data protected by the locks under test.
  static size_t locked_data;
  static std::atomic<RcuData*> rcu_data;
  // For RCU we mostly want to benchmark the readers (RCU is designed for very
  // fast readers and occasional writers). A single writer thread runs nonstop
  // until all reader threads are done.
  static std::atomic_uint rcu_readers_num;
  // MicroLock
  static size_t s_nMicroLockPassCount;
  // MicroSpinLock
  static size_t s_nMicroSpinLockPassCount;
  // PicoSpinLock
  static size_t s_nPicoSpinLockPassCount;
  // SharedMutex
  static size_t s_nSharedMutexPassCount;
  // RWSpinLock
  static size_t s_nRWSpinLockPassCount;
  // RWTicketSpinLock
  static size_t s_nRWTicketSpinLockPassCount;
  // RCU
  static size_t s_nRcuReaderPassCount;
  static size_t s_nRcuWriterPassCount;
  static size_t s_nRcuWriterFrequency;

  static unsigned s_nSharedMutexWritePercentage;
  static unsigned s_nRWSpinLockWritePercentage;
  static unsigned s_nRWTicketSpinLockWritePercentage;

  static void SetUpTestCase() {
    const cds_test::config& cfg = get_config("ParallelFollySync");
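    // GetConfigNonZeroExpected(Name, Default) is assumed (see sync_test.h) to
    // read option "Name" from cfg into the matching s_n<Name> member, falling
    // back to Default, and to expect the resulting value to be non-zero.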
    GetConfigNonZeroExpected(ThreadCount, 4);
    GetConfigNonZeroExpected(MicroLockPassCount, 2000000000);
    GetConfigNonZeroExpected(MicroSpinLockPassCount, 1500000000);
    GetConfigNonZeroExpected(PicoSpinLockPassCount, 2700000000);
    GetConfigNonZeroExpected(SharedMutexPassCount, 5000000);
    GetConfigNonZeroExpected(RWSpinLockPassCount, 5000000);
    GetConfigNonZeroExpected(RWTicketSpinLockPassCount, 5000000);
    GetConfigNonZeroExpected(RcuReaderPassCount, 10000);
    GetConfigNonZeroExpected(RcuWriterPassCount, 500);
    // By default the writer performs an update every 100 ms.
    GetConfigNonZeroExpected(RcuWriterFrequency, 100);

    GetConfigNonZeroExpected(SharedMutexWritePercentage, 5);
    GetConfigNonZeroExpected(RWSpinLockWritePercentage, 5);
    GetConfigNonZeroExpected(RWTicketSpinLockWritePercentage, 5);

    rcu_data.store(new RcuData(), std::memory_order_relaxed);
  }

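  // RCU writer that reclaims synchronously: copy the current data, publish the
  // updated copy, then block in synchronize_rcu() until all pre-existing
  // readers are done before deleting the old copy.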
  static void run_rcu_writer_sync() {
    while (rcu_readers_num.load(std::memory_order_acquire) > 0) {
      auto *old_data = rcu_data.load(std::memory_order_consume);
      auto *new_data = new RcuData(*old_data);
      new_data->d1++;
      new_data->d2++;
      rcu_data.store(new_data, std::memory_order_release);
      folly::synchronize_rcu();
      delete old_data;
      std::this_thread::sleep_for(
          std::chrono::milliseconds(s_nRcuWriterFrequency));
    }
  }

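  // RCU writer that reclaims asynchronously: instead of waiting for readers,
  // hand the old copy to rcu_retire(), which defers the delete until no reader
  // can still be referencing it.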
  static void run_rcu_writer_no_sync() {
    while (rcu_readers_num.load(std::memory_order_acquire) > 0) {
      auto *old_data = rcu_data.load(std::memory_order_consume);
      auto *new_data = new RcuData(*old_data);
      new_data->d1++;
      new_data->d2++;
      rcu_data.store(new_data, std::memory_order_release);
      folly::rcu_retire(old_data);
      std::this_thread::sleep_for(
          std::chrono::milliseconds(s_nRcuWriterFrequency));
    }
  }

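  // Each reader repeatedly enters an RCU read-side critical section (the
  // rcu_reader guard) and reads the current data, then decrements the reader
  // count so the writer knows when to stop.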
  static void run_rcu_reader(size_t pass_count) {
    size_t sum = 0;
    for (size_t count = 0; count < pass_count; count++) {
      folly::rcu_reader g;
      auto *data = rcu_data.load(std::memory_order_consume);
      sum += (data->d1 + data->d2);
    }
    rcu_readers_num.fetch_sub(1, std::memory_order_release);
    // Sanity check that the simulated reads actually happened.
    EXPECT_GT(sum, 0);
  }

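  // Reader/writer lock benchmark: roughly write_percentage out of every 100
  // iterations take the lock exclusively and mutate the shared data; the rest
  // take it in shared mode and only read.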
  template <typename Lock>
  static void run_rw_lock(Lock *l, size_t pass_count,
                          unsigned write_percentage) {
    size_t sum = 0;
    for (size_t count = 0; count < pass_count; count++) {
      if (rand(100) < write_percentage) {
        l->lock();
        locked_data++;
        l->unlock();
      } else {
        l->lock_shared();
        sum = locked_data;
        l->unlock_shared();
      }
    }
    EXPECT_GE(sum, pass_count * write_percentage / 100);
  }

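  // Small-lock benchmark: every iteration takes the lock exclusively and
  // increments the shared counter.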
  template <typename Lock>
  static void run_small_lock(Lock* l, size_t pass_count) {
    for (size_t count = 0; count < pass_count; count++) {
      l->lock();
      locked_data++;
      l->unlock();
    }
  }

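  // Launches s_nThreadCount threads running the given callable and arguments,
  // then joins them all.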
  template <typename... Args>
  static void FollySyncThreading(Args... args) {
    std::unique_ptr<std::thread[]> threads(new std::thread[s_nThreadCount]);
    for (size_t i = 0; i < s_nThreadCount; i++) {
      threads[i] = std::thread(args...);
    }
    for (size_t i = 0; i < s_nThreadCount; i++) {
      threads[i].join();
    }
  }

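  // Spawns one RCU writer thread plus (s_nThreadCount - 1) reader threads and
  // waits for all of them to finish.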
  template <typename WriterFunc>
  static void FollyRcuThreading(WriterFunc writer_func) {
    rcu_readers_num.store(s_nThreadCount - 1, std::memory_order_release);

    std::unique_ptr<std::thread[]> threads(new std::thread[s_nThreadCount]);
    // The first thread is the writer; all remaining threads are readers.
    threads[0] = std::thread(writer_func);
    for (size_t i = 1; i < s_nThreadCount; i++) {
      threads[i] = std::thread(run_rcu_reader, s_nRcuReaderPassCount);
    }
    for (size_t i = 0; i < s_nThreadCount; i++) {
      threads[i].join();
    }
  }

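  // MicroLock, MicroSpinLock and PicoSpinLock are tiny POD-style locks, so the
  // lock is explicitly init()-ed before use; the final counter value is
  // checked against the expected number of increments.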
  template <typename SmallLockType>
  static void FollySmallLockThreading(size_t pass_count) {
    std::unique_ptr<SmallLockType> l(new SmallLockType());
    l->init();
    locked_data = 0;
    FollySyncThreading(run_small_lock<SmallLockType>, l.get(), pass_count);
    EXPECT_EQ(locked_data, pass_count * s_nThreadCount);
  }

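  // Runs the reader/writer benchmark with s_nThreadCount threads sharing one
  // lock instance.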
  template <typename RWLockType>
  static void FollyRWLockThreading(size_t pass_count, unsigned write_percentage) {
    std::unique_ptr<RWLockType> l(new RWLockType());
    locked_data = 0;
    FollySyncThreading(run_rw_lock<RWLockType>, l.get(), pass_count,
                       write_percentage);
  }
};

size_t FollySyncTest_Parallel::locked_data;
std::atomic<RcuData*> FollySyncTest_Parallel::rcu_data;
std::atomic_uint FollySyncTest_Parallel::rcu_readers_num;
size_t FollySyncTest_Parallel::s_nThreadCount;
size_t FollySyncTest_Parallel::s_nMicroLockPassCount;
size_t FollySyncTest_Parallel::s_nMicroSpinLockPassCount;
size_t FollySyncTest_Parallel::s_nPicoSpinLockPassCount;
size_t FollySyncTest_Parallel::s_nSharedMutexPassCount;
size_t FollySyncTest_Parallel::s_nRWSpinLockPassCount;
size_t FollySyncTest_Parallel::s_nRWTicketSpinLockPassCount;

size_t FollySyncTest_Parallel::s_nRcuReaderPassCount;
size_t FollySyncTest_Parallel::s_nRcuWriterPassCount;
size_t FollySyncTest_Parallel::s_nRcuWriterFrequency;

unsigned FollySyncTest_Parallel::s_nSharedMutexWritePercentage;
unsigned FollySyncTest_Parallel::s_nRWSpinLockWritePercentage;
unsigned FollySyncTest_Parallel::s_nRWTicketSpinLockWritePercentage;

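// Each test below benchmarks one synchronization primitive using the pass
// counts and write percentages configured in SetUpTestCase().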
TEST_F(FollySyncTest_Parallel, FollyRCU_Sync) {
  FollyRcuThreading(run_rcu_writer_sync);
}

TEST_F(FollySyncTest_Parallel, FollyRCU_NoSync) {
  FollyRcuThreading(run_rcu_writer_no_sync);
}

TEST_F(FollySyncTest_Parallel, FollyRWTicketSpinLock_32) {
  FollyRWLockThreading<RWTicketSpinLock32>(s_nRWTicketSpinLockPassCount,
                                           s_nRWTicketSpinLockWritePercentage);
}

TEST_F(FollySyncTest_Parallel, FollyRWTicketSpinLock_64) {
  FollyRWLockThreading<RWTicketSpinLock64>(s_nRWTicketSpinLockPassCount,
                                           s_nRWTicketSpinLockWritePercentage);
}

TEST_F(FollySyncTest_Parallel, FollyRWSpinLock) {
  FollyRWLockThreading<RWSpinLock>(s_nRWSpinLockPassCount,
                                   s_nRWSpinLockWritePercentage);
}

TEST_F(FollySyncTest_Parallel, FollySharedMutex_ReadPriority) {
  FollyRWLockThreading<SharedMutexReadPriority>(s_nSharedMutexPassCount,
                                                s_nSharedMutexWritePercentage);
}

TEST_F(FollySyncTest_Parallel, FollySharedMutex_WritePriority) {
  FollyRWLockThreading<SharedMutexWritePriority>(s_nSharedMutexPassCount,
                                                 s_nSharedMutexWritePercentage);
}

TEST_F(FollySyncTest_Parallel, FollyMicroSpinLock) {
  FollySmallLockThreading<MicroSpinLock>(s_nMicroSpinLockPassCount);
}

TEST_F(FollySyncTest_Parallel, FollyPicoSpinLock) {
  FollySmallLockThreading<PicoSpinLock>(s_nPicoSpinLockPassCount);
}

TEST_F(FollySyncTest_Parallel, FollyMicroLock) {
  FollySmallLockThreading<MicroLock>(s_nMicroLockPassCount);
}

} // namespace folly_test