// Parallel stress fixture comparing folly synchronization primitives
// (MicroLock, MicroSpinLock, PicoSpinLock, SharedMutex, RWSpinLock,
// RWTicketSpinLock, and RCU) under multi-threaded load.
// NOTE(review): this listing embeds original source line numbers and elides
// some lines; gaps in the numbering mark code not visible here.
5 class FollySyncTest_Parallel: public cds_test::stress_fixture {
// Number of worker threads each test spawns (read from config).
7 static size_t s_nThreadCount;
8 // Simulate as the data protected by the lock.
9 static size_t locked_data;
// The RCU-protected payload; the writer thread swaps in fresh copies.
10 static std::atomic<RcuData*> rcu_data;
11 // For RCU, we mostly want to benchmark the readers (cause it's designed for
12 // very fast readers and occasional writers). We have a writer thread that
13 // runs nonstop until all other reader threads are done.
// Number of reader threads still running; the writer loops while it is > 0.
14 static std::atomic_uint rcu_readers_num;
// Per-primitive iteration counts, all loaded from config in SetUpTestCase().
16 static size_t s_nMicroLockPassCount;
18 static size_t s_nMicroSpinLockPassCount;
20 static size_t s_nPicoSpinLockPassCount;
22 static size_t s_nSharedMutexPassCount;
24 static size_t s_nRWSpinLockPassCount;
26 static size_t s_nRWTicketSpinLockPassCount;
28 static size_t s_nRcuReaderPassCount;
29 static size_t s_nRcuWriterPassCount;
// Sleep interval (milliseconds) between RCU writer updates.
30 static size_t s_nRcuWriterFrequency;
// Percentage of iterations taking the exclusive (write) lock in RW tests.
32 static unsigned s_nSharedMutexWritePercentage;
33 static unsigned s_nRWSpinLockWritePercentage;
34 static unsigned s_nRWTicketSpinLockWritePercentage;
// One-time suite setup: read every tuning knob from the "ParallelFollySync"
// config section (GetConfigNonZeroExpected presumably asserts a non-zero
// value, falling back to the given default — confirm against the macro's
// definition), then allocate the initial RCU payload.
36 static void SetUpTestCase() {
37 const cds_test::config& cfg = get_config("ParallelFollySync");
38 GetConfigNonZeroExpected(ThreadCount, 4);
39 GetConfigNonZeroExpected(MicroLockPassCount, 2000000000);
40 GetConfigNonZeroExpected(MicroSpinLockPassCount, 1500000000);
41 GetConfigNonZeroExpected(PicoSpinLockPassCount, 2700000000);
42 GetConfigNonZeroExpected(SharedMutexPassCount, 5000000);
43 GetConfigNonZeroExpected(RWSpinLockPassCount, 5000000);
44 GetConfigNonZeroExpected(RWTicketSpinLockPassCount, 5000000);
45 GetConfigNonZeroExpected(RcuReaderPassCount, 10000);
46 GetConfigNonZeroExpected(RcuWriterPassCount, 500);
47 // Every 100 ms by default there will be a writer.
48 GetConfigNonZeroExpected(RcuWriterFrequency, 100);
50 GetConfigNonZeroExpected(SharedMutexWritePercentage, 5);
51 GetConfigNonZeroExpected(RWSpinLockWritePercentage, 5);
52 GetConfigNonZeroExpected(RWTicketSpinLockWritePercentage, 5);
// Publish the first RCU payload; relaxed is sufficient here since no
// reader threads exist yet during suite setup.
54 rcu_data.store(new RcuData(), std::memory_order_relaxed);
// RCU writer using synchronous reclamation: copy the current payload,
// publish the copy, then block in synchronize_rcu() until all pre-existing
// readers have finished. Loops until every reader thread has exited.
// NOTE(review): the deletion of old_data appears to be on an elided line
// (original ~65) after synchronize_rcu() returns — confirm.
57 static void run_rcu_writer_sync() {
58 while (rcu_readers_num.load(std::memory_order_acquire) > 0) {
59 auto *old_data = rcu_data.load(std::memory_order_relaxed);
60 auto *new_data = new RcuData(*old_data);
63 rcu_data.store(new_data, std::memory_order_relaxed);
// Wait for all readers that may still hold old_data.
64 folly::synchronize_rcu();
// Throttle the writer so readers dominate, as intended for RCU.
66 std::this_thread::sleep_for(
67 std::chrono::milliseconds(s_nRcuWriterFrequency));
// RCU writer using deferred reclamation: copy the current payload, publish
// the copy, and hand the old pointer to rcu_retire() so it is freed later
// without blocking this thread. Loops until every reader thread has exited.
71 static void run_rcu_writer_no_sync() {
72 while (rcu_readers_num.load(std::memory_order_acquire) > 0) {
73 auto *old_data = rcu_data.load(std::memory_order_relaxed);
74 auto *new_data = new RcuData(*old_data);
77 rcu_data.store(new_data, std::memory_order_relaxed);
// Non-blocking: old_data is reclaimed asynchronously after the grace period.
78 folly::rcu_retire(old_data);
// Throttle the writer so readers dominate, as intended for RCU.
79 std::this_thread::sleep_for(
80 std::chrono::milliseconds(s_nRcuWriterFrequency));
// RCU reader: repeatedly loads the shared payload and accumulates its
// fields, then signals completion by decrementing rcu_readers_num.
// NOTE(review): the read-side critical-section guard (e.g. folly::rcu_reader)
// is presumably on the elided lines (original 85/87) — without it the
// loads below would race with reclamation; confirm in the full source.
84 static void run_rcu_reader(size_t pass_count) {
86 for (size_t count = 0; count < pass_count; count++) {
88 auto *data = rcu_data.load(std::memory_order_relaxed);
// Touch both fields so the read cannot be optimized away.
89 sum += (data->d1 + data->d2);
// Release pairs with the writer's acquire load of rcu_readers_num.
91 rcu_readers_num.fetch_sub(1, std::memory_order_release);
92 // Just want to simulate the reading.
// Reader/writer stress loop for any lock type exposing shared/exclusive
// modes: roughly write_percentage% of the pass_count iterations take the
// write path, the rest the read path.
// NOTE(review): the actual lock/unlock body (original lines 102-110) is
// elided here; presumably writes increment locked_data under the exclusive
// lock and reads accumulate it into sum under the shared lock — confirm.
96 template <typename Lock>
97 static void run_rw_lock(Lock *l, size_t pass_count,
98 unsigned write_percentage) {
100 for (size_t count = 0; count < pass_count; count++) {
// rand(100) yields [0, 100), so this branch fires ~write_percentage% of passes.
101 if (rand(100) < write_percentage) {
// Sanity check: sum must reflect at least the expected number of writes.
111 EXPECT_GE(sum, pass_count * write_percentage / 100);
// Mutual-exclusion stress loop for the small-lock types (MicroLock,
// MicroSpinLock, PicoSpinLock).
// NOTE(review): the loop body (original lines 117-121) is elided;
// presumably it locks l and increments locked_data once per pass — the
// EXPECT_EQ in FollySmallLockThreading relies on exactly that. Confirm.
114 template <typename Lock>
115 static void run_small_lock(Lock* l, size_t pass_count) {
116 for (size_t count = 0; count < pass_count; count++) {
// Spawn s_nThreadCount threads all running the same callable (plus its
// bound arguments), then join them all before returning.
123 template <typename... Args>
124 static void FollySyncThreading(Args... args) {
125 std::unique_ptr<std::thread[]> threads(new std::thread[s_nThreadCount]);
126 for (size_t i = 0; i < s_nThreadCount; i++) {
127 threads[i] = std::thread(args...);
// Join loop (join call itself is on an elided line).
129 for (size_t i = 0; i < s_nThreadCount; i++) {
134 template <typename WriterFunc>
135 static void FollyRcuThreading(WriterFunc writer_func) {
136 rcu_readers_num.store(s_nThreadCount - 1, std::memory_order_release);
138 std::unique_ptr<std::thread[]> threads(new std::thread[s_nThreadCount]);
139 // One of the threads is a writer.
140 threads[0] = std::thread(writer_func);
141 for (size_t i = 1; i < s_nThreadCount; i++) {
142 threads[i] = std::thread(run_rcu_reader, s_nRcuReaderPassCount);
144 for (size_t i = 0; i < s_nThreadCount; i++) {
// Small-lock scenario driver: allocate the lock, run pass_count iterations
// on every thread, and verify locked_data saw exactly one increment per
// iteration per thread (i.e. the lock provided mutual exclusion).
// NOTE(review): locked_data is presumably reset on the elided lines
// (original 152-153) before the run — confirm.
149 template <typename SmallLockType>
150 static void FollySmallLockThreading(size_t pass_count) {
151 std::unique_ptr<SmallLockType> l(new SmallLockType());
154 FollySyncThreading(run_small_lock<SmallLockType>, l.get(), pass_count);
155 EXPECT_EQ(locked_data, pass_count * s_nThreadCount);
// Reader/writer-lock scenario driver: allocate the lock and run the mixed
// read/write loop on every thread with the given write percentage.
// (The continuation of the FollySyncThreading call is on an elided line.)
158 template <typename RWLockType>
159 static void FollyRWLockThreading(size_t pass_count, unsigned write_percentage) {
160 std::unique_ptr<RWLockType> l(new RWLockType());
162 FollySyncThreading(run_rw_lock<RWLockType>, l.get(), pass_count,
// Out-of-class definitions for the fixture's static members (all
// zero-initialized; real values are assigned in SetUpTestCase).
167 size_t FollySyncTest_Parallel::locked_data;
168 std::atomic<RcuData*> FollySyncTest_Parallel::rcu_data;
169 std::atomic_uint FollySyncTest_Parallel::rcu_readers_num;
170 size_t FollySyncTest_Parallel::s_nThreadCount;
171 size_t FollySyncTest_Parallel::s_nMicroLockPassCount;
172 size_t FollySyncTest_Parallel::s_nMicroSpinLockPassCount;
173 size_t FollySyncTest_Parallel::s_nPicoSpinLockPassCount;
174 size_t FollySyncTest_Parallel::s_nSharedMutexPassCount;
175 size_t FollySyncTest_Parallel::s_nRWSpinLockPassCount;
176 size_t FollySyncTest_Parallel::s_nRWTicketSpinLockPassCount;
178 size_t FollySyncTest_Parallel::s_nRcuReaderPassCount;
179 size_t FollySyncTest_Parallel::s_nRcuWriterPassCount;
180 size_t FollySyncTest_Parallel::s_nRcuWriterFrequency;
182 unsigned FollySyncTest_Parallel::s_nSharedMutexWritePercentage;
183 unsigned FollySyncTest_Parallel::s_nRWSpinLockWritePercentage;
184 unsigned FollySyncTest_Parallel::s_nRWTicketSpinLockWritePercentage;
// Test cases: each one drives a scenario helper with the config-loaded
// pass counts / write percentages. (Each TEST_F's closing brace is on an
// elided line in this listing.)

// RCU with a writer that blocks in synchronize_rcu() per update.
186 TEST_F(FollySyncTest_Parallel, FollyRCU_Sync) {
187 FollyRcuThreading(run_rcu_writer_sync);
// RCU with a writer that defers reclamation via rcu_retire().
190 TEST_F(FollySyncTest_Parallel, FollyRCU_NoSync) {
191 FollyRcuThreading(run_rcu_writer_no_sync);
// Ticket-based RW spin lock, 32-bit and 64-bit variants.
194 TEST_F(FollySyncTest_Parallel, FollyRWTicketSpinLock_32) {
195 FollyRWLockThreading<RWTicketSpinLock32>(s_nRWTicketSpinLockPassCount,
196 s_nRWTicketSpinLockWritePercentage);
199 TEST_F(FollySyncTest_Parallel, FollyRWTicketSpinLock_64) {
200 FollyRWLockThreading<RWTicketSpinLock64>(s_nRWTicketSpinLockPassCount,
201 s_nRWTicketSpinLockWritePercentage);
204 TEST_F(FollySyncTest_Parallel, FollyRWSpinLock) {
205 FollyRWLockThreading<RWSpinLock>(s_nRWSpinLockPassCount,
206 s_nRWSpinLockWritePercentage);
// folly::SharedMutex in both priority policies.
209 TEST_F(FollySyncTest_Parallel, FollySharedMutex_ReadPriority) {
210 FollyRWLockThreading<SharedMutexReadPriority>(s_nSharedMutexPassCount,
211 s_nSharedMutexWritePercentage);
214 TEST_F(FollySyncTest_Parallel, FollySharedMutex_WritePriority) {
215 FollyRWLockThreading<SharedMutexWritePriority>(s_nSharedMutexPassCount,
216 s_nSharedMutexWritePercentage);
// Small mutual-exclusion locks.
219 TEST_F(FollySyncTest_Parallel, FollyMicroSpinLock) {
220 FollySmallLockThreading<MicroSpinLock>(s_nMicroSpinLockPassCount);
223 TEST_F(FollySyncTest_Parallel, FollyPicoSpinLock) {
224 FollySmallLockThreading<PicoSpinLock>(s_nPicoSpinLockPassCount);
227 TEST_F(FollySyncTest_Parallel, FollyMicroLock) {
228 FollySmallLockThreading<MicroLock>(s_nMicroLockPassCount);
231 } // namespace folly_test