5 class FollySyncTest_Parallel: public cds_test::stress_fixture {
// Parallel stress fixture for folly synchronization primitives: small
// locks (MicroLock, MicroSpinLock, PicoSpinLock), reader/writer locks
// (SharedMutex, RWSpinLock, RWTicketSpinLock) and RCU.
// Number of worker threads spawned by each test.
7 static size_t s_nThreadCount;
8 // Simulates the shared data protected by a lock; checked against
9 static size_t locked_data;
// Current RCU-protected object: writers swap it, readers dereference it.
10 static std::atomic<RcuData*> rcu_data;
// Per-thread pass counts for each primitive, loaded from the
// "ParallelFollySync" configuration section in SetUpTestCase().
12 static size_t s_nMicroLockPassCount;
14 static size_t s_nMicroSpinLockPassCount;
16 static size_t s_nPicoSpinLockPassCount;
18 static size_t s_nSharedMutexPassCount;
20 static size_t s_nRWSpinLockPassCount;
22 static size_t s_nRWTicketSpinLockPassCount;
24 static size_t s_nRcuReaderPassCount;
25 static size_t s_nRcuWriterPassCount;
// Milliseconds an RCU writer sleeps between two consecutive updates.
26 static size_t s_nRcuWriterFrequency;
// Percentage (0-100) of passes that acquire the lock in write mode.
28 static unsigned s_nSharedMutexWritePercentage;
29 static unsigned s_nRWSpinLockWritePercentage;
30 static unsigned s_nRWTicketSpinLockWritePercentage;
32 static void SetUpTestCase() {
// Loads all stress parameters from the "ParallelFollySync" config section.
// GetConfigNonZeroExpected(Name, Default) presumably reads entry "Name"
// from `cfg` into s_nName and requires a non-zero value -- the macro
// definition is not visible in this excerpt; confirm against its header.
33 const cds_test::config& cfg = get_config("ParallelFollySync");
34 GetConfigNonZeroExpected(ThreadCount, 4);
35 GetConfigNonZeroExpected(MicroLockPassCount, 2000000000);
36 GetConfigNonZeroExpected(MicroSpinLockPassCount, 1500000000);
37 GetConfigNonZeroExpected(PicoSpinLockPassCount, 2700000000);
38 GetConfigNonZeroExpected(SharedMutexPassCount, 5000000);
39 GetConfigNonZeroExpected(RWSpinLockPassCount, 5000000);
40 GetConfigNonZeroExpected(RWTicketSpinLockPassCount, 5000000);
41 GetConfigNonZeroExpected(RcuReaderPassCount, 10000);
42 GetConfigNonZeroExpected(RcuWriterPassCount, 500);
43 // Every 100 ms by default there will be a writer.
44 GetConfigNonZeroExpected(RcuWriterFrequency, 100);
46 GetConfigNonZeroExpected(SharedMutexWritePercentage, 5);
47 GetConfigNonZeroExpected(RWSpinLockWritePercentage, 5);
48 GetConfigNonZeroExpected(RWTicketSpinLockWritePercentage, 5);
// Publish the initial RCU-protected object before any test thread runs.
50 rcu_data.store(new RcuData(), std::memory_order_relaxed);
53 static void run_rcu_sync(size_t pass_count, unsigned write_percentage) {
// Mixed RCU loop: on roughly write_percentage% of the passes, publish a
// fresh RcuData and hand the displaced object to folly::rcu_retire(),
// which defers reclamation until pre-existing readers have finished.
// NOTE(review): the read-side branch of this loop (orig lines 60-65) is
// elided from this excerpt.
54 for (size_t count = 0; count < pass_count; count++) {
55 if (rand(100) < write_percentage) {
56 auto *old_data = rcu_data.load(std::memory_order_relaxed);
57 auto *new_data = new RcuData();
58 rcu_data.store(new_data, std::memory_order_relaxed);
59 folly::rcu_retire(old_data);
66 // writer_freq is the milliseconds a writer should wait before another writer
68 static void run_rcu_writer_sync(size_t pass_count, unsigned writer_freq) {
// RCU writer that copies the current object, publishes the copy, then
// blocks in folly::synchronize_rcu() until every reader that might still
// hold the old pointer has left its critical section.
69 for (size_t count = 0; count < pass_count; count++) {
70 auto *old_data = rcu_data.load(std::memory_order_relaxed);
71 auto *new_data = new RcuData(*old_data);
74 rcu_data.store(new_data, std::memory_order_relaxed);
75 folly::synchronize_rcu();
// NOTE(review): freeing of old_data is not visible in this excerpt
// (orig line 76 elided) -- confirm it is deleted after synchronize_rcu().
77 std::this_thread::sleep_for(std::chrono::milliseconds(writer_freq));
81 // writer_freq is the milliseconds a writer should wait before another writer
83 static void run_rcu_writer_no_sync(size_t pass_count, unsigned writer_freq) {
// Non-blocking variant of the RCU writer: instead of waiting in
// synchronize_rcu(), the displaced object is handed to folly::rcu_retire()
// and reclaimed asynchronously once all readers have drained.
84 for (size_t count = 0; count < pass_count; count++) {
85 auto *old_data = rcu_data.load(std::memory_order_relaxed);
86 auto *new_data = new RcuData(*old_data);
89 rcu_data.store(new_data, std::memory_order_relaxed);
90 folly::rcu_retire(old_data);
91 std::this_thread::sleep_for(std::chrono::milliseconds(writer_freq));
95 static void run_rcu_reader(size_t pass_count) {
// RCU read side: repeatedly loads the current object and folds its two
// fields into `sum` (declared on an elided line) so the loads are not
// optimized away.  Any rcu reader guard is likewise on elided lines --
// confirm the dereference below is inside an RCU read-side section.
97 for (size_t count = 0; count < pass_count; count++) {
99 auto *data = rcu_data.load(std::memory_order_relaxed);
100 sum += (data->d1 + data->d2);
102 // Just want to simulate the reading.
106 template <typename Lock>
107 static void run_rw_lock(Lock *l, size_t pass_count,
108 unsigned write_percentage) {
// Reader/writer loop over *l: roughly write_percentage% of the passes
// take the write path.  The actual lock/unlock bodies (orig lines
// 112-120) are elided from this excerpt.
110 for (size_t count = 0; count < pass_count; count++) {
111 if (rand(100) < write_percentage) {
// Randomized branching makes the exact count vary, so only require the
// accumulated value to reach the expected lower bound.
121 EXPECT_GE(sum, pass_count * write_percentage / 100);
124 template <typename Lock>
125 static void run_small_lock(Lock* l, size_t pass_count) {
// Hammers the small lock *l pass_count times.  The loop body is elided
// here; presumably it increments locked_data under the lock (that would
// match the EXPECT_EQ in FollySmallLockThreading) -- confirm.
126 for (size_t count = 0; count < pass_count; count++) {
133 template <typename... Args>
134 static void FollySyncThreading(Args... args) {
// Launches s_nThreadCount identical threads running args... (a callable
// followed by its arguments), then waits for all of them.
135 std::unique_ptr<std::thread[]> threads(new std::thread[s_nThreadCount]);
136 for (size_t i = 0; i < s_nThreadCount; i++) {
137 threads[i] = std::thread(args...);
// Second loop joins every worker (the join call itself is on an elided line).
139 for (size_t i = 0; i < s_nThreadCount; i++) {
144 template <typename WriterFunc>
145 static void FollyRcuThreading(WriterFunc writer_func) {
// Runs one writer thread (writer_func, driven by the RcuWriter* config
// values) against s_nThreadCount - 1 reader threads; joins the readers
// first, the writer last.
146 // One of the threads is a writer.
147 size_t reader_thrd_cnt = s_nThreadCount - 1;
148 std::unique_ptr<std::thread[]> reader_threads(
149 new std::thread[reader_thrd_cnt]);
150 std::thread writer_thread(writer_func, s_nRcuWriterPassCount,
151 s_nRcuWriterFrequency);
152 for (size_t i = 0; i < reader_thrd_cnt; i++) {
153 reader_threads[i] = std::thread(run_rcu_reader, s_nRcuReaderPassCount);
155 for (size_t i = 0; i < reader_thrd_cnt; i++) {
156 reader_threads[i].join();
158 writer_thread.join();
161 template <typename SmallLockType>
162 static void FollySmallLockThreading(size_t pass_count) {
// Runs run_small_lock over a fresh lock on every thread, then verifies
// that all updates of locked_data were serialized correctly.
// NOTE(review): locked_data is presumably reset to 0 on an elided line
// (orig 164-165) -- confirm, otherwise the EXPECT_EQ would accumulate
// across tests.
163 std::unique_ptr<SmallLockType> l(new SmallLockType());
166 FollySyncThreading(run_small_lock<SmallLockType>, l.get(), pass_count);
167 EXPECT_EQ(locked_data, pass_count * s_nThreadCount);
170 template <typename RWLockType>
171 static void FollyRWLockThreading(size_t pass_count, unsigned write_percentage) {
// Runs run_rw_lock over a fresh reader/writer lock on every thread.
172 std::unique_ptr<RWLockType> l(new RWLockType());
// (The trailing write_percentage argument of this call is on an elided line.)
174 FollySyncThreading(run_rw_lock<RWLockType>, l.get(), pass_count,
// Out-of-class definitions of the fixture's static members.  All are
// zero-initialized at static-initialization time; the s_n* parameters
// are populated by SetUpTestCase() and rcu_data is seeded there as well.
179 size_t FollySyncTest_Parallel::locked_data;
180 std::atomic<RcuData*> FollySyncTest_Parallel::rcu_data;
181 size_t FollySyncTest_Parallel::s_nThreadCount;
182 size_t FollySyncTest_Parallel::s_nMicroLockPassCount;
183 size_t FollySyncTest_Parallel::s_nMicroSpinLockPassCount;
184 size_t FollySyncTest_Parallel::s_nPicoSpinLockPassCount;
185 size_t FollySyncTest_Parallel::s_nSharedMutexPassCount;
186 size_t FollySyncTest_Parallel::s_nRWSpinLockPassCount;
187 size_t FollySyncTest_Parallel::s_nRWTicketSpinLockPassCount;
189 size_t FollySyncTest_Parallel::s_nRcuReaderPassCount;
190 size_t FollySyncTest_Parallel::s_nRcuWriterPassCount;
191 size_t FollySyncTest_Parallel::s_nRcuWriterFrequency;
193 unsigned FollySyncTest_Parallel::s_nSharedMutexWritePercentage;
194 unsigned FollySyncTest_Parallel::s_nRWSpinLockWritePercentage;
195 unsigned FollySyncTest_Parallel::s_nRWTicketSpinLockWritePercentage;
// RCU readers vs. a writer that blocks in synchronize_rcu() after each update.
197 TEST_F(FollySyncTest_Parallel, FollyRCU_Sync) {
198 FollyRcuThreading(run_rcu_writer_sync);
// RCU readers vs. a writer that defers reclamation via rcu_retire().
201 TEST_F(FollySyncTest_Parallel, FollyRCU_NoSync) {
202 FollyRcuThreading(run_rcu_writer_no_sync);
// Reader/writer stress over the 32-bit ticket spin lock.
205 TEST_F(FollySyncTest_Parallel, FollyRWTicketSpinLock_32) {
206 FollyRWLockThreading<RWTicketSpinLock32>(s_nRWTicketSpinLockPassCount,
207 s_nRWTicketSpinLockWritePercentage);
// Reader/writer stress over the 64-bit ticket spin lock.
210 TEST_F(FollySyncTest_Parallel, FollyRWTicketSpinLock_64) {
211 FollyRWLockThreading<RWTicketSpinLock64>(s_nRWTicketSpinLockPassCount,
212 s_nRWTicketSpinLockWritePercentage);
// Reader/writer stress over folly::RWSpinLock.
215 TEST_F(FollySyncTest_Parallel, FollyRWSpinLock) {
216 FollyRWLockThreading<RWSpinLock>(s_nRWSpinLockPassCount,
217 s_nRWSpinLockWritePercentage);
// SharedMutex stress, read-priority variant.
220 TEST_F(FollySyncTest_Parallel, FollySharedMutex_ReadPriority) {
221 FollyRWLockThreading<SharedMutexReadPriority>(s_nSharedMutexPassCount,
222 s_nSharedMutexWritePercentage);
// SharedMutex stress, write-priority variant.
225 TEST_F(FollySyncTest_Parallel, FollySharedMutex_WritePriority) {
226 FollyRWLockThreading<SharedMutexWritePriority>(s_nSharedMutexPassCount,
227 s_nSharedMutexWritePercentage);
// Exclusive-lock stress over folly::MicroSpinLock.
230 TEST_F(FollySyncTest_Parallel, FollyMicroSpinLock) {
231 FollySmallLockThreading<MicroSpinLock>(s_nMicroSpinLockPassCount);
// Exclusive-lock stress over PicoSpinLock.
234 TEST_F(FollySyncTest_Parallel, FollyPicoSpinLock) {
235 FollySmallLockThreading<PicoSpinLock>(s_nPicoSpinLockPassCount);
// Exclusive-lock stress over folly::MicroLock.
238 TEST_F(FollySyncTest_Parallel, FollyMicroLock) {
239 FollySmallLockThreading<MicroLock>(s_nMicroLockPassCount);
242 } // namespace folly_test