Refactors sync test cases
authorPeizhao Ou <peizhaoo@uci.edu>
Mon, 12 Feb 2018 18:36:22 +0000 (10:36 -0800)
committerPeizhao Ou <peizhaoo@uci.edu>
Mon, 12 Feb 2018 18:36:22 +0000 (10:36 -0800)
folly/.clang-format [new file with mode: 0644]
folly/stress-test/CMakeLists.txt
folly/stress-test/stress-parallel-folly-queue.cpp
folly/stress-test/stress-parallel-folly-sync.cpp
folly/stress-test/stress-sequential-folly-queue.cpp
folly/stress-test/stress-sequential-folly-sync.cpp
folly/stress-test/sync_test.h [new file with mode: 0644]

diff --git a/folly/.clang-format b/folly/.clang-format
new file mode 100644 (file)
index 0000000..5bead5f
--- /dev/null
@@ -0,0 +1,2 @@
+BasedOnStyle: LLVM
+
index 52a2323..3a388e1 100644 (file)
@@ -22,30 +22,30 @@ link_directories(
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++1y")
 
 set(FOLLY_LIB folly pthread gflags glog gtest stress-framework)
-set(RCU_OBJ ../synchronization/.libs/Rcu.o)
+set(OTHER_OBJS main.cpp ../synchronization/.libs/Rcu.o)
 
 # Sequential driver
 add_executable(stress-sequential-folly-map
-    stress-sequential-folly-map.cpp main.cpp ${RCU_OBJ})
+    stress-sequential-folly-map.cpp ${OTHER_OBJS})
 target_link_libraries(stress-sequential-folly-map ${FOLLY_LIB})
 
 add_executable(stress-sequential-folly-queue
-    stress-sequential-folly-queue.cpp ${RCU_OBJ})
+    stress-sequential-folly-queue.cpp ${OTHER_OBJS})
 target_link_libraries(stress-sequential-folly-queue ${FOLLY_LIB})
 
 add_executable(stress-sequential-folly-sync
-    stress-sequential-folly-sync.cpp ${RCU_OBJ})
+    stress-sequential-folly-sync.cpp ${OTHER_OBJS})
 target_link_libraries(stress-sequential-folly-sync ${FOLLY_LIB})
 
 # Parallel driver
 add_executable(stress-parallel-folly-map
-    stress-parallel-folly-map.cpp main.cpp ${RCU_OBJ})
+    stress-parallel-folly-map.cpp ${OTHER_OBJS})
 target_link_libraries(stress-parallel-folly-map ${FOLLY_LIB})
 
 add_executable(stress-parallel-folly-queue
-    stress-parallel-folly-queue.cpp ${RCU_OBJ})
+    stress-parallel-folly-queue.cpp ${OTHER_OBJS})
 target_link_libraries(stress-parallel-folly-queue ${FOLLY_LIB})
 
 add_executable(stress-parallel-folly-sync
-    stress-parallel-folly-sync.cpp ${RCU_OBJ})
+    stress-parallel-folly-sync.cpp ${OTHER_OBJS})
 target_link_libraries(stress-parallel-folly-sync ${FOLLY_LIB})
index 70bcb15..115c523 100644 (file)
@@ -187,10 +187,3 @@ TEST_F(FollyQueueEnqueueDequeueTest, FollyDynamicBoundedQueue_MPMC) {
       kDMPMCQueueEnqueueCount,
       kDynamicBoundedQueueEnqueueStride);
 }
-
-int main(int argc, char** argv) {
-  // Init Google test
-  ::testing::InitGoogleTest(&argc, argv);
-  int result = RUN_ALL_TESTS();
-  return result;
-}
index 775252c..36266e2 100644 (file)
-#include <folly/SmallLocks.h>
-#include <folly/RWSpinLock.h>
-#include <folly/SharedMutex.h>
-#include <folly/synchronization/Rcu.h>
-
-#include <gtest/gtest.h>
-
-#include <memory>
-
-namespace {
-
-// MicroLock
-const size_t kMicroLockPassCount = 2000000000;
-typedef folly::MicroLock MicroLock;
-
-// MicroSpinLock
-const size_t kMicroSpinLockPassCount = 1500000000;
-typedef folly::MicroSpinLock MicroSpinLock;
-
-// PicoSpinLock
-const size_t kPicoSpinLockPassCount = 2700000000;
-typedef folly::PicoSpinLock<size_t> PicoSpinLock;
-
-// SharedMutex
-const size_t kSharedMutexPassCount = 5000000;
-typedef folly::SharedMutexReadPriority SharedMutexReadPriority;
-typedef folly::SharedMutexWritePriority SharedMutexWritePriority;
-
-// RWSpinLock
-const size_t kRWSpinLockPassCount = 5000000;
-typedef folly::RWSpinLock RWSpinLock;
-
-// RWTicketSpinLock
-const size_t kRWTicketSpinLockPassCount = 5000000;
-typedef folly::RWTicketSpinLock32 RWTicketSpinLock32;
-typedef folly::RWTicketSpinLock64 RWTicketSpinLock64;
-
-// RCU
-const size_t kRcuSyncPassCount = 180000;
-const size_t kRcuNoSyncPassCount = 3500000;
-// Represent the RCU-protected data.
-struct RcuFoo {
-  size_t f1;
-  size_t f2;
-};
-
-}
+#include "sync_test.h"
+
+namespace folly_test {
+
+class FollySyncTest_Parallel: public cds_test::stress_fixture {
+protected:
+  static size_t s_nThreadCount;
+  // Simulate as the data protected by the lock.
+  static size_t locked_data;
+  static std::atomic<RcuData*> rcu_data;
+  // MicroLock
+  static size_t s_nMicroLockPassCount;
+  // MicroSpinLock
+  static size_t s_nMicroSpinLockPassCount;
+  // PicoSpinLock
+  static size_t s_nPicoSpinLockPassCount;
+  // SharedMutex
+  static size_t s_nSharedMutexPassCount;
+  // RWSpinLock
+  static size_t s_nRWSpinLockPassCount;
+  // RWTicketSpinLock
+  static size_t s_nRWTicketSpinLockPassCount;
+  // RCU
+  static size_t s_nRcuReaderPassCount;
+  static size_t s_nRcuWriterPassCount;
+  static size_t s_nRcuWriterFrequency;
+
+  static unsigned s_nSharedMutexWritePercentage;
+  static unsigned s_nRWSpinLockWritePercentage;
+  static unsigned s_nRWTicketSpinLockWritePercentage;
+
+  static void SetUpTestCase() {
+    const cds_test::config& cfg = get_config("ParallelFollySync");
+    GetConfigNonZeroExpected(ThreadCount, 4);
+    GetConfigNonZeroExpected(MicroLockPassCount, 2000000000);
+    GetConfigNonZeroExpected(MicroSpinLockPassCount, 1500000000);
+    GetConfigNonZeroExpected(PicoSpinLockPassCount, 2700000000);
+    GetConfigNonZeroExpected(SharedMutexPassCount, 5000000);
+    GetConfigNonZeroExpected(RWSpinLockPassCount, 5000000);
+    GetConfigNonZeroExpected(RWTicketSpinLockPassCount, 5000000);
+    GetConfigNonZeroExpected(RcuReaderPassCount, 10000);
+    GetConfigNonZeroExpected(RcuWriterPassCount, 500);
+    // Every 100 ms by default there will be a writer.
+    GetConfigNonZeroExpected(RcuWriterFrequency, 100);
+
+    GetConfigNonZeroExpected(SharedMutexWritePercentage, 5);
+    GetConfigNonZeroExpected(RWSpinLockWritePercentage, 5);
+    GetConfigNonZeroExpected(RWTicketSpinLockWritePercentage, 5);
+
+    rcu_data.store(new RcuData(), std::memory_order_relaxed);
+  }
 
-void run_rcu_sync(size_t pass_count) {
-  for (int write_percentage = 1; write_percentage <= 5; write_percentage += 1) {
+  static void run_rcu_sync(size_t pass_count, unsigned write_percentage) {
     for (size_t count = 0; count < pass_count; count++) {
-      for (int i = 0; i < 100; ++i) {
-        if (i < write_percentage) {
-          RcuFoo* f = new RcuFoo();
-          folly::rcu_retire(f);
-          folly::synchronize_rcu();
-        } else {
-          folly::rcu_reader g;
-        }
+      if (rand(100) < write_percentage) {
+        auto *old_data = rcu_data.load(std::memory_order_relaxed);
+        auto *new_data = new RcuData();
+        rcu_data.store(new_data, std::memory_order_relaxed);
+        folly::rcu_retire(old_data);
+      } else {
+        folly::rcu_reader g;
       }
     }
   }
-}
 
-void run_rcu_no_sync(size_t pass_count) {
-  for (int write_percentage = 1; write_percentage <= 5; write_percentage += 1) {
+  // writer_freq is the milliseconds a writer should wait before another writer
+  // happens.
+  static void run_rcu_writer_sync(size_t pass_count, unsigned writer_freq) {
     for (size_t count = 0; count < pass_count; count++) {
-      for (int i = 0; i < 100; ++i) {
-        if (i < write_percentage) {
-          RcuFoo* f = new RcuFoo();
-          folly::rcu_retire(f);
-        } else {
-          folly::rcu_reader g;
-        }
-      }
+      auto *old_data = rcu_data.load(std::memory_order_relaxed);
+      auto *new_data = new RcuData(*old_data);
+      new_data->d1++;
+      new_data->d2++;
+      rcu_data.store(new_data, std::memory_order_relaxed);
+      folly::synchronize_rcu();
+      delete old_data;
+      std::this_thread::sleep_for(std::chrono::milliseconds(writer_freq));
     }
   }
-}
 
-template <typename Lock>
-void run_rw_lock(size_t pass_count) {
-  std::unique_ptr<Lock> l(new Lock());
-  for (int write_percentage = 5; write_percentage < 20; write_percentage += 5) {
+  // writer_freq is the milliseconds a writer should wait before another writer
+  // happens.
+  static void run_rcu_writer_no_sync(size_t pass_count, unsigned writer_freq) {
     for (size_t count = 0; count < pass_count; count++) {
-      for (int i = 0; i < 100; ++i) {
-        if (i < write_percentage) {
-          l->lock();
-          l->unlock();
-        } else {
-          l->lock_shared();
-          l->unlock_shared();
-        }
+      auto *old_data = rcu_data.load(std::memory_order_relaxed);
+      auto *new_data = new RcuData(*old_data);
+      new_data->d1++;
+      new_data->d2++;
+      rcu_data.store(new_data, std::memory_order_relaxed);
+      folly::rcu_retire(old_data);
+      std::this_thread::sleep_for(std::chrono::milliseconds(writer_freq));
+    }
+  }
+
+  static void run_rcu_reader(size_t pass_count) {
+    size_t sum = 0;
+    for (size_t count = 0; count < pass_count; count++) {
+      folly::rcu_reader g;
+      auto *data = rcu_data.load(std::memory_order_relaxed);
+      sum += (data->d1 + data->d2);
+    }
+    // Just want to simulate the reading.
+    EXPECT_GT(sum, 0);
+  }
+
+  template <typename Lock>
+  static void run_rw_lock(Lock *l, size_t pass_count,
+                          unsigned write_percentage) {
+    size_t sum = 0;
+    for (size_t count = 0; count < pass_count; count++) {
+      if (rand(100) < write_percentage) {
+        l->lock();
+        locked_data++;
+        l->unlock();
+      } else {
+        l->lock_shared();
+        sum = locked_data;
+        l->unlock_shared();
       }
     }
+    EXPECT_GE(sum, pass_count * write_percentage / 100);
   }
-}
 
-template <typename Lock>
-void run_small_lock(size_t pass_count) {
-  std::unique_ptr<Lock> l(new Lock());
-  l->init();
-  for (size_t count = 0; count < pass_count; count++) {
-    l->lock();
-    l->unlock();
+  template <typename Lock>
+  static void run_small_lock(Lock* l, size_t pass_count) {
+    for (size_t count = 0; count < pass_count; count++) {
+      l->lock();
+      locked_data++;
+      l->unlock();
+    }
+  }
+
+  template <typename... Args>
+  static void FollySyncThreading(Args... args) {
+    std::unique_ptr<std::thread[]> threads(new std::thread[s_nThreadCount]);
+    for (size_t i = 0; i < s_nThreadCount; i++) {
+      threads[i] = std::thread(args...);
+    }
+    for (size_t i = 0; i < s_nThreadCount; i++) {
+      threads[i].join();
+    }
+  }
+
+  template <typename WriterFunc>
+  static void FollyRcuThreading(WriterFunc writer_func) {
+    // One of the threads is a writer.
+    size_t reader_thrd_cnt = s_nThreadCount - 1;
+    std::unique_ptr<std::thread[]> reader_threads(
+        new std::thread[reader_thrd_cnt]);
+    std::thread writer_thread(writer_func, s_nRcuWriterPassCount,
+                              s_nRcuWriterFrequency);
+    for (size_t i = 0; i < reader_thrd_cnt; i++) {
+      reader_threads[i] = std::thread(run_rcu_reader, s_nRcuReaderPassCount);
+    }
+    for (size_t i = 0; i < reader_thrd_cnt; i++) {
+      reader_threads[i].join();
+    }
+    writer_thread.join();
   }
-}
 
-class FollySyncTest: public ::testing::Test {
+  template <typename SmallLockType>
+  static void FollySmallLockThreading(size_t pass_count) {
+    std::unique_ptr<SmallLockType> l(new SmallLockType());
+    l->init();
+    locked_data = 0;
+    FollySyncThreading(run_small_lock<SmallLockType>, l.get(), pass_count);
+    EXPECT_EQ(locked_data, pass_count * s_nThreadCount);
+  }
 
+  template <typename RWLockType>
+  static void FollyRWLockThreading(size_t pass_count, unsigned write_percentage) {
+    std::unique_ptr<RWLockType> l(new RWLockType());
+    locked_data = 0;
+    FollySyncThreading(run_rw_lock<RWLockType>, l.get(), pass_count,
+                       write_percentage);
+  }
 };
 
-TEST_F(FollySyncTest, FollyRCU_Sync) {
-  run_rcu_sync(kRcuSyncPassCount);
-}
+size_t FollySyncTest_Parallel::locked_data;
+std::atomic<RcuData*> FollySyncTest_Parallel::rcu_data;
+size_t FollySyncTest_Parallel::s_nThreadCount;
+size_t FollySyncTest_Parallel::s_nMicroLockPassCount;
+size_t FollySyncTest_Parallel::s_nMicroSpinLockPassCount;
+size_t FollySyncTest_Parallel::s_nPicoSpinLockPassCount;
+size_t FollySyncTest_Parallel::s_nSharedMutexPassCount;
+size_t FollySyncTest_Parallel::s_nRWSpinLockPassCount;
+size_t FollySyncTest_Parallel::s_nRWTicketSpinLockPassCount;
+
+size_t FollySyncTest_Parallel::s_nRcuReaderPassCount;
+size_t FollySyncTest_Parallel::s_nRcuWriterPassCount;
+size_t FollySyncTest_Parallel::s_nRcuWriterFrequency;
 
-TEST_F(FollySyncTest, FollyRCU_NoSync) {
-  run_rcu_no_sync(kRcuNoSyncPassCount);
+unsigned FollySyncTest_Parallel::s_nSharedMutexWritePercentage;
+unsigned FollySyncTest_Parallel::s_nRWSpinLockWritePercentage;
+unsigned FollySyncTest_Parallel::s_nRWTicketSpinLockWritePercentage;
+
+TEST_F(FollySyncTest_Parallel, FollyRCU_Sync) {
+  FollyRcuThreading(run_rcu_writer_sync);
 }
 
-TEST_F(FollySyncTest, FollyRWTicketSpinLock_32) {
-  run_rw_lock<RWTicketSpinLock32>(kRWTicketSpinLockPassCount);
+TEST_F(FollySyncTest_Parallel, FollyRCU_NoSync) {
+  FollyRcuThreading(run_rcu_writer_no_sync);
 }
 
-TEST_F(FollySyncTest, FollyRWTicketSpinLock_64) {
-  run_rw_lock<RWTicketSpinLock64>(kRWTicketSpinLockPassCount);
+TEST_F(FollySyncTest_Parallel, FollyRWTicketSpinLock_32) {
+  FollyRWLockThreading<RWTicketSpinLock32>(s_nRWTicketSpinLockPassCount,
+                                           s_nRWTicketSpinLockWritePercentage);
 }
 
-TEST_F(FollySyncTest, FollyRWSpinLock) {
-  run_rw_lock<RWSpinLock>(kRWSpinLockPassCount);
+TEST_F(FollySyncTest_Parallel, FollyRWTicketSpinLock_64) {
+  FollyRWLockThreading<RWTicketSpinLock64>(s_nRWTicketSpinLockPassCount,
+                                           s_nRWTicketSpinLockWritePercentage);
 }
 
-TEST_F(FollySyncTest, FollySharedMutex_ReadPriority) {
-  run_rw_lock<SharedMutexReadPriority>(kSharedMutexPassCount);
+TEST_F(FollySyncTest_Parallel, FollyRWSpinLock) {
+  FollyRWLockThreading<RWSpinLock>(s_nRWSpinLockPassCount,
+                                   s_nRWSpinLockWritePercentage);
 }
 
-TEST_F(FollySyncTest, FollySharedMutex_WritePriority) {
-  run_rw_lock<SharedMutexWritePriority>(kSharedMutexPassCount);
+TEST_F(FollySyncTest_Parallel, FollySharedMutex_ReadPriority) {
+  FollyRWLockThreading<SharedMutexReadPriority>(s_nSharedMutexPassCount,
+                                                s_nSharedMutexWritePercentage);
 }
 
-TEST_F(FollySyncTest, FollyMicroSpinLock) {
-  run_small_lock<MicroSpinLock>(kMicroSpinLockPassCount);
+TEST_F(FollySyncTest_Parallel, FollySharedMutex_WritePriority) {
+  FollyRWLockThreading<SharedMutexWritePriority>(s_nSharedMutexPassCount,
+                                                 s_nSharedMutexWritePercentage);
 }
 
-TEST_F(FollySyncTest, FollyPicoSpinLock) {
-  run_small_lock<PicoSpinLock>(kPicoSpinLockPassCount);
+TEST_F(FollySyncTest_Parallel, FollyMicroSpinLock) {
+  FollySmallLockThreading<MicroSpinLock>(s_nMicroSpinLockPassCount);
 }
 
-TEST_F(FollySyncTest, FollyMicroLock) {
-  run_small_lock<MicroLock>(kMicroLockPassCount);
+TEST_F(FollySyncTest_Parallel, FollyPicoSpinLock) {
+  FollySmallLockThreading<PicoSpinLock>(s_nPicoSpinLockPassCount);
 }
 
-int main(int argc, char** argv) {
-  // Init Google test
-  ::testing::InitGoogleTest(&argc, argv);
-  int result = RUN_ALL_TESTS();
-  return result;
+TEST_F(FollySyncTest_Parallel, FollyMicroLock) {
+  FollySmallLockThreading<MicroLock>(s_nMicroLockPassCount);
 }
+
+} // namespace folly_test
index 70bcb15..115c523 100644 (file)
@@ -187,10 +187,3 @@ TEST_F(FollyQueueEnqueueDequeueTest, FollyDynamicBoundedQueue_MPMC) {
       kDMPMCQueueEnqueueCount,
       kDynamicBoundedQueueEnqueueStride);
 }
-
-int main(int argc, char** argv) {
-  // Init Google test
-  ::testing::InitGoogleTest(&argc, argv);
-  int result = RUN_ALL_TESTS();
-  return result;
-}
index 775252c..8a7a1d1 100644 (file)
-#include <folly/SmallLocks.h>
-#include <folly/RWSpinLock.h>
-#include <folly/SharedMutex.h>
-#include <folly/synchronization/Rcu.h>
-
-#include <gtest/gtest.h>
-
-#include <memory>
-
-namespace {
-
-// MicroLock
-const size_t kMicroLockPassCount = 2000000000;
-typedef folly::MicroLock MicroLock;
-
-// MicroSpinLock
-const size_t kMicroSpinLockPassCount = 1500000000;
-typedef folly::MicroSpinLock MicroSpinLock;
-
-// PicoSpinLock
-const size_t kPicoSpinLockPassCount = 2700000000;
-typedef folly::PicoSpinLock<size_t> PicoSpinLock;
-
-// SharedMutex
-const size_t kSharedMutexPassCount = 5000000;
-typedef folly::SharedMutexReadPriority SharedMutexReadPriority;
-typedef folly::SharedMutexWritePriority SharedMutexWritePriority;
-
-// RWSpinLock
-const size_t kRWSpinLockPassCount = 5000000;
-typedef folly::RWSpinLock RWSpinLock;
-
-// RWTicketSpinLock
-const size_t kRWTicketSpinLockPassCount = 5000000;
-typedef folly::RWTicketSpinLock32 RWTicketSpinLock32;
-typedef folly::RWTicketSpinLock64 RWTicketSpinLock64;
-
-// RCU
-const size_t kRcuSyncPassCount = 180000;
-const size_t kRcuNoSyncPassCount = 3500000;
-// Represent the RCU-protected data.
-struct RcuFoo {
-  size_t f1;
-  size_t f2;
-};
-
-}
+#include "sync_test.h"
+
+namespace folly_test {
+
+class FollySyncTest_Sequential: public cds_test::stress_fixture {
+protected:
+  // Simulate as the data protected by the lock.
+  static size_t locked_data;
+  // MicroLock
+  static size_t s_nMicroLockPassCount;
+  // MicroSpinLock
+  static size_t s_nMicroSpinLockPassCount;
+  // PicoSpinLock
+  static size_t s_nPicoSpinLockPassCount;
+  // SharedMutex
+  static size_t s_nSharedMutexPassCount;
+  // RWSpinLock
+  static size_t s_nRWSpinLockPassCount;
+  // RWTicketSpinLock
+  static size_t s_nRWTicketSpinLockPassCount;
+  // RCU
+  static size_t s_nRcuSyncPassCount;
+  static size_t s_nRcuNoSyncPassCount;
+
+  static void SetUpTestCase() {
+    const cds_test::config& cfg = get_config("SequentialFollySync");
+    GetConfigNonZeroExpected(MicroLockPassCount, 2000000000);
+    GetConfigNonZeroExpected(MicroSpinLockPassCount, 1500000000);
+    GetConfigNonZeroExpected(PicoSpinLockPassCount, 2700000000);
+    GetConfigNonZeroExpected(SharedMutexPassCount, 5000000);
+    GetConfigNonZeroExpected(RWSpinLockPassCount, 5000000);
+    GetConfigNonZeroExpected(RWTicketSpinLockPassCount, 5000000);
+    GetConfigNonZeroExpected(RcuSyncPassCount, 180000);
+    GetConfigNonZeroExpected(RcuNoSyncPassCount, 3500000);
+  }
 
-void run_rcu_sync(size_t pass_count) {
-  for (int write_percentage = 1; write_percentage <= 5; write_percentage += 1) {
-    for (size_t count = 0; count < pass_count; count++) {
-      for (int i = 0; i < 100; ++i) {
-        if (i < write_percentage) {
-          RcuFoo* f = new RcuFoo();
-          folly::rcu_retire(f);
-          folly::synchronize_rcu();
-        } else {
-          folly::rcu_reader g;
+  static void run_rcu_sync(size_t pass_count) {
+    for (int write_percentage = 1; write_percentage <= 5; write_percentage += 1) {
+      for (size_t count = 0; count < pass_count; count++) {
+        for (int i = 0; i < 100; ++i) {
+          if (i < write_percentage) {
+            RcuData* data = new RcuData();
+            folly::rcu_retire(data);
+            folly::synchronize_rcu();
+          } else {
+            folly::rcu_reader g;
+          }
         }
       }
     }
   }
-}
 
-void run_rcu_no_sync(size_t pass_count) {
-  for (int write_percentage = 1; write_percentage <= 5; write_percentage += 1) {
-    for (size_t count = 0; count < pass_count; count++) {
-      for (int i = 0; i < 100; ++i) {
-        if (i < write_percentage) {
-          RcuFoo* f = new RcuFoo();
-          folly::rcu_retire(f);
-        } else {
-          folly::rcu_reader g;
+  static void run_rcu_no_sync(size_t pass_count) {
+    for (int write_percentage = 1; write_percentage <= 5; write_percentage += 1) {
+      for (size_t count = 0; count < pass_count; count++) {
+        for (int i = 0; i < 100; ++i) {
+          if (i < write_percentage) {
+            RcuData* data = new RcuData();
+            folly::rcu_retire(data);
+          } else {
+            folly::rcu_reader g;
+          }
         }
       }
     }
   }
-}
 
-template <typename Lock>
-void run_rw_lock(size_t pass_count) {
-  std::unique_ptr<Lock> l(new Lock());
-  for (int write_percentage = 5; write_percentage < 20; write_percentage += 5) {
-    for (size_t count = 0; count < pass_count; count++) {
-      for (int i = 0; i < 100; ++i) {
-        if (i < write_percentage) {
-          l->lock();
-          l->unlock();
-        } else {
-          l->lock_shared();
-          l->unlock_shared();
+  template <typename Lock>
+  static void run_rw_lock(size_t pass_count) {
+    std::unique_ptr<Lock> l(new Lock());
+    for (int write_percentage = 5; write_percentage < 20; write_percentage += 5) {
+      locked_data = 0;
+      size_t read_sum = 0;
+      size_t write_lock_cnt = 0;
+      for (size_t count = 0; count < pass_count; count++) {
+        for (int i = 0; i < 100; ++i) {
+          if (i < write_percentage) {
+            write_lock_cnt++;
+            l->lock();
+            locked_data++;
+            l->unlock();
+          } else {
+            l->lock_shared();
+            read_sum = locked_data;
+            l->unlock_shared();
+          }
         }
       }
+      EXPECT_EQ(write_lock_cnt, locked_data);
+      EXPECT_EQ(locked_data, read_sum);
     }
   }
-}
 
-template <typename Lock>
-void run_small_lock(size_t pass_count) {
-  std::unique_ptr<Lock> l(new Lock());
-  l->init();
-  for (size_t count = 0; count < pass_count; count++) {
-    l->lock();
-    l->unlock();
+  template <typename Lock>
+  static void run_small_lock(size_t pass_count) {
+    std::unique_ptr<Lock> l(new Lock());
+    locked_data = 0;
+    l->init();
+    for (size_t count = 0; count < pass_count; count++) {
+      l->lock();
+      locked_data++;
+      l->unlock();
+    }
+    EXPECT_EQ(locked_data, pass_count);
   }
-}
-
-class FollySyncTest: public ::testing::Test {
-
 };
 
-TEST_F(FollySyncTest, FollyRCU_Sync) {
-  run_rcu_sync(kRcuSyncPassCount);
-}
+size_t FollySyncTest_Sequential::locked_data;
+size_t FollySyncTest_Sequential::s_nMicroLockPassCount;
+size_t FollySyncTest_Sequential::s_nMicroSpinLockPassCount;
+size_t FollySyncTest_Sequential::s_nPicoSpinLockPassCount;
+size_t FollySyncTest_Sequential::s_nSharedMutexPassCount;
+size_t FollySyncTest_Sequential::s_nRWSpinLockPassCount;
+size_t FollySyncTest_Sequential::s_nRWTicketSpinLockPassCount;
+size_t FollySyncTest_Sequential::s_nRcuSyncPassCount;
+size_t FollySyncTest_Sequential::s_nRcuNoSyncPassCount;
 
-TEST_F(FollySyncTest, FollyRCU_NoSync) {
-  run_rcu_no_sync(kRcuNoSyncPassCount);
+TEST_F(FollySyncTest_Sequential, FollyMicroSpinLock) {
+  run_small_lock<MicroSpinLock>(s_nMicroSpinLockPassCount);
 }
 
-TEST_F(FollySyncTest, FollyRWTicketSpinLock_32) {
-  run_rw_lock<RWTicketSpinLock32>(kRWTicketSpinLockPassCount);
+TEST_F(FollySyncTest_Sequential, FollyPicoSpinLock) {
+  run_small_lock<PicoSpinLock>(s_nPicoSpinLockPassCount);
 }
 
-TEST_F(FollySyncTest, FollyRWTicketSpinLock_64) {
-  run_rw_lock<RWTicketSpinLock64>(kRWTicketSpinLockPassCount);
+TEST_F(FollySyncTest_Sequential, FollyMicroLock) {
+  run_small_lock<MicroLock>(s_nMicroLockPassCount);
 }
 
-TEST_F(FollySyncTest, FollyRWSpinLock) {
-  run_rw_lock<RWSpinLock>(kRWSpinLockPassCount);
+TEST_F(FollySyncTest_Sequential, FollyRCU_Sync) {
+  run_rcu_sync(s_nRcuSyncPassCount);
 }
 
-TEST_F(FollySyncTest, FollySharedMutex_ReadPriority) {
-  run_rw_lock<SharedMutexReadPriority>(kSharedMutexPassCount);
+TEST_F(FollySyncTest_Sequential, FollyRCU_NoSync) {
+  run_rcu_no_sync(s_nRcuNoSyncPassCount);
 }
 
-TEST_F(FollySyncTest, FollySharedMutex_WritePriority) {
-  run_rw_lock<SharedMutexWritePriority>(kSharedMutexPassCount);
+TEST_F(FollySyncTest_Sequential, FollyRWTicketSpinLock_32) {
+  run_rw_lock<RWTicketSpinLock32>(s_nRWTicketSpinLockPassCount);
 }
 
-TEST_F(FollySyncTest, FollyMicroSpinLock) {
-  run_small_lock<MicroSpinLock>(kMicroSpinLockPassCount);
+TEST_F(FollySyncTest_Sequential, FollyRWTicketSpinLock_64) {
+  run_rw_lock<RWTicketSpinLock64>(s_nRWTicketSpinLockPassCount);
 }
 
-TEST_F(FollySyncTest, FollyPicoSpinLock) {
-  run_small_lock<PicoSpinLock>(kPicoSpinLockPassCount);
+TEST_F(FollySyncTest_Sequential, FollyRWSpinLock) {
+  run_rw_lock<RWSpinLock>(s_nRWSpinLockPassCount);
 }
 
-TEST_F(FollySyncTest, FollyMicroLock) {
-  run_small_lock<MicroLock>(kMicroLockPassCount);
+TEST_F(FollySyncTest_Sequential, FollySharedMutex_ReadPriority) {
+  run_rw_lock<SharedMutexReadPriority>(s_nSharedMutexPassCount);
 }
 
-int main(int argc, char** argv) {
-  // Init Google test
-  ::testing::InitGoogleTest(&argc, argv);
-  int result = RUN_ALL_TESTS();
-  return result;
+TEST_F(FollySyncTest_Sequential, FollySharedMutex_WritePriority) {
+  run_rw_lock<SharedMutexWritePriority>(s_nSharedMutexPassCount);
 }
+
+} // namespace folly_test
diff --git a/folly/stress-test/sync_test.h b/folly/stress-test/sync_test.h
new file mode 100644 (file)
index 0000000..1f62bae
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef FOLLY_STRESS_TEST_SYNC_TEST_H
+#define FOLLY_STRESS_TEST_SYNC_TEST_H
+
+#include <folly/SmallLocks.h>
+#include <folly/RWSpinLock.h>
+#include <folly/SharedMutex.h>
+#include <folly/synchronization/Rcu.h>
+
+#include <cds_test/stress_test.h>
+#include <cds_test/stress_test_util.h>
+
+#include <algorithm>
+#include <iostream>
+#include <memory>
+#include <random>
+#include <thread>
+#include <chrono>
+
+namespace folly_test {
+
+typedef folly::MicroLock MicroLock;
+typedef folly::MicroSpinLock MicroSpinLock;
+typedef folly::PicoSpinLock<size_t> PicoSpinLock;
+typedef folly::SharedMutexReadPriority SharedMutexReadPriority;
+typedef folly::SharedMutexWritePriority SharedMutexWritePriority;
+typedef folly::RWSpinLock RWSpinLock;
+typedef folly::RWTicketSpinLock32 RWTicketSpinLock32;
+typedef folly::RWTicketSpinLock64 RWTicketSpinLock64;
+
+// Represent the RCU-protected data.
+struct RcuData {
+  size_t d1;
+  size_t d2;
+};
+
+} // namespace folly_test
+
+#endif