Adds folly queue parallel test cases
[folly.git] / folly / stress-test / stress-parallel-folly-sync.cpp
index 775252c15b6194f7826f2830e4ed301dbc80109c..af820c7f4e1aea498566f5d919403fa6e1b97481 100644 (file)
-#include <folly/SmallLocks.h>
-#include <folly/RWSpinLock.h>
-#include <folly/SharedMutex.h>
-#include <folly/synchronization/Rcu.h>
-
-#include <gtest/gtest.h>
-
-#include <memory>
-
-namespace {
-
-// MicroLock
-const size_t kMicroLockPassCount = 2000000000;
-typedef folly::MicroLock MicroLock;
-
-// MicroSpinLock
-const size_t kMicroSpinLockPassCount = 1500000000;
-typedef folly::MicroSpinLock MicroSpinLock;
-
-// PicoSpinLock
-const size_t kPicoSpinLockPassCount = 2700000000;
-typedef folly::PicoSpinLock<size_t> PicoSpinLock;
-
-// SharedMutex
-const size_t kSharedMutexPassCount = 5000000;
-typedef folly::SharedMutexReadPriority SharedMutexReadPriority;
-typedef folly::SharedMutexWritePriority SharedMutexWritePriority;
-
-// RWSpinLock
-const size_t kRWSpinLockPassCount = 5000000;
-typedef folly::RWSpinLock RWSpinLock;
-
-// RWTicketSpinLock
-const size_t kRWTicketSpinLockPassCount = 5000000;
-typedef folly::RWTicketSpinLock32 RWTicketSpinLock32;
-typedef folly::RWTicketSpinLock64 RWTicketSpinLock64;
-
-// RCU
-const size_t kRcuSyncPassCount = 180000;
-const size_t kRcuNoSyncPassCount = 3500000;
-// Represent the RCU-protected data.
-struct RcuFoo {
-  size_t f1;
-  size_t f2;
-};
+#include "sync_test.h"
+
+namespace folly_test {
+
+class FollySyncTest_Parallel: public cds_test::stress_fixture {
+protected:
+  // Number of worker threads each test spawns (from config; default 4).
+  static size_t s_nThreadCount;
+  // Simulate as the data protected by the lock.
+  static size_t locked_data;
+  // Current RCU-protected object: writers swap it out, readers dereference
+  // it inside an rcu_reader critical section.
+  static std::atomic<RcuData*> rcu_data;
+  // For RCU, we mostly want to benchmark the readers (cause it's designed for
+  // very fast readers and occasional writers). We have a writer thread that
+  // runs nonstop until all other reader threads are done.
+  static std::atomic_uint rcu_readers_num;
+  // Per-primitive iteration counts; all filled from the test config in
+  // SetUpTestCase (defaults listed there).
+  // MicroLock
+  static size_t s_nMicroLockPassCount;
+  // MicroSpinLock
+  static size_t s_nMicroSpinLockPassCount;
+  // PicoSpinLock
+  static size_t s_nPicoSpinLockPassCount;
+  // SharedMutex
+  static size_t s_nSharedMutexPassCount;
+  // RWSpinLock
+  static size_t s_nRWSpinLockPassCount;
+  // RWTicketSpinLock
+  static size_t s_nRWTicketSpinLockPassCount;
+  // RCU
+  static size_t s_nRcuReaderPassCount;
+  static size_t s_nRcuWriterPassCount;
+  // Writer sleep interval between updates, in milliseconds.
+  static size_t s_nRcuWriterFrequency;
+
+  // Percentage (0-100) of iterations that take the rw-lock exclusively.
+  static unsigned s_nSharedMutexWritePercentage;
+  static unsigned s_nRWSpinLockWritePercentage;
+  static unsigned s_nRWTicketSpinLockWritePercentage;
+
+  static void SetUpTestCase() {
+    const cds_test::config& cfg = get_config("ParallelFollySync");
+    GetConfigNonZeroExpected(ThreadCount, 4);
+    GetConfigNonZeroExpected(MicroLockPassCount, 2000000000);
+    GetConfigNonZeroExpected(MicroSpinLockPassCount, 1500000000);
+    GetConfigNonZeroExpected(PicoSpinLockPassCount, 2700000000);
+    GetConfigNonZeroExpected(SharedMutexPassCount, 5000000);
+    GetConfigNonZeroExpected(RWSpinLockPassCount, 5000000);
+    GetConfigNonZeroExpected(RWTicketSpinLockPassCount, 5000000);
+    GetConfigNonZeroExpected(RcuReaderPassCount, 10000);
+    GetConfigNonZeroExpected(RcuWriterPassCount, 500);
+    // Every 100 ms by default there will be a writer.
+    GetConfigNonZeroExpected(RcuWriterFrequency, 100);
+
+    GetConfigNonZeroExpected(SharedMutexWritePercentage, 5);
+    GetConfigNonZeroExpected(RWSpinLockWritePercentage, 5);
+    GetConfigNonZeroExpected(RWTicketSpinLockWritePercentage, 5);
+
+    rcu_data.store(new RcuData(), std::memory_order_relaxed);
+  }
 
-}
+  static void run_rcu_writer_sync() {
+    while (rcu_readers_num.load(std::memory_order_acquire) > 0) {
+      auto *old_data = rcu_data.load(std::memory_order_relaxed);
+      auto *new_data = new RcuData(*old_data);
+      new_data->d1++;
+      new_data->d2++;
+      rcu_data.store(new_data, std::memory_order_relaxed);
+      folly::synchronize_rcu();
+      delete old_data;
+      std::this_thread::sleep_for(
+          std::chrono::milliseconds(s_nRcuWriterFrequency));
+    }
+  }
+
+  // Writer loop for FollyRCU_NoSync: publish a fresh copy and defer
+  // reclamation of the old one through rcu_retire() instead of blocking in
+  // synchronize_rcu().  Runs until every reader thread has finished.
+  static void run_rcu_writer_no_sync() {
+    while (rcu_readers_num.load(std::memory_order_acquire) > 0) {
+      auto *old_data = rcu_data.load(std::memory_order_relaxed);
+      auto *new_data = new RcuData(*old_data);
+      new_data->d1++;
+      new_data->d2++;
+      // Release store pairs with the readers' acquire load so the new
+      // object's fields are visible before the pointer is.
+      rcu_data.store(new_data, std::memory_order_release);
+      folly::rcu_retire(old_data);
+      std::this_thread::sleep_for(
+          std::chrono::milliseconds(s_nRcuWriterFrequency));
+    }
+  }
 
-void run_rcu_sync(size_t pass_count) {
-  for (int write_percentage = 1; write_percentage <= 5; write_percentage += 1) {
+  static void run_rcu_reader(size_t pass_count) {
+    size_t sum = 0;
     for (size_t count = 0; count < pass_count; count++) {
-      for (int i = 0; i < 100; ++i) {
-        if (i < write_percentage) {
-          RcuFoo* f = new RcuFoo();
-          folly::rcu_retire(f);
-          folly::synchronize_rcu();
-        } else {
-          folly::rcu_reader g;
-        }
-      }
+      folly::rcu_reader g;
+      auto *data = rcu_data.load(std::memory_order_relaxed);
+      sum += (data->d1 + data->d2);
     }
+    rcu_readers_num.fetch_sub(1, std::memory_order_release);
+    // Just want to simulate the reading.
+    EXPECT_GT(sum, 0);
   }
-}
 
-void run_rcu_no_sync(size_t pass_count) {
-  for (int write_percentage = 1; write_percentage <= 5; write_percentage += 1) {
+  // Mixed reader/writer loop over a shared/exclusive lock: roughly
+  // write_percentage% of iterations take the lock exclusively and increment
+  // locked_data; the rest take it shared and sample the current value.
+  template <typename Lock>
+  static void run_rw_lock(Lock *l, size_t pass_count,
+                          unsigned write_percentage) {
+    // Holds the most recently *sampled* locked_data value, not a running
+    // total -- the name "sum" is historical.
+    size_t sum = 0;
     for (size_t count = 0; count < pass_count; count++) {
-      for (int i = 0; i < 100; ++i) {
-        if (i < write_percentage) {
-          RcuFoo* f = new RcuFoo();
-          folly::rcu_retire(f);
-        } else {
-          folly::rcu_reader g;
-        }
+      if (rand(100) < write_percentage) {
+        l->lock();
+        locked_data++;
+        l->unlock();
+      } else {
+        l->lock_shared();
+        sum = locked_data;
+        l->unlock_shared();
       }
     }
+    // Probabilistic sanity check: by the end, the last sampled value should
+    // reflect at least this thread's own expected number of writes.
+    // NOTE(review): with write_percentage == 100 no shared read ever updates
+    // sum, so this fails -- confirm configs keep the percentage below 100.
+    EXPECT_GE(sum, pass_count * write_percentage / 100);
   }
-}
 
-template <typename Lock>
-void run_rw_lock(size_t pass_count) {
-  std::unique_ptr<Lock> l(new Lock());
-  for (int write_percentage = 5; write_percentage < 20; write_percentage += 5) {
+  // Exclusive-only loop for the "small" locks (MicroLock, MicroSpinLock,
+  // PicoSpinLock): every iteration takes the lock and bumps locked_data,
+  // letting the caller verify pass_count * thread_count total increments.
+  template <typename Lock>
+  static void run_small_lock(Lock* l, size_t pass_count) {
     for (size_t count = 0; count < pass_count; count++) {
-      for (int i = 0; i < 100; ++i) {
-        if (i < write_percentage) {
-          l->lock();
-          l->unlock();
-        } else {
-          l->lock_shared();
-          l->unlock_shared();
-        }
-      }
+      l->lock();
+      locked_data++;
+      l->unlock();
     }
   }
-}
 
-template <typename Lock>
-void run_small_lock(size_t pass_count) {
-  std::unique_ptr<Lock> l(new Lock());
-  l->init();
-  for (size_t count = 0; count < pass_count; count++) {
-    l->lock();
-    l->unlock();
+  // Spawn s_nThreadCount threads, each running the same callable (and its
+  // arguments, copied per thread), then join them all before returning.
+  template <typename... Args>
+  static void FollySyncThreading(Args... args) {
+    std::unique_ptr<std::thread[]> threads(new std::thread[s_nThreadCount]);
+    for (size_t i = 0; i < s_nThreadCount; i++) {
+      threads[i] = std::thread(args...);
+    }
+    for (size_t i = 0; i < s_nThreadCount; i++) {
+      threads[i].join();
+    }
   }
-}
 
-class FollySyncTest: public ::testing::Test {
+  // Run one writer thread (writer_func) plus s_nThreadCount - 1 reader
+  // threads; the writer loops until every reader has decremented
+  // rcu_readers_num to zero.  NOTE(review): assumes s_nThreadCount >= 2 --
+  // with a single thread the writer sees zero readers and exits immediately.
+  template <typename WriterFunc>
+  static void FollyRcuThreading(WriterFunc writer_func) {
+    rcu_readers_num.store(s_nThreadCount - 1, std::memory_order_release);
 
+    std::unique_ptr<std::thread[]> threads(new std::thread[s_nThreadCount]);
+    // One of the threads is a writer.
+    threads[0] = std::thread(writer_func);
+    for (size_t i = 1; i < s_nThreadCount; i++) {
+      threads[i] = std::thread(run_rcu_reader, s_nRcuReaderPassCount);
+    }
+    for (size_t i = 0; i < s_nThreadCount; i++) {
+      threads[i].join();
+    }
+  }
+
+  // Allocate and init() one small lock, reset the shared counter, hammer it
+  // from s_nThreadCount threads, then check that no increment was lost.
+  template <typename SmallLockType>
+  static void FollySmallLockThreading(size_t pass_count) {
+    std::unique_ptr<SmallLockType> l(new SmallLockType());
+    l->init();
+    locked_data = 0;
+    FollySyncThreading(run_small_lock<SmallLockType>, l.get(), pass_count);
+    EXPECT_EQ(locked_data, pass_count * s_nThreadCount);
+  }
+
+  // Drive run_rw_lock from s_nThreadCount threads; correctness is asserted
+  // per-thread inside run_rw_lock, so no post-condition is checked here.
+  template <typename RWLockType>
+  static void FollyRWLockThreading(size_t pass_count, unsigned write_percentage) {
+    std::unique_ptr<RWLockType> l(new RWLockType());
+    locked_data = 0;
+    FollySyncThreading(run_rw_lock<RWLockType>, l.get(), pass_count,
+                       write_percentage);
+  }
 };
 
-TEST_F(FollySyncTest, FollyRCU_Sync) {
-  run_rcu_sync(kRcuSyncPassCount);
-}
+// Out-of-class definitions for the fixture's static state (zero-initialized).
+size_t FollySyncTest_Parallel::locked_data;
+std::atomic<RcuData*> FollySyncTest_Parallel::rcu_data;
+std::atomic_uint FollySyncTest_Parallel::rcu_readers_num;
+size_t FollySyncTest_Parallel::s_nThreadCount;
+size_t FollySyncTest_Parallel::s_nMicroLockPassCount;
+size_t FollySyncTest_Parallel::s_nMicroSpinLockPassCount;
+size_t FollySyncTest_Parallel::s_nPicoSpinLockPassCount;
+size_t FollySyncTest_Parallel::s_nSharedMutexPassCount;
+size_t FollySyncTest_Parallel::s_nRWSpinLockPassCount;
+size_t FollySyncTest_Parallel::s_nRWTicketSpinLockPassCount;
 
-TEST_F(FollySyncTest, FollyRCU_NoSync) {
-  run_rcu_no_sync(kRcuNoSyncPassCount);
+size_t FollySyncTest_Parallel::s_nRcuReaderPassCount;
+// NOTE(review): s_nRcuWriterPassCount is configured in SetUpTestCase but
+// never read -- the writers run until the readers finish instead.
+size_t FollySyncTest_Parallel::s_nRcuWriterPassCount;
+size_t FollySyncTest_Parallel::s_nRcuWriterFrequency;
+
+unsigned FollySyncTest_Parallel::s_nSharedMutexWritePercentage;
+unsigned FollySyncTest_Parallel::s_nRWSpinLockWritePercentage;
+unsigned FollySyncTest_Parallel::s_nRWTicketSpinLockWritePercentage;
+
+// Test entry points.  NOTE(review): the explicit main() was removed --
+// presumably the cds_test stress framework now supplies one; verify the
+// target links a gtest main.
+TEST_F(FollySyncTest_Parallel, FollyRCU_Sync) {
+  FollyRcuThreading(run_rcu_writer_sync);
 }
 
-TEST_F(FollySyncTest, FollyRWTicketSpinLock_32) {
-  run_rw_lock<RWTicketSpinLock32>(kRWTicketSpinLockPassCount);
+TEST_F(FollySyncTest_Parallel, FollyRCU_NoSync) {
+  FollyRcuThreading(run_rcu_writer_no_sync);
 }
 
-TEST_F(FollySyncTest, FollyRWTicketSpinLock_64) {
-  run_rw_lock<RWTicketSpinLock64>(kRWTicketSpinLockPassCount);
+TEST_F(FollySyncTest_Parallel, FollyRWTicketSpinLock_32) {
+  FollyRWLockThreading<RWTicketSpinLock32>(s_nRWTicketSpinLockPassCount,
+                                           s_nRWTicketSpinLockWritePercentage);
 }
 
-TEST_F(FollySyncTest, FollyRWSpinLock) {
-  run_rw_lock<RWSpinLock>(kRWSpinLockPassCount);
+TEST_F(FollySyncTest_Parallel, FollyRWTicketSpinLock_64) {
+  FollyRWLockThreading<RWTicketSpinLock64>(s_nRWTicketSpinLockPassCount,
+                                           s_nRWTicketSpinLockWritePercentage);
 }
 
-TEST_F(FollySyncTest, FollySharedMutex_ReadPriority) {
-  run_rw_lock<SharedMutexReadPriority>(kSharedMutexPassCount);
+TEST_F(FollySyncTest_Parallel, FollyRWSpinLock) {
+  FollyRWLockThreading<RWSpinLock>(s_nRWSpinLockPassCount,
+                                   s_nRWSpinLockWritePercentage);
 }
 
-TEST_F(FollySyncTest, FollySharedMutex_WritePriority) {
-  run_rw_lock<SharedMutexWritePriority>(kSharedMutexPassCount);
+TEST_F(FollySyncTest_Parallel, FollySharedMutex_ReadPriority) {
+  FollyRWLockThreading<SharedMutexReadPriority>(s_nSharedMutexPassCount,
+                                                s_nSharedMutexWritePercentage);
 }
 
-TEST_F(FollySyncTest, FollyMicroSpinLock) {
-  run_small_lock<MicroSpinLock>(kMicroSpinLockPassCount);
+TEST_F(FollySyncTest_Parallel, FollySharedMutex_WritePriority) {
+  FollyRWLockThreading<SharedMutexWritePriority>(s_nSharedMutexPassCount,
+                                                 s_nSharedMutexWritePercentage);
 }
 
-TEST_F(FollySyncTest, FollyPicoSpinLock) {
-  run_small_lock<PicoSpinLock>(kPicoSpinLockPassCount);
+TEST_F(FollySyncTest_Parallel, FollyMicroSpinLock) {
+  FollySmallLockThreading<MicroSpinLock>(s_nMicroSpinLockPassCount);
 }
 
-TEST_F(FollySyncTest, FollyMicroLock) {
-  run_small_lock<MicroLock>(kMicroLockPassCount);
+TEST_F(FollySyncTest_Parallel, FollyPicoSpinLock) {
+  FollySmallLockThreading<PicoSpinLock>(s_nPicoSpinLockPassCount);
 }
 
-int main(int argc, char** argv) {
-  // Init Google test
-  ::testing::InitGoogleTest(&argc, argv);
-  int result = RUN_ALL_TESTS();
-  return result;
+TEST_F(FollySyncTest_Parallel, FollyMicroLock) {
+  FollySmallLockThreading<MicroLock>(s_nMicroLockPassCount);
 }
+
+} // namespace folly_test