Fixes RCU test case errors (loads should use consume ordering)
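
The rewritten tests exercise real RCU-protected data: readers load the
shared pointer with std::memory_order_consume, so the dereference is
dependency-ordered after the load, and writers publish each new version
with a release store before reclaiming the old one. A minimal sketch of
the read-side pattern used throughout (RcuData and rcu_data are the
fixture's own members):

    folly::rcu_reader g;                                    // enter the read-side critical section
    auto* data = rcu_data.load(std::memory_order_consume);  // dependency-ordered load of the current version
    sum += data->d1 + data->d2;                             // safe: reclamation is deferred until readers exit

(Compilers currently strengthen consume to acquire, but consume documents
the dependency ordering the algorithm actually needs.)
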
diff --git a/folly/stress-test/stress-sequential-folly-sync.cpp b/folly/stress-test/stress-sequential-folly-sync.cpp
index 0b849c50d77c75d19f169b3a860245ce74a730b3..5e39e427e67c2de8b66f1be49ff5e1ba9d4abfdb 100644
--- a/folly/stress-test/stress-sequential-folly-sync.cpp
+++ b/folly/stress-test/stress-sequential-folly-sync.cpp
-#include <folly/SmallLocks.h>
-#include <folly/RWSpinLock.h>
-#include <folly/SharedMutex.h>
-#include <folly/synchronization/Rcu.h>
-
-#include <chrono>
-#include <cassert>
-#include <iostream>
-#include <memory>
-
-namespace {
-
-const char* kTestName = "LockUnlock";
-
-// MicroLock
-const size_t kMicroLockPassCount = 2000000000;
-const char* kMicroLockBenchmarkName = "FollyMicroLock";
-typedef folly::MicroLock MicroLock;
-
-// MicroSpinLock
-const size_t kMicroSpinLockPassCount = 1500000000;
-const char* kMicroSpinLockBenchmarkName = "FollyMicroSpinLock";
-typedef folly::MicroSpinLock MicroSpinLock;
-
-// PicoSpinLock
-const size_t kPicoSpinLockPassCount = 2700000000;
-const char* kPicoSpinLockBenchmarkName = "FollyPicoSpinLock";
-typedef folly::PicoSpinLock<size_t> PicoSpinLock;
-
-// SharedMutex
-const size_t kSharedMutexPassCount = 5000000;
-const char* kSharedMutexReadPriorityBenchmarkName =
-    "FollySharedMutex_ReadPriority";
-const char* kSharedMutexWritePriorityBenchmarkName =
-    "FollySharedMutex_WritePriority";
-typedef folly::SharedMutexReadPriority SharedMutexReadPriority;
-typedef folly::SharedMutexWritePriority SharedMutexWritePriority;
-
-// RWSpinLock
-const size_t kRWSpinLockPassCount = 5000000;
-const char* kRWSpinLockBenchmarkName = "FollyRWSpinLock";
-typedef folly::RWSpinLock RWSpinLock;
-
-// RWTicketSpinLock
-const size_t kRWTicketSpinLockPassCount = 5000000;
-const char* kRWTicketSpinLock32BenchmarkName = "FollyRWTicketSpinLock_32";
-const char* kRWTicketSpinLock64BenchmarkName = "FollyRWTicketSpinLock_64";
-typedef folly::RWTicketSpinLock32 RWTicketSpinLock32;
-typedef folly::RWTicketSpinLock64 RWTicketSpinLock64;
-
-// RCU
-const size_t kRcuSyncPassCount = 3000000;
-const size_t kRcuNoSyncPassCount = 2500000;
-const char* kRcuSyncBenchmarkName = "FollyRCU_Sync";
-const char* kRcuNoSyncBenchmarkName = "FollyRCU_NoSync";
-// Represent the RCU-protected data.
-struct RcuFoo {
-  size_t f1;
-  size_t f2;
-};
-
-}
-
-void run_rcu_sync(size_t pass_count, const char* bench_name) {
-  std::cout << "[ RUN      ] " << kTestName << "." << bench_name << std::endl;
-  auto start_time = std::chrono::system_clock::now();
-
-  for (size_t count = 0; count < pass_count; count++) {
-    RcuFoo* f = new RcuFoo();
-    folly::rcu_retire(f);
-    folly::synchronize_rcu();
+#include "sync_test.h"
+
+namespace folly_test {
+
+class FollySyncTest_Sequential : public cds_test::stress_fixture {
+protected:
+  // Simulates the data protected by the locks under test.
+  static size_t locked_data;
+  static std::atomic<RcuData*> rcu_data;
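+  // Readers load this pointer with consume ordering; writers publish new
+  // versions with a release store.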
+  // MicroLock
+  static size_t s_nMicroLockPassCount;
+  // MicroSpinLock
+  static size_t s_nMicroSpinLockPassCount;
+  // PicoSpinLock
+  static size_t s_nPicoSpinLockPassCount;
+  // SharedMutex
+  static size_t s_nSharedMutexPassCount;
+  // RWSpinLock
+  static size_t s_nRWSpinLockPassCount;
+  // RWTicketSpinLock
+  static size_t s_nRWTicketSpinLockPassCount;
+  // RCU
+  static size_t s_nRcuSyncPassCount;
+  static size_t s_nRcuNoSyncPassCount;
+  static size_t s_nRcuReaderOnlyPassCount;
+
+  static void SetUpTestCase() {
+    const cds_test::config& cfg = get_config("SequentialFollySync");
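+    // Pass counts come from the stress-test config; the literal is the
+    // expected non-zero default for each primitive.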
+    GetConfigNonZeroExpected(MicroLockPassCount, 2000000000);
+    GetConfigNonZeroExpected(MicroSpinLockPassCount, 1500000000);
+    GetConfigNonZeroExpected(PicoSpinLockPassCount, 2700000000);
+    GetConfigNonZeroExpected(SharedMutexPassCount, 5000000);
+    GetConfigNonZeroExpected(RWSpinLockPassCount, 5000000);
+    GetConfigNonZeroExpected(RWTicketSpinLockPassCount, 5000000);
+    GetConfigNonZeroExpected(RcuSyncPassCount, 180000);
+    GetConfigNonZeroExpected(RcuNoSyncPassCount, 3500000);
+    GetConfigNonZeroExpected(RcuReaderOnlyPassCount, 3000000);
+
+    // Initialize the RCU-protected data; no reader threads exist yet, so
+    // a relaxed store is sufficient here.
+    rcu_data.store(new RcuData(), std::memory_order_relaxed);
   }
 
-  auto finish_time = std::chrono::system_clock::now();
-  auto dur = finish_time - start_time;
-  auto milisecs = std::chrono::duration_cast<std::chrono::milliseconds>(dur);
-  std::cout << "[       OK ] " << kTestName << "." << bench_name
-            << " (" << milisecs.count() << " ms)" << std::endl;
-}
-
-void run_rcu_no_sync(size_t pass_count, const char* bench_name) {
-  std::cout << "[ RUN      ] " << kTestName << "." << bench_name << std::endl;
-  auto start_time = std::chrono::system_clock::now();
-
-  for (int write_percentage = 5; write_percentage <= 10; write_percentage += 1) {
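+  // Read-only baseline: every pass enters an RCU critical section and
+  // reads the shared data, with no writer in the picture.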
+  static void run_rcu_reader_only(size_t pass_count) {
+    size_t sum = 1;
     for (size_t count = 0; count < pass_count; count++) {
-      for (int i = 0; i < 100; ++i) {
-        if (i < write_percentage) {
-          RcuFoo* f = new RcuFoo();
-          folly::rcu_retire(f);
-        } else {
-          folly::rcu_reader g;
+      folly::rcu_reader g;
+      auto* data = rcu_data.load(std::memory_order_consume);
+      sum += (data->d1 + data->d2);
+    }
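+    // sum can still equal its initial value only if every read saw
+    // d1 == d2 == 0, i.e. no RCU writer test has run yet.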
+    EXPECT_EQ(sum, 1);
+  }
+
+  static void run_rcu_sync(size_t pass_count) {
+    for (int write_percentage = 1; write_percentage <= 5; write_percentage += 1) {
+      size_t sum = 0;
+      for (size_t count = 0; count < pass_count; count++) {
+        for (int i = 0; i < 100; ++i) {
+          if (i < write_percentage) {
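+            // Copy-on-write update: publish the new version with a release
+            // store, then block in synchronize_rcu() until all pre-existing
+            // readers have left before freeing the old version.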
+            auto* old_data = rcu_data.load(std::memory_order_consume);
+            auto* new_data = new RcuData(*old_data);
+            new_data->d1++;
+            new_data->d2++;
+            rcu_data.store(new_data, std::memory_order_release);
+            folly::synchronize_rcu();
+            delete old_data;
+          } else {
+            folly::rcu_reader g;
+            auto* data = rcu_data.load(std::memory_order_consume);
+            sum += (data->d1 + data->d2);
+          }
         }
       }
+      EXPECT_GT(sum, 0);
     }
   }
 
-  auto finish_time = std::chrono::system_clock::now();
-  auto dur = finish_time - start_time;
-  auto milisecs = std::chrono::duration_cast<std::chrono::milliseconds>(dur);
-  std::cout << "[       OK ] " << kTestName << "." << bench_name
-            << " (" << milisecs.count() << " ms)" << std::endl;
-}
-
-template <typename Lock>
-void run_rw_lock(size_t pass_count, const char* bench_name) {
-  std::cout << "[ RUN      ] " << kTestName << "." << bench_name << std::endl;
-  auto start_time = std::chrono::system_clock::now();
-
-  std::unique_ptr<Lock> l(new Lock());
-  for (int write_percentage = 5; write_percentage < 20; write_percentage += 5) {
-    for (size_t count = 0; count < pass_count; count++) {
-      for (int i = 0; i < 100; ++i) {
-        if (i < write_percentage) {
-          l->lock();
-          l->unlock();
-        } else {
-          l->lock_shared();
-          l->unlock_shared();
+  static void run_rcu_no_sync(size_t pass_count) {
+    for (int write_percentage = 1; write_percentage <= 5; write_percentage += 1) {
+      size_t sum = 0;
+      for (size_t count = 0; count < pass_count; count++) {
+        for (int i = 0; i < 100; ++i) {
+          if (i < write_percentage) {
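+            // Same copy-on-write update, but rcu_retire() defers reclamation
+            // to the RCU domain instead of blocking the writer.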
+            auto* old_data = rcu_data.load(std::memory_order_consume);
+            auto* new_data = new RcuData(*old_data);
+            new_data->d1++;
+            new_data->d2++;
+            rcu_data.store(new_data, std::memory_order_release);
+            folly::rcu_retire(old_data);
+          } else {
+            folly::rcu_reader g;
+            auto* data = rcu_data.load(std::memory_order_consume);
+            sum += (data->d1 + data->d2);
+          }
         }
       }
+      EXPECT_GT(sum, 0);
     }
   }
 
-  auto finish_time = std::chrono::system_clock::now();
-  auto dur = finish_time - start_time;
-  auto milisecs = std::chrono::duration_cast<std::chrono::milliseconds>(dur);
-  std::cout << "[       OK ] " << kTestName << "." << bench_name
-            << " (" << milisecs.count() << " ms)" << std::endl;
-}
-
-template <typename Lock>
-void run_small_lock(Lock* l, size_t pass_count, const char* bench_name) {
-    std::cout << "[ RUN      ] " << kTestName << "." << bench_name << std::endl;
-    auto start_time = std::chrono::system_clock::now();
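+  // Sweeps write ratios of 5%, 10%, and 15% and checks that the lock kept
+  // the shared counter consistent.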
+  template <typename Lock>
+  static void run_rw_lock(size_t pass_count) {
+    std::unique_ptr<Lock> l(new Lock());
+    for (int write_percentage = 5; write_percentage < 20; write_percentage += 5) {
+      locked_data = 0;
+      size_t last_read = 0;
+      size_t write_lock_cnt = 0;
+      for (size_t count = 0; count < pass_count; count++) {
+        for (int i = 0; i < 100; ++i) {
+          if (i < write_percentage) {
+            write_lock_cnt++;
+            l->lock();
+            locked_data++;
+            l->unlock();
+          } else {
+            l->lock_shared();
+            last_read = locked_data;
+            l->unlock_shared();
+          }
+        }
+      }
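+      // Each write incremented the counter under the exclusive lock, and
+      // the final shared-mode read must observe the final count.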
+      EXPECT_EQ(write_lock_cnt, locked_data);
+      EXPECT_EQ(locked_data, last_read);
+    }
+  }
 
+  template <typename Lock>
+  static void run_small_lock(size_t pass_count) {
+    std::unique_ptr<Lock> l(new Lock());
+    locked_data = 0;
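+    // folly's small locks are POD-style and must be init()-ed before
+    // first use.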
+    l->init();
     for (size_t count = 0; count < pass_count; count++) {
       l->lock();
+      locked_data++;
       l->unlock();
     }
-    auto finish_time = std::chrono::system_clock::now();
-    auto dur = finish_time - start_time;
-    auto milisecs = std::chrono::duration_cast<std::chrono::milliseconds>(dur);
-    std::cout << "[       OK ] " << kTestName << "." << bench_name
-              << " (" << milisecs.count() << " ms)" << std::endl;
+    EXPECT_EQ(locked_data, pass_count);
+  }
+};
+
+size_t FollySyncTest_Sequential::locked_data;
+std::atomic<RcuData*> FollySyncTest_Sequential::rcu_data;
+size_t FollySyncTest_Sequential::s_nMicroLockPassCount;
+size_t FollySyncTest_Sequential::s_nMicroSpinLockPassCount;
+size_t FollySyncTest_Sequential::s_nPicoSpinLockPassCount;
+size_t FollySyncTest_Sequential::s_nSharedMutexPassCount;
+size_t FollySyncTest_Sequential::s_nRWSpinLockPassCount;
+size_t FollySyncTest_Sequential::s_nRWTicketSpinLockPassCount;
+size_t FollySyncTest_Sequential::s_nRcuSyncPassCount;
+size_t FollySyncTest_Sequential::s_nRcuNoSyncPassCount;
+size_t FollySyncTest_Sequential::s_nRcuReaderOnlyPassCount;
+
+TEST_F(FollySyncTest_Sequential, FollyRCU_ReaderOnly) {
+  run_rcu_reader_only(s_nRcuReaderOnlyPassCount);
+}
+
+TEST_F(FollySyncTest_Sequential, FollyRCU_Sync) {
+  run_rcu_sync(s_nRcuSyncPassCount);
+}
+
+TEST_F(FollySyncTest_Sequential, FollyRCU_NoSync) {
+  run_rcu_no_sync(s_nRcuNoSyncPassCount);
 }
 
-template <typename Lock>
-void init_run_lock(size_t pass_count, const char* bench_name) {
-  std::unique_ptr<Lock> l(new Lock());
-  l->init();
-  run_small_lock<Lock>(l.get(), pass_count, bench_name);
+TEST_F(FollySyncTest_Sequential, FollyMicroSpinLock) {
+  run_small_lock<MicroSpinLock>(s_nMicroSpinLockPassCount);
 }
 
-int main() {
-  run_rcu_sync(kRcuSyncPassCount, kRcuSyncBenchmarkName);
-  run_rcu_no_sync(kRcuNoSyncPassCount, kRcuNoSyncBenchmarkName);
+TEST_F(FollySyncTest_Sequential, FollyPicoSpinLock) {
+  run_small_lock<PicoSpinLock>(s_nPicoSpinLockPassCount);
+}
 
-  run_rw_lock<RWTicketSpinLock32>(kRWTicketSpinLockPassCount,
-                                  kRWTicketSpinLock32BenchmarkName);
-  run_rw_lock<RWTicketSpinLock64>(kRWTicketSpinLockPassCount,
-                                  kRWTicketSpinLock64BenchmarkName);
+TEST_F(FollySyncTest_Sequential, FollyMicroLock) {
+  run_small_lock<MicroLock>(s_nMicroLockPassCount);
+}
 
-  run_rw_lock<RWSpinLock>(kRWSpinLockPassCount, kRWSpinLockBenchmarkName);
+TEST_F(FollySyncTest_Sequential, FollyRWTicketSpinLock_32) {
+  run_rw_lock<RWTicketSpinLock32>(s_nRWTicketSpinLockPassCount);
+}
 
-  run_rw_lock<SharedMutexReadPriority>(
-      kSharedMutexPassCount, kSharedMutexReadPriorityBenchmarkName);
-  run_rw_lock<SharedMutexWritePriority>(
-      kSharedMutexPassCount, kSharedMutexWritePriorityBenchmarkName);
+TEST_F(FollySyncTest_Sequential, FollyRWTicketSpinLock_64) {
+  run_rw_lock<RWTicketSpinLock64>(s_nRWTicketSpinLockPassCount);
+}
 
-  init_run_lock<MicroSpinLock>(kMicroSpinLockPassCount,
-                               kMicroSpinLockBenchmarkName);
-  init_run_lock<PicoSpinLock>(kPicoSpinLockPassCount,
-                              kPicoSpinLockBenchmarkName);
-  init_run_lock<MicroLock>(kMicroLockPassCount, kMicroLockBenchmarkName);
-  return 0;
+TEST_F(FollySyncTest_Sequential, FollyRWSpinLock) {
+  run_rw_lock<RWSpinLock>(s_nRWSpinLockPassCount);
 }
+
+TEST_F(FollySyncTest_Sequential, FollySharedMutex_ReadPriority) {
+  run_rw_lock<SharedMutexReadPriority>(s_nSharedMutexPassCount);
+}
+
+TEST_F(FollySyncTest_Sequential, FollySharedMutex_WritePriority) {
+  run_rw_lock<SharedMutexWritePriority>(s_nSharedMutexPassCount);
+}
+
+} // namespace folly_test