Adds sync benchmarks
[folly.git] / folly / stress-test / stress-sequential-folly-sync.cpp
diff --git a/folly/stress-test/stress-sequential-folly-sync.cpp b/folly/stress-test/stress-sequential-folly-sync.cpp
new file mode 100644 (file)
index 0000000..0b849c5
--- /dev/null
@@ -0,0 +1,177 @@
+#include <folly/SmallLocks.h>
+#include <folly/RWSpinLock.h>
+#include <folly/SharedMutex.h>
+#include <folly/synchronization/Rcu.h>
+
+#include <chrono>
+#include <iostream>
+#include <memory>
+
+namespace {
+
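+// Results are printed as googletest-style "[ RUN      ]" / "[       OK ]"
+// lines, so the output reads like a test run.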
+const char* kTestName = "LockUnlock";
+
+// MicroLock
+const size_t kMicroLockPassCount = 2'000'000'000;
+const char* kMicroLockBenchmarkName = "FollyMicroLock";
+typedef folly::MicroLock MicroLock;
+
+// MicroSpinLock
+const size_t kMicroSpinLockPassCount = 1'500'000'000;
+const char* kMicroSpinLockBenchmarkName = "FollyMicroSpinLock";
+typedef folly::MicroSpinLock MicroSpinLock;
+
+// PicoSpinLock
+const size_t kPicoSpinLockPassCount = 2'700'000'000;
+const char* kPicoSpinLockBenchmarkName = "FollyPicoSpinLock";
+typedef folly::PicoSpinLock<size_t> PicoSpinLock;
+
+// SharedMutex
+const size_t kSharedMutexPassCount = 5'000'000;
+const char* kSharedMutexReadPriorityBenchmarkName =
+    "FollySharedMutex_ReadPriority";
+const char* kSharedMutexWritePriorityBenchmarkName =
+    "FollySharedMutex_WritePriority";
+typedef folly::SharedMutexReadPriority SharedMutexReadPriority;
+typedef folly::SharedMutexWritePriority SharedMutexWritePriority;
+
+// RWSpinLock
+const size_t kRWSpinLockPassCount = 5'000'000;
+const char* kRWSpinLockBenchmarkName = "FollyRWSpinLock";
+typedef folly::RWSpinLock RWSpinLock;
+
+// RWTicketSpinLock
+const size_t kRWTicketSpinLockPassCount = 5'000'000;
+const char* kRWTicketSpinLock32BenchmarkName = "FollyRWTicketSpinLock_32";
+const char* kRWTicketSpinLock64BenchmarkName = "FollyRWTicketSpinLock_64";
+typedef folly::RWTicketSpinLock32 RWTicketSpinLock32;
+typedef folly::RWTicketSpinLock64 RWTicketSpinLock64;
+
+// RCU
+const size_t kRcuSyncPassCount = 3'000'000;
+const size_t kRcuNoSyncPassCount = 2'500'000;
+const char* kRcuSyncBenchmarkName = "FollyRCU_Sync";
+const char* kRcuNoSyncBenchmarkName = "FollyRCU_NoSync";
+// Represents the data protected by RCU in the benchmarks below.
+struct RcuFoo {
+  size_t f1;
+  size_t f2;
+};
+
+} // namespace
+
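+// Measures the cost of a full RCU update: each pass allocates an object,
+// hands it to rcu_retire(), and then blocks in synchronize_rcu() until a
+// grace period has elapsed and the object can be reclaimed.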
+void run_rcu_sync(size_t pass_count, const char* bench_name) {
+  std::cout << "[ RUN      ] " << kTestName << "." << bench_name << std::endl;
+  auto start_time = std::chrono::steady_clock::now();
+
+  for (size_t count = 0; count < pass_count; count++) {
+    RcuFoo* f = new RcuFoo();
+    folly::rcu_retire(f);
+    folly::synchronize_rcu();
+  }
+
+  auto finish_time = std::chrono::steady_clock::now();
+  auto dur = finish_time - start_time;
+  auto millisecs = std::chrono::duration_cast<std::chrono::milliseconds>(dur);
+  std::cout << "[       OK ] " << kTestName << "." << bench_name
+            << " (" << millisecs.count() << " ms)" << std::endl;
+}
+
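+// Measures a mixed RCU workload with no blocking synchronization: most
+// operations are short read-side critical sections, while the remainder
+// (swept from 5% to 10%) allocate an object and pass it to rcu_retire(),
+// which defers reclamation instead of waiting for readers.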
+void run_rcu_no_sync(size_t pass_count, const char* bench_name) {
+  std::cout << "[ RUN      ] " << kTestName << "." << bench_name << std::endl;
+  auto start_time = std::chrono::steady_clock::now();
+
+  for (int write_percentage = 5; write_percentage <= 10; write_percentage++) {
+    for (size_t count = 0; count < pass_count; count++) {
+      for (int i = 0; i < 100; ++i) {
+        if (i < write_percentage) {
+          RcuFoo* f = new RcuFoo();
+          folly::rcu_retire(f);
+        } else {
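+          // RAII guard: enters an RCU read-side critical section on
+          // construction and leaves it on destruction.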
+          folly::rcu_reader g;
+        }
+      }
+    }
+  }
+
+  auto finish_time = std::chrono::steady_clock::now();
+  auto dur = finish_time - start_time;
+  auto millisecs = std::chrono::duration_cast<std::chrono::milliseconds>(dur);
+  std::cout << "[       OK ] " << kTestName << "." << bench_name
+            << " (" << millisecs.count() << " ms)" << std::endl;
+}
+
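+// Measures uncontended acquire/release throughput for a reader-writer lock,
+// sweeping the writer share of operations through 5%, 10%, and 15%.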
+template <typename Lock>
+void run_rw_lock(size_t pass_count, const char* bench_name) {
+  std::cout << "[ RUN      ] " << kTestName << "." << bench_name << std::endl;
+  auto start_time = std::chrono::steady_clock::now();
+
+  auto l = std::make_unique<Lock>();
+  for (int write_percentage = 5; write_percentage < 20; write_percentage += 5) {
+    for (size_t count = 0; count < pass_count; count++) {
+      for (int i = 0; i < 100; ++i) {
+        if (i < write_percentage) {
+          l->lock();
+          l->unlock();
+        } else {
+          l->lock_shared();
+          l->unlock_shared();
+        }
+      }
+    }
+  }
+
+  auto finish_time = std::chrono::steady_clock::now();
+  auto dur = finish_time - start_time;
+  auto millisecs = std::chrono::duration_cast<std::chrono::milliseconds>(dur);
+  std::cout << "[       OK ] " << kTestName << "." << bench_name
+            << " (" << millisecs.count() << " ms)" << std::endl;
+}
+
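+// Measures raw uncontended lock()/unlock() throughput for an exclusive-only
+// lock.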
+template <typename Lock>
+void run_small_lock(Lock* l, size_t pass_count, const char* bench_name) {
+  std::cout << "[ RUN      ] " << kTestName << "." << bench_name << std::endl;
+  auto start_time = std::chrono::steady_clock::now();
+
+  for (size_t count = 0; count < pass_count; count++) {
+    l->lock();
+    l->unlock();
+  }
+
+  auto finish_time = std::chrono::steady_clock::now();
+  auto dur = finish_time - start_time;
+  auto millisecs = std::chrono::duration_cast<std::chrono::milliseconds>(dur);
+  std::cout << "[       OK ] " << kTestName << "." << bench_name
+            << " (" << millisecs.count() << " ms)" << std::endl;
+}
+
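+// folly's small locks (MicroSpinLock, PicoSpinLock, MicroLock) are POD-style
+// types that must be explicitly init()'ed into their unlocked state before
+// first use.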
+template <typename Lock>
+void init_run_lock(size_t pass_count, const char* bench_name) {
+  auto l = std::make_unique<Lock>();
+  l->init();
+  run_small_lock<Lock>(l.get(), pass_count, bench_name);
+}
+
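+// Runs every benchmark once, sequentially on a single thread (hence
+// "sequential" in the file name).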
+int main() {
+  run_rcu_sync(kRcuSyncPassCount, kRcuSyncBenchmarkName);
+  run_rcu_no_sync(kRcuNoSyncPassCount, kRcuNoSyncBenchmarkName);
+
+  run_rw_lock<RWTicketSpinLock32>(kRWTicketSpinLockPassCount,
+                                  kRWTicketSpinLock32BenchmarkName);
+  run_rw_lock<RWTicketSpinLock64>(kRWTicketSpinLockPassCount,
+                                  kRWTicketSpinLock64BenchmarkName);
+
+  run_rw_lock<RWSpinLock>(kRWSpinLockPassCount, kRWSpinLockBenchmarkName);
+
+  run_rw_lock<SharedMutexReadPriority>(
+      kSharedMutexPassCount, kSharedMutexReadPriorityBenchmarkName);
+  run_rw_lock<SharedMutexWritePriority>(
+      kSharedMutexPassCount, kSharedMutexWritePriorityBenchmarkName);
+
+  init_run_lock<MicroSpinLock>(kMicroSpinLockPassCount,
+                               kMicroSpinLockBenchmarkName);
+  init_run_lock<PicoSpinLock>(kPicoSpinLockPassCount,
+                              kPicoSpinLockBenchmarkName);
+  init_run_lock<MicroLock>(kMicroLockPassCount, kMicroLockBenchmarkName);
+  return 0;
+}