--- /dev/null
+# Standalone build for the folly stress-test drivers.
+cmake_minimum_required(VERSION 3.1)
+
+# Compilers must be chosen before project() enables the languages.
+# (Normally passed via -DCMAKE_CXX_COMPILER or a toolchain file; kept here
+# to preserve the original self-contained workflow.)
+set(CMAKE_C_COMPILER clang-native)
+set(CMAKE_CXX_COMPILER clang++-native)
+
+project(folly-stress-drivers C CXX)
+
+# Default to an optimized build, but respect a user-supplied choice.
+if(NOT CMAKE_BUILD_TYPE)
+  set(CMAKE_BUILD_TYPE Release)
+endif()
+
+# Replaces the hand-rolled "-std=c++1y" CXX_FLAGS append: same standard
+# (C++14), but CMake picks the flag spelling the compiler understands.
+set(CMAKE_CXX_STANDARD 14)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+# Hard-coded dependency locations on the benchmark machine.
+include_directories(
+  /scratch/googletest/googletest/include
+  /scratch/gflags/gflags/build/include
+  /scratch/glog/glog/src
+  ../..
+)
+
+link_directories(
+  /scratch/folly/orig-folly/folly/folly-lib
+  /scratch/glog/glog/glog-lib
+  /scratch/gflags/gflags/build/gflags-lib
+  /scratch/googletest/googletest
+)
+
+set(FOLLY_LIB folly pthread gflags glog gtest)
+set(RCU_OBJ ../synchronization/.libs/Rcu.o)
+
+# Helper: one stress driver per workload; each builds <name>.cpp plus the
+# prebuilt RCU object and links the shared dependency set.
+function(add_stress_driver name)
+  add_executable(${name} ${name}.cpp ${RCU_OBJ})
+  target_link_libraries(${name} PRIVATE ${FOLLY_LIB})
+endfunction()
+
+# Sequential drivers
+add_stress_driver(stress-sequential-folly-map)
+add_stress_driver(stress-sequential-folly-queue)
+add_stress_driver(stress-sequential-folly-sync)
+
+# Parallel drivers
+add_stress_driver(stress-parallel-folly-map)
+add_stress_driver(stress-parallel-folly-queue)
+add_stress_driver(stress-parallel-folly-sync)
+++ /dev/null
-#!/bin/bash -e
-
-Benchmarks=(
- stress-sequential-folly-queue
- stress-sequential-folly-map
- stress-sequential-folly-sync
-)
-
-for bench in ${Benchmarks[*]}; do
- rm -rf $bench ${bench}.bc ${bench}.o
-done
--- /dev/null
+#include <folly/concurrency/ConcurrentHashMap.h>
+#include <folly/AtomicHashMap.h>
+#include <folly/AtomicUnorderedMap.h>
+
+#include <gtest/gtest.h>
+
+#include <memory>
+
+namespace {
+
+// A key is "hot" (insert/update applied) when (i % s_nInsertPercentage) == 1,
+// i.e. roughly 10% of the key range per pass.
+const unsigned s_nInsertPercentage = 10;
+
+// Key-range size and number of passes per map flavor. Pass counts differ
+// per map — presumably tuned for comparable wall time; TODO confirm.
+const size_t kConcurrentHashMapSize = 10000;
+const size_t kConcurrentHashMapPassCount = 36000;
+
+const size_t kAtomicHashMapSize = 10000;
+const size_t kAtomicHashMapPassCount = 250000;
+
+const size_t kAtomicUnorderedInsertMapSize = 10000;
+const size_t kAtomicUnorderedInsertMapPassCount = 500000;
+
+// Maps under test, all mapping size_t -> size_t.
+typedef folly::ConcurrentHashMap<size_t, size_t> ConcurrentHashMap;
+typedef folly::AtomicHashMap<size_t, size_t> AtomicHashMap;
+typedef folly::AtomicUnorderedInsertMap64<size_t, size_t>
+ AtomicUnorderedInsertMap;
+}
+
+// Membership probe for AtomicUnorderedInsertMap; uses the const iterator
+// pair find()/cend() exposed by that container.
+template <typename Key>
+bool map_contains(const AtomicUnorderedInsertMap* map, Key key) {
+  const auto pos = map->find(key);
+  return pos != map->cend();
+}
+
+// Membership probe for ConcurrentHashMap; uses the const iterator pair
+// find()/cend() exposed by that container.
+template <typename Key>
+bool map_contains(const ConcurrentHashMap* map, Key key) {
+  const auto pos = map->find(key);
+  return pos != map->cend();
+}
+
+// Generic fallback membership probe: works with any map type exposing the
+// usual find()/end() pair (e.g. AtomicHashMap).
+template <typename Map, typename Key>
+bool map_contains(const Map* map, Key key) {
+  const auto pos = map->find(key);
+  return pos != map->end();
+}
+
+// Single-threaded stress loop for AtomicUnorderedInsertMap: each pass walks
+// the key range, emplacing on ~10% of keys and looking every key up. The
+// final check relies on finds succeeding exactly for keys previously
+// emplaced in the same order — keep the insert-before-find ordering intact.
+template <typename Map>
+void run_atomic_unordered_insert_map(size_t map_size, size_t pass_count) {
+ size_t nInsertedNum = 0;
+ size_t nFindSuccess = 0;
+ std::unique_ptr<Map> map(new Map(map_size));
+ for (size_t count = 0; count < pass_count; count++) {
+ for (size_t i = 0; i < map_size; ++i) {
+ // The number to operate on the map.
+ size_t n = map_size + i;
+ // Insert/update path for "hot" keys (i % 10 == 1).
+ // NOTE(review): AtomicUnorderedInsertMap is insert-only, so repeated
+ // emplace on an existing key presumably leaves the old value — the
+ // branches below exercise the API rather than guarantee an update;
+ // confirm against folly docs.
+ if (i % s_nInsertPercentage == 1) {
+ auto iter = map->find(i);
+ if (iter != map->cend()) {
+ if (iter->second == n) {
+ map->emplace(i, n / 2);
+ } else {
+ map->emplace(i, n);
+ }
+ } else {
+ map->emplace(i, n);
+ }
+ nInsertedNum++;
+ }
+ // Find: succeeds only for hot keys, so the success count must equal
+ // the insert count at the end.
+ {
+ if (map_contains(map.get(), i)) {
+ ++nFindSuccess;
+ }
+ }
+ }
+ }
+ EXPECT_EQ(nFindSuccess, nInsertedNum);
+}
+
+// Single-threaded stress loop for AtomicHashMap: each pass inserts or
+// updates ~10% of keys (values are updated in place via the iterator) and
+// looks every key up. Finds succeed exactly for keys touched by the insert
+// branch, so the two counters must match at the end.
+template <typename Map>
+void run_atomic_hashmap(size_t map_size, size_t pass_count) {
+ size_t nInsertedNum = 0;
+ size_t nFindSuccess = 0;
+ std::unique_ptr<Map> map(new Map(map_size));
+ for (size_t count = 0; count < pass_count; count++) {
+ for (size_t i = 0; i < map_size; ++i) {
+ // The number to operate on the map.
+ size_t n = map_size + i;
+ // Insert new keys; flip existing values between n and n/2 in place.
+ if (i % s_nInsertPercentage == 1) {
+ auto iter = map->find(i);
+ if (iter != map->end()) {
+ if (iter->second == n) {
+ iter->second = n / 2;
+ } else {
+ iter->second = n;
+ }
+ } else {
+ map->insert({i, n});
+ }
+ nInsertedNum++;
+ }
+ // Find: succeeds only for keys handled by the branch above.
+ {
+ if (map_contains(map.get(), i)) {
+ ++nFindSuccess;
+ }
+ }
+ }
+ }
+ EXPECT_EQ(nFindSuccess, nInsertedNum);
+}
+
+// Single-threaded stress loop for ConcurrentHashMap: each pass inserts ~10%
+// of keys, looks every key up, then erases what it inserted so successful
+// finds must equal inserts at the end.
+template <typename Map>
+void run_concurrent_hashmap(size_t map_size, size_t pass_count) {
+  size_t nInsertedNum = 0;
+  size_t nFindSuccess = 0;
+  std::unique_ptr<Map> map(new Map(map_size));
+  for (size_t count = 0; count < pass_count; count++) {
+    for (size_t i = 0; i < map_size; ++i) {
+      // The number to operate on the map.
+      size_t n = map_size + i;
+      // Insert. The return value was previously stored in an unused local
+      // (`iter`); it is intentionally discarded — presence is all we need.
+      if (i % s_nInsertPercentage == 1) {
+        map->insert({i, n});
+        nInsertedNum++;
+      }
+      // Find: only the key inserted above (not yet erased) is present.
+      {
+        if (map_contains(map.get(), i)) {
+          ++nFindSuccess;
+        }
+      }
+      // Delete what this iteration inserted, keeping passes independent.
+      if (i % s_nInsertPercentage == 1) {
+        if (map_contains(map.get(), i)) {
+          map->erase(i);
+        }
+      }
+    }
+  }
+  EXPECT_EQ(nFindSuccess, nInsertedNum);
+}
+
+// Fixture for the single-threaded folly map stress tests below; holds no
+// state, it only groups the tests under one name.
+class FollyMapInsDelFindTest: public ::testing::Test {
+};
+
+// Insert/find/erase loop on folly::ConcurrentHashMap.
+TEST_F(FollyMapInsDelFindTest, FollyConcurrentHashMap) {
+ run_concurrent_hashmap<ConcurrentHashMap>(
+ kConcurrentHashMapSize,
+ kConcurrentHashMapPassCount);
+}
+
+// Insert/update/find loop on folly::AtomicHashMap.
+TEST_F(FollyMapInsDelFindTest, FollyAtomicHashMap) {
+ run_atomic_hashmap<AtomicHashMap>(
+ kAtomicHashMapSize,
+ kAtomicHashMapPassCount);
+}
+
+// Emplace/find loop on folly::AtomicUnorderedInsertMap64.
+TEST_F(FollyMapInsDelFindTest, FollyAtomicUnorderedInsertMap) {
+ run_atomic_unordered_insert_map<AtomicUnorderedInsertMap>(
+ kAtomicUnorderedInsertMapSize,
+ kAtomicUnorderedInsertMapPassCount);
+}
+
+// Standalone gtest entry point for this driver binary.
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
--- /dev/null
+#include <folly/concurrency/UnboundedQueue.h>
+#include <folly/concurrency/DynamicBoundedQueue.h>
+#include <folly/AtomicLinkedList.h>
+#include <folly/MPMCQueue.h>
+
+#include <gtest/gtest.h>
+
+#include <memory>
+
+namespace {
+
+// All tunables are const: they are never mutated, and the sibling map/sync
+// drivers declare their counterparts const — this file was the outlier.
+// Pass/enqueue counts presumably tuned for comparable wall time per test;
+// TODO confirm.
+
+// Unbounded queue: batch size per enqueue stride and total enqueues for
+// each producer/consumer flavor.
+const size_t kUnboundedQueueEnqueueStride = 10000;
+const size_t kUSPSCQueueEnqueueCount = 1200000000;
+const size_t kUMPSCQueueEnqueueCount = 320000000;
+const size_t kUSPMCQueueEnqueueCount = 320000000;
+const size_t kUMPMCQueueEnqueueCount = 320000000;
+
+typedef folly::USPSCQueue<size_t, false> USPSCQueue;
+typedef folly::UMPSCQueue<size_t, false> UMPSCQueue;
+typedef folly::USPMCQueue<size_t, false> USPMCQueue;
+typedef folly::UMPMCQueue<size_t, false> UMPMCQueue;
+
+// Dynamic bounded queue
+const size_t kDynamicBoundedQueueEnqueueStride = 50000;
+const size_t kDynamicBoundedQueueCapacity = 200000;
+const size_t kDSPSCQueueEnqueueCount = 1200000000;
+const size_t kDMPSCQueueEnqueueCount = 320000000;
+const size_t kDSPMCQueueEnqueueCount = 320000000;
+const size_t kDMPMCQueueEnqueueCount = 320000000;
+
+typedef folly::DSPSCQueue<size_t, false> DSPSCQueue;
+typedef folly::DMPSCQueue<size_t, false> DMPSCQueue;
+typedef folly::DSPMCQueue<size_t, false> DSPMCQueue;
+typedef folly::DMPMCQueue<size_t, false> DMPMCQueue;
+
+// AtomicLinkedList
+const size_t kAtomicLinkedListSize = 50000;
+const size_t kAtomicLinkedListPassCount = 10000;
+typedef folly::AtomicLinkedList<size_t> AtomicLinkedList;
+
+// MPMC Queue (linearizable)
+const size_t kMPMCQueueEnqueueStride = 10000;
+const size_t kMPMCQueueCapacity = 50000;
+const size_t kMPMCQueueEnqueueCount = 500000000;
+typedef folly::MPMCQueue<size_t> MPMCQueue;
+
+}
+
+// Fill an AtomicLinkedList with 0..N-1, drain it with sweep()/reverseSweep()
+// on alternating passes, and verify the drained sum each pass.
+void run_atomic_linkedlist() {
+  // Alternates across passes. Declared OUTSIDE the pass loop: previously it
+  // was re-initialized to true every pass, so the flip at the bottom was
+  // dead and the reverseSweep branch never ran.
+  bool in_order = true;
+  // Sum of 0..N-1 — the expected total regardless of sweep direction.
+  const size_t supposed_sum =
+      kAtomicLinkedListSize * (kAtomicLinkedListSize - 1) / 2;
+  for (size_t pass = 0; pass < kAtomicLinkedListPassCount; pass++) {
+    std::unique_ptr<AtomicLinkedList> list(new AtomicLinkedList());
+    for (size_t i = 0; i < kAtomicLinkedListSize; i++) {
+      list->insertHead(i);
+    }
+    size_t nSum = 0;
+    auto func = [&nSum] (size_t elem) { nSum += elem; };
+    if (in_order) {
+      list->sweep(func);
+    } else {
+      list->reverseSweep(func);
+    }
+    in_order = !in_order;
+
+    EXPECT_EQ(nSum, supposed_sum);
+  }
+}
+
+// Generic driver: enqueue values in strides, dequeue the same amount, and
+// check that the sum of everything dequeued matches the arithmetic series.
+template <typename Queue>
+void run_queue(Queue* q, size_t enqueue_count, size_t enqueue_stride) {
+  size_t next_value = 0;
+  size_t pop_sum = 0;
+  while (next_value < enqueue_count) {
+    // Last stride may be short.
+    const size_t batch = std::min(enqueue_count - next_value, enqueue_stride);
+    for (size_t i = 0; i < batch; i++) {
+      q->enqueue(next_value);
+      ++next_value;
+    }
+    for (size_t i = 0; i < batch; i++) {
+      size_t popped;
+      q->dequeue(popped);
+      pop_sum += popped;
+    }
+  }
+  // Sum of 0..enqueue_count-1.
+  const size_t expected = enqueue_count * (enqueue_count - 1) / 2;
+  EXPECT_EQ (pop_sum, expected);
+}
+
+// MPMC Specialization.
+template <>
+void run_queue(MPMCQueue* q, size_t enqueue_count, size_t enqueue_stride) {
+ size_t nNo = 0;
+ size_t push_sum = 0;
+ size_t pop_sum = 0;
+ while (nNo < enqueue_count) {
+ size_t curr_push_count =
+ std::min(enqueue_count - nNo, enqueue_stride);
+ for (size_t i = 0; i < curr_push_count; i++) {
+ if (q->write(nNo)) {
+ push_sum += nNo;
+ nNo++;
+ }
+ }
+ size_t res;
+ while (q->read(res)) {
+ pop_sum += res;
+ }
+ }
+
+ size_t supposed_sum = enqueue_count * (enqueue_count - 1) / 2;
+ EXPECT_EQ(pop_sum, supposed_sum);
+}
+
+// Build a default-constructed (capacity-less) queue and exercise it.
+template <typename Queue>
+void run_without_initial_capacity(size_t enqueue_count, size_t enqueue_stride) {
+  auto queue = std::unique_ptr<Queue>(new Queue());
+  run_queue(queue.get(), enqueue_count, enqueue_stride);
+}
+
+// Build a queue with an explicit initial capacity and exercise it.
+template <typename Queue>
+void run_with_initial_capacity(size_t queue_capacity, size_t enqueue_count,
+                               size_t enqueue_stride) {
+  auto queue = std::unique_ptr<Queue>(new Queue(queue_capacity));
+  run_queue(queue.get(), enqueue_count, enqueue_stride);
+}
+
+// Fixture for the single-threaded folly queue stress tests; stateless,
+// used only to group the tests under one name.
+class FollyQueueEnqueueDequeueTest : public ::testing::Test {
+
+};
+
+// Bounded, linearizable MPMC queue.
+TEST_F(FollyQueueEnqueueDequeueTest, FollyMPMCQueue) {
+ run_with_initial_capacity<MPMCQueue>(
+ kMPMCQueueCapacity, kMPMCQueueEnqueueCount, kMPMCQueueEnqueueStride);
+}
+
+// Intrusive lock-free list: fill then sweep.
+TEST_F(FollyQueueEnqueueDequeueTest, FollyAtomicLinkedList) {
+ run_atomic_linkedlist();
+}
+
+// UnboundedQueue variants — one test per producer/consumer combination.
+TEST_F(FollyQueueEnqueueDequeueTest, FollyUnboundedQueue_SPSC) {
+ run_without_initial_capacity<USPSCQueue>(
+ kUSPSCQueueEnqueueCount, kUnboundedQueueEnqueueStride);
+}
+
+TEST_F(FollyQueueEnqueueDequeueTest, FollyUnboundedQueue_MPSC) {
+ run_without_initial_capacity<UMPSCQueue>(
+ kUMPSCQueueEnqueueCount, kUnboundedQueueEnqueueStride);
+}
+
+TEST_F(FollyQueueEnqueueDequeueTest, FollyUnboundedQueue_SPMC) {
+ run_without_initial_capacity<USPMCQueue>(
+ kUSPMCQueueEnqueueCount, kUnboundedQueueEnqueueStride);
+}
+
+TEST_F(FollyQueueEnqueueDequeueTest, FollyUnboundedQueue_MPMC) {
+ run_without_initial_capacity<UMPMCQueue>(
+ kUMPMCQueueEnqueueCount,
+ kUnboundedQueueEnqueueStride);
+}
+
+// DynamicBoundedQueue variants — capacity-bounded, one per combination.
+TEST_F(FollyQueueEnqueueDequeueTest, FollyDynamicBoundedQueue_SPSC) {
+ // DynamicBoundedQueue
+ run_with_initial_capacity<DSPSCQueue>(
+ kDynamicBoundedQueueCapacity,
+ kDSPSCQueueEnqueueCount, kDynamicBoundedQueueEnqueueStride);
+}
+
+TEST_F(FollyQueueEnqueueDequeueTest, FollyDynamicBoundedQueue_MPSC) {
+ run_with_initial_capacity<DMPSCQueue>(
+ kDynamicBoundedQueueCapacity,
+ kDMPSCQueueEnqueueCount,
+ kDynamicBoundedQueueEnqueueStride);
+}
+
+TEST_F(FollyQueueEnqueueDequeueTest, FollyDynamicBoundedQueue_SPMC) {
+ run_with_initial_capacity<DSPMCQueue>(
+ kDynamicBoundedQueueCapacity,
+ kDSPMCQueueEnqueueCount,
+ kDynamicBoundedQueueEnqueueStride);
+}
+
+TEST_F(FollyQueueEnqueueDequeueTest, FollyDynamicBoundedQueue_MPMC) {
+ run_with_initial_capacity<DMPMCQueue>(
+ kDynamicBoundedQueueCapacity,
+ kDMPMCQueueEnqueueCount,
+ kDynamicBoundedQueueEnqueueStride);
+}
+
+// Standalone gtest entry point for this driver binary.
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
--- /dev/null
+#include <folly/SmallLocks.h>
+#include <folly/RWSpinLock.h>
+#include <folly/SharedMutex.h>
+#include <folly/synchronization/Rcu.h>
+
+#include <gtest/gtest.h>
+
+#include <memory>
+
+namespace {
+
+// Iteration counts per primitive — presumably tuned so each test does a
+// comparable amount of wall-clock work; TODO confirm.
+
+// MicroLock
+const size_t kMicroLockPassCount = 2000000000;
+typedef folly::MicroLock MicroLock;
+
+// MicroSpinLock
+const size_t kMicroSpinLockPassCount = 1500000000;
+typedef folly::MicroSpinLock MicroSpinLock;
+
+// PicoSpinLock
+const size_t kPicoSpinLockPassCount = 2700000000;
+typedef folly::PicoSpinLock<size_t> PicoSpinLock;
+
+// SharedMutex
+const size_t kSharedMutexPassCount = 5000000;
+typedef folly::SharedMutexReadPriority SharedMutexReadPriority;
+typedef folly::SharedMutexWritePriority SharedMutexWritePriority;
+
+// RWSpinLock
+const size_t kRWSpinLockPassCount = 5000000;
+typedef folly::RWSpinLock RWSpinLock;
+
+// RWTicketSpinLock
+const size_t kRWTicketSpinLockPassCount = 5000000;
+typedef folly::RWTicketSpinLock32 RWTicketSpinLock32;
+typedef folly::RWTicketSpinLock64 RWTicketSpinLock64;
+
+// RCU
+const size_t kRcuSyncPassCount = 180000;
+const size_t kRcuNoSyncPassCount = 3500000;
+// Represent the RCU-protected data. The fields are never read by the
+// tests; the struct exists only to give rcu_retire a non-trivial payload.
+struct RcuFoo {
+ size_t f1;
+ size_t f2;
+};
+
+}
+
+// Mix RCU readers with writers that retire a node and then wait for a
+// grace period, at write ratios of 1%..5% (out of every 100 operations).
+void run_rcu_sync(size_t pass_count) {
+  for (int write_percentage = 1; write_percentage <= 5; ++write_percentage) {
+    for (size_t pass = 0; pass < pass_count; ++pass) {
+      for (int op = 0; op < 100; ++op) {
+        if (op >= write_percentage) {
+          // Reader path: enter/exit an RCU read-side critical section.
+          folly::rcu_reader guard;
+        } else {
+          // Writer path: retire a node, then block until all readers
+          // that might see it are done.
+          folly::rcu_retire(new RcuFoo());
+          folly::synchronize_rcu();
+        }
+      }
+    }
+  }
+}
+
+// Same reader/writer mix as run_rcu_sync, but writers never call
+// synchronize_rcu(): reclamation is left to deferred rcu_retire processing.
+void run_rcu_no_sync(size_t pass_count) {
+  for (int write_percentage = 1; write_percentage <= 5; ++write_percentage) {
+    for (size_t pass = 0; pass < pass_count; ++pass) {
+      for (int op = 0; op < 100; ++op) {
+        if (op >= write_percentage) {
+          // Reader path: enter/exit an RCU read-side critical section.
+          folly::rcu_reader guard;
+        } else {
+          // Writer path: hand the node to RCU for deferred reclamation.
+          folly::rcu_retire(new RcuFoo());
+        }
+      }
+    }
+  }
+}
+
+// Exercise a reader/writer lock at write ratios of 5%, 10% and 15%:
+// writers take the exclusive lock, readers the shared one.
+template <typename Lock>
+void run_rw_lock(size_t pass_count) {
+  std::unique_ptr<Lock> lock(new Lock());
+  for (int write_percentage = 5; write_percentage < 20; write_percentage += 5) {
+    for (size_t pass = 0; pass < pass_count; ++pass) {
+      for (int op = 0; op < 100; ++op) {
+        if (op >= write_percentage) {
+          // Reader path.
+          lock->lock_shared();
+          lock->unlock_shared();
+        } else {
+          // Writer path.
+          lock->lock();
+          lock->unlock();
+        }
+      }
+    }
+  }
+}
+
+// Repeated uncontended lock/unlock of a folly small lock. These locks
+// require an explicit init() before first use.
+template <typename Lock>
+void run_small_lock(size_t pass_count) {
+  std::unique_ptr<Lock> lock(new Lock());
+  lock->init();
+  for (size_t pass = 0; pass < pass_count; ++pass) {
+    lock->lock();
+    lock->unlock();
+  }
+}
+
+// Fixture for the single-threaded folly synchronization-primitive stress
+// tests; stateless, used only to group the tests under one name.
+class FollySyncTest: public ::testing::Test {
+
+};
+
+// RCU with and without explicit grace-period waits.
+TEST_F(FollySyncTest, FollyRCU_Sync) {
+ run_rcu_sync(kRcuSyncPassCount);
+}
+
+TEST_F(FollySyncTest, FollyRCU_NoSync) {
+ run_rcu_no_sync(kRcuNoSyncPassCount);
+}
+
+// Reader/writer locks, exercised through the shared run_rw_lock driver.
+TEST_F(FollySyncTest, FollyRWTicketSpinLock_32) {
+ run_rw_lock<RWTicketSpinLock32>(kRWTicketSpinLockPassCount);
+}
+
+TEST_F(FollySyncTest, FollyRWTicketSpinLock_64) {
+ run_rw_lock<RWTicketSpinLock64>(kRWTicketSpinLockPassCount);
+}
+
+TEST_F(FollySyncTest, FollyRWSpinLock) {
+ run_rw_lock<RWSpinLock>(kRWSpinLockPassCount);
+}
+
+TEST_F(FollySyncTest, FollySharedMutex_ReadPriority) {
+ run_rw_lock<SharedMutexReadPriority>(kSharedMutexPassCount);
+}
+
+TEST_F(FollySyncTest, FollySharedMutex_WritePriority) {
+ run_rw_lock<SharedMutexWritePriority>(kSharedMutexPassCount);
+}
+
+// Small (sub-word) locks, exercised through run_small_lock.
+TEST_F(FollySyncTest, FollyMicroSpinLock) {
+ run_small_lock<MicroSpinLock>(kMicroSpinLockPassCount);
+}
+
+TEST_F(FollySyncTest, FollyPicoSpinLock) {
+ run_small_lock<PicoSpinLock>(kPicoSpinLockPassCount);
+}
+
+TEST_F(FollySyncTest, FollyMicroLock) {
+ run_small_lock<MicroLock>(kMicroLockPassCount);
+}
+
+// Standalone gtest entry point for this driver binary.
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}