diff --git a/folly/experimental/TLRefCount.h b/folly/experimental/TLRefCount.h
index 9667f4c100cb5b1d62c188a6b9c05585547c2c49..e8baf26e213be2ccae5ba5bb1b8f16d12dd5e2db 100644
--- a/folly/experimental/TLRefCount.h
+++ b/folly/experimental/TLRefCount.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016 Facebook, Inc.
+ * Copyright 2017 Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,6 +16,7 @@
 #pragma once
 
 #include <folly/ThreadLocal.h>
+#include <folly/synchronization/AsymmetricMemoryBarrier.h>
 
 namespace folly {
 
@@ -80,23 +81,47 @@ class TLRefCount {
   }
 
   void useGlobal() noexcept {
-    std::lock_guard<std::mutex> lg(globalMutex_);
+    std::array<TLRefCount*, 1> ptrs{{this}};
+    useGlobal(ptrs);
+  }
+
+  template <typename Container>
+  static void useGlobal(const Container& refCountPtrs) {
+#ifdef FOLLY_SANITIZE_THREAD
+    // TSAN has a limit on the number of locks that can be held
+    // concurrently, so it's safer to call useGlobal() serially.
+    if (refCountPtrs.size() > 1) {
+      for (auto refCountPtr : refCountPtrs) {
+        refCountPtr->useGlobal();
+      }
+      return;
+    }
+#endif
+
+    std::vector<std::unique_lock<std::mutex>> lgs;
+    for (auto refCountPtr : refCountPtrs) {
+      lgs.emplace_back(refCountPtr->globalMutex_);
 
-    state_ = State::GLOBAL_TRANSITION;
+      refCountPtr->state_ = State::GLOBAL_TRANSITION;
+    }
 
-    std::weak_ptr<void> collectGuardWeak = collectGuard_;
+    asymmetricHeavyBarrier();
 
-    // Make sure we can't create new LocalRefCounts
-    collectGuard_.reset();
+    for (auto refCountPtr : refCountPtrs) {
+      std::weak_ptr<void> collectGuardWeak = refCountPtr->collectGuard_;
 
-    while (!collectGuardWeak.expired()) {
-      auto accessor = localCount_.accessAllThreads();
-      for (auto& count : accessor) {
-        count.collect();
+      // Make sure we can't create new LocalRefCounts
+      refCountPtr->collectGuard_.reset();
+
+      while (!collectGuardWeak.expired()) {
+        auto accessor = refCountPtr->localCount_.accessAllThreads();
+        for (auto& count : accessor) {
+          count.collect();
+        }
       }
-    }
 
-    state_ = State::GLOBAL;
+      refCountPtr->state_ = State::GLOBAL;
+    }
   }
 
  private:
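
As a usage sketch (not part of this patch): the new static useGlobal() overload takes any container of TLRefCount pointers, locks every globalMutex_ up front, and issues a single asymmetricHeavyBarrier() for the whole batch instead of one per count. A hypothetical caller might batch two counts like this:

    #include <array>
    #include <folly/experimental/TLRefCount.h>

    void shutDownCounts(folly::TLRefCount& a, folly::TLRefCount& b) {
      // Hypothetical helper: transition both counts to global mode with
      // one call, paying for one heavy barrier rather than two.
      std::array<folly::TLRefCount*, 2> ptrs{{&a, &b}};
      folly::TLRefCount::useGlobal(ptrs);
    }
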
@@ -147,7 +172,14 @@ class TLRefCount {
         return false;
       }
 
-      auto count = count_ += delta;
+      // This is equivalent to an atomic fetch_add. The count is only
+      // ever updated from its owning thread, so a relaxed load/store
+      // pair is safe, and asymmetricLightBarrier() makes it faster
+      // than an actual fetch_add on platforms with native support.
+      auto count = count_.load(std::memory_order_relaxed) + delta;
+      count_.store(count, std::memory_order_relaxed);
+
+      asymmetricLightBarrier();
 
       if (UNLIKELY(refCount_.state_.load() != State::LOCAL)) {
         std::lock_guard<std::mutex> lg(collectMutex_);
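
To illustrate the single-writer idiom the new comment describes, here is a standalone sketch under the same assumptions (the class and method names are invented for illustration): the owning thread updates the counter with a relaxed load/store pair plus asymmetricLightBarrier(), and a collector thread issues the matching asymmetricHeavyBarrier() before reading, which is what useGlobal() does above.

    #include <atomic>
    #include <cstdint>

    #include <folly/synchronization/AsymmetricMemoryBarrier.h>

    class OwnerCounter {
     public:
      // Called only from the owning thread, so the relaxed load/store
      // pair behaves like fetch_add without a locked RMW instruction.
      void bump(int64_t delta) {
        auto v = count_.load(std::memory_order_relaxed) + delta;
        count_.store(v, std::memory_order_relaxed);
        folly::asymmetricLightBarrier(); // cheap on the hot path
      }

      // Called from a collector thread; the heavy barrier makes the
      // owner's preceding relaxed writes visible before we read.
      int64_t collect() {
        folly::asymmetricHeavyBarrier();
        return count_.load(std::memory_order_relaxed);
      }

     private:
      std::atomic<int64_t> count_{0};
    };
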
@@ -178,4 +210,4 @@ class TLRefCount {
   std::shared_ptr<void> collectGuard_;
 };
 
-}
+} // namespace folly
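
A closing observation on the transition loop in useGlobal(): it relies on a shared_ptr/weak_ptr handshake. Every thread-local count pins collectGuard_, so once the owner resets its copy, the weak_ptr expires exactly when the last holder lets go. A minimal standalone sketch of that pattern, with invented names:

    #include <memory>
    #include <thread>

    class CollectGuard {
     public:
      // Holders (e.g. per-thread counts) keep a copy of this alive;
      // after waitForHolders() starts, pin() hands out only null
      // pointers, so no new holders can appear.
      std::shared_ptr<void> pin() { return guard_; }

      void waitForHolders() {
        std::weak_ptr<void> weak = guard_;
        guard_.reset(); // drop the owner's reference
        while (!weak.expired()) {
          // TLRefCount additionally asks each thread to collect() here.
          std::this_thread::yield();
        }
      }

     private:
      std::shared_ptr<void> guard_{std::make_shared<int>(0)};
    };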