diff --git a/folly/experimental/TLRefCount.h b/folly/experimental/TLRefCount.h
index 87734e6ecb074ab494c96c409188be61bcdeb498..e8baf26e213be2ccae5ba5bb1b8f16d12dd5e2db 100644
--- a/folly/experimental/TLRefCount.h
+++ b/folly/experimental/TLRefCount.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016 Facebook, Inc.
+ * Copyright 2017 Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,8 +15,8 @@
  */
 #pragma once
 
-#include <folly/Baton.h>
+#include <array>
+#include <atomic>
+#include <memory>
+#include <mutex>
+#include <vector>
+
 #include <folly/ThreadLocal.h>
+#include <folly/synchronization/AsymmetricMemoryBarrier.h>
 
 namespace folly {
 
@@ -24,15 +24,9 @@ class TLRefCount {
  public:
   using Int = int64_t;
 
-  TLRefCount() :
-      localCount_([&]() {
-          return new LocalRefCount(*this);
-        }),
-      collectGuard_(&collectBaton_, [](void* p) {
-          auto baton = reinterpret_cast<folly::Baton<>*>(p);
-          baton->post();
-        }) {
-  }
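+  // collectGuard_ doubles as a liveness token: each LocalRefCount grabs a
+  // shared copy of it, and useGlobal() waits for every copy to be released
+  // instead of blocking on a Baton.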
+  TLRefCount()
+      : localCount_([&]() { return new LocalRefCount(*this); }),
+        collectGuard_(this, [](void*) {}) {}
 
   ~TLRefCount() noexcept {
     assert(globalCount_.load() == 0);
@@ -87,19 +81,47 @@ class TLRefCount {
   }
 
   void useGlobal() noexcept {
-    std::lock_guard<std::mutex> lg(globalMutex_);
+    std::array<TLRefCount*, 1> ptrs{{this}};
+    useGlobal(ptrs);
+  }
+
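+  // Switch a batch of refcounts to the global (slow) mode in one pass. The
+  // Container only needs size() and iteration over TLRefCount* elements,
+  // e.g. a std::vector<TLRefCount*> or the std::array used above.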
+  template <typename Container>
+  static void useGlobal(const Container& refCountPtrs) {
+#ifdef FOLLY_SANITIZE_THREAD
+    // TSAN has a limit on the number of locks that can be held concurrently,
+    // so it's safer to call useGlobal() serially.
+    if (refCountPtrs.size() > 1) {
+      for (auto refCountPtr : refCountPtrs) {
+        refCountPtr->useGlobal();
+      }
+      return;
+    }
+#endif
 
-    state_ = State::GLOBAL_TRANSITION;
+    std::vector<std::unique_lock<std::mutex>> lockGuards;
+    for (auto refCountPtr : refCountPtrs) {
+      lockGuards.emplace_back(refCountPtr->globalMutex_);
 
-    auto accessor = localCount_.accessAllThreads();
-    for (auto& count : accessor) {
-      count.collect();
+      refCountPtr->state_ = State::GLOBAL_TRANSITION;
     }
 
-    collectGuard_.reset();
-    collectBaton_.wait();
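+    // Pairs with the asymmetricLightBarrier() issued after each local count
+    // update: every in-flight update either publishes its count before we
+    // collect below, or observes GLOBAL_TRANSITION and takes the locked path.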
+    asymmetricHeavyBarrier();
+
+    for (auto refCountPtr : refCountPtrs) {
+      std::weak_ptr<void> collectGuardWeak = refCountPtr->collectGuard_;
 
-    state_ = State::GLOBAL;
+      // Make sure we can't create new LocalRefCounts
+      refCountPtr->collectGuard_.reset();
+
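+      // Every LocalRefCount drops its copy of collectGuard_ once it has
+      // flushed its count into globalCount_, so spin until all thread-local
+      // counts have been collected.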
+      while (!collectGuardWeak.expired()) {
+        auto accessor = refCountPtr->localCount_.accessAllThreads();
+        for (auto& count : accessor) {
+          count.collect();
+        }
+      }
+
+      refCountPtr->state_ = State::GLOBAL;
+    }
   }
 
  private:
+  using AtomicInt = std::atomic<Int>;
@@ -131,7 +153,7 @@ class TLRefCount {
         return;
       }
 
-      collectCount_ = count_;
+      collectCount_ = count_.load();
       refCount_.globalCount_.fetch_add(collectCount_);
       collectGuard_.reset();
     }
@@ -150,7 +172,14 @@ class TLRefCount {
         return false;
       }
 
-      auto count = count_ += delta;
+      // This is equivalent to an atomic fetch_add. We know the update is
+      // always performed from a single thread, so a relaxed load/store pair
+      // is safe, and asymmetricLightBarrier() is cheaper than an atomic RMW
+      // on platforms with native support.
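+      // The light barrier pairs with asymmetricHeavyBarrier() in useGlobal():
+      // a concurrent global transition either observes this update or forces
+      // us onto the locked path below.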
+      auto count = count_.load(std::memory_order_relaxed) + delta;
+      count_.store(count, std::memory_order_relaxed);
+
+      asymmetricLightBarrier();
 
       if (UNLIKELY(refCount_.state_.load() != State::LOCAL)) {
         std::lock_guard<std::mutex> lg(collectMutex_);
@@ -166,7 +195,7 @@ class TLRefCount {
       return true;
     }
 
-    Int count_{0};
+    AtomicInt count_{0};
     TLRefCount& refCount_;
 
     std::mutex collectMutex_;
@@ -178,8 +207,7 @@ class TLRefCount {
   folly::ThreadLocal<LocalRefCount, TLRefCount> localCount_;
   std::atomic<int64_t> globalCount_{1};
   std::mutex globalMutex_;
-  folly::Baton<> collectBaton_;
   std::shared_ptr<void> collectGuard_;
 };
 
-}
+} // namespace folly