2 * Copyright 2017 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include <folly/ThreadLocal.h>
19 #include <folly/synchronization/AsymmetricMemoryBarrier.h>
      // Thread-local counters are created lazily, one LocalRefCount per
      // thread. collectGuard_ stores `this` with a no-op deleter: it never
      // owns memory, it is only a liveness token that each LocalRefCount
      // copies and that useGlobal() resets and watches (via weak_ptr) to
      // tell when every per-thread count has been collected.
      : localCount_([&]() { return new LocalRefCount(*this); }),
        collectGuard_(this, [](void*) {}) {}
  // Destruction is only legal once the count has been migrated to GLOBAL
  // mode (see useGlobal()) and has dropped to zero; both are asserted.
  ~TLRefCount() noexcept {
    assert(globalCount_.load() == 0);
    assert(state_.load() == State::GLOBAL);
  // This can't increment from 0.
  Int operator++() noexcept {
    // Fast path: bump this thread's local counter while in LOCAL state.
    auto& localCount = *localCount_;
    // Slow path: the count now lives in globalCount_. If a migration is
    // still in flight, briefly take globalMutex_ so useGlobal() finishes
    // flipping state_ to GLOBAL before we proceed.
    if (state_.load() == State::GLOBAL_TRANSITION) {
      std::lock_guard<std::mutex> lg(globalMutex_);
    assert(state_.load() == State::GLOBAL);
    // CAS retry loop: raise the global count by exactly one.
    auto value = globalCount_.load();
    } while (!globalCount_.compare_exchange_weak(value, value+1));
  Int operator--() noexcept {
    // Fast path: decrement this thread's local counter while in LOCAL state.
    auto& localCount = *localCount_;
    // Slow path mirrors operator++: wait out any in-flight transition by
    // briefly acquiring globalMutex_, then operate on globalCount_.
    if (state_.load() == State::GLOBAL_TRANSITION) {
      std::lock_guard<std::mutex> lg(globalMutex_);
    assert(state_.load() == State::GLOBAL);
    // Atomic post-decrement; `- 1` converts it to the new (post) value.
    return globalCount_-- - 1;
  // Reads the count. An exact total is only available in GLOBAL state;
  // while counting is still distributed across per-thread counters there is
  // no single authoritative number to report.
  Int operator*() const {
    if (state_ != State::GLOBAL) {
    return globalCount_.load();
  // Single-object convenience: wraps `this` in a one-element array so the
  // batch overload can perform the LOCAL -> GLOBAL migration.
  void useGlobal() noexcept {
    std::array<TLRefCount*, 1> ptrs{{this}};
  // Migrate every ref-count in refCountPtrs from per-thread (LOCAL)
  // counting to a single shared atomic (GLOBAL). Afterwards all updates go
  // through globalCount_.
  template <typename Container>
  static void useGlobal(const Container& refCountPtrs) {
#ifdef FOLLY_SANITIZE_THREAD
    // TSAN has a limitation for the number of locks held concurrently, so it's
    // safer to call useGlobal() serially.
    if (refCountPtrs.size() > 1) {
      for (auto refCountPtr : refCountPtrs) {
        refCountPtr->useGlobal();
    // Phase 1: under each instance's globalMutex_, announce the transition
    // so local updates start refusing and fall back to the slow path.
    std::vector<std::unique_lock<std::mutex>> lgs_;
    for (auto refCountPtr : refCountPtrs) {
      lgs_.emplace_back(refCountPtr->globalMutex_);
      refCountPtr->state_ = State::GLOBAL_TRANSITION;
    // Pairs with the asymmetricLightBarrier() in LocalRefCount::update():
    // after this barrier every thread observes GLOBAL_TRANSITION.
    asymmetricHeavyBarrier();
    // Phase 2: fold each thread's local tally into globalCount_.
    for (auto refCountPtr : refCountPtrs) {
      std::weak_ptr<void> collectGuardWeak = refCountPtr->collectGuard_;

      // Make sure we can't create new LocalRefCounts
      refCountPtr->collectGuard_.reset();
      // Keep collecting until every LocalRefCount has dropped its copy of
      // the guard, i.e. every slice has been folded into globalCount_.
      while (!collectGuardWeak.expired()) {
        auto accessor = refCountPtr->localCount_.accessAllThreads();
        for (auto& count : accessor) {
      refCountPtr->state_ = State::GLOBAL;
128 using AtomicInt = std::atomic<Int>;
  // Per-thread slice of the reference count. While the owner is in LOCAL
  // state each thread mutates only its own slice; useGlobal() later folds
  // ("collects") every slice into TLRefCount::globalCount_.
  class LocalRefCount {
    // Registers under the owner's globalMutex_ and copies collectGuard_,
    // which keeps useGlobal()'s collection loop spinning until this slice
    // has been collected.
    explicit LocalRefCount(TLRefCount& refCount) :
    refCount_(refCount) {
      std::lock_guard<std::mutex> lg(refCount.globalMutex_);

      collectGuard_ = refCount.collectGuard_;
      // collect(): serialize against concurrent collectors.
      std::lock_guard<std::mutex> lg(collectMutex_);
      // Already collected — nothing left to fold in.
      if (!collectGuard_) {
      // Fold this thread's tally into the shared counter, remember what was
      // folded, and release the guard so useGlobal() can make progress.
      collectCount_ = count_.load();
      refCount_.globalCount_.fetch_add(collectCount_);
      collectGuard_.reset();
    // Apply a +/-1 delta to the thread-local tally. Returns false when the
    // owner is no longer LOCAL, telling the caller to take the global slow
    // path instead.
    bool update(Int delta) {
      if (UNLIKELY(refCount_.state_.load() != State::LOCAL)) {
      // This is equivalent to atomic fetch_add. We know that this operation
      // is always performed from a single thread. asymmetricLightBarrier()
      // makes things faster than atomic fetch_add on platforms with native
      auto count = count_.load(std::memory_order_relaxed) + delta;
      count_.store(count, std::memory_order_relaxed);
      // Publish the store above; pairs with asymmetricHeavyBarrier() in
      // useGlobal().
      asymmetricLightBarrier();
      // Re-check: a transition may have begun while we were updating; if so,
      // synchronize with collect() under collectMutex_.
      if (UNLIKELY(refCount_.state_.load() != State::LOCAL)) {
        std::lock_guard<std::mutex> lg(collectMutex_);
        // Did collect() snapshot a value that differs from our latest count?
        if (collectCount_ != count) {
    TLRefCount& refCount_;             // owner; assumed to outlive this slice
    std::mutex collectMutex_;          // guards collectCount_ / collectGuard_
    Int collectCount_{0};              // tally last folded into globalCount_
    std::shared_ptr<void> collectGuard_; // copy of owner's guard; reset once collected
  // Counting mode; useGlobal() moves it LOCAL -> GLOBAL_TRANSITION -> GLOBAL.
  std::atomic<State> state_{State::LOCAL};
  // One LocalRefCount per thread, created lazily by the factory passed in
  // the constructor.
  folly::ThreadLocal<LocalRefCount, TLRefCount> localCount_;
  // Shared counter used in GLOBAL state; starts at 1 — presumably the
  // construction reference (the destructor asserts it reaches 0).
  std::atomic<int64_t> globalCount_{1};
  // Serializes useGlobal() transitions and LocalRefCount registration.
  std::mutex globalMutex_;
  // Liveness token: copied by each LocalRefCount, reset by useGlobal();
  // never owns memory (constructed with a no-op deleter).
  std::shared_ptr<void> collectGuard_;