/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <limits.h>

#include <atomic>
#include <functional>
#include <mutex>
#include <string>
#include <vector>

#include <glog/logging.h>

#include <folly/Exception.h>
#include <folly/Function.h>
#include <folly/MicroSpinLock.h>
#include <folly/Portability.h>
#include <folly/ScopeGuard.h>
#include <folly/SharedMutex.h>
#include <folly/container/Foreach.h>
#include <folly/memory/Malloc.h>
#include <folly/portability/PThread.h>

#include <folly/detail/StaticSingletonManager.h>
// In general, emutls cleanup is not guaranteed to play nice with the way
// StaticMeta mixes direct pthread calls and the use of __thread. This has
// caused problems on multiple platforms so don't use __thread there.
//
// XXX: Ideally we would instead determine if emutls is in use at runtime as it
// is possible to configure glibc on Linux to use emutls regardless.
#if !FOLLY_MOBILE && !defined(__APPLE__) && !defined(_MSC_VER)
#define FOLLY_TLD_USE_FOLLY_TLS 1
#else
#undef FOLLY_TLD_USE_FOLLY_TLS
#endif

namespace folly {
enum class TLPDestructionMode { THIS_THREAD, ALL_THREADS };
struct AccessModeStrict {};

namespace threadlocal_detail {
/**
 * POD wrapper around an element (a void*) and an associated deleter.
 * This must be POD, as we memset() it to 0 and memcpy() it around.
 */
struct ElementWrapper {
  using DeleterFunType = void(void*, TLPDestructionMode);

  bool dispose(TLPDestructionMode mode) {
    if (ptr == nullptr) {
      return false;
    }

    DCHECK(deleter1 != nullptr);
    ownsDeleter ? (*deleter2)(ptr, mode) : (*deleter1)(ptr, mode);
    cleanup();
    return true;
  }

  template <class Ptr>
  void set(Ptr p) {
    auto guard = makeGuard([&] { delete p; });
    DCHECK(ptr == nullptr);
    DCHECK(deleter1 == nullptr);
    if (p) {
      ptr = p;
      deleter1 = [](void* pt, TLPDestructionMode) {
        delete static_cast<Ptr>(pt);
      };
      ownsDeleter = false;
      guard.dismiss();
    }
  }
  template <class Ptr, class Deleter>
  void set(Ptr p, const Deleter& d) {
    auto guard = makeGuard([&] {
      if (p) {
        d(p, TLPDestructionMode::THIS_THREAD);
      }
    });
    DCHECK(ptr == nullptr);
    DCHECK(deleter2 == nullptr);
    if (p) {
      ptr = p;
      auto d2 = d; // gcc-4.8 doesn't decay types correctly in lambda captures
      deleter2 = new std::function<DeleterFunType>(
          [d2](void* pt, TLPDestructionMode mode) {
            d2(static_cast<Ptr>(pt), mode);
          });
      ownsDeleter = true;
      guard.dismiss();
    }
  }

  void cleanup() {
    if (ownsDeleter) {
      delete deleter2;
    }
    ptr = nullptr;
    deleter1 = nullptr;
    ownsDeleter = false;
  }

  void* ptr;
  union {
    DeleterFunType* deleter1;
    std::function<DeleterFunType>* deleter2;
  };
  bool ownsDeleter;
};
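
// Illustrative sketch (not part of the original header; Widget and w are
// hypothetical names): how a caller might drive the ElementWrapper lifecycle.
//
//   ElementWrapper w;
//   memset(&w, 0, sizeof(w));  // POD: all-zero is the valid empty state
//   w.set(new Widget());       // stores ptr and the trivial deleter1
//   w.dispose(TLPDestructionMode::THIS_THREAD);  // runs the deleter, then
//                                                // cleanup() resets the fields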
struct StaticMetaBase;
/**
 * Per-thread entry. Each thread using a StaticMeta object has one.
 * This is written from the owning thread only (under the lock), read
 * from the owning thread (no lock necessary), and read from other threads
 * (under the lock).
 */
struct ThreadEntry {
  ElementWrapper* elements{nullptr};
  size_t elementsCapacity{0};
  ThreadEntry* next{nullptr};
  ThreadEntry* prev{nullptr};
  StaticMetaBase* meta{nullptr};
};
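
// List invariant used below (a sketch restating what push_back(), erase(),
// and onForkChild() rely on): ThreadEntry nodes form a circular doubly-linked
// list threaded through a sentinel head_ in StaticMetaBase, so an empty list
// satisfies
//
//   head_.next == &head_ && head_.prev == &head_;
//
// and insertion/removal are O(1) pointer splices done under the meta lock.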
constexpr uint32_t kEntryIDInvalid = std::numeric_limits<uint32_t>::max();

struct PthreadKeyUnregisterTester;
/**
 * We want to disable the onThreadExit call at the end of shutdown; we don't
 * care about leaking memory at that point.
 *
 * Otherwise, if ThreadLocal is used in a shared library, onThreadExit may be
 * called after dlclose().
 *
 * This class has a single static instance; however, since it's so widely used,
 * directly or indirectly, by so many classes, we need to take care to avoid
 * problems stemming from the Static Initialization/Destruction Order Fiascos.
 * Therefore this class needs to be constexpr-constructible, so as to avoid
 * the need for this to participate in init/destruction order.
 */
class PthreadKeyUnregister {
 public:
  static constexpr size_t kMaxKeys = 1UL << 16;

  ~PthreadKeyUnregister() {
    // If static constructor priorities are not supported then
    // ~PthreadKeyUnregister logic is not safe.
#if !defined(__APPLE__) && !defined(_MSC_VER)
    MSLGuard lg(lock_);
    while (size_) {
      pthread_key_delete(keys_[--size_]);
    }
#endif
  }

  static void registerKey(pthread_key_t key) {
    instance_.registerKeyImpl(key);
  }

 private:
  /**
   * Only one global instance should exist, hence this is private.
   * See also the important note at the top of this class about `constexpr`
   * usage.
   */
  constexpr PthreadKeyUnregister() : lock_(), size_(0), keys_() {}
  friend struct folly::threadlocal_detail::PthreadKeyUnregisterTester;

  void registerKeyImpl(pthread_key_t key) {
    MSLGuard lg(lock_);
    if (size_ == kMaxKeys) {
      throw std::logic_error("pthread_key limit has already been reached");
    }
    keys_[size_++] = key;
  }

  MicroSpinLock lock_;
  size_t size_;
  pthread_key_t keys_[kMaxKeys];

  static PthreadKeyUnregister instance_;
};
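
// Usage sketch (hedged: the real call site lives in ThreadLocalDetail.cpp,
// not in this header): each pthread key the library creates is registered
// right away so the destructor above can delete it at shutdown.
//
//   pthread_key_t key;
//   int ret = pthread_key_create(&key, &StaticMetaBase::onThreadExit);
//   checkPosixError(ret, "pthread_key_create failed");
//   PthreadKeyUnregister::registerKey(key);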
struct StaticMetaBase {
  // Represents an ID of a thread local object. Initially set to the maximum
  // uint. This representation allows us to avoid a branch in accessing TLS data
  // (because if you test capacity > id when id = maxint, the test will always
  // fail). It allows us to keep a constexpr constructor and avoid SIOF.
  class EntryID {
   public:
    std::atomic<uint32_t> value;

    constexpr EntryID() : value(kEntryIDInvalid) {}

    EntryID(EntryID&& other) noexcept : value(other.value.load()) {
      other.value = kEntryIDInvalid;
    }

    EntryID& operator=(EntryID&& other) {
      assert(this != &other);
      value = other.value.load();
      other.value = kEntryIDInvalid;
      return *this;
    }

    EntryID(const EntryID& other) = delete;
    EntryID& operator=(const EntryID& other) = delete;

    uint32_t getOrInvalid() {
      // It's OK for this to be relaxed, even though we're effectively doing
      // double checked locking in using this value. We only care about the
      // uniqueness of IDs, getOrAllocate does not modify any other memory
      // this thread will use.
      return value.load(std::memory_order_relaxed);
    }

    uint32_t getOrAllocate(StaticMetaBase& meta) {
      uint32_t id = getOrInvalid();
      if (id != kEntryIDInvalid) {
        return id;
      }
      // The lock inside allocate ensures that a single value is allocated
      return meta.allocate(this);
    }
  };
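
  // Worked example of the branch-avoidance claim above (illustrative
  // numbers): a fresh thread has elementsCapacity == 0, and an unallocated
  // EntryID reads as kEntryIDInvalid == 0xFFFFFFFF, so the single test
  //
  //   if (threadEntry->elementsCapacity <= id) { /* slow path */ }
  //
  // fires both when the ID was never allocated and when the elements array
  // is merely too small; no separate validity branch is needed.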

  StaticMetaBase(ThreadEntry* (*threadEntry)(), bool strict);

  [[noreturn]] ~StaticMetaBase() {
    folly::assume_unreachable();
  }

  void push_back(ThreadEntry* t) {
    t->next = &head_;
    t->prev = head_.prev;
    head_.prev->next = t;
    head_.prev = t;
  }

  void erase(ThreadEntry* t) {
    t->next->prev = t->prev;
    t->prev->next = t->next;
    t->next = t->prev = t;
  }

  static void onThreadExit(void* ptr);

  uint32_t allocate(EntryID* ent);

  void destroy(EntryID* ent);
  /**
   * Reserve enough space in the ThreadEntry::elements for the item
   * @id to fit in.
   */
  void reserve(EntryID* id);

  ElementWrapper& getElement(EntryID* ent);

  static void initAtFork();
  static void registerAtFork(
      folly::Function<void()> prepare,
      folly::Function<void()> parent,
      folly::Function<void()> child);

  uint32_t nextId_;
  std::vector<uint32_t> freeIds_;
  std::mutex lock_;
  SharedMutex accessAllThreadsLock_;
  pthread_key_t pthreadKey_;
  ThreadEntry head_;
  ThreadEntry* (*threadEntry_)();
  bool strict_;
};
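
// Sketch of the ID-recycling scheme implied by nextId_ and freeIds_
// (allocate() and destroy() are defined in ThreadLocalDetail.cpp; this is a
// simplified rendering, not the authoritative implementation). Under lock_:
//
//   uint32_t id;
//   if (!freeIds_.empty()) {
//     id = freeIds_.back();   // reuse a slot released by destroy()
//     freeIds_.pop_back();
//   } else {
//     id = nextId_++;         // otherwise mint a fresh ID
//   }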

// Held in a singleton to track our global instances.
// We have one of these per "Tag", by default one for the whole system
// (void Tag).
//
// Creating and destroying ThreadLocalPtr objects, as well as thread exit
// for threads that use ThreadLocalPtr objects, collide on a lock inside
// StaticMeta; you can specify multiple Tag types to break that lock.
template <class Tag, class AccessMode>
struct StaticMeta : StaticMetaBase {
  StaticMeta()
      : StaticMetaBase(
            &StaticMeta::getThreadEntrySlow,
            std::is_same<AccessMode, AccessModeStrict>::value) {
    registerAtFork(
        /*prepare*/ &StaticMeta::preFork,
        /*parent*/ &StaticMeta::onForkParent,
        /*child*/ &StaticMeta::onForkChild);
  }

  static StaticMeta<Tag, AccessMode>& instance() {
    // Leak it on exit, there's only one per process and we don't have to
    // worry about synchronization with exiting threads.
    /* library-local */ static auto instance =
        detail::createGlobal<StaticMeta<Tag, AccessMode>, void>();
    return *instance;
  }

#ifdef FOLLY_TLD_USE_FOLLY_TLS
  // Eliminate as many branches as possible:
  // One branch on capacityCache_, vs. three:
  // 1) instance() static initializer
  // 2) getThreadEntry null check
  // 3) elementsCapacity size check.
  // 3 will never be true if 1 or 2 are false.
  FOLLY_ALWAYS_INLINE static ElementWrapper& get(EntryID* ent) {
    uint32_t id = ent->getOrInvalid();
    if (UNLIKELY(capacityCache_ <= id)) {
      return getSlow(ent);
    }
    return threadEntryCache_->elements[id];
  }

  static ElementWrapper& getSlow(EntryID* ent) {
    ElementWrapper& res = instance().getElement(ent);
    // Cache new capacity
    capacityCache_ = getThreadEntry()->elementsCapacity;
    return res;
  }
#else
  static ElementWrapper& get(EntryID* ent) {
    return instance().getElement(ent);
  }
#endif
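
  // Caller sketch (hypothetical here; in folly the caller is ThreadLocalPtr
  // in folly/ThreadLocal.h): an accessor holds an EntryID id_ and does
  //
  //   ElementWrapper& w = StaticMeta<Tag, AccessMode>::get(&id_);
  //
  // so with FOLLY_TLD_USE_FOLLY_TLS the hot path costs one load of
  // capacityCache_, one comparison, and an indexed load.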

  ElementWrapper& getElement(EntryID* ent) {
    ThreadEntry* threadEntry = getThreadEntry();
    uint32_t id = ent->getOrInvalid();
    // if id is invalid, it is equal to uint32_t's max value.
    // x <= max value is always true
    if (UNLIKELY(threadEntry->elementsCapacity <= id)) {
      reserve(ent);
      id = ent->getOrInvalid();
      assert(threadEntry->elementsCapacity > id);
    }
    return threadEntry->elements[id];
  }

  static ThreadEntry* getThreadEntrySlow() {
    auto& meta = instance();
    auto key = meta.pthreadKey_;
    ThreadEntry* threadEntry =
        static_cast<ThreadEntry*>(pthread_getspecific(key));
    if (!threadEntry) {
#ifdef FOLLY_TLD_USE_FOLLY_TLS
      static FOLLY_TLS ThreadEntry threadEntrySingleton;
      threadEntry = &threadEntrySingleton;
#else
      threadEntry = new ThreadEntry();
#endif
      threadEntry->meta = &meta;
      int ret = pthread_setspecific(key, threadEntry);
      checkPosixError(ret, "pthread_setspecific failed");
    }
    return threadEntry;
  }

  inline static ThreadEntry* getThreadEntry() {
#ifdef FOLLY_TLD_USE_FOLLY_TLS
    if (UNLIKELY(threadEntryCache_ == nullptr)) {
      threadEntryCache_ = instance().threadEntry_();
    }
    return threadEntryCache_;
#else
    return instance().threadEntry_();
#endif
  }

  static void preFork() {
    instance().lock_.lock(); // Make sure it's created
  }

  static void onForkParent() {
    instance().lock_.unlock();
  }

  static void onForkChild() {
    // only the current thread survives
    instance().head_.next = instance().head_.prev = &instance().head_;
    ThreadEntry* threadEntry = getThreadEntry();
    // If this thread was in the list before the fork, add it back.
    if (threadEntry->elementsCapacity != 0) {
      instance().push_back(threadEntry);
    }
    instance().lock_.unlock();
  }
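
  // The three hooks above follow the standard pthread_atfork() protocol; a
  // sketch of the equivalent direct registration (illustration only, the
  // actual wiring goes through registerAtFork()):
  //
  //   pthread_atfork(&preFork, &onForkParent, &onForkChild);
  //
  // prepare acquires lock_ so the child cannot inherit it in a held state;
  // parent and child each release their own copy after fork().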

#ifdef FOLLY_TLD_USE_FOLLY_TLS
  static FOLLY_TLS ThreadEntry* threadEntryCache_;
  static FOLLY_TLS size_t capacityCache_;
#endif
};

#ifdef FOLLY_TLD_USE_FOLLY_TLS
template <class Tag, class AccessMode>
FOLLY_TLS ThreadEntry* StaticMeta<Tag, AccessMode>::threadEntryCache_{nullptr};
template <class Tag, class AccessMode>
FOLLY_TLS size_t StaticMeta<Tag, AccessMode>::capacityCache_{0};
#endif

} // namespace threadlocal_detail
} // namespace folly