/*
- * Copyright 2016 Facebook, Inc.
+ * Copyright 2017 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#pragma once
#include <limits.h>
-#include <pthread.h>
#include <atomic>
#include <functional>
#include <glog/logging.h>
#include <folly/Exception.h>
-#include <folly/Foreach.h>
#include <folly/Function.h>
-#include <folly/Malloc.h>
#include <folly/MicroSpinLock.h>
#include <folly/Portability.h>
#include <folly/ScopeGuard.h>
+#include <folly/SharedMutex.h>
+#include <folly/container/Foreach.h>
+#include <folly/memory/Malloc.h>
+#include <folly/portability/PThread.h>
#include <folly/detail/StaticSingletonManager.h>
//
// XXX: Ideally we would instead determine if emutls is in use at runtime as it
// is possible to configure glibc on Linux to use emutls regardless.
-#if !FOLLY_MOBILE && !defined(__APPLE__)
+#if !FOLLY_MOBILE && !defined(__APPLE__) && !defined(_MSC_VER)
#define FOLLY_TLD_USE_FOLLY_TLS 1
#else
#undef FOLLY_TLD_USE_FOLLY_TLS
#endif
namespace folly {
+
// Passed to a ThreadLocalPtr element's deleter: THIS_THREAD when the element
// dies because its owning thread is exiting; ALL_THREADS when the
// ThreadLocalPtr itself is reset/destroyed across every thread.
+enum class TLPDestructionMode { THIS_THREAD, ALL_THREADS };
// Tag type: pass as the AccessMode template parameter of StaticMeta (below)
// to select the "strict" access mode.
+struct AccessModeStrict {};
+
namespace threadlocal_detail {
/**
// NOTE(review): fragment of ElementWrapper's set-with-deleter — the function
// head is elided by the diff; comments describe only what is visible here.
DCHECK(deleter2 == nullptr);
if (p) {
ptr = p;
// Old form used a C++14 init-capture ([d = d]); replaced because gcc-4.8
// does not decay the captured type correctly (see comment on the + line).
- deleter2 = new std::function<DeleterFunType>([d = d](
- void* pt, TLPDestructionMode mode) {
- d(static_cast<Ptr>(pt), mode);
- });
+ auto d2 = d; // gcc-4.8 doesn't decay types correctly in lambda captures
+ deleter2 = new std::function<DeleterFunType>(
+ [d2](void* pt, TLPDestructionMode mode) {
+ d2(static_cast<Ptr>(pt), mode);
+ });
// The std::function wrapper was heap-allocated above, so this object now
// owns it and must delete it later.
ownsDeleter = true;
// Success: dismiss the cleanup guard (presumably the guard frees deleter2
// on the exception path — confirm against the elided function head).
guard.dismiss();
}
// Sentinel meaning "this EntryID has not been assigned a slot yet";
// every valid id is strictly smaller than this value.
constexpr uint32_t kEntryIDInvalid = std::numeric_limits<uint32_t>::max();
// Test-only friend hook; forward-declared as `struct` so the tag agrees
// with the friend declaration later in the file (class/struct tag
// mismatches draw warnings on some compilers — e.g. MSVC).
-class PthreadKeyUnregisterTester;
+struct PthreadKeyUnregisterTester;
/**
* We want to disable onThreadExit call at the end of shutdown, we don't care
// Hard cap on the number of pthread keys this process can register (64k).
static constexpr size_t kMaxKeys = 1UL << 16;
// Deletes every registered pthread key when the singleton is torn down.
~PthreadKeyUnregister() {
+    // If static constructor priorities are not supported then
+    // ~PthreadKeyUnregister logic is not safe.
+#if !defined(__APPLE__) && !defined(_MSC_VER)
// Spin lock guards keys_/size_ against concurrent registerKey() calls.
MSLGuard lg(lock_);
while (size_) {
pthread_key_delete(keys_[--size_]);
}
+#endif
}
static void registerKey(pthread_key_t key) {
* usage.
*/
// constexpr (constant-initialized), so registerKey() can be called from
// static constructors without initialization-order hazards.
constexpr PthreadKeyUnregister() : lock_(), size_(0), keys_() { }
// Friend tag changed class -> struct to match the forward declaration.
- friend class folly::threadlocal_detail::PthreadKeyUnregisterTester;
+ friend struct folly::threadlocal_detail::PthreadKeyUnregisterTester;
// Records `key` for deletion in the destructor.
// NOTE(review): the body appears elided by the diff — presumably it appends
// `key` to keys_ under the spin lock; confirm against the full source.
void registerKeyImpl(pthread_key_t key) {
MSLGuard lg(lock_);
}
};
// `strict` selects the strict access mode; callers pass
// std::is_same<AccessMode, AccessModeStrict>::value (see StaticMeta below).
- explicit StaticMetaBase(ThreadEntry* (*threadEntry)());
+ StaticMetaBase(ThreadEntry* (*threadEntry)(), bool strict);
// StaticMeta instances are deliberately leaked (see instance() below), so
// this destructor is never expected to run.
- ~StaticMetaBase() {
-   LOG(FATAL) << "StaticMeta lives forever!";
+ [[noreturn]] ~StaticMetaBase() {
+   folly::assume_unreachable();
}
void push_back(ThreadEntry* t) {
*/
// Allocates/assigns a slot for `id` (defined elsewhere).
void reserve(EntryID* id);
// Renamed get() -> getElement() so derived StaticMeta can layer a fast
// inline get() wrapper on top without name collisions.
- ElementWrapper& get(EntryID* ent);
+ ElementWrapper& getElement(EntryID* ent);
static void initAtFork();
// NOTE(review): registerAtFork's parameter list is elided by the diff here.
static void registerAtFork(
uint32_t nextId_;
std::vector<uint32_t> freeIds_;
std::mutex lock_;
// New in this diff: guards accessAllThreads() in strict mode.
+ SharedMutex accessAllThreadsLock_;
pthread_key_t pthreadKey_;
ThreadEntry head_;
ThreadEntry* (*threadEntry_)();
// True when constructed with strict == true (AccessModeStrict tag).
+ bool strict_;
};
// Held in a singleton to track our global instances.
// Creating and destroying ThreadLocalPtr objects, as well as thread exit
// for threads that use ThreadLocalPtr objects collide on a lock inside
// StaticMeta; you can specify multiple Tag types to break that lock.
// AccessMode: pass AccessModeStrict to enable strict accessAllThreads
// locking; any other type selects the default (relaxed) mode.
-template <class Tag>
+template <class Tag, class AccessMode>
struct StaticMeta : StaticMetaBase {
- StaticMeta() : StaticMetaBase(&StaticMeta::getThreadEntrySlow) {
+ StaticMeta()
+     : StaticMetaBase(
+           &StaticMeta::getThreadEntrySlow,
+           std::is_same<AccessMode, AccessModeStrict>::value) {
// Install fork handlers for this instantiation; the child must rebuild
// its thread-entry list (see onForkChild below).
registerAtFork(
/*prepare*/ &StaticMeta::preFork,
/*parent*/ &StaticMeta::onForkParent,
/*child*/ &StaticMeta::onForkChild);
}
// Meyers-style accessor: constructed on first use via createGlobal and
// intentionally never destroyed.
- static StaticMeta<Tag>& instance() {
+ static StaticMeta<Tag, AccessMode>& instance() {
// Leak it on exit, there's only one per process and we don't have to
// worry about synchronization with exiting threads.
- static auto instance = detail::createGlobal<StaticMeta<Tag>, void>();
+ /* library-local */ static auto instance =
+     detail::createGlobal<StaticMeta<Tag, AccessMode>, void>();
return *instance;
}
- ElementWrapper& get(EntryID* ent) {
+#ifdef FOLLY_TLD_USE_FOLLY_TLS
+ // Eliminate as many branches as possible:
+ // One branch on capacityCache, vs. three:
+ // 1) instance() static initializer
+ // 2) getThreadEntry null check
+ // 3) elementsCapacity size check.
+ // 3 will never be true if 1 or 2 are false.
+ FOLLY_ALWAYS_INLINE static ElementWrapper& get(EntryID* ent) {
+ uint32_t id = ent->getOrInvalid();
+ if (UNLIKELY(capacityCache_ <= id)) {
+ return getSlow(ent);
+ } else {
+ return threadEntryCache_->elements[id];
+ }
+ }
+
// Slow path: defer to StaticMetaBase::getElement (which may grow the
// element array), then refresh the per-thread capacity cache.
+ static ElementWrapper& getSlow(EntryID* ent) {
+ ElementWrapper& res = instance().getElement(ent);
+ // Cache new capacity
+ capacityCache_ = getThreadEntry()->elementsCapacity;
+ return res;
+ }
+#else
// Without FOLLY_TLS there is no per-thread cache; always take the
// general path.
+ static ElementWrapper& get(EntryID* ent) {
+ return instance().getElement(ent);
+ }
+#endif
+
// General lookup used by the slow path above.
// NOTE(review): the remainder of this body is elided by the diff.
+ ElementWrapper& getElement(EntryID* ent) {
ThreadEntry* threadEntry = getThreadEntry();
uint32_t id = ent->getOrInvalid();
// if id is invalid, it is equal to uint32_t's max value.
// Returns (lazily creating) the calling thread's ThreadEntry.
inline static ThreadEntry* getThreadEntry() {
#ifdef FOLLY_TLD_USE_FOLLY_TLS
// The cache moved from a function-local static to the class-level
// threadEntryCache_ member so that getSlow() can update capacityCache_
// consistently alongside it.
- static FOLLY_TLS ThreadEntry* threadEntryCache{nullptr};
- if (UNLIKELY(threadEntryCache == nullptr)) {
- threadEntryCache = instance().threadEntry_();
+ if (UNLIKELY(threadEntryCache_ == nullptr)) {
+ threadEntryCache_ = instance().threadEntry_();
}
- return threadEntryCache;
+ return threadEntryCache_;
#else
return instance().threadEntry_();
#endif
}
// pthread_atfork hooks: hold lock_ across fork() so the child inherits a
// consistent StaticMeta.
- static void preFork(void) {
+ static void preFork() {
instance().lock_.lock(); // Make sure it's created
}
- static void onForkParent(void) { instance().lock_.unlock(); }
+ static void onForkParent() {
+ instance().lock_.unlock();
+ }
- static void onForkChild(void) {
+ static void onForkChild() {
// only the current thread survives
instance().head_.next = instance().head_.prev = &instance().head_;
ThreadEntry* threadEntry = getThreadEntry();
// NOTE(review): the lines re-linking threadEntry into the list appear
// elided by the diff here; confirm against the full source.
}
instance().lock_.unlock();
}
+
+#ifdef FOLLY_TLD_USE_FOLLY_TLS
// Per-thread fast-path caches backing get()/getSlow() above.
+ static FOLLY_TLS ThreadEntry* threadEntryCache_;
+ static FOLLY_TLS size_t capacityCache_;
+#endif
};
-} // namespace threadlocal_detail
-} // namespace folly
+#ifdef FOLLY_TLD_USE_FOLLY_TLS
// Out-of-line definitions: one pair of thread-local statics per
// (Tag, AccessMode) instantiation; null/zero until first use.
+template <class Tag, class AccessMode>
+FOLLY_TLS ThreadEntry* StaticMeta<Tag, AccessMode>::threadEntryCache_{nullptr};
+template <class Tag, class AccessMode>
+FOLLY_TLS size_t StaticMeta<Tag, AccessMode>::capacityCache_{0};
+#endif
+} // namespace threadlocal_detail
+} // namespace folly