/*
 * Copyright 2013 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "folly/ThreadLocal.h"

#include <pthread.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include <array>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <map>
#include <mutex>
#include <set>
#include <thread>
#include <unordered_map>
#include <vector>

#include <boost/thread/tss.hpp>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <gtest/gtest.h>

#include "folly/Benchmark.h"
#include "folly/Foreach.h"

using namespace folly;

class Widget {
 public:
  static int totalVal_;
  int val_ = 0;
  ~Widget() { totalVal_ += val_; }

  static void customDeleter(Widget* w, TLPDestructionMode mode) {
    totalVal_ += (mode == TLPDestructionMode::ALL_THREADS) * 1000;
    delete w;
  }
};
int Widget::totalVal_ = 0;

TEST(ThreadLocalPtr, BasicDestructor) {
  Widget::totalVal_ = 0;
  ThreadLocalPtr<Widget> w;
  std::thread([&w]() {
      w.reset(new Widget());
      w.get()->val_ += 10;
    }).join();
  EXPECT_EQ(10, Widget::totalVal_);
}

TEST(ThreadLocalPtr, CustomDeleter1) {
  Widget::totalVal_ = 0;
  {
    ThreadLocalPtr<Widget> w;
    std::thread([&w]() {
        w.reset(new Widget(), Widget::customDeleter);
        w.get()->val_ += 10;
      }).join();
    EXPECT_EQ(10, Widget::totalVal_);
  }
  // The Widget was already destroyed when the thread exited, so destroying
  // the ThreadLocalPtr itself adds nothing.
  EXPECT_EQ(10, Widget::totalVal_);
}

TEST(ThreadLocalPtr, resetNull) {
  ThreadLocalPtr<int> tl;
  tl.reset(new int(4));
  EXPECT_TRUE(static_cast<bool>(tl));
  EXPECT_EQ(*tl.get(), 4);
  tl.reset();
  EXPECT_FALSE(static_cast<bool>(tl));
}

// Test deleting the ThreadLocalPtr object
TEST(ThreadLocalPtr, CustomDeleter2) {
  Widget::totalVal_ = 0;
  std::thread t;
  std::mutex mutex;
  std::condition_variable cv;
  enum class State { START, DONE, EXIT };
  State state = State::START;
  {
    ThreadLocalPtr<Widget> w;
    t = std::thread([&]() {
      w.reset(new Widget(), Widget::customDeleter);
      w.get()->val_ += 10;

      // Notify main thread that we're done
      {
        std::unique_lock<std::mutex> lock(mutex);
        state = State::DONE;
        cv.notify_all();
      }
      // Wait for main thread to allow us to exit
      {
        std::unique_lock<std::mutex> lock(mutex);
        while (state != State::EXIT) {
          cv.wait(lock);
        }
      }
    });

    // Wait for the worker thread to start (and set w.get()->val_)
    {
      std::unique_lock<std::mutex> lock(mutex);
      while (state != State::DONE) {
        cv.wait(lock);
      }
    }

    // Thread started but hasn't exited yet
    EXPECT_EQ(0, Widget::totalVal_);

    // Destroy ThreadLocalPtr<Widget> (by letting it go out of scope)
  }
  EXPECT_EQ(1010, Widget::totalVal_);

  // Allow thread to exit
  {
    std::unique_lock<std::mutex> lock(mutex);
    state = State::EXIT;
    cv.notify_all();
  }
  t.join();

  EXPECT_EQ(1010, Widget::totalVal_);
}

TEST(ThreadLocal, BasicDestructor) {
  Widget::totalVal_ = 0;
  ThreadLocal<Widget> w;
  std::thread([&w]() { w->val_ += 10; }).join();
  EXPECT_EQ(10, Widget::totalVal_);
}

TEST(ThreadLocal, SimpleRepeatDestructor) {
  Widget::totalVal_ = 0;
  {
    ThreadLocal<Widget> w;
    w->val_ += 10;
  }
  {
    ThreadLocal<Widget> w;
    w->val_ += 10;
  }
  EXPECT_EQ(20, Widget::totalVal_);
}

TEST(ThreadLocal, InterleavedDestructors) {
  Widget::totalVal_ = 0;
  ThreadLocal<Widget>* w = nullptr;
  int wVersion = 0;
  const int wVersionMax = 2;
  int thIter = 0;
  std::mutex lock;
  auto th = std::thread([&]() {
    int wVersionPrev = 0;
    while (true) {
      while (true) {
        std::lock_guard<std::mutex> g(lock);
        if (wVersion > wVersionMax) {
          return;
        }
        if (wVersion > wVersionPrev) {
          // We have a new version of w, so it should be initialized to zero
          EXPECT_EQ((*w)->val_, 0);
          break;
        }
      }
      {
        std::lock_guard<std::mutex> g(lock);
        wVersionPrev = wVersion;
        (*w)->val_ += 10;
        ++thIter;
      }
    }
  });
  FOR_EACH_RANGE(i, 0, wVersionMax) {
    int thIterPrev = 0;
    {
      std::lock_guard<std::mutex> g(lock);
      thIterPrev = thIter;
      delete w;
      w = new ThreadLocal<Widget>();
      ++wVersion;
    }
    while (true) {
      std::lock_guard<std::mutex> g(lock);
      if (thIter > thIterPrev) {
        break;
      }
    }
  }
  {
    std::lock_guard<std::mutex> g(lock);
    wVersion = wVersionMax + 1;
  }
  th.join();
  EXPECT_EQ(wVersionMax * 10, Widget::totalVal_);
}

class SimpleThreadCachedInt {
  class NewTag;
  ThreadLocal<int, NewTag> val_;

 public:
  void add(int val) { *val_ += val; }

  int read() {
    int ret = 0;
    for (const auto& i : val_.accessAllThreads()) {
      ret += i;
    }
    return ret;
  }
};

TEST(ThreadLocalPtr, AccessAllThreadsCounter) {
  const int kNumThreads = 10;
  SimpleThreadCachedInt stci;
  std::atomic<bool> run(true);
  std::atomic<int> totalAtomic(0);
  std::vector<std::thread> threads;
  for (int i = 0; i < kNumThreads; ++i) {
    threads.push_back(std::thread([&,i]() {
      stci.add(1);
      totalAtomic.fetch_add(1);
      while (run.load()) { usleep(100); }
    }));
  }
  while (totalAtomic.load() != kNumThreads) { usleep(100); }
  EXPECT_EQ(kNumThreads, stci.read());
  run.store(false);
  for (auto& t : threads) {
    t.join();
  }
}

TEST(ThreadLocal, resetNull) {
  ThreadLocal<int> tl;
  tl.reset(new int(4));
  EXPECT_EQ(*tl.get(), 4);
  tl.reset();
  EXPECT_EQ(*tl.get(), 0);
  tl.reset(new int(5));
  EXPECT_EQ(*tl.get(), 5);
}

namespace {
struct Tag {};

struct Foo {
  folly::ThreadLocal<int, Tag> tl;
};
}  // namespace

TEST(ThreadLocal, Movable1) {
  Foo a;
  Foo b;
  EXPECT_TRUE(a.tl.get() != b.tl.get());

  a = Foo();
  b = Foo();
  EXPECT_TRUE(a.tl.get() != b.tl.get());
}

TEST(ThreadLocal, Movable2) {
  std::map<int, Foo> map;

  map[42];
  map[10];
  map[23];
  map[100];

  std::set<void*> tls;
  for (auto& m : map) {
    tls.insert(m.second.tl.get());
  }

  // Make sure that we have 4 different instances of *tl
  EXPECT_EQ(4, tls.size());
}

namespace {

constexpr size_t kFillObjectSize = 300;

std::atomic<uint64_t> gDestroyed;

/**
 * Fill a chunk of memory with a unique-ish pattern that includes the thread id
 * (so deleting one of these from another thread would cause a failure).
 * Verify it explicitly and on destruction.
 */
class FillObject {
 public:
  explicit FillObject(uint64_t idx) : idx_(idx) {
    uint64_t v = val();
    for (size_t i = 0; i < kFillObjectSize; ++i) {
      data_[i] = v;
    }
  }

  void check() {
    uint64_t v = val();
    for (size_t i = 0; i < kFillObjectSize; ++i) {
      CHECK_EQ(v, data_[i]);
    }
  }

  ~FillObject() {
    check();
    ++gDestroyed;
  }

 private:
  uint64_t val() const {
    return (idx_ << 40) | uint64_t(pthread_self());
  }

  uint64_t idx_;
  uint64_t data_[kFillObjectSize];
};

}  // namespace

#if FOLLY_HAVE_STD__THIS_THREAD__SLEEP_FOR
TEST(ThreadLocal, Stress) {
  constexpr size_t numFillObjects = 250;
  std::array<ThreadLocalPtr<FillObject>, numFillObjects> objects;

  constexpr size_t numThreads = 32;
  constexpr size_t numReps = 20;

  std::vector<std::thread> threads;
  threads.reserve(numThreads);

  for (size_t i = 0; i < numThreads; ++i) {
    threads.emplace_back([&objects] {
      for (size_t rep = 0; rep < numReps; ++rep) {
        for (size_t i = 0; i < objects.size(); ++i) {
          objects[i].reset(new FillObject(rep * objects.size() + i));
          std::this_thread::sleep_for(std::chrono::microseconds(100));
        }
        for (size_t i = 0; i < objects.size(); ++i) {
          objects[i]->check();
        }
      }
    });
  }

  for (auto& t : threads) {
    t.join();
  }

  EXPECT_EQ(numFillObjects * numThreads * numReps, gDestroyed);
}
#endif

// Yes, threads and fork don't mix
// (http://cppwisdom.quora.com/Why-threads-and-fork-dont-mix) but if you're
// stupid or desperate enough to try, we shouldn't stand in your way.

namespace {
class HoldsOne {
 public:
  HoldsOne() : value_(1) { }
  // Do an actual access to catch the buggy case where this == nullptr
  int value() const { return value_; }
 private:
  int value_;
};

struct HoldsOneTag {};

ThreadLocal<HoldsOne, HoldsOneTag> ptr;

int totalValue() {
  int value = 0;
  for (auto& p : ptr.accessAllThreads()) {
    value += p.value();
  }
  return value;
}
}  // namespace

TEST(ThreadLocal, Fork) {
  EXPECT_EQ(1, ptr->value());  // ensure created
  EXPECT_EQ(1, totalValue());

  // Spawn a new thread
  std::mutex mutex;
  bool started = false;
  std::condition_variable startedCond;
  bool stopped = false;
  std::condition_variable stoppedCond;

  std::thread t([&] () {
    EXPECT_EQ(1, ptr->value());  // ensure created
    {
      std::unique_lock<std::mutex> lock(mutex);
      started = true;
      startedCond.notify_all();
    }
    {
      std::unique_lock<std::mutex> lock(mutex);
      while (!stopped) {
        stoppedCond.wait(lock);
      }
    }
  });

  {
    std::unique_lock<std::mutex> lock(mutex);
    while (!started) {
      startedCond.wait(lock);
    }
  }

  EXPECT_EQ(2, totalValue());

  pid_t pid = fork();
  if (pid == 0) {
    // in the child: only the forking thread survives
    int v = totalValue();

    // exit successfully if v == 1 (one thread)
    // diagnostic error code otherwise :)
    switch (v) {
    case 1: _exit(0);
    case 0: _exit(1);
    }
    _exit(2);
  } else if (pid > 0) {
    // in the parent
    int status;
    EXPECT_EQ(pid, waitpid(pid, &status, 0));
    EXPECT_TRUE(WIFEXITED(status));
    EXPECT_EQ(0, WEXITSTATUS(status));
  } else {
    EXPECT_TRUE(false) << "fork failed";
  }

  EXPECT_EQ(2, totalValue());

  {
    std::unique_lock<std::mutex> lock(mutex);
    stopped = true;
    stoppedCond.notify_all();
  }
  t.join();

  EXPECT_EQ(1, totalValue());
}

// Simple reference implementation using pthread_getspecific
template <typename T>
class PThreadGetSpecific {
 public:
  PThreadGetSpecific() : key_(0) {
    pthread_key_create(&key_, OnThreadExit);
  }

  T* get() const {
    return static_cast<T*>(pthread_getspecific(key_));
  }

  void reset(T* t) {
    delete get();
    pthread_setspecific(key_, t);
  }

  static void OnThreadExit(void* obj) {
    delete static_cast<T*>(obj);
  }

 private:
  pthread_key_t key_;
};

DEFINE_int32(numThreads, 8, "Number of simultaneous threads for benchmarks.");

#define REG(var)                                            \
  BENCHMARK(FB_CONCATENATE(BM_mt_, var), iters) {           \
    const int itersPerThread = iters / FLAGS_numThreads;    \
    std::vector<std::thread> threads;                       \
    for (int i = 0; i < FLAGS_numThreads; ++i) {            \
      threads.push_back(std::thread([&]() {                 \
        var.reset(new int(0));                              \
        for (int i = 0; i < itersPerThread; ++i) {          \
          ++(*var.get());                                   \
        }                                                   \
      }));                                                  \
    }                                                       \
    for (auto& t : threads) {                               \
      t.join();                                             \
    }                                                       \
  }

ThreadLocalPtr<int> tlp;
REG(tlp);
PThreadGetSpecific<int> pthread_get_specific;
REG(pthread_get_specific);
boost::thread_specific_ptr<int> boost_tsp;
REG(boost_tsp);
BENCHMARK_DRAW_LINE();

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  google::ParseCommandLineFlags(&argc, &argv, true);
  google::SetCommandLineOptionWithMode(
      "bm_max_iters", "100000000", google::SET_FLAG_IF_DEFAULT);
  if (FLAGS_benchmark) {
    folly::runBenchmarks();
  }
  return RUN_ALL_TESTS();
}

/*
Ran with 24 threads on dual 12-core Xeon(R) X5650 @ 2.67GHz with 12-MB caches

Benchmark                                Iters   Total t    t/iter  iter/sec
------------------------------------------------------------------------------
*       BM_mt_tlp                    100000000  39.88 ms  398.8 ps   2.335 G
+5.91%  BM_mt_pthread_get_specific   100000000  42.23 ms  422.3 ps   2.205 G
+ 295%  BM_mt_boost_tsp              100000000  157.8 ms  1.578 ns   604.5 M
------------------------------------------------------------------------------
*/
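
// The numbers above are machine-specific. As a rough sketch of how to get a
// comparable run: build this test and invoke the binary with the --benchmark
// flag (checked in main() above), optionally overriding the --numThreads flag
// defined in this file. The binary name below is only an example and depends
// on your build setup.
//
//   ./thread_local_test --benchmark --numThreads=24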