/*
* Copyright 2013-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*/
#include <folly/test/DeterministicSchedule.h>

#include <assert.h>
#include <errno.h>

#include <algorithm>
#include <list>
#include <mutex>
#include <random>
#include <unordered_map>
#include <utility>

#include <folly/Random.h>
+namespace folly {
+namespace test {
// Per-thread schedule state: the thread's baton semaphore, the schedule it
// belongs to (nullptr when not under a DeterministicSchedule), its lazily
// assigned deterministic "cpu" id, and a one-shot auxiliary action.
FOLLY_TLS sem_t* DeterministicSchedule::tls_sem;
FOLLY_TLS DeterministicSchedule* DeterministicSchedule::tls_sched;
+FOLLY_TLS unsigned DeterministicSchedule::tls_threadId;
+thread_local AuxAct DeterministicSchedule::tls_aux_act;
+AuxChk DeterministicSchedule::aux_chk;
// access is protected by futexLock
// Waiters parked on a simulated futex: per-futex list of (waitMask, awoken*).
static std::unordered_map<detail::Futex<DeterministicAtomic>*,
- std::list<std::pair<uint32_t,bool*>>> futexQueues;
+ std::list<std::pair<uint32_t, bool*>>> futexQueues;
static std::mutex futexLock;
// Constructor: installs this schedule on the calling thread and creates its
// baton semaphore (initial value 1, i.e. the creator holds the baton).
// NOTE(review): this span is a diff fragment with context elided — the lines
// between sem_init() and beforeThreadExit() (registering tls_sem in sems_,
// setting tls_sched, and the ~DeterministicSchedule() header to which
// beforeThreadExit() presumably belongs) are missing; confirm upstream.
DeterministicSchedule::DeterministicSchedule(
- const std::function<int(int)>& scheduler)
- : scheduler_(scheduler)
-{
+ const std::function<size_t(size_t)>& scheduler)
+ : scheduler_(scheduler), nextThreadId_(1), step_(0) {
assert(tls_sem == nullptr);
assert(tls_sched == nullptr);
+ assert(tls_aux_act == nullptr);
tls_sem = new sem_t;
sem_init(tls_sem, 0, 1);
beforeThreadExit();
}
-std::function<int(int)>
-DeterministicSchedule::uniform(long seed) {
+std::function<size_t(size_t)> DeterministicSchedule::uniform(uint64_t seed) {
auto rand = std::make_shared<std::ranlux48>(seed);
- return [rand](int numActive) {
- auto dist = std::uniform_int_distribution<int>(0, numActive - 1);
+ return [rand](size_t numActive) {
+ auto dist = std::uniform_int_distribution<size_t>(0, numActive - 1);
return dist(*rand);
};
}
// Scheduler functor that restricts scheduling choices to a rotating subset
// of subsetSize_ threads, reshuffling which threads are eligible every
// stepsBetweenSelect_ steps.  Used by DeterministicSchedule::uniformSubset.
// NOTE(review): this span is a diff fragment — the tail of operator()
// (after the stepsLeft_ reset) and the tail of adjustPermSize() (after the
// push_back loop) are elided by missing diff context; confirm upstream.
struct UniformSubset {
-  UniformSubset(long seed, int subsetSize, int stepsBetweenSelect)
-    : uniform_(DeterministicSchedule::uniform(seed))
-    , subsetSize_(subsetSize)
-    , stepsBetweenSelect_(stepsBetweenSelect)
-    , stepsLeft_(0)
-  {
-  }
+  UniformSubset(uint64_t seed, size_t subsetSize, size_t stepsBetweenSelect)
+      : uniform_(DeterministicSchedule::uniform(seed)),
+        subsetSize_(subsetSize),
+        stepsBetweenSelect_(stepsBetweenSelect),
+        stepsLeft_(0) {}
-  int operator()(int numActive) {
+  size_t operator()(size_t numActive) {
    adjustPermSize(numActive);
    if (stepsLeft_-- == 0) {
      stepsLeft_ = stepsBetweenSelect_ - 1;
    }
 private:
-  std::function<int(int)> uniform_;
-  const int subsetSize_;
-  const int stepsBetweenSelect_;
+  std::function<size_t(size_t)> uniform_;
+  const size_t subsetSize_;
+  const size_t stepsBetweenSelect_;
-  int stepsLeft_;
+  size_t stepsLeft_;
  // only the first subsetSize_ is properly randomized
-  std::vector<int> perm_;
+  std::vector<size_t> perm_;
  // Shrinks or grows perm_ so it holds exactly the ids [0, numActive).
-  void adjustPermSize(int numActive) {
+  void adjustPermSize(size_t numActive) {
    if (perm_.size() > numActive) {
-      perm_.erase(std::remove_if(perm_.begin(), perm_.end(),
-          [=](int x){ return x >= numActive; }), perm_.end());
+      perm_.erase(std::remove_if(perm_.begin(),
+                                 perm_.end(),
+                                 [=](size_t x) { return x >= numActive; }),
+                  perm_.end());
    } else {
      while (perm_.size() < numActive) {
        perm_.push_back(perm_.size());
      }
  // Fisher-Yates shuffle of the first subsetSize_ entries of perm_.
  void shufflePrefix() {
-    for (int i = 0; i < std::min(int(perm_.size() - 1), subsetSize_); ++i) {
-      int j = uniform_(perm_.size() - i) + i;
+    for (size_t i = 0; i < std::min(perm_.size() - 1, subsetSize_); ++i) {
+      size_t j = uniform_(perm_.size() - i) + i;
      std::swap(perm_[i], perm_[j]);
    }
  }
};
-std::function<int(int)>
-DeterministicSchedule::uniformSubset(long seed, int n, int m) {
+std::function<size_t(size_t)>
+DeterministicSchedule::uniformSubset(uint64_t seed, size_t n, size_t m) {
auto gen = std::make_shared<UniformSubset>(seed, n, m);
- return [=](int numActive) { return (*gen)(numActive); };
+ return [=](size_t numActive) { return (*gen)(numActive); };
}
-void
-DeterministicSchedule::beforeSharedAccess() {
+void DeterministicSchedule::beforeSharedAccess() {
if (tls_sem) {
sem_wait(tls_sem);
}
}
-void
-DeterministicSchedule::afterSharedAccess() {
+void DeterministicSchedule::afterSharedAccess() {
auto sched = tls_sched;
if (!sched) {
return;
}
+ sem_post(sched->sems_[sched->scheduler_(sched->sems_.size())]);
+}
+void DeterministicSchedule::afterSharedAccess(bool success) {
+ auto sched = tls_sched;
+ if (!sched) {
+ return;
+ }
+ sched->callAux(success);
sem_post(sched->sems_[sched->scheduler_(sched->sems_.size())]);
}
-int
-DeterministicSchedule::getRandNumber(int n) {
+size_t DeterministicSchedule::getRandNumber(size_t n) {
if (tls_sched) {
return tls_sched->scheduler_(n);
}
- return std::rand() % n;
+ return Random::rand32() % n;
}
-sem_t*
-DeterministicSchedule::beforeThreadCreate() {
+int DeterministicSchedule::getcpu(unsigned* cpu,
+ unsigned* node,
+ void* /* unused */) {
+ if (!tls_threadId && tls_sched) {
+ beforeSharedAccess();
+ tls_threadId = tls_sched->nextThreadId_++;
+ afterSharedAccess();
+ }
+ if (cpu) {
+ *cpu = tls_threadId;
+ }
+ if (node) {
+ *node = tls_threadId;
+ }
+ return 0;
+}
+
+void DeterministicSchedule::setAuxAct(AuxAct& aux) {
+ tls_aux_act = aux;
+}
+
+void DeterministicSchedule::setAuxChk(AuxChk& aux) {
+ aux_chk = aux;
+}
+
+void DeterministicSchedule::clearAuxChk() {
+ aux_chk = nullptr;
+}
+
+sem_t* DeterministicSchedule::beforeThreadCreate() {
sem_t* s = new sem_t;
sem_init(s, 0, 0);
beforeSharedAccess();
return s;
}
// Called on the child thread with the semaphore allocated by
// beforeThreadCreate(); adopts it as the thread's baton.
// NOTE(review): diff fragment — the code between "tls_sem = sem;" and the
// two closing braces (presumably setting tls_sched and spinning until the
// thread appears in active_) is elided by missing diff context; confirm
// upstream before editing.
-void
-DeterministicSchedule::afterThreadCreate(sem_t* sem) {
+void DeterministicSchedule::afterThreadCreate(sem_t* sem) {
assert(tls_sem == nullptr);
assert(tls_sched == nullptr);
tls_sem = sem;
}
}
-void
-DeterministicSchedule::beforeThreadExit() {
+void DeterministicSchedule::beforeThreadExit() {
assert(tls_sched == this);
beforeSharedAccess();
sems_.erase(std::find(sems_.begin(), sems_.end(), tls_sem));
active_.erase(std::this_thread::get_id());
if (sems_.size() > 0) {
+ FOLLY_TEST_DSCHED_VLOG("exiting");
afterSharedAccess();
}
sem_destroy(tls_sem);
delete tls_sem;
tls_sem = nullptr;
tls_sched = nullptr;
+ tls_aux_act = nullptr;
}
-void
-DeterministicSchedule::join(std::thread& child) {
+void DeterministicSchedule::join(std::thread& child) {
auto sched = tls_sched;
if (sched) {
bool done = false;
while (!done) {
beforeSharedAccess();
done = !sched->active_.count(child.get_id());
+ if (done) {
+ FOLLY_TEST_DSCHED_VLOG("joined " << std::hex << child.get_id());
+ }
afterSharedAccess();
}
}
child.join();
}
-void
-DeterministicSchedule::post(sem_t* sem) {
+void DeterministicSchedule::callAux(bool success) {
+ ++step_;
+ if (tls_aux_act) {
+ tls_aux_act(success);
+ tls_aux_act = nullptr;
+ }
+ if (aux_chk) {
+ aux_chk(step_);
+ }
+}
+
+void DeterministicSchedule::post(sem_t* sem) {
beforeSharedAccess();
sem_post(sem);
+ FOLLY_TEST_DSCHED_VLOG("sem_post(" << sem << ")");
afterSharedAccess();
}
-bool
-DeterministicSchedule::tryWait(sem_t* sem) {
+bool DeterministicSchedule::tryWait(sem_t* sem) {
beforeSharedAccess();
int rv = sem_trywait(sem);
+ int e = rv == 0 ? 0 : errno;
+ FOLLY_TEST_DSCHED_VLOG("sem_trywait(" << sem << ") = " << rv
+ << " errno=" << e);
afterSharedAccess();
if (rv == 0) {
return true;
} else {
- assert(errno == EAGAIN);
+ assert(e == EAGAIN);
return false;
}
}
-void
-DeterministicSchedule::wait(sem_t* sem) {
+void DeterministicSchedule::wait(sem_t* sem) {
while (!tryWait(sem)) {
// we're not busy waiting because this is a deterministic schedule
}
}
+} // namespace test
+} // namespace folly
-}}
-
-namespace folly { namespace detail {
+namespace folly {
+namespace detail {
using namespace test;
+using namespace std::chrono;
+
+template <>
+FutexResult Futex<DeterministicAtomic>::futexWaitImpl(
+ uint32_t expected,
+ system_clock::time_point const* absSystemTimeout,
+ steady_clock::time_point const* absSteadyTimeout,
+ uint32_t waitMask) {
+ bool hasTimeout = absSystemTimeout != nullptr || absSteadyTimeout != nullptr;
+ bool awoken = false;
+ FutexResult result = FutexResult::AWOKEN;
-template<>
-bool Futex<DeterministicAtomic>::futexWait(uint32_t expected,
- uint32_t waitMask) {
- bool rv;
DeterministicSchedule::beforeSharedAccess();
+ FOLLY_TEST_DSCHED_VLOG(this << ".futexWait(" << std::hex << expected
+ << ", .., " << std::hex << waitMask
+ << ") beginning..");
futexLock.lock();
- if (data != expected) {
- rv = false;
- } else {
+ if (this->data == expected) {
auto& queue = futexQueues[this];
- bool done = false;
- queue.push_back(std::make_pair(waitMask, &done));
- while (!done) {
- futexLock.unlock();
- DeterministicSchedule::afterSharedAccess();
- DeterministicSchedule::beforeSharedAccess();
- futexLock.lock();
- }
- rv = true;
- }
- futexLock.unlock();
- DeterministicSchedule::afterSharedAccess();
- return rv;
-}
-
-FutexResult futexWaitUntilImpl(Futex<DeterministicAtomic>* futex,
- uint32_t expected, uint32_t waitMask) {
- if (futex == nullptr) {
- return FutexResult::VALUE_CHANGED;
- }
-
- bool rv = false;
- int futexErrno = 0;
-
- DeterministicSchedule::beforeSharedAccess();
- futexLock.lock();
- if (futex->data == expected) {
- auto& queue = futexQueues[futex];
- queue.push_back(std::make_pair(waitMask, &rv));
+ queue.emplace_back(waitMask, &awoken);
auto ours = queue.end();
ours--;
- while (!rv) {
+ while (!awoken) {
futexLock.unlock();
DeterministicSchedule::afterSharedAccess();
DeterministicSchedule::beforeSharedAccess();
// Simulate spurious wake-ups, timeouts each time with
// a 10% probability if we haven't been woken up already
- if (!rv && DeterministicSchedule::getRandNumber(100) < 10) {
- assert(futexQueues.count(futex) != 0 &&
- &futexQueues[futex] == &queue);
+ if (!awoken && hasTimeout &&
+ DeterministicSchedule::getRandNumber(100) < 10) {
+ assert(futexQueues.count(this) != 0 && &futexQueues[this] == &queue);
queue.erase(ours);
if (queue.empty()) {
- futexQueues.erase(futex);
+ futexQueues.erase(this);
}
- rv = false;
// Simulate ETIMEDOUT 90% of the time and other failures
// remaining time
- futexErrno =
- DeterministicSchedule::getRandNumber(100) >= 10 ? ETIMEDOUT : EINTR;
+ result = DeterministicSchedule::getRandNumber(100) >= 10
+ ? FutexResult::TIMEDOUT
+ : FutexResult::INTERRUPTED;
break;
}
}
} else {
- futexErrno = EWOULDBLOCK;
+ result = FutexResult::VALUE_CHANGED;
}
futexLock.unlock();
+
+ char const* resultStr = "?";
+ switch (result) {
+ case FutexResult::AWOKEN:
+ resultStr = "AWOKEN";
+ break;
+ case FutexResult::TIMEDOUT:
+ resultStr = "TIMEDOUT";
+ break;
+ case FutexResult::INTERRUPTED:
+ resultStr = "INTERRUPTED";
+ break;
+ case FutexResult::VALUE_CHANGED:
+ resultStr = "VALUE_CHANGED";
+ break;
+ }
+ FOLLY_TEST_DSCHED_VLOG(this << ".futexWait(" << std::hex << expected
+ << ", .., " << std::hex << waitMask << ") -> "
+ << resultStr);
DeterministicSchedule::afterSharedAccess();
- return futexErrnoToFutexResult(rv ? 0 : -1, futexErrno);
+ return result;
}
// Deterministic counterpart of futexWake: wakes up to `count` parked
// waiters whose waitMask intersects wakeMask and returns how many it woke.
// NOTE(review): diff fragment — the loop over futexQueues[this] between
// beforeSharedAccess() and the two closing braces below is elided by
// missing diff context; confirm upstream before editing.
-template<>
+template <>
int Futex<DeterministicAtomic>::futexWake(int count, uint32_t wakeMask) {
int rv = 0;
DeterministicSchedule::beforeSharedAccess();
}
}
futexLock.unlock();
+ FOLLY_TEST_DSCHED_VLOG(this << ".futexWake(" << count << ", " << std::hex
+ << wakeMask << ") -> " << rv);
DeterministicSchedule::afterSharedAccess();
return rv;
}
+} // namespace detail
-
-template<>
+template <>
CacheLocality const& CacheLocality::system<test::DeterministicAtomic>() {
static CacheLocality cache(CacheLocality::uniform(16));
return cache;
}
-template<>
-test::DeterministicAtomic<size_t>
- SequentialThreadId<test::DeterministicAtomic>::prevId(0);
-
-template<>
-FOLLY_TLS size_t
- SequentialThreadId<test::DeterministicAtomic>::currentId(0);
-
-template<>
-const AccessSpreader<test::DeterministicAtomic>
-AccessSpreader<test::DeterministicAtomic>::stripeByCore(
- CacheLocality::system<>().numCachesByLevel.front());
-
-template<>
-const AccessSpreader<test::DeterministicAtomic>
-AccessSpreader<test::DeterministicAtomic>::stripeByChip(
- CacheLocality::system<>().numCachesByLevel.back());
-
-template<>
-AccessSpreaderArray<test::DeterministicAtomic,128>
-AccessSpreaderArray<test::DeterministicAtomic,128>::sharedInstance = {};
-
-
-template<>
-Getcpu::Func
-AccessSpreader<test::DeterministicAtomic>::pickGetcpuFunc(size_t numStripes) {
- return &SequentialThreadId<test::DeterministicAtomic>::getcpu;
+template <>
+Getcpu::Func AccessSpreader<test::DeterministicAtomic>::pickGetcpuFunc() {
+ return &detail::DeterministicSchedule::getcpu;
}
-
-}}
+} // namespace folly