Fix some copyright lines in folly/detail/ and folly/test/
[folly.git] folly/test/DeterministicSchedule.cpp
/*
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <folly/test/DeterministicSchedule.h>

#include <assert.h>

#include <algorithm>
#include <list>
#include <mutex>
#include <random>
#include <unordered_map>
#include <utility>

#include <folly/Random.h>

namespace folly {
namespace test {

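// Per-thread scheduling state: each participating thread has its own
// semaphore (tls_sem), a pointer to the active schedule (tls_sched), a
// lazily assigned thread id (tls_threadId), and an optional single-shot
// auxiliary action (tls_aux_act). aux_chk is a global check run once per
// scheduling step.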
FOLLY_TLS sem_t* DeterministicSchedule::tls_sem;
FOLLY_TLS DeterministicSchedule* DeterministicSchedule::tls_sched;
FOLLY_TLS unsigned DeterministicSchedule::tls_threadId;
thread_local AuxAct DeterministicSchedule::tls_aux_act;
AuxChk DeterministicSchedule::aux_chk;

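// Wait queues for the deterministic Futex specializations defined below:
// for each futex we keep, in FIFO order, the waitMask and a wake flag for
// every blocked waiter.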
// access is protected by futexLock
static std::unordered_map<detail::Futex<DeterministicAtomic>*,
                          std::list<std::pair<uint32_t, bool*>>> futexQueues;

static std::mutex futexLock;

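// The constructing thread registers itself as the first runnable thread; its
// semaphore starts at 1 so it can enter the first critical section.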
DeterministicSchedule::DeterministicSchedule(
    const std::function<size_t(size_t)>& scheduler)
    : scheduler_(scheduler), nextThreadId_(1), step_(0) {
  assert(tls_sem == nullptr);
  assert(tls_sched == nullptr);
  assert(tls_aux_act == nullptr);

  tls_sem = new sem_t;
  sem_init(tls_sem, 0, 1);
  sems_.push_back(tls_sem);

  tls_sched = this;
}

DeterministicSchedule::~DeterministicSchedule() {
  assert(tls_sched == this);
  assert(sems_.size() == 1);
  assert(sems_[0] == tls_sem);
  beforeThreadExit();
}

std::function<size_t(size_t)> DeterministicSchedule::uniform(uint64_t seed) {
  auto rand = std::make_shared<std::ranlux48>(seed);
  return [rand](size_t numActive) {
    auto dist = std::uniform_int_distribution<size_t>(0, numActive - 1);
    return dist(*rand);
  };
}

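// Scheduler functor that, instead of choosing uniformly among all runnable
// threads, restricts choices to a prefix of subsetSize threads and reshuffles
// that prefix every stepsBetweenSelect steps.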
struct UniformSubset {
  UniformSubset(uint64_t seed, size_t subsetSize, size_t stepsBetweenSelect)
      : uniform_(DeterministicSchedule::uniform(seed)),
        subsetSize_(subsetSize),
        stepsBetweenSelect_(stepsBetweenSelect),
        stepsLeft_(0) {}

  size_t operator()(size_t numActive) {
    adjustPermSize(numActive);
    if (stepsLeft_-- == 0) {
      stepsLeft_ = stepsBetweenSelect_ - 1;
      shufflePrefix();
    }
    return perm_[uniform_(std::min(numActive, subsetSize_))];
  }

 private:
  std::function<size_t(size_t)> uniform_;
  const size_t subsetSize_;
  const size_t stepsBetweenSelect_;

  size_t stepsLeft_;
  // only the first subsetSize_ elements are properly randomized
  std::vector<size_t> perm_;

  void adjustPermSize(size_t numActive) {
    if (perm_.size() > numActive) {
      perm_.erase(std::remove_if(perm_.begin(),
                                 perm_.end(),
                                 [=](size_t x) { return x >= numActive; }),
                  perm_.end());
    } else {
      while (perm_.size() < numActive) {
        perm_.push_back(perm_.size());
      }
    }
    assert(perm_.size() == numActive);
  }

  void shufflePrefix() {
    for (size_t i = 0; i < std::min(perm_.size() - 1, subsetSize_); ++i) {
      size_t j = uniform_(perm_.size() - i) + i;
      std::swap(perm_[i], perm_[j]);
    }
  }
};

std::function<size_t(size_t)>
DeterministicSchedule::uniformSubset(uint64_t seed, size_t n, size_t m) {
  auto gen = std::make_shared<UniformSubset>(seed, n, m);
  return [=](size_t numActive) { return (*gen)(numActive); };
}

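// Every tracked shared access is bracketed by beforeSharedAccess() and
// afterSharedAccess(): the current thread waits on its own semaphore, and on
// the way out the scheduler picks one runnable thread and posts that thread's
// semaphore, so exactly one thread runs between scheduling points.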
void DeterministicSchedule::beforeSharedAccess() {
  if (tls_sem) {
    sem_wait(tls_sem);
  }
}

void DeterministicSchedule::afterSharedAccess() {
  auto sched = tls_sched;
  if (!sched) {
    return;
  }
  sem_post(sched->sems_[sched->scheduler_(sched->sems_.size())]);
}

void DeterministicSchedule::afterSharedAccess(bool success) {
  auto sched = tls_sched;
  if (!sched) {
    return;
  }
  sched->callAux(success);
  sem_post(sched->sems_[sched->scheduler_(sched->sems_.size())]);
}

size_t DeterministicSchedule::getRandNumber(size_t n) {
  if (tls_sched) {
    return tls_sched->scheduler_(n);
  }
  return Random::rand32() % n;
}

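// getcpu() lazily assigns a deterministic id to the calling thread and
// reports it as both the cpu and the numa node, so AccessSpreader behaves
// reproducibly under test.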
int DeterministicSchedule::getcpu(unsigned* cpu,
                                  unsigned* node,
                                  void* /* unused */) {
  if (!tls_threadId && tls_sched) {
    beforeSharedAccess();
    tls_threadId = tls_sched->nextThreadId_++;
    afterSharedAccess();
  }
  if (cpu) {
    *cpu = tls_threadId;
  }
  if (node) {
    *node = tls_threadId;
  }
  return 0;
}

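// setAuxAct() installs a single-shot action that runs on this thread's next
// tracked access; setAuxChk() installs a global check that runs with the
// current step count after every tracked access (see callAux()).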
void DeterministicSchedule::setAuxAct(AuxAct& aux) {
  tls_aux_act = aux;
}

void DeterministicSchedule::setAuxChk(AuxChk& aux) {
  aux_chk = aux;
}

void DeterministicSchedule::clearAuxChk() {
  aux_chk = nullptr;
}

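// Thread creation handshake: the parent allocates the child's semaphore
// (initially 0) and registers it with the schedule, and the child then spins
// deterministically until it appears in the schedule's active_ set.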
sem_t* DeterministicSchedule::beforeThreadCreate() {
  sem_t* s = new sem_t;
  sem_init(s, 0, 0);
  beforeSharedAccess();
  sems_.push_back(s);
  afterSharedAccess();
  return s;
}

void DeterministicSchedule::afterThreadCreate(sem_t* sem) {
  assert(tls_sem == nullptr);
  assert(tls_sched == nullptr);
  tls_sem = sem;
  tls_sched = this;
  bool started = false;
  while (!started) {
    beforeSharedAccess();
    if (active_.count(std::this_thread::get_id()) == 1) {
      started = true;
    }
    afterSharedAccess();
  }
}

void DeterministicSchedule::beforeThreadExit() {
  assert(tls_sched == this);
  beforeSharedAccess();
  sems_.erase(std::find(sems_.begin(), sems_.end(), tls_sem));
  active_.erase(std::this_thread::get_id());
  if (sems_.size() > 0) {
    FOLLY_TEST_DSCHED_VLOG("exiting");
    afterSharedAccess();
  }
  sem_destroy(tls_sem);
  delete tls_sem;
  tls_sem = nullptr;
  tls_sched = nullptr;
  tls_aux_act = nullptr;
}

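// join() repeatedly yields to the scheduler until the child has removed
// itself from active_, then performs the real std::thread::join().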
void DeterministicSchedule::join(std::thread& child) {
  auto sched = tls_sched;
  if (sched) {
    bool done = false;
    while (!done) {
      beforeSharedAccess();
      done = !sched->active_.count(child.get_id());
      if (done) {
        FOLLY_TEST_DSCHED_VLOG("joined " << std::hex << child.get_id());
      }
      afterSharedAccess();
    }
  }
  child.join();
}

void DeterministicSchedule::callAux(bool success) {
  ++step_;
  if (tls_aux_act) {
    tls_aux_act(success);
    tls_aux_act = nullptr;
  }
  if (aux_chk) {
    aux_chk(step_);
  }
}

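// post(), tryWait(), and wait() wrap the corresponding sem_* calls in
// scheduling points, so their interleaving is controlled by the deterministic
// scheduler rather than by the OS.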
void DeterministicSchedule::post(sem_t* sem) {
  beforeSharedAccess();
  sem_post(sem);
  FOLLY_TEST_DSCHED_VLOG("sem_post(" << sem << ")");
  afterSharedAccess();
}

bool DeterministicSchedule::tryWait(sem_t* sem) {
  beforeSharedAccess();
  int rv = sem_trywait(sem);
  int e = rv == 0 ? 0 : errno;
  FOLLY_TEST_DSCHED_VLOG("sem_trywait(" << sem << ") = " << rv
                                        << " errno=" << e);
  afterSharedAccess();
  if (rv == 0) {
    return true;
  } else {
    assert(e == EAGAIN);
    return false;
  }
}

void DeterministicSchedule::wait(sem_t* sem) {
  while (!tryWait(sem)) {
    // we're not busy waiting because this is a deterministic schedule
  }
}
} // namespace test
} // namespace folly

namespace folly {
namespace detail {

using namespace test;
using namespace std::chrono;

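// Futex specialization for DeterministicAtomic: futexWaitImpl() parks the
// caller on futexQueues under the deterministic scheduler instead of issuing
// a real futex syscall, and randomly (reproducibly, when run under a
// DeterministicSchedule) injects timeouts and interruptions whenever a
// timeout was requested.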
template <>
FutexResult Futex<DeterministicAtomic>::futexWaitImpl(
    uint32_t expected,
    time_point<system_clock>* absSystemTimeout,
    time_point<steady_clock>* absSteadyTimeout,
    uint32_t waitMask) {
  bool hasTimeout = absSystemTimeout != nullptr || absSteadyTimeout != nullptr;
  bool awoken = false;
  FutexResult result = FutexResult::AWOKEN;

  DeterministicSchedule::beforeSharedAccess();
  FOLLY_TEST_DSCHED_VLOG(this << ".futexWait(" << std::hex << expected
                              << ", .., " << std::hex << waitMask
                              << ") beginning..");
  futexLock.lock();
  if (this->data == expected) {
    auto& queue = futexQueues[this];
    queue.emplace_back(waitMask, &awoken);
    auto ours = queue.end();
    ours--;
    while (!awoken) {
      futexLock.unlock();
      DeterministicSchedule::afterSharedAccess();
      DeterministicSchedule::beforeSharedAccess();
      futexLock.lock();

      // Simulate spurious wake-ups and timeouts: on each pass there is a
      // 10% chance of giving up if we haven't been woken up already
      if (!awoken && hasTimeout &&
          DeterministicSchedule::getRandNumber(100) < 10) {
        assert(futexQueues.count(this) != 0 && &futexQueues[this] == &queue);
        queue.erase(ours);
        if (queue.empty()) {
          futexQueues.erase(this);
        }
        // Simulate ETIMEDOUT 90% of the time and other failures
        // (interruption) the remaining 10%
        result = DeterministicSchedule::getRandNumber(100) >= 10
                     ? FutexResult::TIMEDOUT
                     : FutexResult::INTERRUPTED;
        break;
      }
    }
  } else {
    result = FutexResult::VALUE_CHANGED;
  }
  futexLock.unlock();

  char const* resultStr = "?";
  switch (result) {
    case FutexResult::AWOKEN:
      resultStr = "AWOKEN";
      break;
    case FutexResult::TIMEDOUT:
      resultStr = "TIMEDOUT";
      break;
    case FutexResult::INTERRUPTED:
      resultStr = "INTERRUPTED";
      break;
    case FutexResult::VALUE_CHANGED:
      resultStr = "VALUE_CHANGED";
      break;
  }
  FOLLY_TEST_DSCHED_VLOG(this << ".futexWait(" << std::hex << expected
                              << ", .., " << std::hex << waitMask << ") -> "
                              << resultStr);
  DeterministicSchedule::afterSharedAccess();
  return result;
}

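// futexWake() marks up to `count` waiters whose waitMask matches wakeMask as
// awoken and removes them from the queue, mirroring FUTEX_WAKE semantics.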
template <>
int Futex<DeterministicAtomic>::futexWake(int count, uint32_t wakeMask) {
  int rv = 0;
  DeterministicSchedule::beforeSharedAccess();
  futexLock.lock();
  if (futexQueues.count(this) > 0) {
    auto& queue = futexQueues[this];
    auto iter = queue.begin();
    while (iter != queue.end() && rv < count) {
      auto cur = iter++;
      if ((cur->first & wakeMask) != 0) {
        *(cur->second) = true;
        rv++;
        queue.erase(cur);
      }
    }
    if (queue.empty()) {
      futexQueues.erase(this);
    }
  }
  futexLock.unlock();
  FOLLY_TEST_DSCHED_VLOG(this << ".futexWake(" << count << ", " << std::hex
                              << wakeMask << ") -> " << rv);
  DeterministicSchedule::afterSharedAccess();
  return rv;
}
} // namespace detail

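// Under DeterministicAtomic, pretend the machine has a fixed uniform cache
// topology and route getcpu through DeterministicSchedule, so that
// AccessSpreader decisions are reproducible.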
template <>
CacheLocality const& CacheLocality::system<test::DeterministicAtomic>() {
  static CacheLocality cache(CacheLocality::uniform(16));
  return cache;
}

template <>
Getcpu::Func AccessSpreader<test::DeterministicAtomic>::pickGetcpuFunc() {
  return &test::DeterministicSchedule::getcpu;
}
} // namespace folly