/*
 * Copyright 2015 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <folly/test/DeterministicSchedule.h>
#include <algorithm>
#include <list>
#include <mutex>
#include <random>
#include <utility>
#include <unordered_map>
#include <assert.h>

namespace folly { namespace test {

FOLLY_TLS sem_t* DeterministicSchedule::tls_sem;
FOLLY_TLS DeterministicSchedule* DeterministicSchedule::tls_sched;

// access is protected by futexLock
static std::unordered_map<detail::Futex<DeterministicAtomic>*,
                          std::list<std::pair<uint32_t,bool*>>> futexQueues;

static std::mutex futexLock;

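// How scheduling works: every thread participating in the schedule owns
// a semaphore (sems_), and exactly one of those semaphores is posted at
// any time.  beforeSharedAccess() waits on the calling thread's own
// semaphore and afterSharedAccess() posts the semaphore chosen by
// scheduler_, so the interleaving of instrumented operations is fully
// determined by the scheduler function.
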
DeterministicSchedule::DeterministicSchedule(
        const std::function<int(int)>& scheduler)
  : scheduler_(scheduler)
{
  assert(tls_sem == nullptr);
  assert(tls_sched == nullptr);

  tls_sem = new sem_t;
  sem_init(tls_sem, 0, 1);
  sems_.push_back(tls_sem);

  tls_sched = this;
}

DeterministicSchedule::~DeterministicSchedule() {
  assert(tls_sched == this);
  assert(sems_.size() == 1);
  assert(sems_[0] == tls_sem);
  beforeThreadExit();
}

std::function<int(int)>
DeterministicSchedule::uniform(long seed) {
  auto rand = std::make_shared<std::ranlux48>(seed);
  return [rand](size_t numActive) {
    auto dist = std::uniform_int_distribution<int>(0, numActive - 1);
    return dist(*rand);
  };
}

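// UniformSubset restricts scheduling choices to a rotating subset of at
// most subsetSize_ threads: every stepsBetweenSelect_ decisions it
// re-randomizes the first subsetSize_ entries of perm_ with a partial
// Fisher-Yates shuffle, so a small group of threads runs together for a
// while before the group changes.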
struct UniformSubset {
  UniformSubset(long seed, int subsetSize, int stepsBetweenSelect)
    : uniform_(DeterministicSchedule::uniform(seed))
    , subsetSize_(subsetSize)
    , stepsBetweenSelect_(stepsBetweenSelect)
    , stepsLeft_(0)
  {
  }

  size_t operator()(size_t numActive) {
    adjustPermSize(numActive);
    if (stepsLeft_-- == 0) {
      stepsLeft_ = stepsBetweenSelect_ - 1;
      shufflePrefix();
    }
    return perm_[uniform_(std::min(numActive, subsetSize_))];
  }

 private:
  std::function<int(int)> uniform_;
  const size_t subsetSize_;
  const int stepsBetweenSelect_;

  int stepsLeft_;
  // only the first subsetSize_ entries of perm_ are properly randomized
  std::vector<int> perm_;

  void adjustPermSize(size_t numActive) {
    if (perm_.size() > numActive) {
      perm_.erase(std::remove_if(perm_.begin(), perm_.end(),
              [=](size_t x){ return x >= numActive; }), perm_.end());
    } else {
      while (perm_.size() < numActive) {
        perm_.push_back(perm_.size());
      }
    }
    assert(perm_.size() == numActive);
  }

  void shufflePrefix() {
    for (size_t i = 0; i < std::min(perm_.size() - 1, subsetSize_); ++i) {
      int j = uniform_(perm_.size() - i) + i;
      std::swap(perm_[i], perm_[j]);
    }
  }
};

std::function<int(int)>
DeterministicSchedule::uniformSubset(long seed, int n, int m) {
  auto gen = std::make_shared<UniformSubset>(seed, n, m);
  return [=](size_t numActive) { return (*gen)(numActive); };
}

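// beforeSharedAccess() waits for this thread's turn (its own semaphore)
// and afterSharedAccess() hands the turn to whichever thread scheduler_
// picks next.  Outside of a DeterministicSchedule both are no-ops, so
// instrumented code still runs normally.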
void
DeterministicSchedule::beforeSharedAccess() {
  if (tls_sem) {
    sem_wait(tls_sem);
  }
}

void
DeterministicSchedule::afterSharedAccess() {
  auto sched = tls_sched;
  if (!sched) {
    return;
  }

  sem_post(sched->sems_[sched->scheduler_(sched->sems_.size())]);
}

int
DeterministicSchedule::getRandNumber(int n) {
  if (tls_sched) {
    return tls_sched->scheduler_(n);
  }
  return std::rand() % n;
}

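// Thread creation is a handshake: the creating thread allocates and
// registers the child's semaphore in beforeThreadCreate(), and the
// child spins in afterThreadCreate() until it appears in active_
// (which the creator is expected to arrange, e.g. via the thread()
// helper declared in DeterministicSchedule.h), so the child only ever
// runs under the scheduler's control.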
sem_t*
DeterministicSchedule::beforeThreadCreate() {
  sem_t* s = new sem_t;
  sem_init(s, 0, 0);
  beforeSharedAccess();
  sems_.push_back(s);
  afterSharedAccess();
  return s;
}

void
DeterministicSchedule::afterThreadCreate(sem_t* sem) {
  assert(tls_sem == nullptr);
  assert(tls_sched == nullptr);
  tls_sem = sem;
  tls_sched = this;
  bool started = false;
  while (!started) {
    beforeSharedAccess();
    if (active_.count(std::this_thread::get_id()) == 1) {
      started = true;
    }
    afterSharedAccess();
  }
}

void
DeterministicSchedule::beforeThreadExit() {
  assert(tls_sched == this);
  beforeSharedAccess();
  sems_.erase(std::find(sems_.begin(), sems_.end(), tls_sem));
  active_.erase(std::this_thread::get_id());
  if (sems_.size() > 0) {
    afterSharedAccess();
  }
  sem_destroy(tls_sem);
  delete tls_sem;
  tls_sem = nullptr;
  tls_sched = nullptr;
}

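// join() lets the schedule keep running until the child has removed
// itself from active_ in beforeThreadExit(), and only then joins the
// underlying std::thread.
//
// A minimal usage sketch (hypothetical test code, not part of this
// file), assuming the thread() helper declared in
// DeterministicSchedule.h:
//
//   DeterministicSchedule sched(DeterministicSchedule::uniform(0));
//   DeterministicAtomic<int> counter(0);
//   auto child = sched.thread([&] { counter.fetch_add(1); });
//   DeterministicSchedule::join(child);
//   assert(counter.load() == 1);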
void
DeterministicSchedule::join(std::thread& child) {
  auto sched = tls_sched;
  if (sched) {
    bool done = false;
    while (!done) {
      beforeSharedAccess();
      done = !sched->active_.count(child.get_id());
      afterSharedAccess();
    }
  }
  child.join();
}

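// post(), tryWait() and wait() wrap the corresponding sem_* calls in
// beforeSharedAccess()/afterSharedAccess(), so every semaphore
// operation in code under test is itself a deterministic scheduling
// point.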
void
DeterministicSchedule::post(sem_t* sem) {
  beforeSharedAccess();
  sem_post(sem);
  afterSharedAccess();
}

bool
DeterministicSchedule::tryWait(sem_t* sem) {
  beforeSharedAccess();
  int rv = sem_trywait(sem);
  afterSharedAccess();
  if (rv == 0) {
    return true;
  } else {
    assert(errno == EAGAIN);
    return false;
  }
}

void
DeterministicSchedule::wait(sem_t* sem) {
  while (!tryWait(sem)) {
    // not a busy-wait: each tryWait() yields to the deterministic
    // scheduler, so another thread gets to run before we retry
  }
}

}}

namespace folly { namespace detail {

using namespace test;
using namespace std::chrono;

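// Simulated futex wait: a waiter that still sees the expected value
// parks itself on futexQueues[this] and then repeatedly yields its turn
// to the scheduler until futexWake() sets its 'awoken' flag.  If a
// timeout was requested, each pass through the loop also has a 10%
// chance of giving up early with TIMEDOUT (or occasionally INTERRUPTED)
// so that timeout paths are exercised deterministically.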
template <>
FutexResult
Futex<DeterministicAtomic>::futexWaitImpl(
        uint32_t expected,
        time_point<system_clock>* absSystemTimeout,
        time_point<steady_clock>* absSteadyTimeout,
        uint32_t waitMask) {
  bool hasTimeout = absSystemTimeout != nullptr || absSteadyTimeout != nullptr;
  bool awoken = false;
  FutexResult result = FutexResult::AWOKEN;

  DeterministicSchedule::beforeSharedAccess();
  futexLock.lock();
  if (data == expected) {
    auto& queue = futexQueues[this];
    queue.push_back(std::make_pair(waitMask, &awoken));
    auto ours = queue.end();
    ours--;
    while (!awoken) {
      futexLock.unlock();
      DeterministicSchedule::afterSharedAccess();
      DeterministicSchedule::beforeSharedAccess();
      futexLock.lock();

      // If a timeout was requested and we haven't been woken yet,
      // simulate the wait failing (timeout or interruption) with a
      // 10% probability on each pass through the loop
      if (!awoken && hasTimeout &&
          DeterministicSchedule::getRandNumber(100) < 10) {
        assert(futexQueues.count(this) != 0 &&
               &futexQueues[this] == &queue);
        queue.erase(ours);
        if (queue.empty()) {
          futexQueues.erase(this);
        }
        // Report ETIMEDOUT 90% of the time and another failure
        // (INTERRUPTED) the remaining 10%
        result =
          DeterministicSchedule::getRandNumber(100) >= 10
              ? FutexResult::TIMEDOUT : FutexResult::INTERRUPTED;
        break;
      }
    }
  } else {
    result = FutexResult::VALUE_CHANGED;
  }
  futexLock.unlock();
  DeterministicSchedule::afterSharedAccess();
  return result;
}

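// Simulated futex wake: mark up to 'count' waiters whose waitMask
// intersects wakeMask as awoken, drop them from the queue, and return
// how many were woken.  The waiters themselves notice the flag the next
// time the scheduler gives them a turn.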
template<>
int
Futex<DeterministicAtomic>::futexWake(int count, uint32_t wakeMask) {
  int rv = 0;
  DeterministicSchedule::beforeSharedAccess();
  futexLock.lock();
  if (futexQueues.count(this) > 0) {
    auto& queue = futexQueues[this];
    auto iter = queue.begin();
    while (iter != queue.end() && rv < count) {
      auto cur = iter++;
      if ((cur->first & wakeMask) != 0) {
        *(cur->second) = true;
        rv++;
        queue.erase(cur);
      }
    }
    if (queue.empty()) {
      futexQueues.erase(this);
    }
  }
  futexLock.unlock();
  DeterministicSchedule::afterSharedAccess();
  return rv;
}

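// The specializations below let folly's CacheLocality / AccessSpreader
// machinery be instantiated on DeterministicAtomic for tests: instead
// of probing the host they use a fixed uniform topology of 16 caches
// and hand out sequential per-thread "cpu" ids, so stripe placement
// does not depend on the machine running the tests.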
template<>
CacheLocality const& CacheLocality::system<test::DeterministicAtomic>() {
  static CacheLocality cache(CacheLocality::uniform(16));
  return cache;
}

template<>
test::DeterministicAtomic<size_t>
    SequentialThreadId<test::DeterministicAtomic>::prevId(0);

template<>
FOLLY_TLS size_t
    SequentialThreadId<test::DeterministicAtomic>::currentId(0);

template<>
const AccessSpreader<test::DeterministicAtomic>
AccessSpreader<test::DeterministicAtomic>::stripeByCore(
    CacheLocality::system<>().numCachesByLevel.front());

template<>
const AccessSpreader<test::DeterministicAtomic>
AccessSpreader<test::DeterministicAtomic>::stripeByChip(
    CacheLocality::system<>().numCachesByLevel.back());

template<>
AccessSpreaderArray<test::DeterministicAtomic,128>
AccessSpreaderArray<test::DeterministicAtomic,128>::sharedInstance = {};

template<>
Getcpu::Func
AccessSpreader<test::DeterministicAtomic>::pickGetcpuFunc(size_t numStripes) {
  return &SequentialThreadId<test::DeterministicAtomic>::getcpu;
}

}}