/*
 * Copyright 2015 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <folly/detail/Futex.h>

#include <cassert>
#include <cerrno>
#include <condition_variable>
#include <cstdint>
#include <cstring>
#include <ctime>
#include <mutex>

#include <boost/intrusive/list.hpp>
#include <folly/Hash.h>
#include <folly/ScopeGuard.h>

#ifdef __linux__
# include <linux/futex.h>
# include <sys/syscall.h>
# include <unistd.h>
#endif
32 using namespace std::chrono;
34 namespace folly { namespace detail {
////////////////////////////////////////////////////
// native implementation using the futex() syscall

/// Certain toolchains (like Android's) don't include the full futex API in
/// their headers even though they support it. Make sure we have our constants
/// even if the headers don't have them.
#ifndef FUTEX_WAIT_BITSET
# define FUTEX_WAIT_BITSET 9
#endif
#ifndef FUTEX_WAKE_BITSET
# define FUTEX_WAKE_BITSET 10
#endif
#ifndef FUTEX_PRIVATE_FLAG
# define FUTEX_PRIVATE_FLAG 128
#endif
#ifndef FUTEX_CLOCK_REALTIME
# define FUTEX_CLOCK_REALTIME 256
#endif
/// Wake up to `count` waiters parked on the futex word at `addr` whose
/// wait mask intersects `wakeMask`.  Returns the number of threads woken.
/// Syscall errors are deliberately swallowed (returning 0) so that a futex
/// may guard its own destruction, similar to this glibc sem_post/sem_wait
/// bug: https://sourceware.org/bugzilla/show_bug.cgi?id=12674
int nativeFutexWake(void* addr, int count, uint32_t wakeMask) {
  int rv = syscall(__NR_futex,
                   addr, /* addr1 */
                   FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, /* op */
                   count, /* val */
                   nullptr, /* timeout */
                   nullptr, /* addr2 */
                   wakeMask); /* val3 */
  if (rv < 0) {
    // ignore errors on wake (see note above); report nobody woken
    return 0;
  }
  return rv;
}
/// Convert a chrono time_point into a timespec the kernel will accept.
/// Time points before the clock's epoch are clamped to the epoch, because
/// the kernel's timespec_valid requires non-negative seconds and
/// nanoseconds in [0, 1G).
template <class Clock>
struct timespec
timeSpecFromTimePoint(time_point<Clock> absTime)
{
  auto epoch = absTime.time_since_epoch();
  if (epoch.count() < 0) {
    // kernel timespec_valid requires non-negative seconds and nanos in [0,1G)
    epoch = Clock::duration::zero();
  }

  // timespec-safe seconds and nanoseconds;
  // chrono::{nano,}seconds are `long long int`
  // whereas timespec uses smaller types
  using time_t_seconds = duration<std::time_t, seconds::period>;
  using long_nanos = duration<long int, nanoseconds::period>;

  auto secs = duration_cast<time_t_seconds>(epoch);
  auto nanos = duration_cast<long_nanos>(epoch - secs);
  struct timespec result = { secs.count(), nanos.count() };
  return result;
}
95 FutexResult nativeFutexWaitImpl(void* addr,
97 time_point<system_clock>* absSystemTime,
98 time_point<steady_clock>* absSteadyTime,
100 assert(absSystemTime == nullptr || absSteadyTime == nullptr);
102 int op = FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG;
104 struct timespec* timeout = nullptr;
106 if (absSystemTime != nullptr) {
107 op |= FUTEX_CLOCK_REALTIME;
108 ts = timeSpecFromTimePoint(*absSystemTime);
110 } else if (absSteadyTime != nullptr) {
111 ts = timeSpecFromTimePoint(*absSteadyTime);
115 // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET requires an absolute timeout
116 // value - http://locklessinc.com/articles/futex_cheat_sheet/
117 int rv = syscall(__NR_futex,
121 timeout, /* timeout */
123 waitMask); /* val3 */
126 return FutexResult::AWOKEN;
130 assert(timeout != nullptr);
131 return FutexResult::TIMEDOUT;
133 return FutexResult::INTERRUPTED;
135 return FutexResult::VALUE_CHANGED;
138 // EINVAL, EACCESS, or EFAULT. EINVAL means there was an invalid
139 // op (should be impossible) or an invalid timeout (should have
140 // been sanitized by timeSpecFromTimePoint). EACCESS or EFAULT
141 // means *addr points to invalid memory, which is unlikely because
142 // the caller should have segfaulted already. We can either
143 // crash, or return a value that lets the process continue for
144 // a bit. We choose the latter. VALUE_CHANGED probably turns the
145 // caller into a spin lock.
146 return FutexResult::VALUE_CHANGED;
153 ///////////////////////////////////////////////////////
154 // compatibility implementation using standard C++ API
// Our emulated futex uses 4096 lists of wait nodes.  There are two levels
// of locking: a per-list mutex that controls access to the list and a
// per-node mutex, condvar, and bool that are used for the actual wakeups.
// The per-node mutex allows us to do precise wakeups without thundering
// herds.
162 struct EmulatedFutexWaitNode : public boost::intrusive::list_base_hook<> {
164 const uint32_t waitMask_;
166 // tricky: hold both bucket and node mutex to write, either to read
169 std::condition_variable cond_;
171 EmulatedFutexWaitNode(void* addr, uint32_t waitMask)
173 , waitMask_(waitMask)
179 struct EmulatedFutexBucket {
181 boost::intrusive::list<EmulatedFutexWaitNode> waiters_;
183 static const size_t kNumBuckets = 4096;
184 static EmulatedFutexBucket* gBuckets;
185 static std::once_flag gBucketInit;
187 static EmulatedFutexBucket& bucketFor(void* addr) {
188 std::call_once(gBucketInit, [](){
189 gBuckets = new EmulatedFutexBucket[kNumBuckets];
191 uint64_t mixedBits = folly::hash::twang_mix64(
192 reinterpret_cast<uintptr_t>(addr));
193 return gBuckets[mixedBits % kNumBuckets];
197 EmulatedFutexBucket* EmulatedFutexBucket::gBuckets;
198 std::once_flag EmulatedFutexBucket::gBucketInit;
200 int emulatedFutexWake(void* addr, int count, uint32_t waitMask) {
201 auto& bucket = EmulatedFutexBucket::bucketFor(addr);
202 std::unique_lock<std::mutex> bucketLock(bucket.mutex_);
205 for (auto iter = bucket.waiters_.begin();
206 numAwoken < count && iter != bucket.waiters_.end(); ) {
208 auto& node = *iter++;
209 if (node.addr_ == addr && (node.waitMask_ & waitMask) != 0) {
212 // we unlink, but waiter destroys the node
213 bucket.waiters_.erase(current);
215 std::unique_lock<std::mutex> nodeLock(node.mutex_);
216 node.signaled_ = true;
217 node.cond_.notify_one();
223 FutexResult emulatedFutexWaitImpl(
226 time_point<system_clock>* absSystemTime,
227 time_point<steady_clock>* absSteadyTime,
229 auto& bucket = EmulatedFutexBucket::bucketFor(addr);
230 EmulatedFutexWaitNode node(addr, waitMask);
233 std::unique_lock<std::mutex> bucketLock(bucket.mutex_);
236 memcpy(&actual, addr, sizeof(uint32_t));
237 if (actual != expected) {
238 return FutexResult::VALUE_CHANGED;
241 bucket.waiters_.push_back(node);
242 } // bucketLock scope
244 std::cv_status status = std::cv_status::no_timeout;
246 std::unique_lock<std::mutex> nodeLock(node.mutex_);
247 while (!node.signaled_ && status != std::cv_status::timeout) {
248 if (absSystemTime != nullptr) {
249 status = node.cond_.wait_until(nodeLock, *absSystemTime);
250 } else if (absSteadyTime != nullptr) {
251 status = node.cond_.wait_until(nodeLock, *absSteadyTime);
253 node.cond_.wait(nodeLock);
258 if (status == std::cv_status::timeout) {
259 // it's not really a timeout until we unlink the unsignaled node
260 std::unique_lock<std::mutex> bucketLock(bucket.mutex_);
261 if (!node.signaled_) {
262 bucket.waiters_.erase(bucket.waiters_.iterator_to(node));
263 return FutexResult::TIMEDOUT;
266 return FutexResult::AWOKEN;
272 /////////////////////////////////
273 // Futex<> specializations
277 Futex<std::atomic>::futexWake(int count, uint32_t wakeMask) {
279 return nativeFutexWake(this, count, wakeMask);
281 return emulatedFutexWake(this, count, wakeMask);
287 Futex<EmulatedFutexAtomic>::futexWake(int count, uint32_t wakeMask) {
288 return emulatedFutexWake(this, count, wakeMask);
293 Futex<std::atomic>::futexWaitImpl(uint32_t expected,
294 time_point<system_clock>* absSystemTime,
295 time_point<steady_clock>* absSteadyTime,
298 return nativeFutexWaitImpl(
299 this, expected, absSystemTime, absSteadyTime, waitMask);
301 return emulatedFutexWaitImpl(
302 this, expected, absSystemTime, absSteadyTime, waitMask);
308 Futex<EmulatedFutexAtomic>::futexWaitImpl(
310 time_point<system_clock>* absSystemTime,
311 time_point<steady_clock>* absSteadyTime,
313 return emulatedFutexWaitImpl(
314 this, expected, absSystemTime, absSteadyTime, waitMask);
317 }} // namespace folly::detail