/*
 * Copyright 2012-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <atomic>
#include <climits>
#include <thread>

#include <glog/logging.h>

#include <folly/Likely.h>
#include <folly/detail/Futex.h>
#include <folly/lang/Bits.h>
#include <folly/portability/SysTime.h>
#include <folly/portability/Unistd.h>
namespace folly {
/**
* Event count: a condition variable for lock free algorithms.
*/
class EventCount {
public:
- EventCount() noexcept : epoch_(0), waiters_(0) { }
+ EventCount() noexcept : val_(0) { }
class Key {
friend class EventCount;
- explicit Key(int e) noexcept : epoch_(e) { }
- int epoch_;
+ explicit Key(uint32_t e) noexcept : epoch_(e) { }
+ uint32_t epoch_;
};
void notify() noexcept;
EventCount& operator=(const EventCount&) = delete;
EventCount& operator=(EventCount&&) = delete;
- std::atomic<int> epoch_;
- std::atomic<int> waiters_;
+ // This requires 64-bit
+ static_assert(sizeof(int) == 4, "bad platform");
+ static_assert(sizeof(uint32_t) == 4, "bad platform");
+ static_assert(sizeof(uint64_t) == 8, "bad platform");
+ static_assert(sizeof(std::atomic<uint64_t>) == 8, "bad platform");
+ static_assert(sizeof(detail::Futex<std::atomic>) == 4, "bad platform");
+
+ static constexpr size_t kEpochOffset = kIsLittleEndian ? 1 : 0;
+
+ // val_ stores the epoch in the most significant 32 bits and the
+ // waiter count in the least significant 32 bits.
+ std::atomic<uint64_t> val_;
+
+ static constexpr uint64_t kAddWaiter = uint64_t(1);
+ static constexpr uint64_t kSubWaiter = uint64_t(-1);
+ static constexpr size_t kEpochShift = 32;
+ static constexpr uint64_t kAddEpoch = uint64_t(1) << kEpochShift;
+ static constexpr uint64_t kWaiterMask = kAddEpoch - 1;
};
inline void EventCount::notify() noexcept {
}
inline void EventCount::doNotify(int n) noexcept {
- // The order is important: epoch_ is incremented before waiters_ is checked.
- // prepareWait() increments waiters_ before checking epoch_, so it is
- // impossible to miss a wakeup.
- ++epoch_;
- if (waiters_ != 0) {
- detail::futex(reinterpret_cast<int*>(&epoch_), FUTEX_WAKE, n, nullptr,
- nullptr, 0);
+ uint64_t prev = val_.fetch_add(kAddEpoch, std::memory_order_acq_rel);
+ if (UNLIKELY(prev & kWaiterMask)) {
+ (reinterpret_cast<detail::Futex<std::atomic>*>(&val_) + kEpochOffset)
+ ->futexWake(n);
}
}
inline EventCount::Key EventCount::prepareWait() noexcept {
  // Register as a waiter and capture the epoch in one atomic step: the
  // waiter count is visible to doNotify() before we ever block, so a
  // concurrent notification cannot be lost.
  uint64_t prev = val_.fetch_add(kAddWaiter, std::memory_order_acq_rel);
  return Key(prev >> kEpochShift);
}
inline void EventCount::cancelWait() noexcept {
- --waiters_;
+ // memory_order_relaxed would suffice for correctness, but the faster
+ // #waiters gets to 0, the less likely it is that we'll do spurious wakeups
+ // (and thus system calls).
+ uint64_t prev = val_.fetch_add(kSubWaiter, std::memory_order_seq_cst);
+ DCHECK_NE((prev & kWaiterMask), 0);
}
inline void EventCount::wait(Key key) noexcept {
  // Block while the epoch still matches the one captured by prepareWait().
  // futexWait may return spuriously, so the epoch is re-checked every pass.
  while ((val_.load(std::memory_order_acquire) >> kEpochShift) == key.epoch_) {
    (reinterpret_cast<detail::Futex<std::atomic>*>(&val_) + kEpochOffset)
        ->futexWait(key.epoch_);
  }
  // memory_order_relaxed would suffice for correctness, but the faster
  // #waiters gets to 0, the less likely it is that we'll do spurious wakeups
  // (and thus system calls)
  uint64_t prev = val_.fetch_add(kSubWaiter, std::memory_order_seq_cst);
  // A wait without a matching prepareWait() would underflow the count.
  DCHECK_NE((prev & kWaiterMask), 0);
}
template <class Condition>
void EventCount::await(Condition condition) {
- if (condition()) return; // fast path
+ if (condition()) {
+ return; // fast path
+ }
// condition() is the only thing that may throw, everything else is
// noexcept, so we can hoist the try/catch block outside of the loop
}
}
} // namespace folly