/*
- * Copyright 2015 Facebook, Inc.
+ * Copyright 2014-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <atomic>
#include <mutex>
#include <stdexcept>
+#include <utility>
#include <vector>
-#include <folly/Optional.h>
-#include <folly/SmallLocks.h>
-
-#include <folly/futures/Try.h>
-#include <folly/futures/Promise.h>
-#include <folly/futures/Future.h>
#include <folly/Executor.h>
+#include <folly/Function.h>
+#include <folly/MicroSpinLock.h>
+#include <folly/Optional.h>
+#include <folly/ScopeGuard.h>
+#include <folly/Try.h>
+#include <folly/Utility.h>
+#include <folly/futures/FutureException.h>
#include <folly/futures/detail/FSM.h>
+#include <folly/portability/BitsFunctexcept.h>
#include <folly/io/async/Request.h>
-namespace folly { namespace detail {
+namespace folly {
+namespace futures {
+namespace detail {
/*
OnlyCallback
/// first blush, but it's the same principle. In general, as long as the user
/// doesn't access a Future or Promise object from more than one thread at a
/// time there won't be any problems.
-template<typename T>
-class Core {
+template <typename T>
+class Core final {
+ static_assert(!std::is_void<T>::value,
+ "void futures are not supported. Use Unit instead.");
public:
/// This must be heap-constructed. There's probably a way to enforce that in
/// code but since this is just internal detail code and I don't know how
/// off-hand, I'm punting.
- Core() {}
+ Core() : result_(), fsm_(State::Start), attached_(2) {}
explicit Core(Try<T>&& t)
: result_(std::move(t)),
fsm_(State::OnlyResult),
attached_(1) {}
+ template <typename... Args>
+ explicit Core(in_place_t, Args&&... args) noexcept(
+ std::is_nothrow_constructible<T, Args&&...>::value)
+ : result_(in_place, in_place, std::forward<Args>(args)...),
+ fsm_(State::OnlyResult),
+ attached_(1) {}
+
~Core() {
DCHECK(attached_ == 0);
}
Core& operator=(Core&&) = delete;
/// May call from any thread
- bool hasResult() const {
+ bool hasResult() const noexcept {
switch (fsm_.getState()) {
case State::OnlyResult:
case State::Armed:
}
/// May call from any thread
- bool ready() const {
+ bool ready() const noexcept {
return hasResult();
}
if (ready()) {
return *result_;
} else {
- throw FutureNotReady();
+ throwFutureNotReady();
}
}
- template <typename F>
- class LambdaBufHelper {
- public:
- explicit LambdaBufHelper(F&& func) : func_(std::forward<F>(func)) {}
- void operator()(Try<T>&& t) {
- SCOPE_EXIT { this->~LambdaBufHelper(); };
- func_(std::move(t));
- }
- private:
- F func_;
- };
-
/// Call only from Future thread.
template <typename F>
- void setCallback(F func) {
+ void setCallback(F&& func) {
bool transitionToArmed = false;
auto setCallback_ = [&]{
context_ = RequestContext::saveContext();
-
- // Move the lambda into the Core if it fits
- if (sizeof(LambdaBufHelper<F>) <= lambdaBufSize) {
- auto funcLoc = static_cast<LambdaBufHelper<F>*>((void*)lambdaBuf_);
- new (funcLoc) LambdaBufHelper<F>(std::forward<F>(func));
- callback_ = std::ref(*funcLoc);
- } else {
- callback_ = std::move(func);
- }
+ callback_ = std::forward<F>(func);
};
FSM_START(fsm_)
case State::OnlyCallback:
case State::Armed:
case State::Done:
- throw std::logic_error("setCallback called twice");
+ std::__throw_logic_error("setCallback called twice");
FSM_END
// we could always call this, it is an optimization to only call it when
case State::OnlyResult:
case State::Armed:
case State::Done:
- throw std::logic_error("setResult called twice");
+ std::__throw_logic_error("setResult called twice");
FSM_END
if (transitionToArmed) {
// detachPromise() and setResult() should never be called in parallel
// so we don't need to protect this.
if (UNLIKELY(!result_)) {
- setResult(Try<T>(exception_wrapper(BrokenPromise())));
+ setResult(Try<T>(exception_wrapper(BrokenPromise(typeid(T).name()))));
}
detachOne();
}
interruptLock_.lock();
}
if (!interrupt_ && !hasResult()) {
- interrupt_ = folly::make_unique<exception_wrapper>(std::move(e));
+ interrupt_ = std::make_unique<exception_wrapper>(std::move(e));
if (interruptHandler_) {
interruptHandler_(*interrupt_);
}
interruptHandler_ = std::move(fn);
}
- protected:
+ private:
+  // RAII helper that holds a pointer to the owning `Core`; its destructor
+  // calls `derefCallback` and `detachOne`, releasing one callback reference
+  // and one attachment. A moved-from instance holds nullptr and releases
+  // nothing, so ownership transfers cleanly into the executor lambda.
+ class CoreAndCallbackReference {
+ public:
+ explicit CoreAndCallbackReference(Core* core) noexcept : core_(core) {}
+
+ ~CoreAndCallbackReference() {
+ if (core_) {
+ core_->derefCallback();
+ core_->detachOne();
+ }
+ }
+
+ CoreAndCallbackReference(CoreAndCallbackReference const& o) = delete;
+ CoreAndCallbackReference& operator=(CoreAndCallbackReference const& o) =
+ delete;
+
+ CoreAndCallbackReference(CoreAndCallbackReference&& o) noexcept {
+ std::swap(core_, o.core_);
+ }
+
+ Core* getCore() const noexcept {
+ return core_;
+ }
+
+ private:
+ Core* core_{nullptr};
+ };
+
void maybeCallback() {
FSM_START(fsm_)
case State::Armed:
}
void doCallback() {
- RequestContext::setContext(context_);
-
Executor* x = executor_;
- int8_t priority;
+ // initialize, solely to appease clang's -Wconditional-uninitialized
+ int8_t priority = 0;
if (x) {
if (!executorLock_.try_lock()) {
executorLock_.lock();
}
if (x) {
- // keep Core alive until executor did its thing
- ++attached_;
+ exception_wrapper ew;
+ // We need to reset `callback_` after it was executed (which can happen
+ // through the executor or, if `Executor::add` throws, below). The
+ // executor might discard the function without executing it (now or
+ // later), in which case `callback_` also needs to be reset.
+ // The `Core` has to be kept alive throughout that time, too. Hence we
+ // increment `attached_` and `callbackReferences_` by two, and construct
+ // exactly two `CoreAndCallbackReference` objects, which call
+ // `derefCallback` and `detachOne` in their destructor. One will guard
+ // this scope, the other one will guard the lambda passed to the executor.
+ attached_ += 2;
+ callbackReferences_ += 2;
+ CoreAndCallbackReference guard_local_scope(this);
+ CoreAndCallbackReference guard_lambda(this);
try {
if (LIKELY(x->getNumPriorities() == 1)) {
- x->add([this]() mutable {
- SCOPE_EXIT { detachOne(); };
- callback_(std::move(*result_));
+ x->add([core_ref = std::move(guard_lambda)]() mutable {
+ auto cr = std::move(core_ref);
+ Core* const core = cr.getCore();
+ RequestContextScopeGuard rctx(core->context_);
+ core->callback_(std::move(*core->result_));
});
} else {
- x->addWithPriority([this]() mutable {
- SCOPE_EXIT { detachOne(); };
- callback_(std::move(*result_));
- }, priority);
+ x->addWithPriority(
+ [core_ref = std::move(guard_lambda)]() mutable {
+ auto cr = std::move(core_ref);
+ Core* const core = cr.getCore();
+ RequestContextScopeGuard rctx(core->context_);
+ core->callback_(std::move(*core->result_));
+ },
+ priority);
}
+ } catch (const std::exception& e) {
+ ew = exception_wrapper(std::current_exception(), e);
} catch (...) {
- result_ = Try<T>(exception_wrapper(std::current_exception()));
+ ew = exception_wrapper(std::current_exception());
+ }
+ if (ew) {
+ RequestContextScopeGuard rctx(context_);
+ result_ = Try<T>(std::move(ew));
callback_(std::move(*result_));
}
} else {
+ attached_++;
+ SCOPE_EXIT {
+ callback_ = {};
+ detachOne();
+ };
+ RequestContextScopeGuard rctx(context_);
callback_(std::move(*result_));
}
}
void detachOne() {
- auto a = --attached_;
- assert(a >= 0);
- assert(a <= 2);
- if (a == 0) {
+ auto a = attached_--;
+ assert(a >= 1);
+ if (a == 1) {
delete this;
}
}
- // lambdaBuf occupies exactly one cache line
- static constexpr size_t lambdaBufSize = 8 * sizeof(void*);
- char lambdaBuf_[lambdaBufSize];
+ void derefCallback() {
+ if (--callbackReferences_ == 0) {
+ callback_ = {};
+ }
+ }
+
+ folly::Function<void(Try<T>&&)> callback_;
// place result_ next to increase the likelihood that the value will be
// contained entirely in one cache line
- folly::Optional<Try<T>> result_ {};
- std::function<void(Try<T>&&)> callback_ {nullptr};
- FSM<State> fsm_ {State::Start};
- std::atomic<unsigned char> attached_ {2};
+ folly::Optional<Try<T>> result_;
+ FSM<State> fsm_;
+ std::atomic<unsigned char> attached_;
+ std::atomic<unsigned char> callbackReferences_{0};
std::atomic<bool> active_ {true};
std::atomic<bool> interruptHandlerSet_ {false};
folly::MicroSpinLock interruptLock_ {0};
std::function<void(exception_wrapper const&)> interruptHandler_ {nullptr};
};
-template <typename... Ts>
-struct CollectAllVariadicContext {
- CollectAllVariadicContext() {}
- template <typename T, size_t I>
- inline void setPartialResult(Try<T>& t) {
- std::get<I>(results) = std::move(t);
- }
- ~CollectAllVariadicContext() {
- p.setValue(std::move(results));
- }
- Promise<std::tuple<Try<Ts>...>> p;
- std::tuple<Try<Ts>...> results;
- typedef Future<std::tuple<Try<Ts>...>> type;
-};
-
-template <typename... Ts>
-struct CollectVariadicContext {
- CollectVariadicContext() {}
- template <typename T, size_t I>
- inline void setPartialResult(Try<T>& t) {
- if (t.hasException()) {
- if (!threw.exchange(true)) {
- p.setException(std::move(t.exception()));
- }
- } else if (!threw) {
- std::get<I>(results) = std::move(t.value());
- }
- }
- ~CollectVariadicContext() {
- if (!threw.exchange(true)) {
- p.setValue(std::move(results));
- }
- }
- Promise<std::tuple<Ts...>> p;
- std::tuple<Ts...> results;
- std::atomic<bool> threw;
- typedef Future<std::tuple<Ts...>> type;
-};
-
-template <template <typename ...> class T, typename... Ts>
-void collectVariadicHelper(const std::shared_ptr<T<Ts...>>& ctx) {
+template <template <typename...> class T, typename... Ts>
+void collectVariadicHelper(const std::shared_ptr<T<Ts...>>& /* ctx */) {
// base case
}
-template <template <typename ...> class T, typename... Ts,
- typename THead, typename... TTail>
+template <
+ template <typename...> class T,
+ typename... Ts,
+ typename THead,
+ typename... TTail>
void collectVariadicHelper(const std::shared_ptr<T<Ts...>>& ctx,
THead&& head, TTail&&... tail) {
- head.setCallback_([ctx](Try<typename THead::value_type>&& t) {
- ctx->template setPartialResult<typename THead::value_type,
- sizeof...(Ts) - sizeof...(TTail) - 1>(t);
+ using ValueType = typename std::decay<THead>::type::value_type;
+ std::forward<THead>(head).setCallback_([ctx](Try<ValueType>&& t) {
+ ctx->template setPartialResult<
+ ValueType,
+ sizeof...(Ts) - sizeof...(TTail)-1>(t);
});
// template tail-recursion
collectVariadicHelper(ctx, std::forward<TTail>(tail)...);
}
-}} // folly::detail
+} // namespace detail
+} // namespace futures
+} // namespace folly