#include <stdexcept>
#include <vector>
-#include <folly/Optional.h>
+#include <folly/Executor.h>
+#include <folly/Function.h>
#include <folly/MicroSpinLock.h>
-
-#include <folly/futures/Try.h>
-#include <folly/futures/Promise.h>
+#include <folly/Optional.h>
#include <folly/futures/Future.h>
-#include <folly/Executor.h>
+#include <folly/futures/Promise.h>
+#include <folly/futures/Try.h>
#include <folly/futures/detail/FSM.h>
#include <folly/io/async/Request.h>
Core(Core&&) noexcept = delete;
Core& operator=(Core&&) = delete;
+ // Core is assumed to be convertible only if the type is convertible
+ // and the size is the same. This is a compromise for the complexity
+ // of having to make Core truly have a conversion constructor which
+ // would cause various other problems.
+ // If we made Core move constructible then we would need to update the
+ // Promise and Future with the location of the new Core. This is complex
+ // and may be inefficient.
+ // Core should only be modified so that for sizeof(T) == sizeof(U),
+ // sizeof(Core<T>) == sizeof(Core<U>).
+ // This assumption is used as a proxy to make sure that
+ // the members of Core<T> and Core<U> line up so that we can use a
+ // reinterpret cast.
+ // NOTE(review): a reinterpret_cast between distinct Core<T>/Core<U>
+ // instantiations is not sanctioned by the standard even when sizes
+ // match; this relies on the layout invariant above holding — confirm
+ // it is enforced (e.g. by a static_assert) wherever Core's members
+ // change.
+ template <
+ class U,
+ typename = typename std::enable_if<std::is_convertible<U, T>::value &&
+ sizeof(U) == sizeof(T)>::type>
+ static Core<T>* convert(Core<U>* from) {
+ return reinterpret_cast<Core<T>*>(from);
+ }
+
/// May call from any thread
bool hasResult() const {
switch (fsm_.getState()) {
}
}
- template <typename F>
- class LambdaBufHelper {
- public:
- template <typename FF>
- explicit LambdaBufHelper(FF&& func) : func_(std::forward<FF>(func)) {}
- void operator()(Try<T>&& t) {
- SCOPE_EXIT { this->~LambdaBufHelper(); };
- func_(std::move(t));
- }
- private:
- F func_;
- };
-
/// Call only from Future thread.
template <typename F>
- void setCallback(F func) {
+ void setCallback(F&& func) {
bool transitionToArmed = false;
auto setCallback_ = [&]{
context_ = RequestContext::saveContext();
-
- // Move the lambda into the Core if it fits
- if (sizeof(LambdaBufHelper<F>) <= lambdaBufSize) {
- auto funcLoc = reinterpret_cast<LambdaBufHelper<F>*>(&lambdaBuf_);
- new (funcLoc) LambdaBufHelper<F>(std::forward<F>(func));
- callback_ = std::ref(*funcLoc);
- } else {
- callback_ = std::move(func);
- }
+ callback_ = std::forward<F>(func);
};
FSM_START(fsm_)
executorLock_.unlock();
}
+ // keep Core alive until callback did its thing
+ ++attached_;
+
if (x) {
- // keep Core alive until executor did its thing
- ++attached_;
try {
if (LIKELY(x->getNumPriorities() == 1)) {
x->add([this]() mutable {
callback_(std::move(*result_));
}
} else {
+ SCOPE_EXIT { detachOne(); };
RequestContext::setContext(context_);
SCOPE_EXIT { callback_ = {}; };
callback_(std::move(*result_));
}
}
- // lambdaBuf occupies exactly one cache line
- static constexpr size_t lambdaBufSize = 8 * sizeof(void*);
- typename std::aligned_storage<lambdaBufSize>::type lambdaBuf_;
+ // Core should only be modified so that for sizeof(T) == sizeof(U),
+ // sizeof(Core<T>) == sizeof(Core<U>).
+ // See Core::convert for details.
+
+ folly::Function<
+ void(Try<T>&&),
+ folly::FunctionMoveCtor::MAY_THROW,
+ 8 * sizeof(void*)>
+ callback_;
// place result_ next to increase the likelihood that the value will be
// contained entirely in one cache line
folly::Optional<Try<T>> result_;
- std::function<void(Try<T>&&)> callback_ {nullptr};
FSM<State> fsm_;
std::atomic<unsigned char> attached_;
std::atomic<bool> active_ {true};