2 * Copyright 2016 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
24 #include <folly/Executor.h>
25 #include <folly/Function.h>
26 #include <folly/MicroSpinLock.h>
27 #include <folly/Optional.h>
28 #include <folly/ScopeGuard.h>
29 #include <folly/Try.h>
30 #include <folly/futures/Future.h>
31 #include <folly/futures/Promise.h>
32 #include <folly/futures/detail/FSM.h>
34 #include <folly/io/async/Request.h>
36 namespace folly { namespace detail {
45 This state machine is fairly self-explanatory. The most important bit is
46 that the callback is only executed on the transition from Armed to Done,
47 and that transition can happen immediately after transitioning from Only*
48 to Armed, if it is active (the usual case).
// FSM states of the shared core. Enumerators are elided in this view, but the
// code below references Start, OnlyResult, OnlyCallback, Armed, and Done.
// The callback runs exactly once, on the Armed -> Done transition.
// NOTE(review): enumerator list and closing brace are not visible in this
// chunk — confirm against the full file.
50 enum class State : uint8_t {
58 /// The shared state object for Future and Promise.
59 /// Some methods must only be called by either the Future thread or the
60 /// Promise thread. The Future thread is the thread that currently "owns" the
61 /// Future and its callback-related operations, and the Promise thread is
62 /// likewise the thread that currently "owns" the Promise and its
63 /// result-related operations. Also, Futures own interruption, Promises own
64 /// interrupt handlers. Unfortunately, there are things that users can do to
65 /// break this, and we can't detect that. However if they follow move
66 /// semantics religiously wrt threading, they should be ok.
68 /// It's worth pointing out that Futures and/or Promises can and usually will
69 /// migrate between threads, though this usually happens within the API code.
70 /// For example, an async operation will probably make a Promise, grab its
71 /// Future, then move the Promise into another thread that will eventually
72 /// fulfill it. With executors and via, this gets slightly more complicated at
73 /// first blush, but it's the same principle. In general, as long as the user
74 /// doesn't access a Future or Promise object from more than one thread at a
75 /// time there won't be any problems.
// Core<void> is rejected at compile time; callers must use Core<Unit>.
78 static_assert(!std::is_void<T>::value,
79 "void futures are not supported. Use Unit instead.");
81 /// This must be heap-constructed. There's probably a way to enforce that in
82 /// code but since this is just internal detail code and I don't know how
83 /// off-hand, I'm punting.
// Fresh core: no result, FSM in Start. attached_ starts at 2 because both a
// Future and a Promise are expected to hold a reference to this core.
84 Core() : result_(), fsm_(State::Start), attached_(2) {}
// Construct an already-fulfilled core: the result is moved in and the FSM
// starts in OnlyResult, so only a Future (no Promise) will ever attach.
// NOTE(review): the attached_ initializer and the closing of this ctor are
// elided in this view; the DCHECK below appears to belong to the destructor
// (all references must be released before destruction) — confirm against the
// full file.
86 explicit Core(Try<T>&& t)
87 : result_(std::move(t)),
88 fsm_(State::OnlyResult),
92 DCHECK(attached_ == 0);
// Core is pinned in memory: Futures and Promises hold raw pointers to it, so
// it must never be copied or moved.
96 Core(Core const&) = delete;
97 Core& operator=(Core const&) = delete;
99 // not movable (see comment in the implementation of Future::then)
100 Core(Core&&) noexcept = delete;
101 Core& operator=(Core&&) = delete;
103 // Core is assumed to be convertible only if the type is convertible
104 // and the size is the same. This is a compromise for the complexity
105 // of having to make Core truly have a conversion constructor which
106 // would cause various other problems.
107 // If we made Core move constructible then we would need to update the
108 // Promise and Future with the location of the new Core. This is complex
109 // and may be inefficient.
110 // Core should only be modified so that for size(T) == size(U),
111 // sizeof(Core<T>) == size(Core<U>).
112 // This assumption is used as a proxy to make sure that
113 // the members of Core<T> and Core<U> line up so that we can use a
// NOTE(review): the `template <typename U, ...>` opener and the function's
// closing brace are elided in this view. The reinterpret_cast is deliberate
// and relies on the sizeof(Core<T>) == sizeof(Core<U>) invariant documented
// above — it is technically UB by the standard, tolerated as an internal
// folly compromise.
117 typename = typename std::enable_if<std::is_convertible<U, T>::value &&
118 sizeof(U) == sizeof(T)>::type>
119 static Core<T>* convert(Core<U>* from) {
120 return reinterpret_cast<Core<T>*>(from);
123 /// May call from any thread
// True once a result has been stored (OnlyResult, and presumably Armed/Done —
// the remaining case labels and return statements are elided in this view).
124 bool hasResult() const {
125 switch (fsm_.getState()) {
126 case State::OnlyResult:
// NOTE(review): two method bodies are elided between these doc comments —
// presumably ready() and getTry(); only getTry's not-ready throw survives in
// this view. Confirm signatures against the full file.
137 /// May call from any thread
142 /// May call from any thread
147 throw FutureNotReady();
151 /// Call only from Future thread.
// Stores the continuation. If a result is already present (OnlyResult), the
// FSM goes straight to Armed and the callback is dispatched below; otherwise
// the core parks in OnlyCallback waiting for setResult().
// NOTE(review): the FSM_START/switch opener, the Start case, break statements,
// the maybeCallback() call guarded by transitionToArmed, and the closing
// braces are elided in this view.
152 template <typename F>
153 void setCallback(F&& func) {
154 bool transitionToArmed = false;
// Capture the current RequestContext alongside the callback so it can be
// restored when the callback eventually runs (see doCallback).
155 auto setCallback_ = [&]{
156 context_ = RequestContext::saveContext();
157 callback_ = std::forward<F>(func);
162 FSM_UPDATE(fsm_, State::OnlyCallback, setCallback_);
165 case State::OnlyResult:
166 FSM_UPDATE(fsm_, State::Armed, setCallback_);
167 transitionToArmed = true;
// Re-arming with a second callback is a logic error: the FSM allows exactly
// one callback per core.
170 case State::OnlyCallback:
173 throw std::logic_error("setCallback called twice");
176 // we could always call this, it is an optimization to only call it when
177 // it might be needed.
178 if (transitionToArmed) {
183 /// Call only from Promise thread
// Mirror image of setCallback: stores the result; if a callback is already
// waiting (OnlyCallback) the FSM moves to Armed and the callback is
// dispatched.
// NOTE(review): the FSM_START/switch opener, the Start case, breaks,
// the maybeCallback() call and closing braces are elided in this view.
184 void setResult(Try<T>&& t) {
185 bool transitionToArmed = false;
186 auto setResult_ = [&]{ result_ = std::move(t); };
189 FSM_UPDATE(fsm_, State::OnlyResult, setResult_);
192 case State::OnlyCallback:
193 FSM_UPDATE(fsm_, State::Armed, setResult_);
194 transitionToArmed = true;
// Fulfilling a core twice is a logic error — the Promise contract is
// single-shot.
197 case State::OnlyResult:
200 throw std::logic_error("setResult called twice");
203 if (transitionToArmed) {
208 /// Called by a destructing Future (in the Future thread, by definition)
// NOTE(review): body elided in this view — presumably releases the Future's
// reference via the attached_ counter (cf. `attached_--` in detachOne below);
// confirm against the full file.
209 void detachFuture() {
214 /// Called by a destructing Promise (in the Promise thread, by definition)
// If the Promise dies without ever fulfilling, fabricate a BrokenPromise
// result so any waiting Future still completes instead of hanging.
// NOTE(review): the closing braces (and presumably a detachOne() call) are
// elided in this view.
215 void detachPromise() {
216 // detachPromise() and setResult() should never be called in parallel
217 // so we don't need to protect this.
218 if (UNLIKELY(!result_)) {
219 setResult(Try<T>(exception_wrapper(BrokenPromise(typeid(T).name()))));
// NOTE(review): the deactivate()/activate() signatures are elided in this
// view; only their bodies survive. They toggle the active_ flag that gates
// callback execution in maybeCallback(); release stores pair with the acquire
// loads in isActive()/maybeCallback().
224 /// May call from any thread
226 active_.store(false, std::memory_order_release);
229 /// May call from any thread
231 active_.store(true, std::memory_order_release);
235 /// May call from any thread
236 bool isActive() { return active_.load(std::memory_order_acquire); }
238 /// Call only from Future thread
// Records the executor (and priority) on which the callback should run.
// The try_lock-then-lock dance spins on the MicroSpinLock.
// NOTE(review): the `executor_ = x;` assignment and the closing braces are
// elided in this view for both overloads — confirm against the full file.
239 void setExecutor(Executor* x, int8_t priority = Executor::MID_PRI) {
240 if (!executorLock_.try_lock()) {
241 executorLock_.lock();
244 priority_ = priority;
245 executorLock_.unlock();
// Lock-free variant for callers that already hold executorLock_.
248 void setExecutorNoLock(Executor* x, int8_t priority = Executor::MID_PRI) {
250 priority_ = priority;
// NOTE(review): body elided in this view — presumably returns executor_;
// confirm whether it takes executorLock_ in the full file.
253 Executor* getExecutor() {
257 /// Call only from Future thread
// Delivers an interrupt to the Promise side. Only the FIRST interrupt is
// recorded, and only while no result exists yet; the handler (if one has been
// registered) is invoked immediately, under interruptLock_.
// NOTE(review): closing braces for the if-block and the function are elided
// in this view.
258 void raise(exception_wrapper e) {
259 if (!interruptLock_.try_lock()) {
260 interruptLock_.lock();
262 if (!interrupt_ && !hasResult()) {
263 interrupt_ = folly::make_unique<exception_wrapper>(std::move(e));
264 if (interruptHandler_) {
265 interruptHandler_(*interrupt_);
268 interruptLock_.unlock();
// Returns the registered interrupt handler, if any. The atomic flag is a
// fast-path check that avoids taking the spinlock when no handler was ever
// set.
// NOTE(review): both return statements (early nullptr return and `return
// handler;`) are elided in this view.
271 std::function<void(exception_wrapper const&)> getInterruptHandler() {
272 if (!interruptHandlerSet_.load(std::memory_order_acquire)) {
275 if (!interruptLock_.try_lock()) {
276 interruptLock_.lock();
// Copy the handler out while holding the lock so the caller can invoke it
// after unlock without racing setInterruptHandler.
278 auto handler = interruptHandler_;
279 interruptLock_.unlock();
283 /// Call only from Promise thread
// Registers the interrupt handler under interruptLock_.
// NOTE(review): lines between lock acquisition and the NoLock call are elided
// in this view — the full file presumably checks hasResult()/interrupt_ (and
// may invoke fn immediately if an interrupt is already pending); confirm.
284 void setInterruptHandler(std::function<void(exception_wrapper const&)> fn) {
285 if (!interruptLock_.try_lock()) {
286 interruptLock_.lock();
292 setInterruptHandlerNoLock(std::move(fn));
295 interruptLock_.unlock();
// Caller must hold interruptLock_. Publishes the "a handler exists" flag
// (relaxed is sufficient: the flag is only an optimization hint read in
// getInterruptHandler, which re-checks under the lock) and stores the handler.
// NOTE(review): closing brace elided in this view.
298 void setInterruptHandlerNoLock(
299 std::function<void(exception_wrapper const&)> fn) {
300 interruptHandlerSet_.store(true, std::memory_order_relaxed);
301 interruptHandler_ = std::move(fn);
// RAII handle that keeps the Core alive while a callback is queued on an
// executor. Copyable only because std::function (used by Executor::add)
// requires copyable callables.
// NOTE(review): this class is heavily elided in this view — the attach/detach
// refcount calls in ctor/dtor, assertion bodies, `return *this;` statements,
// getCore's body, and closing braces are all missing; confirm against the
// full file.
305 class CountedReference {
307 ~CountedReference() {
314 explicit CountedReference(Core* core) noexcept : core_(core) {
315 // do not construct a CountedReference from nullptr!
321 // CountedReference must be copy-constructable as long as
322 // folly::Executor::add takes a std::function
323 CountedReference(CountedReference const& o) noexcept : core_(o.core_) {
// Copy-assign via destroy + placement-new copy-construct: reuses the
// copy ctor's refcount logic instead of duplicating it.
329 CountedReference& operator=(CountedReference const& o) noexcept {
331 new (this) CountedReference(o);
// Move leaves the source holding our (null) pointer, so the moved-from
// handle releases nothing.
335 CountedReference(CountedReference&& o) noexcept {
336 std::swap(core_, o.core_);
339 CountedReference& operator=(CountedReference&& o) noexcept {
341 new (this) CountedReference(std::move(o));
345 Core* getCore() const noexcept {
350 Core* core_{nullptr};
// maybeCallback: if the core is active, transition Armed -> Done and run the
// callback (the FSM guarantees this happens at most once).
// NOTE(review): this region is heavily elided — the FSM retry loop, the
// doCallback signature, the try/catch structure around the direct-invocation
// path, and most closing braces are missing from this view.
353 void maybeCallback() {
356 if (active_.load(std::memory_order_acquire)) {
357 FSM_UPDATE2(fsm_, State::Done, []{}, [this]{ this->doCallback(); });
// doCallback: snapshot the executor/priority under the spinlock, then either
// enqueue the callback on the executor or run it inline.
367 Executor* x = executor_;
370 if (!executorLock_.try_lock()) {
371 executorLock_.lock();
374 priority = priority_;
375 executorLock_.unlock();
// Executor path: the lambda holds a CountedReference so the Core outlives
// the queued task; the saved RequestContext is restored for the callback,
// and SCOPE_EXIT clears callback_ to release anything it captured.
380 if (LIKELY(x->getNumPriorities() == 1)) {
381 x->add([core_ref = CountedReference(this)]() mutable {
382 auto cr = std::move(core_ref);
383 Core* const core = cr.getCore();
384 RequestContextScopeGuard rctx(core->context_);
385 SCOPE_EXIT { core->callback_ = {}; };
386 core->callback_(std::move(*core->result_));
// Same as above but routed through the executor's priority queue.
389 x->addWithPriority([core_ref = CountedReference(this)]() mutable {
390 auto cr = std::move(core_ref);
391 Core* const core = cr.getCore();
392 RequestContextScopeGuard rctx(core->context_);
393 SCOPE_EXIT { core->callback_ = {}; };
394 core->callback_(std::move(*core->result_));
// Fallback path: executor enqueue threw — replace the result with the
// in-flight exception and run the callback inline with that error.
398 CountedReference core_ref(this);
399 RequestContextScopeGuard rctx(context_);
400 result_ = Try<T>(exception_wrapper(std::current_exception()));
401 SCOPE_EXIT { callback_ = {}; };
402 callback_(std::move(*result_));
// No-executor path: run the callback inline on the current thread.
405 CountedReference core_ref(this);
406 RequestContextScopeGuard rctx(context_);
407 SCOPE_EXIT { callback_ = {}; };
408 callback_(std::move(*result_));
// detachOne fragment: fetch-and-decrement of the reference count.
// NOTE(review): the surrounding DCHECK and `delete this` on reaching zero are
// elided in this view — confirm against the full file.
413 auto a = attached_--;
420 // Core should only be modified so that for size(T) == size(U),
421 // sizeof(Core<T>) == size(Core<U>).
422 // See Core::convert for details.
// The continuation installed by setCallback; cleared after it runs.
424 folly::Function<void(Try<T>&&)> callback_;
425 // place result_ next to increase the likelihood that the value will be
426 // contained entirely in one cache line
427 folly::Optional<Try<T>> result_;
// Reference count: starts at 2 (Future + Promise) in the default ctor.
429 std::atomic<unsigned char> attached_;
// Gates callback execution; toggled by activate()/deactivate().
430 std::atomic<bool> active_ {true};
// Fast-path hint for getInterruptHandler(); authoritative state is
// interruptHandler_ under interruptLock_.
431 std::atomic<bool> interruptHandlerSet_ {false};
// Spinlocks: interruptLock_ guards interrupt_/interruptHandler_;
// executorLock_ guards executor_/priority_.
432 folly::MicroSpinLock interruptLock_ {0};
433 folly::MicroSpinLock executorLock_ {0};
434 int8_t priority_ {-1};
435 Executor* executor_ {nullptr};
// RequestContext captured at setCallback time, restored around the callback.
436 std::shared_ptr<RequestContext> context_ {nullptr};
// First interrupt raised, if any (only one is ever recorded).
437 std::unique_ptr<exception_wrapper> interrupt_ {};
438 std::function<void(exception_wrapper const&)> interruptHandler_ {nullptr};
// Shared state for collectAll over heterogeneous futures: each future writes
// its Try into the matching tuple slot; the destructor (run when the last
// shared_ptr owner releases it) fulfills the promise with ALL results,
// successes and failures alike.
// NOTE(review): closing braces for setPartialResult, the dtor, and the struct
// are elided in this view.
441 template <typename... Ts>
442 struct CollectAllVariadicContext {
443 CollectAllVariadicContext() {}
444 template <typename T, size_t I>
445 inline void setPartialResult(Try<T>& t) {
446 std::get<I>(results) = std::move(t);
448 ~CollectAllVariadicContext() {
449 p.setValue(std::move(results));
451 Promise<std::tuple<Try<Ts>...>> p;
452 std::tuple<Try<Ts>...> results;
453 typedef Future<std::tuple<Try<Ts>...>> type;
// Shared state for collect (fail-fast variant): the FIRST exception wins the
// atomic `threw` race and rejects the promise immediately; otherwise the
// destructor fulfills with the unwrapped (Try-stripped) value tuple. The
// exchange in the dtor also prevents double-fulfillment after an exception.
// NOTE(review): several closing braces and the second unwrap overload's
// parameter-pack parameter line are elided in this view.
456 template <typename... Ts>
457 struct CollectVariadicContext {
458 CollectVariadicContext() {}
459 template <typename T, size_t I>
460 inline void setPartialResult(Try<T>& t) {
461 if (t.hasException()) {
462 if (!threw.exchange(true)) {
463 p.setException(std::move(t.exception()));
466 std::get<I>(results) = std::move(t);
469 ~CollectVariadicContext() {
470 if (!threw.exchange(true)) {
471 p.setValue(unwrap(std::move(results)));
474 Promise<std::tuple<Ts...>> p;
475 std::tuple<folly::Try<Ts>...> results;
476 std::atomic<bool> threw {false};
477 typedef Future<std::tuple<Ts...>> type;
// Recursive unwrap: peels one Try per step, accumulating unwrapped values in
// ts2 until the pack covers the whole tuple, then the non-templated overload
// below terminates the recursion.
480 template <typename... Ts2>
481 static std::tuple<Ts...> unwrap(std::tuple<folly::Try<Ts>...>&& o,
483 static_assert(sizeof...(ts2) <
484 std::tuple_size<std::tuple<folly::Try<Ts>...>>::value,
485 "Non-templated unwrap should be used instead");
// Safe to deref: an exception would have rejected the promise before here.
486 assert(std::get<sizeof...(ts2)>(o).hasValue());
488 return unwrap(std::move(o),
489 std::forward<Ts2>(ts2)...,
490 std::move(*std::get<sizeof...(ts2)>(o)));
// Base case: every Try has been unwrapped; build the final value tuple.
493 static std::tuple<Ts...> unwrap(std::tuple<folly::Try<Ts>...>&& /* o */,
495 return std::tuple<Ts...>(std::forward<Ts>(ts)...);
// Base case of the variadic fan-out: no futures left to wire up.
// NOTE(review): the (empty) body's closing brace is elided in this view.
499 template <template <typename...> class T, typename... Ts>
500 void collectVariadicHelper(const std::shared_ptr<T<Ts...>>& /* ctx */) {
// Recursive case: attach a callback to the head future that stores its result
// into slot sizeof...(Ts) - sizeof...(TTail) - 1 (the head's position among
// the original futures), then recurse on the tail. The lambda's ctx copy
// keeps the context alive until every future has reported in.
504 template <template <typename ...> class T, typename... Ts,
505 typename THead, typename... TTail>
506 void collectVariadicHelper(const std::shared_ptr<T<Ts...>>& ctx,
507 THead&& head, TTail&&... tail) {
508 head.setCallback_([ctx](Try<typename THead::value_type>&& t) {
509 ctx->template setPartialResult<typename THead::value_type,
510 sizeof...(Ts) - sizeof...(TTail) - 1>(t);
512 // template tail-recursion
513 collectVariadicHelper(ctx, std::forward<TTail>(tail)...);