2 * Copyright 2015 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
24 #include <folly/Optional.h>
25 #include <folly/SmallLocks.h>
27 #include <folly/futures/Try.h>
28 #include <folly/futures/Promise.h>
29 #include <folly/futures/Future.h>
30 #include <folly/Executor.h>
31 #include <folly/futures/detail/FSM.h>
33 #include <folly/io/async/Request.h>
35 namespace folly { namespace detail {
44 This state machine is fairly self-explanatory. The most important bit is
45 that the callback is only executed on the transition from Armed to Done,
46 and that transition can happen immediately after transitioning from Only*
47 to Armed, if it is active (the usual case).
// State machine for the Future/Promise shared Core (see comment above).
// NOTE(review): the enumerator list is elided in this excerpt; the code
// below references Start, OnlyResult, OnlyCallback, Armed, and Done.
49 enum class State : uint8_t {
57 /// The shared state object for Future and Promise.
58 /// Some methods must only be called by either the Future thread or the
59 /// Promise thread. The Future thread is the thread that currently "owns" the
60 /// Future and its callback-related operations, and the Promise thread is
61 /// likewise the thread that currently "owns" the Promise and its
62 /// result-related operations. Also, Futures own interruption, Promises own
63 /// interrupt handlers. Unfortunately, there are things that users can do to
64 /// break this, and we can't detect that. However if they follow move
65 /// semantics religiously wrt threading, they should be ok.
67 /// It's worth pointing out that Futures and/or Promises can and usually will
68 /// migrate between threads, though this usually happens within the API code.
69 /// For example, an async operation will probably make a Promise, grab its
70 /// Future, then move the Promise into another thread that will eventually
71 /// fulfill it. With executors and via, this gets slightly more complicated at
72 /// first blush, but it's the same principle. In general, as long as the user
73 /// doesn't access a Future or Promise object from more than one thread at a
74 /// time there won't be any problems.
// Core<T> requires a non-void T; void futures must use folly::Unit.
// NOTE(review): the enclosing `template <typename T> class Core` header
// is elided from this excerpt.
77 static_assert(!std::is_void<T>::value,
78 "void futures are not supported. Use Unit instead.");
80 /// This must be heap-constructed. There's probably a way to enforce that in
81 /// code but since this is just internal detail code and I don't know how
82 /// off-hand, I'm punting.
// Default ctor: no result yet, FSM begins in Start. attached_ starts at 2
// because both a Future and a Promise hold a reference to this Core.
83 Core() : result_(), fsm_(State::Start), attached_(2) {}
// Construct an already-fulfilled Core directly from a Try<T>; the FSM
// starts in OnlyResult so only a callback registration remains.
85 explicit Core(Try<T>&& t)
86 : result_(std::move(t)),
87 fsm_(State::OnlyResult),
// NOTE(review): the remaining initializer(s) and the ~Core() header are
// elided in this excerpt; the DCHECK below belongs to the destructor.
// Both the Future and the Promise must have detached before destruction.
91 DCHECK(attached_ == 0);
// Core is pinned in memory: neither copyable nor movable.
95 Core(Core const&) = delete;
96 Core& operator=(Core const&) = delete;
98 // not movable (see comment in the implementation of Future::then)
99 Core(Core&&) noexcept = delete;
100 Core& operator=(Core&&) = delete;
102 /// May call from any thread
// True once a result has been set (the OnlyResult-and-later states).
103 bool hasResult() const {
104 switch (fsm_.getState()) {
105 case State::OnlyResult:
// NOTE(review): the remaining cases, returns, and closing braces of
// hasResult(), plus the ready()/value() accessors, are elided here.
116 /// May call from any thread
121 /// May call from any thread
// Accessing the value before a result exists throws FutureNotReady.
126 throw FutureNotReady();
// Wrapper for a callback stored in-place in lambdaBuf_; it destroys
// itself after a single invocation (see the SCOPE_EXIT below).
130 template <typename F>
131 class LambdaBufHelper {
133 explicit LambdaBufHelper(F&& func) : func_(std::forward<F>(func)) {}
134 void operator()(Try<T>&& t) {
// Self-destruct after the call: the helper lives in lambdaBuf_, not heap.
135 SCOPE_EXIT { this->~LambdaBufHelper(); };
142 /// Call only from Future thread.
143 template <typename F>
144 void setCallback(F func) {
145 bool transitionToArmed = false;
146 auto setCallback_ = [&]{
// Capture the caller's RequestContext so the callback later runs under it.
147 context_ = RequestContext::saveContext();
149 // Move the lambda into the Core if it fits
150 if (sizeof(LambdaBufHelper<F>) <= lambdaBufSize) {
// Placement-construct the helper inside the in-place buffer, avoiding
// the heap allocation std::function would otherwise make.
151 auto funcLoc = static_cast<LambdaBufHelper<F>*>((void*)lambdaBuf_)
152 new (funcLoc) LambdaBufHelper<F>(std::forward<F>(func));
153 callback_ = std::ref(*funcLoc);
// Too big for the buffer: let std::function own the callable.
155 callback_ = std::move(func);
// NOTE(review): the FSM switch header and several case/break lines are
// elided in this excerpt.
161 FSM_UPDATE(fsm_, State::OnlyCallback, setCallback_);
164 case State::OnlyResult:
// A result is already present: arm, then fire below.
165 FSM_UPDATE(fsm_, State::Armed, setCallback_);
166 transitionToArmed = true;
169 case State::OnlyCallback:
// Registering a second callback is a usage error.
172 throw std::logic_error("setCallback called twice");
175 // we could always call this, it is an optimization to only call it when
176 // it might be needed.
177 if (transitionToArmed) {
182 /// Call only from Promise thread
// Store the result and, if a callback is already registered, arm and fire.
183 void setResult(Try<T>&& t) {
184 bool transitionToArmed = false;
185 auto setResult_ = [&]{ result_ = std::move(t); };
// NOTE(review): the FSM switch header and some case/break lines are
// elided in this excerpt.
188 FSM_UPDATE(fsm_, State::OnlyResult, setResult_);
191 case State::OnlyCallback:
// Callback already registered: arm, then fire below.
192 FSM_UPDATE(fsm_, State::Armed, setResult_);
193 transitionToArmed = true;
196 case State::OnlyResult:
// Fulfilling a Promise twice is a usage error.
199 throw std::logic_error("setResult called twice");
// Mirrors setCallback: only invoke maybeCallback() when we armed here.
202 if (transitionToArmed) {
207 /// Called by a destructing Future (in the Future thread, by definition)
208 void detachFuture() {
// NOTE(review): detachFuture()'s body is elided in this excerpt.
213 /// Called by a destructing Promise (in the Promise thread, by definition)
214 void detachPromise() {
215 // detachPromise() and setResult() should never be called in parallel
216 // so we don't need to protect this.
// Promise died without fulfilling: deliver BrokenPromise to the Future.
217 if (UNLIKELY(!result_)) {
218 setResult(Try<T>(exception_wrapper(BrokenPromise())));
223 /// May call from any thread
// Deactivate: suppress callback execution until reactivated.
225 active_.store(false, std::memory_order_release);
228 /// May call from any thread
// Activate: allow the callback to run again.
230 active_.store(true, std::memory_order_release);
234 /// May call from any thread
// Acquire load pairs with the release stores above.
235 bool isActive() { return active_.load(std::memory_order_acquire); }
237 /// Call only from Future thread
238 void setExecutor(Executor* x, int8_t priority = Executor::MID_PRI) {
// try_lock-then-lock spin pattern; MicroSpinLock is used without a guard.
239 if (!executorLock_.try_lock()) {
240 executorLock_.lock();
// NOTE(review): the `executor_ = x;` assignment line is elided here.
243 priority_ = priority;
244 executorLock_.unlock();
// Unlocked variant: caller must already be serialized w.r.t. executorLock_.
247 void setExecutorNoLock(Executor* x, int8_t priority = Executor::MID_PRI) {
249 priority_ = priority;
// NOTE(review): getExecutor()'s body is elided in this excerpt.
252 Executor* getExecutor() {
256 /// Call only from Future thread
// Raise an interrupt toward the Promise; invokes the handler if one is set.
257 void raise(exception_wrapper e) {
258 if (!interruptLock_.try_lock()) {
259 interruptLock_.lock();
// Only the first interrupt is kept, and only before a result exists.
261 if (!interrupt_ && !hasResult()) {
262 interrupt_ = folly::make_unique<exception_wrapper>(std::move(e));
// Handler (if registered by the Promise side) runs under the lock.
263 if (interruptHandler_) {
264 interruptHandler_(*interrupt_);
267 interruptLock_.unlock();
// Fetch the interrupt handler, if any; fast path avoids taking the lock.
270 std::function<void(exception_wrapper const&)> getInterruptHandler() {
// Acquire-checked flag lets readers skip locking when no handler was set.
271 if (!interruptHandlerSet_.load(std::memory_order_acquire)) {
274 if (!interruptLock_.try_lock()) {
275 interruptLock_.lock();
// Copy under the lock so the caller may invoke it without holding it.
277 auto handler = interruptHandler_;
278 interruptLock_.unlock();
282 /// Call only from Promise thread
283 void setInterruptHandler(std::function<void(exception_wrapper const&)> fn) {
284 if (!interruptLock_.try_lock()) {
285 interruptLock_.lock();
// NOTE(review): lines elided here — presumably the handler is invoked
// immediately when an interrupt is already pending; confirm upstream.
291 setInterruptHandlerNoLock(std::move(fn));
294 interruptLock_.unlock();
// Unlocked variant: caller must hold interruptLock_ or be serialized.
297 void setInterruptHandlerNoLock(
298 std::function<void(exception_wrapper const&)> fn) {
// Relaxed store is fine: readers take the lock before using the handler.
299 interruptHandlerSet_.store(true, std::memory_order_relaxed);
300 interruptHandler_ = std::move(fn);
// Fire the callback iff the core is Armed and active.
304 void maybeCallback() {
307 if (active_.load(std::memory_order_acquire)) {
// Transition Armed -> Done; doCallback() runs after the state change.
308 FSM_UPDATE2(fsm_, State::Done, []{}, [this]{ this->doCallback(); });
// NOTE(review): doCallback()'s opening lines are elided in this excerpt.
// Snapshot executor and priority under the spinlock.
318 Executor* x = executor_;
321 if (!executorLock_.try_lock()) {
322 executorLock_.lock();
325 priority = priority_;
326 executorLock_.unlock();
330 // keep Core alive until executor did its thing
// Single-priority executors take the cheaper add() path.
333 if (LIKELY(x->getNumPriorities() == 1)) {
334 x->add([this]() mutable {
// Balance the keep-alive increment even if callback_ throws.
335 SCOPE_EXIT { detachOne(); };
// Restore the context captured in setCallback before invoking.
336 RequestContext::setContext(context_);
337 callback_(std::move(*result_));
340 x->addWithPriority([this]() mutable {
341 SCOPE_EXIT { detachOne(); };
342 RequestContext::setContext(context_);
343 callback_(std::move(*result_));
// Executor::add threw: undo the keep-alive and run inline with the error.
347 --attached_; // Account for extra ++attached_ before try
348 RequestContext::setContext(context_);
349 result_ = Try<T>(exception_wrapper(std::current_exception()));
350 callback_(std::move(*result_));
// No executor configured: run the callback inline on this thread.
353 RequestContext::setContext(context_);
354 callback_(std::move(*result_));
// detachOne(): NOTE(review) — elided here, but it decrements attached_
// and (presumably) deletes this Core when the count reaches zero.
359 auto a = --attached_;
367 // lambdaBuf occupies exactly one cache line
368 static constexpr size_t lambdaBufSize = 8 * sizeof(void*);
// In-place storage for small callbacks (see LambdaBufHelper above).
369 char lambdaBuf_[lambdaBufSize];
370 // place result_ next to increase the likelihood that the value will be
371 // contained entirely in one cache line
372 folly::Optional<Try<T>> result_;
373 std::function<void(Try<T>&&)> callback_ {nullptr};
// Reference count: one for the Future side, one for the Promise side.
375 std::atomic<unsigned char> attached_;
376 std::atomic<bool> active_ {true};
377 std::atomic<bool> interruptHandlerSet_ {false};
// MicroSpinLocks must be zero-initialized.
378 folly::MicroSpinLock interruptLock_ {0};
379 folly::MicroSpinLock executorLock_ {0};
// -1 sentinel: no explicit priority chosen yet.
380 int8_t priority_ {-1};
381 Executor* executor_ {nullptr};
// RequestContext captured at setCallback time, restored around the call.
382 std::shared_ptr<RequestContext> context_ {nullptr};
383 std::unique_ptr<exception_wrapper> interrupt_ {};
384 std::function<void(exception_wrapper const&)> interruptHandler_ {nullptr};
// Shared context for the variadic collectAll(): records every Try and
// fulfills the promise when the last owner releases the context.
387 template <typename... Ts>
388 struct CollectAllVariadicContext {
389 CollectAllVariadicContext() {}
390 template <typename T, size_t I>
391 inline void setPartialResult(Try<T>& t) {
// Store the I-th completed Try; both success and failure are recorded.
392 std::get<I>(results) = std::move(t);
// Destructor fires once all component futures have reported in (the
// last shared_ptr owner going away), fulfilling with the full tuple.
394 ~CollectAllVariadicContext() {
395 p.setValue(std::move(results));
397 Promise<std::tuple<Try<Ts>...>> p;
398 std::tuple<Try<Ts>...> results;
399 typedef Future<std::tuple<Try<Ts>...>> type;
// Shared context for the variadic collect(): the first exception
// short-circuits the result; otherwise values are tupled up at the end.
402 template <typename... Ts>
403 struct CollectVariadicContext {
404 CollectVariadicContext() {}
405 template <typename T, size_t I>
406 inline void setPartialResult(Try<T>& t) {
407 if (t.hasException()) {
// Only the first thrower wins: `threw` is an atomic test-and-set flag.
408 if (!threw.exchange(true)) {
409 p.setException(std::move(t.exception()));
// NOTE(review): closing braces / else-branch elided in this excerpt.
412 std::get<I>(results) = std::move(t.value());
// If nothing threw, fulfill with the collected values on destruction;
// the exchange also guards against double-fulfillment here.
415 ~CollectVariadicContext() {
416 if (!threw.exchange(true)) {
417 p.setValue(std::move(results));
420 Promise<std::tuple<Ts...>> p;
421 std::tuple<Ts...> results;
422 std::atomic<bool> threw {false};
423 typedef Future<std::tuple<Ts...>> type;
// Base case of the variadic wiring: no futures left to attach.
426 template <template <typename ...> class T, typename... Ts>
427 void collectVariadicHelper(const std::shared_ptr<T<Ts...>>& ctx) {
// Recursive case: attach a callback to `head`, then recurse on `tail`.
431 template <template <typename ...> class T, typename... Ts,
432 typename THead, typename... TTail>
433 void collectVariadicHelper(const std::shared_ptr<T<Ts...>>& ctx,
434 THead&& head, TTail&&... tail) {
435 head.setCallback_([ctx](Try<typename THead::value_type>&& t) {
// Index of `head` in the full pack: total Ts minus the remaining tail.
436 ctx->template setPartialResult<typename THead::value_type,
437 sizeof...(Ts) - sizeof...(TTail) - 1>(t);
439 // template tail-recursion
440 collectVariadicHelper(ctx, std::forward<TTail>(tail)...);