/*
- * Copyright 2015 Facebook, Inc.
+ * Copyright 2013-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <folly/experimental/io/AsyncIO.h>
#include <sys/eventfd.h>
-#include <unistd.h>
#include <cerrno>
#include <ostream>
#include <stdexcept>
#include <folly/Format.h>
#include <folly/Likely.h>
#include <folly/String.h>
+#include <folly/portability/Unistd.h>
namespace folly {
AsyncIOOp::AsyncIOOp(NotificationCallback cb)
- : cb_(std::move(cb)),
- state_(State::UNINITIALIZED),
- result_(-EINVAL) {
+ : cb_(std::move(cb)), state_(State::UNINITIALIZED), result_(-EINVAL) {
memset(&iocb_, 0, sizeof(iocb_));
}
}
}
+// Mark a PENDING op as CANCELED. This is bookkeeping only: the in-flight
+// kernel request is not aborted here — Linux AIO cancellation still has to
+// wait for completion (see the WaitType::CANCEL handling in doWait()).
+void AsyncIOOp::cancel() {
+  DCHECK_EQ(state_, State::PENDING);
+  state_ = State::CANCELED;
+}
+
ssize_t AsyncIOOp::result() const {
CHECK_EQ(state_, State::COMPLETED);
return result_;
state_ = State::INITIALIZED;
}
-AsyncIO::AsyncIO(size_t capacity, PollMode pollMode)
- : ctx_(0),
- ctxSet_(false),
- pending_(0),
- submitted_(0),
- capacity_(capacity),
- pollFd_(-1) {
+AsyncIO::AsyncIO(size_t capacity, PollMode pollMode) : capacity_(capacity) {
CHECK_GT(capacity_, 0);
completed_.reserve(capacity_);
if (pollMode == POLLABLE) {
// returns negative errno
if (rc == -EAGAIN) {
long aio_nr, aio_max;
- std::unique_ptr<FILE, int(*)(FILE*)>
- fp(fopen("/proc/sys/fs/aio-nr", "r"), fclose);
+ std::unique_ptr<FILE, int (*)(FILE*)> fp(
+ fopen("/proc/sys/fs/aio-nr", "r"), fclose);
PCHECK(fp);
CHECK_EQ(fscanf(fp.get(), "%ld", &aio_nr), 1);
- std::unique_ptr<FILE, int(*)(FILE*)>
- aio_max_fp(fopen("/proc/sys/fs/aio-max-nr", "r"), fclose);
+ std::unique_ptr<FILE, int (*)(FILE*)> aio_max_fp(
+ fopen("/proc/sys/fs/aio-max-nr", "r"), fclose);
PCHECK(aio_max_fp);
CHECK_EQ(fscanf(aio_max_fp.get(), "%ld", &aio_max), 1);
void AsyncIO::submit(Op* op) {
CHECK_EQ(op->state(), Op::State::INITIALIZED);
- initializeContext(); // on demand
+ initializeContext(); // on demand
// We can increment past capacity, but we'll clean up after ourselves.
auto p = pending_.fetch_add(1, std::memory_order_acq_rel);
throw std::range_error("AsyncIO: too many pending requests");
}
iocb* cb = &op->iocb_;
- cb->data = nullptr; // unused
+ cb->data = nullptr; // unused
if (pollFd_ != -1) {
io_set_eventfd(cb, pollFd_);
}
CHECK_EQ(pollFd_, -1) << "wait() only allowed on non-pollable object";
auto p = pending_.load(std::memory_order_acquire);
CHECK_LE(minRequests, p);
- return doWait(minRequests, p);
+ return doWait(WaitType::COMPLETE, minRequests, p, completed_);
+}
+
+// Cancel every pending operation. Because Linux AIO doesn't implement
+// io_cancel, this still blocks until all pending ops complete; each one is
+// then transitioned to CANCELED (via Op::cancel) instead of COMPLETED. The
+// returned range points into canceled_, which doWait() clears and refills,
+// so it is only valid until the next call that reuses that vector.
+Range<AsyncIO::Op**> AsyncIO::cancel() {
+  CHECK(ctx_);
+  auto p = pending_.load(std::memory_order_acquire);
+  return doWait(WaitType::CANCEL, p, p, canceled_);
+}
Range<AsyncIO::Op**> AsyncIO::pollCompleted() {
rc = ::read(pollFd_, &numEvents, 8);
} while (rc == -1 && errno == EINTR);
if (UNLIKELY(rc == -1 && errno == EAGAIN)) {
- return Range<Op**>(); // nothing completed
+ return Range<Op**>(); // nothing completed
}
checkUnixError(rc, "AsyncIO: read from event fd failed");
DCHECK_EQ(rc, 8);
DCHECK_LE(numEvents, pending_);
// Don't reap more than numEvents, as we've just reset the counter to 0.
- return doWait(numEvents, numEvents);
+ return doWait(WaitType::COMPLETE, numEvents, numEvents, completed_);
}
-Range<AsyncIO::Op**> AsyncIO::doWait(size_t minRequests, size_t maxRequests) {
+Range<AsyncIO::Op**> AsyncIO::doWait(
+ WaitType type,
+ size_t minRequests,
+ size_t maxRequests,
+ std::vector<Op*>& result) {
io_event events[maxRequests];
+ // Unfortunately, Linux AIO doesn't implement io_cancel, so even for
+ // WaitType::CANCEL we have to wait for IO completion.
size_t count = 0;
do {
int ret;
// GOTCHA: io_getevents() may return fewer than min_nr results if
// interrupted after some events have been read (if before, -EINTR
// is returned).
- ret = io_getevents(ctx_,
- minRequests - count,
- maxRequests - count,
- events + count,
- /* timeout */ nullptr); // wait forever
+ ret = io_getevents(
+ ctx_,
+ minRequests - count,
+ maxRequests - count,
+ events + count,
+ /* timeout */ nullptr); // wait forever
} while (ret == -EINTR);
// Check as may not be able to recover without leaking events.
- CHECK_GE(ret, 0)
- << "AsyncIO: io_getevents failed with error " << errnoStr(-ret);
+ CHECK_GE(ret, 0) << "AsyncIO: io_getevents failed with error "
+ << errnoStr(-ret);
count += ret;
} while (count < minRequests);
DCHECK_LE(count, maxRequests);
- completed_.clear();
- if (count == 0) {
- return folly::Range<Op**>();
- }
-
+ result.clear();
for (size_t i = 0; i < count; ++i) {
DCHECK(events[i].obj);
Op* op = boost::intrusive::get_parent_from_member(
events[i].obj, &AsyncIOOp::iocb_);
decrementPending();
- op->complete(events[i].res);
- completed_.push_back(op);
+ switch (type) {
+ case WaitType::COMPLETE:
+ op->complete(events[i].res);
+ break;
+ case WaitType::CANCEL:
+ op->cancel();
+ break;
+ }
+ result.push_back(op);
}
- return folly::Range<Op**>(&completed_.front(), count);
+ return range(result);
}
-AsyncIOQueue::AsyncIOQueue(AsyncIO* asyncIO)
- : asyncIO_(asyncIO) {
-}
+AsyncIOQueue::AsyncIOQueue(AsyncIO* asyncIO) : asyncIO_(asyncIO) {}
AsyncIOQueue::~AsyncIOQueue() {
CHECK_EQ(asyncIO_->pending(), 0);
maybeDequeue();
}
-void AsyncIOQueue::onCompleted(AsyncIOOp* op) {
+// Completion hook interposed on each submitted op's notification callback
+// (see the lambda wrapper installed below); a finished op frees capacity,
+// so attempt to dequeue more queued work. The op itself is unused — only
+// the completion signal matters, hence the commented-out parameter name.
+void AsyncIOQueue::onCompleted(AsyncIOOp* /* op */) {
   maybeDequeue();
 }
// Interpose our completion callback
auto& nextCb = op->notificationCallback();
- op->setNotificationCallback([this, nextCb](AsyncIOOp* op) {
- this->onCompleted(op);
- if (nextCb) nextCb(op);
+ op->setNotificationCallback([this, nextCb](AsyncIOOp* op2) {
+ this->onCompleted(op2);
+ if (nextCb) {
+ nextCb(op2);
+ }
});
asyncIO_->submit(op);
namespace {
-#define X(c) case c: return #c
+#define X(c) \
+ case c: \
+ return #c
+// Human-readable name for an AsyncIOOp::State value (used by the ostream
+// operators below); relies on the X() stringizing macro defined above.
 const char* asyncIoOpStateToString(AsyncIOOp::State state) {
   switch (state) {
     X(AsyncIOOp::State::INITIALIZED);
     X(AsyncIOOp::State::PENDING);
     X(AsyncIOOp::State::COMPLETED);
+    X(AsyncIOOp::State::CANCELED);
   }
+  // Fallback for enum values not covered above or for bogus, cast-in values.
   return "<INVALID AsyncIOOp::State>";
 }
std::string path = folly::to<std::string>("/proc/self/fd/", fd);
char link[PATH_MAX];
const ssize_t length =
- std::max<ssize_t>(readlink(path.c_str(), link, PATH_MAX), 0);
+ std::max<ssize_t>(readlink(path.c_str(), link, PATH_MAX), 0);
return path.assign(link, length);
}
+// Debug-print an iocb: common header fields first, then opcode-specific
+// details (only PREAD/PWRITE are fleshed out; other opcodes get a TODO).
 std::ostream& operator<<(std::ostream& os, const iocb& cb) {
   os << folly::format(
-      "data={}, key={}, opcode={}, reqprio={}, fd={}, f={}, ",
-      cb.data, cb.key, iocbCmdToString(cb.aio_lio_opcode),
-      cb.aio_reqprio, cb.aio_fildes, fd2name(cb.aio_fildes));
+      "data={}, key={}, opcode={}, reqprio={}, fd={}, f={}, ",
+      cb.data,
+      cb.key,
+      iocbCmdToString(cb.aio_lio_opcode),
+      cb.aio_reqprio,
+      cb.aio_fildes,
+      fd2name(cb.aio_fildes));
   switch (cb.aio_lio_opcode) {
     case IO_CMD_PREAD:
     case IO_CMD_PWRITE:
+      // The old format string printed nbytes under "off" and offset under
+      // "size"; the new one labels each field with its actual member name.
-      os << folly::format("buf={}, off={}, size={}, ",
-          cb.u.c.buf, cb.u.c.nbytes, cb.u.c.offset);
+      os << folly::format(
+          "buf={}, offset={}, nbytes={}, ",
+          cb.u.c.buf,
+          cb.u.c.offset,
+          cb.u.c.nbytes);
+      break;
     default:
       os << "[TODO: write debug string for "
          << iocbCmdToString(cb.aio_lio_opcode) << "] ";
+      break;
   }
   return os;
 }
-} // anonymous namespace
+} // namespace
std::ostream& operator<<(std::ostream& os, const AsyncIOOp& op) {
os << "{" << op.state_ << ", ";
return os << asyncIoOpStateToString(state);
}
-} // namespace folly
+} // namespace folly