/*
- * Copyright 2015 Facebook, Inc.
+ * Copyright 2017 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <folly/io/async/AsyncSocket.h>
-#include <folly/io/async/EventBase.h>
+#include <folly/ExceptionWrapper.h>
#include <folly/SocketAddress.h>
#include <folly/io/IOBuf.h>
+#include <folly/Portability.h>
+#include <folly/portability/Fcntl.h>
+#include <folly/portability/Sockets.h>
+#include <folly/portability/SysUio.h>
+#include <folly/portability/Unistd.h>
-#include <poll.h>
#include <errno.h>
#include <limits.h>
-#include <unistd.h>
-#include <fcntl.h>
+#include <thread>
#include <sys/types.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
+#include <boost/preprocessor/control/if.hpp>
using std::string;
using std::unique_ptr;
+namespace fsp = folly::portability::sockets;
+
namespace folly {
// static members initializers
const AsyncSocket::OptionMap AsyncSocket::emptyOptionMap;
-const folly::SocketAddress AsyncSocket::anyAddress =
- folly::SocketAddress("0.0.0.0", 0);
const AsyncSocketException socketClosedLocallyEx(
AsyncSocketException::END_OF_FILE, "socket closed locally");
const AsyncSocketException socketShutdownForWritesEx(
AsyncSocketException::END_OF_FILE, "socket shutdown for writes");
-// TODO: It might help performance to provide a version of WriteRequest that
+// TODO: It might help performance to provide a version of BytesWriteRequest that
// users could derive from, so we can avoid the extra allocation for each call
// to write()/writev(). We could templatize TFramedAsyncChannel just like the
// protocols are currently templatized for transports.
// storage space, and only our internal version would allocate it at the end of
// the WriteRequest.
-/**
- * A WriteRequest object tracks information about a pending write() or writev()
- * operation.
+/* The default WriteRequest implementation, used for write(), writev() and
+ * writeChain()
*
- * A new WriteRequest operation is allocated on the heap for all write
+ * A new BytesWriteRequest operation is allocated on the heap for all write
* operations that cannot be completed immediately.
*/
-class AsyncSocket::WriteRequest {
+class AsyncSocket::BytesWriteRequest : public AsyncSocket::WriteRequest {
public:
- static WriteRequest* newRequest(WriteCallback* callback,
- const iovec* ops,
- uint32_t opCount,
- unique_ptr<IOBuf>&& ioBuf,
- WriteFlags flags) {
+ static BytesWriteRequest* newRequest(AsyncSocket* socket,
+ WriteCallback* callback,
+ const iovec* ops,
+ uint32_t opCount,
+ uint32_t partialWritten,
+ uint32_t bytesWritten,
+ unique_ptr<IOBuf>&& ioBuf,
+ WriteFlags flags) {
assert(opCount > 0);
// Since we put a variable size iovec array at the end
- // of each WriteRequest, we have to manually allocate the memory.
- void* buf = malloc(sizeof(WriteRequest) +
+ // of each BytesWriteRequest, we have to manually allocate the memory.
+ void* buf = malloc(sizeof(BytesWriteRequest) +
(opCount * sizeof(struct iovec)));
if (buf == nullptr) {
throw std::bad_alloc();
}
- return new(buf) WriteRequest(callback, ops, opCount, std::move(ioBuf),
- flags);
+ return new(buf) BytesWriteRequest(socket, callback, ops, opCount,
+ partialWritten, bytesWritten,
+ std::move(ioBuf), flags);
}
- void destroy() {
- this->~WriteRequest();
+ void destroy() override {
+ this->~BytesWriteRequest();
free(this);
}
- bool cork() const {
- return isSet(flags_, WriteFlags::CORK);
- }
-
- WriteFlags flags() const {
- return flags_;
- }
-
- WriteRequest* getNext() const {
- return next_;
- }
-
- WriteCallback* getCallback() const {
- return callback_;
- }
-
- uint32_t getBytesWritten() const {
- return bytesWritten_;
- }
-
- const struct iovec* getOps() const {
- assert(opCount_ > opIndex_);
- return writeOps_ + opIndex_;
+ WriteResult performWrite() override {
+ WriteFlags writeFlags = flags_;
+ if (getNext() != nullptr) {
+ writeFlags |= WriteFlags::CORK;
+ }
+ auto writeResult = socket_->performWrite(
+ getOps(), getOpCount(), writeFlags, &opsWritten_, &partialBytes_);
+ bytesWritten_ = writeResult.writeReturn > 0 ? writeResult.writeReturn : 0;
+ return writeResult;
}
- uint32_t getOpCount() const {
- assert(opCount_ > opIndex_);
- return opCount_ - opIndex_;
+ bool isComplete() override {
+ return opsWritten_ == getOpCount();
}
- void consume(uint32_t wholeOps, uint32_t partialBytes,
- uint32_t totalBytesWritten) {
- // Advance opIndex_ forward by wholeOps
- opIndex_ += wholeOps;
+ void consume() override {
+ // Advance opIndex_ forward by opsWritten_
+ opIndex_ += opsWritten_;
assert(opIndex_ < opCount_);
// If we've finished writing any IOBufs, release them
if (ioBuf_) {
- for (uint32_t i = wholeOps; i != 0; --i) {
+ for (uint32_t i = opsWritten_; i != 0; --i) {
assert(ioBuf_);
ioBuf_ = ioBuf_->pop();
}
}
- // Move partialBytes forward into the current iovec buffer
+ // Move partialBytes_ forward into the current iovec buffer
struct iovec* currentOp = writeOps_ + opIndex_;
- assert((partialBytes < currentOp->iov_len) || (currentOp->iov_len == 0));
+ assert((partialBytes_ < currentOp->iov_len) || (currentOp->iov_len == 0));
currentOp->iov_base =
- reinterpret_cast<uint8_t*>(currentOp->iov_base) + partialBytes;
- currentOp->iov_len -= partialBytes;
-
- // Increment the bytesWritten_ count by totalBytesWritten
- bytesWritten_ += totalBytesWritten;
- }
+ reinterpret_cast<uint8_t*>(currentOp->iov_base) + partialBytes_;
+ currentOp->iov_len -= partialBytes_;
- void append(WriteRequest* next) {
- assert(next_ == nullptr);
- next_ = next;
+ // Increment the totalBytesWritten_ count by bytesWritten_
+ assert(bytesWritten_ >= 0);
+ totalBytesWritten_ += uint32_t(bytesWritten_);
}
private:
- WriteRequest(WriteCallback* callback,
- const struct iovec* ops,
- uint32_t opCount,
- unique_ptr<IOBuf>&& ioBuf,
- WriteFlags flags)
- : next_(nullptr)
- , callback_(callback)
- , bytesWritten_(0)
+ BytesWriteRequest(AsyncSocket* socket,
+ WriteCallback* callback,
+ const struct iovec* ops,
+ uint32_t opCount,
+ uint32_t partialBytes,
+ uint32_t bytesWritten,
+ unique_ptr<IOBuf>&& ioBuf,
+ WriteFlags flags)
+ : AsyncSocket::WriteRequest(socket, callback)
, opCount_(opCount)
, opIndex_(0)
, flags_(flags)
- , ioBuf_(std::move(ioBuf)) {
+ , ioBuf_(std::move(ioBuf))
+ , opsWritten_(0)
+ , partialBytes_(partialBytes)
+ , bytesWritten_(bytesWritten) {
memcpy(writeOps_, ops, sizeof(*ops) * opCount_);
}
- // Private destructor, to ensure callers use destroy()
- ~WriteRequest() {}
+ // private destructor, to ensure callers use destroy()
+ ~BytesWriteRequest() override = default;
+
+ const struct iovec* getOps() const {
+ assert(opCount_ > opIndex_);
+ return writeOps_ + opIndex_;
+ }
+
+ uint32_t getOpCount() const {
+ assert(opCount_ > opIndex_);
+ return opCount_ - opIndex_;
+ }
- WriteRequest* next_; ///< pointer to next WriteRequest
- WriteCallback* callback_; ///< completion callback
- uint32_t bytesWritten_; ///< bytes written
uint32_t opCount_; ///< number of entries in writeOps_
uint32_t opIndex_; ///< current index into writeOps_
WriteFlags flags_; ///< set for WriteFlags
unique_ptr<IOBuf> ioBuf_; ///< underlying IOBuf, or nullptr if N/A
+
+ // for consume(), how much we wrote on the last write
+ uint32_t opsWritten_; ///< complete ops written
+ uint32_t partialBytes_; ///< partial bytes of incomplete op written
+ ssize_t bytesWritten_; ///< bytes written altogether
+
struct iovec writeOps_[]; ///< write operation(s) list
};
AsyncSocket::AsyncSocket()
- : eventBase_(nullptr)
- , writeTimeout_(this, nullptr)
- , ioHandler_(this, nullptr) {
+ : eventBase_(nullptr),
+ writeTimeout_(this, nullptr),
+ ioHandler_(this, nullptr),
+ immediateReadHandler_(this) {
VLOG(5) << "new AsyncSocket()";
init();
}
AsyncSocket::AsyncSocket(EventBase* evb)
- : eventBase_(evb)
- , writeTimeout_(this, evb)
- , ioHandler_(this, evb) {
+ : eventBase_(evb),
+ writeTimeout_(this, evb),
+ ioHandler_(this, evb),
+ immediateReadHandler_(this) {
VLOG(5) << "new AsyncSocket(" << this << ", evb=" << evb << ")";
init();
}
}
AsyncSocket::AsyncSocket(EventBase* evb, int fd)
- : eventBase_(evb)
- , writeTimeout_(this, evb)
- , ioHandler_(this, evb, fd) {
+ : eventBase_(evb),
+ writeTimeout_(this, evb),
+ ioHandler_(this, evb, fd),
+ immediateReadHandler_(this) {
VLOG(5) << "new AsyncSocket(" << this << ", evb=" << evb << ", fd="
<< fd << ")";
init();
return fd;
}
+const folly::SocketAddress& AsyncSocket::anyAddress() {
+ static const folly::SocketAddress anyAddress =
+ folly::SocketAddress("0.0.0.0", 0);
+ return anyAddress;
+}
+
void AsyncSocket::setShutdownSocketSet(ShutdownSocketSet* newSS) {
if (shutdownSocketSet_ == newSS) {
return;
void AsyncSocket::setCloseOnExec() {
int rv = fcntl(fd_, F_SETFD, FD_CLOEXEC);
if (rv != 0) {
- throw AsyncSocketException(AsyncSocketException::INTERNAL_ERROR,
- withAddr("failed to set close-on-exec flag"),
- errno);
+ auto errnoCopy = errno;
+ throw AsyncSocketException(
+ AsyncSocketException::INTERNAL_ERROR,
+ withAddr("failed to set close-on-exec flag"),
+ errnoCopy);
}
}
return invalidState(callback);
}
+ connectTimeout_ = std::chrono::milliseconds(timeout);
+ connectStartTime_ = std::chrono::steady_clock::now();
+ // Make connect end time at least >= connectStartTime.
+ connectEndTime_ = connectStartTime_;
+
assert(fd_ == -1);
state_ = StateEnum::CONNECTING;
connectCallback_ = callback;
// constant (PF_xxx) rather than an address family (AF_xxx), but the
// distinction is mainly just historical. In pretty much all
// implementations the PF_foo and AF_foo constants are identical.
- fd_ = socket(address.getFamily(), SOCK_STREAM, 0);
+ fd_ = fsp::socket(address.getFamily(), SOCK_STREAM, 0);
if (fd_ < 0) {
- throw AsyncSocketException(AsyncSocketException::INTERNAL_ERROR,
- withAddr("failed to create socket"), errno);
+ auto errnoCopy = errno;
+ throw AsyncSocketException(
+ AsyncSocketException::INTERNAL_ERROR,
+ withAddr("failed to create socket"),
+ errnoCopy);
}
if (shutdownSocketSet_) {
shutdownSocketSet_->add(fd_);
// Put the socket in non-blocking mode
int flags = fcntl(fd_, F_GETFL, 0);
if (flags == -1) {
- throw AsyncSocketException(AsyncSocketException::INTERNAL_ERROR,
- withAddr("failed to get socket flags"), errno);
+ auto errnoCopy = errno;
+ throw AsyncSocketException(
+ AsyncSocketException::INTERNAL_ERROR,
+ withAddr("failed to get socket flags"),
+ errnoCopy);
}
int rv = fcntl(fd_, F_SETFL, flags | O_NONBLOCK);
if (rv == -1) {
+ auto errnoCopy = errno;
throw AsyncSocketException(
AsyncSocketException::INTERNAL_ERROR,
withAddr("failed to put socket in non-blocking mode"),
- errno);
+ errnoCopy);
}
#if !defined(MSG_NOSIGNAL) && defined(F_SETNOSIGPIPE)
// iOS and OS X don't support MSG_NOSIGNAL; set F_SETNOSIGPIPE instead
rv = fcntl(fd_, F_SETNOSIGPIPE, 1);
if (rv == -1) {
+ auto errnoCopy = errno;
throw AsyncSocketException(
AsyncSocketException::INTERNAL_ERROR,
"failed to enable F_SETNOSIGPIPE on socket",
- errno);
+ errnoCopy);
}
#endif
<< ", fd=" << fd_ << ", host=" << address.describe().c_str();
// bind the socket
- if (bindAddr != anyAddress) {
+ if (bindAddr != anyAddress()) {
int one = 1;
- if (::setsockopt(fd_, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one))) {
+ if (setsockopt(fd_, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one))) {
+ auto errnoCopy = errno;
doClose();
throw AsyncSocketException(
- AsyncSocketException::NOT_OPEN,
- "failed to setsockopt prior to bind on " + bindAddr.describe(),
- errno);
+ AsyncSocketException::NOT_OPEN,
+ "failed to setsockopt prior to bind on " + bindAddr.describe(),
+ errnoCopy);
}
bindAddr.getAddress(&addrStorage);
- if (::bind(fd_, saddr, bindAddr.getActualSize()) != 0) {
+ if (bind(fd_, saddr, bindAddr.getActualSize()) != 0) {
+ auto errnoCopy = errno;
doClose();
- throw AsyncSocketException(AsyncSocketException::NOT_OPEN,
- "failed to bind to async socket: " +
- bindAddr.describe(),
- errno);
+ throw AsyncSocketException(
+ AsyncSocketException::NOT_OPEN,
+ "failed to bind to async socket: " + bindAddr.describe(),
+ errnoCopy);
}
}
// Apply the additional options if any.
for (const auto& opt: options) {
- int rv = opt.first.apply(fd_, opt.second);
+ rv = opt.first.apply(fd_, opt.second);
if (rv != 0) {
- throw AsyncSocketException(AsyncSocketException::INTERNAL_ERROR,
- withAddr("failed to set socket option"),
- errno);
+ auto errnoCopy = errno;
+ throw AsyncSocketException(
+ AsyncSocketException::INTERNAL_ERROR,
+ withAddr("failed to set socket option"),
+ errnoCopy);
}
}
// Perform the connect()
address.getAddress(&addrStorage);
- rv = ::connect(fd_, saddr, address.getActualSize());
- if (rv < 0) {
- if (errno == EINPROGRESS) {
- // Connection in progress.
- if (timeout > 0) {
- // Start a timer in case the connection takes too long.
- if (!writeTimeout_.scheduleTimeout(timeout)) {
- throw AsyncSocketException(AsyncSocketException::INTERNAL_ERROR,
- withAddr("failed to schedule AsyncSocket connect timeout"));
- }
- }
-
- // Register for write events, so we'll
- // be notified when the connection finishes/fails.
- // Note that we don't register for a persistent event here.
- assert(eventFlags_ == EventHandler::NONE);
- eventFlags_ = EventHandler::WRITE;
- if (!ioHandler_.registerHandler(eventFlags_)) {
- throw AsyncSocketException(AsyncSocketException::INTERNAL_ERROR,
- withAddr("failed to register AsyncSocket connect handler"));
- }
+ if (tfoEnabled_) {
+ state_ = StateEnum::FAST_OPEN;
+ tfoAttempted_ = true;
+ } else {
+ if (socketConnect(saddr, addr_.getActualSize()) < 0) {
return;
- } else {
- throw AsyncSocketException(AsyncSocketException::NOT_OPEN,
- "connect failed (immediately)", errno);
}
}
VLOG(8) << "AsyncSocket::connect succeeded immediately; this=" << this;
assert(readCallback_ == nullptr);
assert(writeReqHead_ == nullptr);
- state_ = StateEnum::ESTABLISHED;
- if (callback) {
- connectCallback_ = nullptr;
- callback->connectSuccess();
+ if (state_ != StateEnum::FAST_OPEN) {
+ state_ = StateEnum::ESTABLISHED;
+ }
+ invokeConnectSuccess();
+}
+
+int AsyncSocket::socketConnect(const struct sockaddr* saddr, socklen_t len) {
+#if __linux__
+ if (noTransparentTls_) {
+ // Ignore return value, errors are ok
+ setsockopt(fd_, SOL_SOCKET, SO_NO_TRANSPARENT_TLS, nullptr, 0);
+ }
+#endif
+ int rv = fsp::connect(fd_, saddr, len);
+ if (rv < 0) {
+ auto errnoCopy = errno;
+ if (errnoCopy == EINPROGRESS) {
+ scheduleConnectTimeout();
+ registerForConnectEvents();
+ } else {
+ throw AsyncSocketException(
+ AsyncSocketException::NOT_OPEN,
+ "connect failed (immediately)",
+ errnoCopy);
+ }
+ }
+ return rv;
+}
+
+void AsyncSocket::scheduleConnectTimeout() {
+ // Connection in progress.
+ auto timeout = connectTimeout_.count();
+ if (timeout > 0) {
+ // Start a timer in case the connection takes too long.
+ if (!writeTimeout_.scheduleTimeout(uint32_t(timeout))) {
+ throw AsyncSocketException(
+ AsyncSocketException::INTERNAL_ERROR,
+ withAddr("failed to schedule AsyncSocket connect timeout"));
+ }
+ }
+}
+
+void AsyncSocket::registerForConnectEvents() {
+ // Register for write events, so we'll
+ // be notified when the connection finishes/fails.
+ // Note that we don't register for a persistent event here.
+ assert(eventFlags_ == EventHandler::NONE);
+ eventFlags_ = EventHandler::WRITE;
+ if (!ioHandler_.registerHandler(eventFlags_)) {
+ throw AsyncSocketException(
+ AsyncSocketException::INTERNAL_ERROR,
+ withAddr("failed to register AsyncSocket connect handler"));
}
}
void AsyncSocket::cancelConnect() {
connectCallback_ = nullptr;
- if (state_ == StateEnum::CONNECTING) {
+ if (state_ == StateEnum::CONNECTING || state_ == StateEnum::FAST_OPEN) {
closeNow();
}
}
// If we are currently pending on write requests, immediately update
// writeTimeout_ with the new value.
if ((eventFlags_ & EventHandler::WRITE) &&
- (state_ != StateEnum::CONNECTING)) {
+ (state_ != StateEnum::CONNECTING && state_ != StateEnum::FAST_OPEN)) {
assert(state_ == StateEnum::ESTABLISHED);
assert((shutdownFlags_ & SHUT_WRITE) == 0);
if (sendTimeout_ > 0) {
return;
}
+ /* We are removing a read callback */
+ if (callback == nullptr &&
+ immediateReadHandler_.isLoopCallbackScheduled()) {
+ immediateReadHandler_.cancelLoopCallback();
+ }
+
if (shutdownFlags_ & SHUT_READ) {
// Reads have already been shut down on this socket.
//
switch ((StateEnum)state_) {
case StateEnum::CONNECTING:
+ case StateEnum::FAST_OPEN:
// For convenience, we allow the read callback to be set while we are
// still connecting. We just store the callback for now. Once the
// connection completes we'll register for read events.
iovec op;
op.iov_base = const_cast<void*>(buf);
op.iov_len = bytes;
- writeImpl(callback, &op, 1, std::move(unique_ptr<IOBuf>()), flags);
+ writeImpl(callback, &op, 1, unique_ptr<IOBuf>(), flags);
}
void AsyncSocket::writev(WriteCallback* callback,
const iovec* vec,
size_t count,
WriteFlags flags) {
- writeImpl(callback, vec, count, std::move(unique_ptr<IOBuf>()), flags);
+ writeImpl(callback, vec, count, unique_ptr<IOBuf>(), flags);
}
void AsyncSocket::writeChain(WriteCallback* callback, unique_ptr<IOBuf>&& buf,
WriteFlags flags) {
+ constexpr size_t kSmallSizeMax = 64;
size_t count = buf->countChainElements();
- if (count <= 64) {
- iovec vec[count];
+ if (count <= kSmallSizeMax) {
+ // suppress "warning: variable length array 'vec' is used [-Wvla]"
+ FOLLY_PUSH_WARNING;
+ FOLLY_GCC_DISABLE_WARNING(vla);
+ iovec vec[BOOST_PP_IF(FOLLY_HAVE_VLA, count, kSmallSizeMax)];
+ FOLLY_POP_WARNING;
+
writeChainImpl(callback, vec, count, std::move(buf), flags);
} else {
iovec* vec = new iovec[count];
uint32_t countWritten = 0;
uint32_t partialWritten = 0;
- int bytesWritten = 0;
+ ssize_t bytesWritten = 0;
bool mustRegister = false;
- if (state_ == StateEnum::ESTABLISHED && !connecting()) {
+ if ((state_ == StateEnum::ESTABLISHED || state_ == StateEnum::FAST_OPEN) &&
+ !connecting()) {
if (writeReqHead_ == nullptr) {
// If we are established and there are no other writes pending,
// we can attempt to perform the write immediately.
assert(writeReqTail_ == nullptr);
assert((eventFlags_ & EventHandler::WRITE) == 0);
- bytesWritten = performWrite(vec, count, flags,
- &countWritten, &partialWritten);
+ auto writeResult = performWrite(
+ vec, uint32_t(count), flags, &countWritten, &partialWritten);
+ bytesWritten = writeResult.writeReturn;
if (bytesWritten < 0) {
- AsyncSocketException ex(AsyncSocketException::INTERNAL_ERROR,
- withAddr("writev failed"), errno);
+ auto errnoCopy = errno;
+ if (writeResult.exception) {
+ return failWrite(__func__, callback, 0, *writeResult.exception);
+ }
+ AsyncSocketException ex(
+ AsyncSocketException::INTERNAL_ERROR,
+ withAddr("writev failed"),
+ errnoCopy);
return failWrite(__func__, callback, 0, ex);
} else if (countWritten == count) {
// We successfully wrote everything.
callback->writeSuccess();
}
return;
- } // else { continue writing the next writeReq }
- mustRegister = true;
+ } else { // continue writing the next writeReq
+ if (bufferCallback_) {
+ bufferCallback_->onEgressBuffered();
+ }
+ }
+ if (!connecting()) {
+ // Writes might put the socket back into connecting state
+ // if TFO is enabled, and using TFO fails.
+ // This means that write timeouts would not be active, however
+ // connect timeouts would affect this stage.
+ mustRegister = true;
+ }
}
} else if (!connecting()) {
// Invalid state for writing
// Create a new WriteRequest to add to the queue
WriteRequest* req;
try {
- req = WriteRequest::newRequest(callback, vec + countWritten,
- count - countWritten, std::move(ioBuf),
- flags);
+ req = BytesWriteRequest::newRequest(
+ this,
+ callback,
+ vec + countWritten,
+ uint32_t(count - countWritten),
+ partialWritten,
+ uint32_t(bytesWritten),
+ std::move(ioBuf),
+ flags);
} catch (const std::exception& ex) {
// we mainly expect to catch std::bad_alloc here
AsyncSocketException tex(AsyncSocketException::INTERNAL_ERROR,
withAddr(string("failed to append new WriteRequest: ") + ex.what()));
- return failWrite(__func__, callback, bytesWritten, tex);
+ return failWrite(__func__, callback, size_t(bytesWritten), tex);
}
- req->consume(0, partialWritten, bytesWritten);
+ req->consume();
if (writeReqTail_ == nullptr) {
assert(writeReqHead_ == nullptr);
writeReqHead_ = writeReqTail_ = req;
}
}
+void AsyncSocket::writeRequest(WriteRequest* req) {
+ if (writeReqTail_ == nullptr) {
+ assert(writeReqHead_ == nullptr);
+ writeReqHead_ = writeReqTail_ = req;
+ req->start();
+ } else {
+ writeReqTail_->append(req);
+ writeReqTail_ = req;
+ }
+}
+
void AsyncSocket::close() {
VLOG(5) << "AsyncSocket::close(): this=" << this << ", fd_=" << fd_
<< ", state=" << state_ << ", shutdownFlags="
switch (state_) {
case StateEnum::ESTABLISHED:
case StateEnum::CONNECTING:
- {
+ case StateEnum::FAST_OPEN: {
shutdownFlags_ |= (SHUT_READ | SHUT_WRITE);
state_ = StateEnum::CLOSED;
}
}
+ if (immediateReadHandler_.isLoopCallbackScheduled()) {
+ immediateReadHandler_.cancelLoopCallback();
+ }
+
if (fd_ >= 0) {
ioHandler_.changeHandlerFD(-1);
doClose();
}
- if (connectCallback_) {
- ConnectCallback* callback = connectCallback_;
- connectCallback_ = nullptr;
- callback->connectErr(socketClosedLocallyEx);
- }
+ invokeConnectErr(socketClosedLocallyEx);
failAllWrites(socketClosedLocallyEx);
}
// Shutdown writes on the file descriptor
- ::shutdown(fd_, SHUT_WR);
+ shutdown(fd_, SHUT_WR);
// Immediately fail all write requests
failAllWrites(socketShutdownForWritesEx);
// immediately shut down the write side of the socket.
shutdownFlags_ |= SHUT_WRITE_PENDING;
return;
+ case StateEnum::FAST_OPEN:
+ // In fast open state we haven't called connect yet, and if we shutdown
+ // the writes, we will never try to call connect, so shut everything down
+ shutdownFlags_ |= SHUT_WRITE;
+ // Immediately fail all write requests
+ failAllWrites(socketShutdownForWritesEx);
+ return;
case StateEnum::CLOSED:
case StateEnum::ERROR:
// We should never get here. SHUT_WRITE should always be set
}
bool AsyncSocket::good() const {
- return ((state_ == StateEnum::CONNECTING ||
- state_ == StateEnum::ESTABLISHED) &&
- (shutdownFlags_ == 0) && (eventBase_ != nullptr));
+ return (
+ (state_ == StateEnum::CONNECTING || state_ == StateEnum::FAST_OPEN ||
+ state_ == StateEnum::ESTABLISHED) &&
+ (shutdownFlags_ == 0) && (eventBase_ != nullptr));
}
bool AsyncSocket::error() const {
eventBase_ = eventBase;
ioHandler_.attachEventBase(eventBase);
writeTimeout_.attachEventBase(eventBase);
+ if (evbChangeCb_) {
+ evbChangeCb_->evbAttached(this);
+ }
}
void AsyncSocket::detachEventBase() {
eventBase_ = nullptr;
ioHandler_.detachEventBase();
writeTimeout_.detachEventBase();
+ if (evbChangeCb_) {
+ evbChangeCb_->evbDetached(this);
+ }
}
bool AsyncSocket::isDetachable() const {
}
void AsyncSocket::getLocalAddress(folly::SocketAddress* address) const {
- address->setFromLocalAddress(fd_);
+ if (!localAddr_.isInitialized()) {
+ localAddr_.setFromLocalAddress(fd_);
+ }
+ *address = localAddr_;
}
void AsyncSocket::getPeerAddress(folly::SocketAddress* address) const {
*address = addr_;
}
+bool AsyncSocket::getTFOSucceded() const {
+ return detail::tfo_succeeded(fd_);
+}
+
int AsyncSocket::setNoDelay(bool noDelay) {
if (fd_ < 0) {
VLOG(4) << "AsyncSocket::setNoDelay() called on non-open socket "
}
- if (setsockopt(fd_, IPPROTO_TCP, TCP_CONGESTION, cname.c_str(),
- cname.length() + 1) != 0) {
+ if (setsockopt(
+ fd_,
+ IPPROTO_TCP,
+ TCP_CONGESTION,
+ cname.c_str(),
+ socklen_t(cname.length() + 1)) != 0) {
int errnoCopy = errno;
VLOG(2) << "failed to update TCP_CONGESTION option on AsyncSocket "
<< this << "(fd=" << fd_ << ", state=" << state_ << "): "
}
}
-ssize_t AsyncSocket::performRead(void* buf, size_t buflen) {
- ssize_t bytes = recv(fd_, buf, buflen, MSG_DONTWAIT);
+AsyncSocket::ReadResult
+AsyncSocket::performRead(void** buf, size_t* buflen, size_t* /* offset */) {
+ VLOG(5) << "AsyncSocket::performRead() this=" << this << ", buf=" << *buf
+ << ", buflen=" << *buflen;
+
+ int recvFlags = 0;
+ if (peek_) {
+ recvFlags |= MSG_PEEK;
+ }
+
+ ssize_t bytes = recv(fd_, *buf, *buflen, MSG_DONTWAIT | recvFlags);
if (bytes < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
// No more data to read right now.
- return READ_BLOCKING;
+ return ReadResult(READ_BLOCKING);
} else {
- return READ_ERROR;
+ return ReadResult(READ_ERROR);
}
} else {
appBytesReceived_ += bytes;
- return bytes;
+ return ReadResult(bytes);
}
}
+void AsyncSocket::prepareReadBuffer(void** buf, size_t* buflen) {
+ // no matter what, buffer should be prepared for non-ssl socket
+ CHECK(readCallback_);
+ readCallback_->getReadBuffer(buf, buflen);
+}
+
void AsyncSocket::handleRead() noexcept {
VLOG(5) << "AsyncSocket::handleRead() this=" << this << ", fd=" << fd_
<< ", state=" << state_;
while (readCallback_ && eventBase_ == originalEventBase) {
// Get the buffer to read into.
void* buf = nullptr;
- size_t buflen = 0;
+ size_t buflen = 0, offset = 0;
try {
- readCallback_->getReadBuffer(&buf, &buflen);
+ prepareReadBuffer(&buf, &buflen);
+ VLOG(5) << "prepareReadBuffer() buf=" << buf << ", buflen=" << buflen;
} catch (const AsyncSocketException& ex) {
return failRead(__func__, ex);
} catch (const std::exception& ex) {
"non-exception type");
return failRead(__func__, ex);
}
- if (buf == nullptr || buflen == 0) {
+ if (!isBufferMovable_ && (buf == nullptr || buflen == 0)) {
AsyncSocketException ex(AsyncSocketException::BAD_ARGS,
"ReadCallback::getReadBuffer() returned "
"empty buffer");
}
// Perform the read
- ssize_t bytesRead = performRead(buf, buflen);
+ auto readResult = performRead(&buf, &buflen, &offset);
+ auto bytesRead = readResult.readReturn;
+ VLOG(4) << "this=" << this << ", AsyncSocket::handleRead() got "
+ << bytesRead << " bytes";
if (bytesRead > 0) {
- readCallback_->readDataAvailable(bytesRead);
+ if (!isBufferMovable_) {
+ readCallback_->readDataAvailable(bytesRead);
+ } else {
+ CHECK(kOpenSslModeMoveBufferOwnership);
+ VLOG(5) << "this=" << this << ", AsyncSocket::handleRead() got "
+ << "buf=" << buf << ", " << bytesRead << "/" << buflen
+ << ", offset=" << offset;
+ auto readBuf = folly::IOBuf::takeOwnership(buf, buflen);
+ readBuf->trimStart(offset);
+ readBuf->trimEnd(buflen - offset - bytesRead);
+ readCallback_->readBufferAvailable(std::move(readBuf));
+ }
+
// Fall through and continue around the loop if the read
// completely filled the available buffer.
// Note that readCallback_ may have been uninstalled or changed inside
// No more data to read right now.
return;
} else if (bytesRead == READ_ERROR) {
- AsyncSocketException ex(AsyncSocketException::INTERNAL_ERROR,
- withAddr("recv() failed"), errno);
+ readErr_ = READ_ERROR;
+ if (readResult.exception) {
+ return failRead(__func__, *readResult.exception);
+ }
+ auto errnoCopy = errno;
+ AsyncSocketException ex(
+ AsyncSocketException::INTERNAL_ERROR,
+ withAddr("recv() failed"),
+ errnoCopy);
return failRead(__func__, ex);
} else {
assert(bytesRead == READ_EOF);
+ readErr_ = READ_EOF;
// EOF
shutdownFlags_ |= SHUT_READ;
if (!updateEventRegistration(0, EventHandler::READ)) {
return;
}
if (maxReadsPerEvent_ && (++numReads >= maxReadsPerEvent_)) {
+ if (readCallback_ != nullptr) {
+ // We might still have data in the socket.
+ // (e.g. see comment in AsyncSSLSocket::checkForImmediateRead)
+ scheduleImmediateRead();
+ }
return;
}
}
void AsyncSocket::handleWrite() noexcept {
VLOG(5) << "AsyncSocket::handleWrite() this=" << this << ", fd=" << fd_
<< ", state=" << state_;
+ DestructorGuard dg(this);
+
if (state_ == StateEnum::CONNECTING) {
handleConnect();
return;
// (See the comment in handleRead() explaining how this can happen.)
EventBase* originalEventBase = eventBase_;
while (writeReqHead_ != nullptr && eventBase_ == originalEventBase) {
- uint32_t countWritten;
- uint32_t partialWritten;
- WriteFlags writeFlags = writeReqHead_->flags();
- if (writeReqHead_->getNext() != nullptr) {
- writeFlags = writeFlags | WriteFlags::CORK;
- }
- int bytesWritten = performWrite(writeReqHead_->getOps(),
- writeReqHead_->getOpCount(),
- writeFlags, &countWritten, &partialWritten);
- if (bytesWritten < 0) {
- AsyncSocketException ex(AsyncSocketException::INTERNAL_ERROR,
- withAddr("writev() failed"), errno);
+ auto writeResult = writeReqHead_->performWrite();
+ if (writeResult.writeReturn < 0) {
+ if (writeResult.exception) {
+ return failWrite(__func__, *writeResult.exception);
+ }
+ auto errnoCopy = errno;
+ AsyncSocketException ex(
+ AsyncSocketException::INTERNAL_ERROR,
+ withAddr("writev() failed"),
+ errnoCopy);
return failWrite(__func__, ex);
- } else if (countWritten == writeReqHead_->getOpCount()) {
+ } else if (writeReqHead_->isComplete()) {
// We finished this request
WriteRequest* req = writeReqHead_;
writeReqHead_ = req->getNext();
}
} else {
// Reads are still enabled, so we are only doing a half-shutdown
- ::shutdown(fd_, SHUT_WR);
+ shutdown(fd_, SHUT_WR);
}
}
}
// We'll continue around the loop, trying to write another request
} else {
// Partial write.
- writeReqHead_->consume(countWritten, partialWritten, bytesWritten);
+ if (bufferCallback_) {
+ bufferCallback_->onEgressBuffered();
+ }
+ writeReqHead_->consume();
// Stop after a partial write; it's highly likely that a subsequent write
// attempt will just return EAGAIN.
//
return;
}
}
+ if (!writeReqHead_ && bufferCallback_) {
+ bufferCallback_->onEgressBufferCleared();
+ }
}
void AsyncSocket::checkForImmediateRead() noexcept {
// one here just to make sure, in case one of our calling code paths ever
// changes.
DestructorGuard dg(this);
-
// If we have a readCallback_, make sure we enable read events. We
// may already be registered for reads if connectSuccess() set
// the read calback.
socklen_t len = sizeof(error);
int rv = getsockopt(fd_, SOL_SOCKET, SO_ERROR, &error, &len);
if (rv != 0) {
- AsyncSocketException ex(AsyncSocketException::INTERNAL_ERROR,
- withAddr("error calling getsockopt() after connect"),
- errno);
+ auto errnoCopy = errno;
+ AsyncSocketException ex(
+ AsyncSocketException::INTERNAL_ERROR,
+ withAddr("error calling getsockopt() after connect"),
+ errnoCopy);
VLOG(4) << "AsyncSocket::handleConnect(this=" << this << ", fd="
<< fd_ << " host=" << addr_.describe()
<< ") exception:" << ex.what();
// are still connecting we just abort the connect rather than waiting for
// it to complete.
assert((shutdownFlags_ & SHUT_READ) == 0);
- ::shutdown(fd_, SHUT_WR);
+ shutdown(fd_, SHUT_WR);
shutdownFlags_ |= SHUT_WRITE;
}
// callbacks (since the callbacks may call detachEventBase()).
EventBase* originalEventBase = eventBase_;
- // Call the connect callback.
- if (connectCallback_) {
- ConnectCallback* callback = connectCallback_;
- connectCallback_ = nullptr;
- callback->connectSuccess();
- }
-
+ invokeConnectSuccess();
// Note that the connect callback may have changed our state.
// (set or unset the read callback, called write(), closed the socket, etc.)
// The following code needs to handle these situations correctly.
if (state_ == StateEnum::CONNECTING) {
// connect() timed out
// Unregister for I/O events.
- AsyncSocketException ex(AsyncSocketException::TIMED_OUT,
- "connect timed out");
- failConnect(__func__, ex);
+ if (connectCallback_) {
+ AsyncSocketException ex(
+ AsyncSocketException::TIMED_OUT, "connect timed out");
+ failConnect(__func__, ex);
+ } else {
+ // we faced a connect error without a connect callback, which could
+ // happen due to TFO.
+ AsyncSocketException ex(
+ AsyncSocketException::TIMED_OUT, "write timed out during connection");
+ failWrite(__func__, ex);
+ }
} else {
// a normal write operation timed out
- assert(state_ == StateEnum::ESTABLISHED);
AsyncSocketException ex(AsyncSocketException::TIMED_OUT, "write timed out");
failWrite(__func__, ex);
}
}
-ssize_t AsyncSocket::performWrite(const iovec* vec,
- uint32_t count,
- WriteFlags flags,
- uint32_t* countWritten,
- uint32_t* partialWritten) {
+ssize_t AsyncSocket::tfoSendMsg(int fd, struct msghdr* msg, int msg_flags) {
+ return detail::tfo_sendmsg(fd, msg, msg_flags);
+}
+
+AsyncSocket::WriteResult
+AsyncSocket::sendSocketMessage(int fd, struct msghdr* msg, int msg_flags) {
+ ssize_t totalWritten = 0;
+ if (state_ == StateEnum::FAST_OPEN) {
+ sockaddr_storage addr;
+ auto len = addr_.getAddress(&addr);
+ msg->msg_name = &addr;
+ msg->msg_namelen = len;
+ totalWritten = tfoSendMsg(fd_, msg, msg_flags);
+ if (totalWritten >= 0) {
+ tfoFinished_ = true;
+ state_ = StateEnum::ESTABLISHED;
+      // We schedule this asynchronously so that we don't end up
+ // invoking initial read or write while a write is in progress.
+ scheduleInitialReadWrite();
+ } else if (errno == EINPROGRESS) {
+ VLOG(4) << "TFO falling back to connecting";
+      // A normal sendmsg doesn't return EINPROGRESS; however,
+      // TFO might fall back to connecting if there is no
+      // cookie.
+ state_ = StateEnum::CONNECTING;
+ try {
+ scheduleConnectTimeout();
+ registerForConnectEvents();
+ } catch (const AsyncSocketException& ex) {
+ return WriteResult(
+ WRITE_ERROR, folly::make_unique<AsyncSocketException>(ex));
+ }
+      // Pretend that no bytes were written and report EAGAIN to the caller.
+ errno = EAGAIN;
+ totalWritten = -1;
+ } else if (errno == EOPNOTSUPP) {
+ // Try falling back to connecting.
+ VLOG(4) << "TFO not supported";
+ state_ = StateEnum::CONNECTING;
+ try {
+ int ret = socketConnect((const sockaddr*)&addr, len);
+ if (ret == 0) {
+ // connect succeeded immediately
+ // Treat this like no data was written.
+ state_ = StateEnum::ESTABLISHED;
+ scheduleInitialReadWrite();
+ }
+        // If the connect did not throw, report that no bytes
+        // were written and let the caller retry.
+ errno = EAGAIN;
+ totalWritten = -1;
+ } catch (const AsyncSocketException& ex) {
+ return WriteResult(
+ WRITE_ERROR, folly::make_unique<AsyncSocketException>(ex));
+ }
+ } else if (errno == EAGAIN) {
+ // Normally sendmsg would indicate that the write would block.
+ // However in the fast open case, it would indicate that sendmsg
+ // fell back to a connect. This is a return code from connect()
+ // instead, and is an error condition indicating no fds available.
+ return WriteResult(
+ WRITE_ERROR,
+ folly::make_unique<AsyncSocketException>(
+ AsyncSocketException::UNKNOWN, "No more free local ports"));
+ }
+ } else {
+ totalWritten = ::sendmsg(fd, msg, msg_flags);
+ }
+ return WriteResult(totalWritten);
+}
+
+AsyncSocket::WriteResult AsyncSocket::performWrite(
+ const iovec* vec,
+ uint32_t count,
+ WriteFlags flags,
+ uint32_t* countWritten,
+ uint32_t* partialWritten) {
// We use sendmsg() instead of writev() so that we can pass in MSG_NOSIGNAL
// We correctly handle EPIPE errors, so we never want to receive SIGPIPE
// (since it may terminate the program if the main program doesn't explicitly
msg.msg_name = nullptr;
msg.msg_namelen = 0;
msg.msg_iov = const_cast<iovec *>(vec);
-#ifdef IOV_MAX // not defined on Android
- msg.msg_iovlen = std::min(count, (uint32_t)IOV_MAX);
-#else
- msg.msg_iovlen = std::min(count, (uint32_t)UIO_MAXIOV);
-#endif
+ msg.msg_iovlen = std::min<size_t>(count, kIovMax);
msg.msg_control = nullptr;
msg.msg_controllen = 0;
msg.msg_flags = 0;
// marks that this is the last byte of a record (response)
msg_flags |= MSG_EOR;
}
- ssize_t totalWritten = ::sendmsg(fd_, &msg, msg_flags);
+ auto writeResult = sendSocketMessage(fd_, &msg, msg_flags);
+ auto totalWritten = writeResult.writeReturn;
if (totalWritten < 0) {
- if (errno == EAGAIN) {
+ bool tryAgain = (errno == EAGAIN);
+#ifdef __APPLE__
+ // Apple has a bug where doing a second write on a socket which we
+ // have opened with TFO causes an ENOTCONN to be thrown. However the
+ // socket is really connected, so treat ENOTCONN as a EAGAIN until
+ // this bug is fixed.
+ tryAgain |= (errno == ENOTCONN);
+#endif
+ if (!writeResult.exception && tryAgain) {
// TCP buffer is full; we can't write any more data right now.
*countWritten = 0;
*partialWritten = 0;
- return 0;
+ return WriteResult(0);
}
// error
*countWritten = 0;
*partialWritten = 0;
- return -1;
+ return writeResult;
}
appBytesWritten_ += totalWritten;
uint32_t bytesWritten;
uint32_t n;
- for (bytesWritten = totalWritten, n = 0; n < count; ++n) {
+ for (bytesWritten = uint32_t(totalWritten), n = 0; n < count; ++n) {
const iovec* v = vec + n;
if (v->iov_len > bytesWritten) {
// Partial write finished in the middle of this iovec
*countWritten = n;
*partialWritten = bytesWritten;
- return totalWritten;
+ return WriteResult(totalWritten);
}
- bytesWritten -= v->iov_len;
+ bytesWritten -= uint32_t(v->iov_len);
}
assert(bytesWritten == 0);
*countWritten = n;
*partialWritten = 0;
- return totalWritten;
+ return WriteResult(totalWritten);
}
/**
}
}
-void AsyncSocket::finishFail() {
- assert(state_ == StateEnum::ERROR);
- assert(getDestructorGuardCount() > 0);
-
- AsyncSocketException ex(AsyncSocketException::INTERNAL_ERROR,
- withAddr("socket closing after error"));
- if (connectCallback_) {
- ConnectCallback* callback = connectCallback_;
- connectCallback_ = nullptr;
- callback->connectErr(ex);
- }
-
+void AsyncSocket::invokeAllErrors(const AsyncSocketException& ex) {
+ invokeConnectErr(ex);
failAllWrites(ex);
if (readCallback_) {
}
}
+void AsyncSocket::finishFail() {
+ assert(state_ == StateEnum::ERROR);
+ assert(getDestructorGuardCount() > 0);
+
+ AsyncSocketException ex(
+ AsyncSocketException::INTERNAL_ERROR,
+ withAddr("socket closing after error"));
+ invokeAllErrors(ex);
+}
+
+void AsyncSocket::finishFail(const AsyncSocketException& ex) {
+ assert(state_ == StateEnum::ERROR);
+ assert(getDestructorGuardCount() > 0);
+ invokeAllErrors(ex);
+}
+
void AsyncSocket::fail(const char* fn, const AsyncSocketException& ex) {
VLOG(4) << "AsyncSocket(this=" << this << ", fd=" << fd_ << ", state="
<< state_ << " host=" << addr_.describe()
<< ex.what();
startFail();
- if (connectCallback_ != nullptr) {
- ConnectCallback* callback = connectCallback_;
- connectCallback_ = nullptr;
- callback->connectErr(ex);
- }
-
- finishFail();
+ invokeConnectErr(ex);
+ finishFail(ex);
}
void AsyncSocket::failRead(const char* fn, const AsyncSocketException& ex) {
WriteRequest* req = writeReqHead_;
writeReqHead_ = req->getNext();
WriteCallback* callback = req->getCallback();
- uint32_t bytesWritten = req->getBytesWritten();
+ uint32_t bytesWritten = req->getTotalBytesWritten();
req->destroy();
if (callback) {
callback->writeErr(bytesWritten, ex);
writeReqHead_ = req->getNext();
WriteCallback* callback = req->getCallback();
if (callback) {
- callback->writeErr(req->getBytesWritten(), ex);
+ callback->writeErr(req->getTotalBytesWritten(), ex);
}
req->destroy();
}
AsyncSocketException ex(AsyncSocketException::ALREADY_OPEN,
"connect() called with socket in invalid state");
+ connectEndTime_ = std::chrono::steady_clock::now();
if (state_ == StateEnum::CLOSED || state_ == StateEnum::ERROR) {
if (callback) {
callback->connectErr(ex);
}
}
+void AsyncSocket::invokeConnectErr(const AsyncSocketException& ex) {
+ connectEndTime_ = std::chrono::steady_clock::now();
+ if (connectCallback_) {
+ ConnectCallback* callback = connectCallback_;
+ connectCallback_ = nullptr;
+ callback->connectErr(ex);
+ }
+}
+
+void AsyncSocket::invokeConnectSuccess() {
+ connectEndTime_ = std::chrono::steady_clock::now();
+ if (connectCallback_) {
+ ConnectCallback* callback = connectCallback_;
+ connectCallback_ = nullptr;
+ callback->connectSuccess();
+ }
+}
+
void AsyncSocket::invalidState(ReadCallback* callback) {
VLOG(4) << "AsyncSocket(this=" << this << ", fd=" << fd_
<< "): setReadCallback(" << callback
return s + " (peer=" + peer.describe() + ", local=" + local.describe() + ")";
}
+void AsyncSocket::setBufferCallback(BufferCallback* cb) {
+ bufferCallback_ = cb;
+}
+
} // folly