2 * Copyright 2016 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include <folly/io/async/AsyncSocket.h>
19 #include <folly/io/async/EventBase.h>
20 #include <folly/io/async/EventHandler.h>
21 #include <folly/SocketAddress.h>
22 #include <folly/io/IOBuf.h>
23 #include <folly/portability/Fcntl.h>
24 #include <folly/portability/SysUio.h>
31 #include <sys/types.h>
32 #include <sys/socket.h>
33 #include <netinet/in.h>
34 #include <netinet/tcp.h>
35 #include <boost/preprocessor/control/if.hpp>
38 using std::unique_ptr;
// Static member initializers.
const AsyncSocket::OptionMap AsyncSocket::emptyOptionMap;

// Pre-built exception instances used to fail pending callbacks when the
// socket is closed or shut down locally; both report END_OF_FILE.
const AsyncSocketException socketClosedLocallyEx(
    AsyncSocketException::END_OF_FILE, "socket closed locally");
const AsyncSocketException socketShutdownForWritesEx(
    AsyncSocketException::END_OF_FILE, "socket shutdown for writes");
50 // TODO: It might help performance to provide a version of BytesWriteRequest that
51 // users could derive from, so we can avoid the extra allocation for each call
52 // to write()/writev(). We could templatize TFramedAsyncChannel just like the
53 // protocols are currently templatized for transports.
55 // We would need the version for external users where they provide the iovec
56 // storage space, and only our internal version would allocate it at the end of
/* The default WriteRequest implementation, used for write(), writev() and
 * writeChain().
 *
 * A new BytesWriteRequest operation is allocated on the heap for all write
 * operations that cannot be completed immediately.
 *
 * NOTE(review): this excerpt is missing several source lines (access
 * specifiers, some parameter declarations, and closing braces); the visible
 * tokens are preserved verbatim.
 */
class AsyncSocket::BytesWriteRequest : public AsyncSocket::WriteRequest {
  // Factory method: allocates the request object and its variable-length
  // trailing iovec array in one malloc() block, then placement-news into it.
  static BytesWriteRequest* newRequest(AsyncSocket* socket,
                                       WriteCallback* callback,
                                       uint32_t partialWritten,
                                       uint32_t bytesWritten,
                                       unique_ptr<IOBuf>&& ioBuf,
    // Since we put a variable size iovec array at the end
    // of each BytesWriteRequest, we have to manually allocate the memory.
    void* buf = malloc(sizeof(BytesWriteRequest) +
                       (opCount * sizeof(struct iovec)));
      throw std::bad_alloc();

    return new(buf) BytesWriteRequest(socket, callback, ops, opCount,
                                      partialWritten, bytesWritten,
                                      std::move(ioBuf), flags);

  // Counterpart of the malloc() in newRequest(): invoke the destructor
  // manually; plain `delete` would be wrong for placement-new storage.
  void destroy() override {
    this->~BytesWriteRequest();

  // Attempt to write the remaining ops. CORK is OR'd in when another
  // request is queued behind this one, hinting the kernel to coalesce.
  WriteResult performWrite() override {
    WriteFlags writeFlags = flags_;
    if (getNext() != nullptr) {
      writeFlags = writeFlags | WriteFlags::CORK;
    return socket_->performWrite(
        getOps(), getOpCount(), writeFlags, &opsWritten_, &partialBytes_);

  // True once every remaining iovec entry has been fully written.
  bool isComplete() override {
    return opsWritten_ == getOpCount();

  // Account for the result of the last performWrite(): skip fully-written
  // ops, release their IOBufs, and trim the partially-written current op.
  void consume() override {
    // Advance opIndex_ forward by opsWritten_
    opIndex_ += opsWritten_;
    assert(opIndex_ < opCount_);

    // If we've finished writing any IOBufs, release them
    for (uint32_t i = opsWritten_; i != 0; --i) {
      ioBuf_ = ioBuf_->pop();

    // Move partialBytes_ forward into the current iovec buffer
    struct iovec* currentOp = writeOps_ + opIndex_;
    assert((partialBytes_ < currentOp->iov_len) || (currentOp->iov_len == 0));
    currentOp->iov_base =
        reinterpret_cast<uint8_t*>(currentOp->iov_base) + partialBytes_;
    currentOp->iov_len -= partialBytes_;

    // Increment the totalBytesWritten_ count by bytesWritten_
    totalBytesWritten_ += bytesWritten_;

  // Constructor is private-by-convention: callers must use newRequest(),
  // which provides the storage for the trailing writeOps_ array.
  BytesWriteRequest(AsyncSocket* socket,
                    WriteCallback* callback,
                    const struct iovec* ops,
                    uint32_t partialBytes,
                    uint32_t bytesWritten,
                    unique_ptr<IOBuf>&& ioBuf,
      : AsyncSocket::WriteRequest(socket, callback)
      , ioBuf_(std::move(ioBuf))
      , partialBytes_(partialBytes)
      , bytesWritten_(bytesWritten) {
    memcpy(writeOps_, ops, sizeof(*ops) * opCount_);

  // private destructor, to ensure callers use destroy()
  ~BytesWriteRequest() override = default;

  // Pointer to the first not-yet-completed iovec entry.
  const struct iovec* getOps() const {
    assert(opCount_ > opIndex_);
    return writeOps_ + opIndex_;

  // Number of iovec entries still to be written.
  uint32_t getOpCount() const {
    assert(opCount_ > opIndex_);
    return opCount_ - opIndex_;

  uint32_t opCount_;         ///< number of entries in writeOps_
  uint32_t opIndex_;         ///< current index into writeOps_
  WriteFlags flags_;         ///< set for WriteFlags
  unique_ptr<IOBuf> ioBuf_;  ///< underlying IOBuf, or nullptr if N/A

  // for consume(), how much we wrote on the last write
  uint32_t opsWritten_;      ///< complete ops written
  uint32_t partialBytes_;    ///< partial bytes of incomplete op written
  ssize_t bytesWritten_;     ///< bytes written altogether

  struct iovec writeOps_[];  ///< write operation(s) list (flexible array)
// Default constructor: no EventBase attached; unusable until attached.
// NOTE(review): the init() calls / closing braces of these constructors
// appear to be missing from this excerpt.
AsyncSocket::AsyncSocket()
    : eventBase_(nullptr)
    , writeTimeout_(this, nullptr)
    , ioHandler_(this, nullptr)
    , immediateReadHandler_(this) {
  VLOG(5) << "new AsyncSocket()";

// Construct an unconnected socket driven by the given EventBase.
AsyncSocket::AsyncSocket(EventBase* evb)
    , writeTimeout_(this, evb)
    , ioHandler_(this, evb)
    , immediateReadHandler_(this) {
  VLOG(5) << "new AsyncSocket(" << this << ", evb=" << evb << ")";

// Convenience constructor: immediately starts connecting to `address`
// (no ConnectCallback; errors surface through the socket state).
AsyncSocket::AsyncSocket(EventBase* evb,
                         const folly::SocketAddress& address,
                         uint32_t connectTimeout)
  connect(nullptr, address, connectTimeout);

// Convenience constructor: immediately starts connecting to ip:port.
AsyncSocket::AsyncSocket(EventBase* evb,
                         const std::string& ip,
                         uint32_t connectTimeout)
  connect(nullptr, ip, port, connectTimeout);

// Wrap an already-connected file descriptor: the handler takes over `fd`
// and the socket goes straight to ESTABLISHED.
AsyncSocket::AsyncSocket(EventBase* evb, int fd)
    , writeTimeout_(this, evb)
    , ioHandler_(this, evb, fd)
    , immediateReadHandler_(this) {
  VLOG(5) << "new AsyncSocket(" << this << ", evb=" << evb << ", fd="
  state_ = StateEnum::ESTABLISHED;
// init() method, since constructor forwarding isn't supported in most
// compilers this code targets: resets every member to its pristine
// (unconnected) value. Shared by all constructors.
void AsyncSocket::init() {
  // May run before an EventBase is attached; otherwise must be on its thread.
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
  state_ = StateEnum::UNINIT;
  eventFlags_ = EventHandler::NONE;
  maxReadsPerEvent_ = 16;  // cap reads per event-loop iteration for fairness
  connectCallback_ = nullptr;
  readCallback_ = nullptr;
  writeReqHead_ = nullptr;
  writeReqTail_ = nullptr;
  shutdownSocketSet_ = nullptr;
  appBytesWritten_ = 0;
  appBytesReceived_ = 0;
// Destructor only logs; actual teardown happens via destroy()/closeNow()
// under the DelayedDestruction protocol.
AsyncSocket::~AsyncSocket() {
  VLOG(7) << "actual destruction of AsyncSocket(this=" << this
          << ", evb=" << eventBase_ << ", fd=" << fd_
          << ", state=" << state_ << ")";
// DelayedDestruction entry point: close immediately, then let the base
// class decide between immediate and deferred deletion.
void AsyncSocket::destroy() {
  VLOG(5) << "AsyncSocket::destroy(this=" << this << ", evb=" << eventBase_
          << ", fd=" << fd_ << ", state=" << state_;
  // When destroy is called, close the socket immediately.
  // NOTE(review): the closeNow() call implied by this comment is missing
  // from this excerpt.

  // Then call DelayedDestruction::destroy() to take care of
  // whether or not we need immediate or delayed destruction
  DelayedDestruction::destroy();
// Relinquish ownership of the file descriptor to the caller: pending
// callbacks are failed, but the fd itself is left open and returned.
int AsyncSocket::detachFd() {
  VLOG(6) << "AsyncSocket::detachFd(this=" << this << ", fd=" << fd_
          << ", evb=" << eventBase_ << ", state=" << state_
          << ", events=" << std::hex << eventFlags_ << ")";
  // Extract the fd, and set fd_ to -1 first, so closeNow() won't
  // actually close the descriptor.
  if (shutdownSocketSet_) {
    shutdownSocketSet_->remove(fd_);
  // Call closeNow() to invoke all pending callbacks with an error.
  // Update the EventHandler to stop using this fd.
  // This can only be done after closeNow() unregisters the handler.
  ioHandler_.changeHandlerFD(-1);
278 const folly::SocketAddress& AsyncSocket::anyAddress() {
279 static const folly::SocketAddress anyAddress =
280 folly::SocketAddress("0.0.0.0", 0);
// Move this socket's fd from the old ShutdownSocketSet (if any) to `newSS`;
// a no-op when the set is unchanged. A -1 fd is never registered.
void AsyncSocket::setShutdownSocketSet(ShutdownSocketSet* newSS) {
  if (shutdownSocketSet_ == newSS) {
  if (shutdownSocketSet_ && fd_ != -1) {
    shutdownSocketSet_->remove(fd_);
  shutdownSocketSet_ = newSS;
  if (shutdownSocketSet_ && fd_ != -1) {
    shutdownSocketSet_->add(fd_);
// Mark the fd close-on-exec so it is not leaked into child processes.
// Throws AsyncSocketException (INTERNAL_ERROR) if fcntl() fails.
void AsyncSocket::setCloseOnExec() {
  int rv = fcntl(fd_, F_SETFD, FD_CLOEXEC);
    // Copy errno before any call that might clobber it.
    auto errnoCopy = errno;
    throw AsyncSocketException(
        AsyncSocketException::INTERNAL_ERROR,
        withAddr("failed to set close-on-exec flag"),
// Begin an asynchronous connect to `address`. Creates the socket, puts it in
// non-blocking mode, optionally binds to `bindAddr`, applies `options`, and
// issues ::connect(). On EINPROGRESS we arm the connect timeout and register
// for a (non-persistent) write event; on immediate success we invoke the
// callback directly. All failures are routed through failConnect() — this
// function is noexcept.
void AsyncSocket::connect(ConnectCallback* callback,
                          const folly::SocketAddress& address,
                          const OptionMap &options,
                          const folly::SocketAddress& bindAddr) noexcept {
  DestructorGuard dg(this);
  assert(eventBase_->isInEventBaseThread());

  // Make sure we're in the uninitialized state
  if (state_ != StateEnum::UNINIT) {
    return invalidState(callback);

  connectTimeout_ = std::chrono::milliseconds(timeout);
  connectStartTime_ = std::chrono::steady_clock::now();
  // Make connect end time at least >= connectStartTime.
  connectEndTime_ = connectStartTime_;

  state_ = StateEnum::CONNECTING;
  connectCallback_ = callback;

  sockaddr_storage addrStorage;
  sockaddr* saddr = reinterpret_cast<sockaddr*>(&addrStorage);

  // Technically the first parameter should actually be a protocol family
  // constant (PF_xxx) rather than an address family (AF_xxx), but the
  // distinction is mainly just historical. In pretty much all
  // implementations the PF_foo and AF_foo constants are identical.
  fd_ = socket(address.getFamily(), SOCK_STREAM, 0);
    auto errnoCopy = errno;
    throw AsyncSocketException(
        AsyncSocketException::INTERNAL_ERROR,
        withAddr("failed to create socket"),
  if (shutdownSocketSet_) {
    shutdownSocketSet_->add(fd_);
  ioHandler_.changeHandlerFD(fd_);

  // Put the socket in non-blocking mode
  int flags = fcntl(fd_, F_GETFL, 0);
    auto errnoCopy = errno;
    throw AsyncSocketException(
        AsyncSocketException::INTERNAL_ERROR,
        withAddr("failed to get socket flags"),
  int rv = fcntl(fd_, F_SETFL, flags | O_NONBLOCK);
    auto errnoCopy = errno;
    throw AsyncSocketException(
        AsyncSocketException::INTERNAL_ERROR,
        withAddr("failed to put socket in non-blocking mode"),

#if !defined(MSG_NOSIGNAL) && defined(F_SETNOSIGPIPE)
  // iOS and OS X don't support MSG_NOSIGNAL; set F_SETNOSIGPIPE instead
  rv = fcntl(fd_, F_SETNOSIGPIPE, 1);
    auto errnoCopy = errno;
    throw AsyncSocketException(
        AsyncSocketException::INTERNAL_ERROR,
        "failed to enable F_SETNOSIGPIPE on socket",

  // By default, turn on TCP_NODELAY
  // If setNoDelay() fails, we continue anyway; this isn't a fatal error.
  // setNoDelay() will log an error message if it fails.
  if (address.getFamily() != AF_UNIX) {
    (void)setNoDelay(true);

  VLOG(5) << "AsyncSocket::connect(this=" << this << ", evb=" << eventBase_
          << ", fd=" << fd_ << ", host=" << address.describe().c_str();

  // Bind to a specific local address/port if one was requested.
  if (bindAddr != anyAddress()) {
    // SO_REUSEADDR so the explicit bind doesn't fail on a lingering port.
    if (::setsockopt(fd_, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one))) {
      auto errnoCopy = errno;
      throw AsyncSocketException(
          AsyncSocketException::NOT_OPEN,
          "failed to setsockopt prior to bind on " + bindAddr.describe(),

    bindAddr.getAddress(&addrStorage);
    if (::bind(fd_, saddr, bindAddr.getActualSize()) != 0) {
      auto errnoCopy = errno;
      throw AsyncSocketException(
          AsyncSocketException::NOT_OPEN,
          "failed to bind to async socket: " + bindAddr.describe(),

  // Apply the additional options if any.
  for (const auto& opt: options) {
    int rv = opt.first.apply(fd_, opt.second);
      auto errnoCopy = errno;
      throw AsyncSocketException(
          AsyncSocketException::INTERNAL_ERROR,
          withAddr("failed to set socket option"),

  // Perform the connect()
  address.getAddress(&addrStorage);
  rv = ::connect(fd_, saddr, address.getActualSize());
    auto errnoCopy = errno;
    if (errnoCopy == EINPROGRESS) {
      // Connection in progress — this is the normal non-blocking path.
      // Start a timer in case the connection takes too long.
      if (!writeTimeout_.scheduleTimeout(timeout)) {
        throw AsyncSocketException(AsyncSocketException::INTERNAL_ERROR,
            withAddr("failed to schedule AsyncSocket connect timeout"));

      // Register for write events, so we'll
      // be notified when the connection finishes/fails.
      // Note that we don't register for a persistent event here.
      assert(eventFlags_ == EventHandler::NONE);
      eventFlags_ = EventHandler::WRITE;
      if (!ioHandler_.registerHandler(eventFlags_)) {
        throw AsyncSocketException(AsyncSocketException::INTERNAL_ERROR,
            withAddr("failed to register AsyncSocket connect handler"));
      // Any errno other than EINPROGRESS is an immediate failure.
      throw AsyncSocketException(
          AsyncSocketException::NOT_OPEN,
          "connect failed (immediately)",

    // If we're still here the connect() succeeded immediately.
    // Fall through to call the callback outside of this try...catch block
  } catch (const AsyncSocketException& ex) {
    return failConnect(__func__, ex);
  } catch (const std::exception& ex) {
    // shouldn't happen, but handle it just in case
    VLOG(4) << "AsyncSocket::connect(this=" << this << ", fd=" << fd_
            << "): unexpected " << typeid(ex).name() << " exception: "
    AsyncSocketException tex(AsyncSocketException::INTERNAL_ERROR,
        withAddr(string("unexpected exception: ") +
    return failConnect(__func__, tex);

  // The connection succeeded immediately.
  // The read callback may not have been set yet, and no writes may be pending
  // yet, so we don't have to register for any events at the moment.
  VLOG(8) << "AsyncSocket::connect succeeded immediately; this=" << this;
  assert(readCallback_ == nullptr);
  assert(writeReqHead_ == nullptr);
  state_ = StateEnum::ESTABLISHED;
  invokeConnectSuccess();
// Overload taking a host string and port: resolves via SocketAddress and
// delegates to the address-based connect(). SocketAddress construction can
// throw (e.g. bad ip string), which is converted into a failConnect().
void AsyncSocket::connect(ConnectCallback* callback,
                          const string& ip, uint16_t port,
                          const OptionMap &options) noexcept {
  DestructorGuard dg(this);
  // Record the callback first so failConnect() can reach it even if
  // SocketAddress construction throws below.
  connectCallback_ = callback;
  connect(callback, folly::SocketAddress(ip, port), timeout, options);
  } catch (const std::exception& ex) {
    AsyncSocketException tex(AsyncSocketException::INTERNAL_ERROR,
    return failConnect(__func__, tex);
// Drop the connect callback; if a connect is still in flight, the
// in-progress attempt is abandoned (tail of function not in this excerpt).
void AsyncSocket::cancelConnect() {
  connectCallback_ = nullptr;
  if (state_ == StateEnum::CONNECTING) {
// Set the per-write timeout (milliseconds; 0 disables). If a write is
// already pending, the running timer is rescheduled (or cancelled) now.
void AsyncSocket::setSendTimeout(uint32_t milliseconds) {
  sendTimeout_ = milliseconds;
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());

  // If we are currently pending on write requests, immediately update
  // writeTimeout_ with the new value.
  if ((eventFlags_ & EventHandler::WRITE) &&
      (state_ != StateEnum::CONNECTING)) {
    assert(state_ == StateEnum::ESTABLISHED);
    assert((shutdownFlags_ & SHUT_WRITE) == 0);
    if (sendTimeout_ > 0) {
      if (!writeTimeout_.scheduleTimeout(sendTimeout_)) {
        AsyncSocketException ex(AsyncSocketException::INTERNAL_ERROR,
            withAddr("failed to reschedule send timeout in setSendTimeout"));
        return failWrite(__func__, ex);
      // Timeout disabled: stop any running timer.
      writeTimeout_.cancelTimeout();
// Install (or clear, with nullptr) the read callback, adjusting the READ
// event registration to match. Allowed while CONNECTING (callback is stored
// and registration happens when the connection completes) and after reads
// are shut down only for clearing.
void AsyncSocket::setReadCB(ReadCallback *callback) {
  VLOG(6) << "AsyncSocket::setReadCallback() this=" << this << ", fd=" << fd_
          << ", callback=" << callback << ", state=" << state_;

  // Short circuit if callback is the same as the existing readCallback_.
  //
  // Note that this is needed for proper functioning during some cleanup cases.
  // During cleanup we allow setReadCallback(nullptr) to be called even if the
  // read callback is already unset and we have been detached from an event
  // base. This check prevents us from asserting
  // eventBase_->isInEventBaseThread() when eventBase_ is nullptr.
  if (callback == readCallback_) {

  /* We are removing a read callback */
  if (callback == nullptr &&
      immediateReadHandler_.isLoopCallbackScheduled()) {
    immediateReadHandler_.cancelLoopCallback();

  if (shutdownFlags_ & SHUT_READ) {
    // Reads have already been shut down on this socket.
    //
    // Allow setReadCallback(nullptr) to be called in this case, but don't
    // allow a new callback to be set.
    //
    // For example, setReadCallback(nullptr) can happen after an error if we
    // invoke some other error callback before invoking readError(). The other
    // error callback that is invoked first may go ahead and clear the read
    // callback before we get a chance to invoke readError().
    if (callback != nullptr) {
      return invalidState(callback);
    assert((eventFlags_ & EventHandler::READ) == 0);
    readCallback_ = nullptr;

  DestructorGuard dg(this);
  assert(eventBase_->isInEventBaseThread());

  switch ((StateEnum)state_) {
    case StateEnum::CONNECTING:
      // For convenience, we allow the read callback to be set while we are
      // still connecting. We just store the callback for now. Once the
      // connection completes we'll register for read events.
      readCallback_ = callback;
    case StateEnum::ESTABLISHED:
      readCallback_ = callback;
      uint16_t oldFlags = eventFlags_;
      // Enable READ when a callback is present, disable it otherwise.
      eventFlags_ |= EventHandler::READ;
      eventFlags_ &= ~EventHandler::READ;

      // Update our registration if our flags have changed
      if (eventFlags_ != oldFlags) {
        // We intentionally ignore the return value here.
        // updateEventRegistration() will move us into the error state if it
        // fails, and we don't need to do anything else here afterwards.
        (void)updateEventRegistration();

      // Data may already be buffered; try reading before the next event.
      checkForImmediateRead();
    case StateEnum::CLOSED:
    case StateEnum::ERROR:
      // We should never reach here. SHUT_READ should always be set
      // if we are in STATE_CLOSED or STATE_ERROR.
      return invalidState(callback);
    case StateEnum::UNINIT:
      // We do not allow setReadCallback() to be called before we start
      // connecting.
      return invalidState(callback);

  // We don't put a default case in the switch statement, so that the compiler
  // will warn us to update the switch statement if a new state is added.
  return invalidState(callback);
624 AsyncSocket::ReadCallback* AsyncSocket::getReadCallback() const {
625 return readCallback_;
// Write a single contiguous buffer: wrap it in one iovec and delegate to
// writeImpl() with no owning IOBuf.
void AsyncSocket::write(WriteCallback* callback,
                        const void* buf, size_t bytes, WriteFlags flags) {
  // const_cast is required by the iovec API; the data is not modified.
  op.iov_base = const_cast<void*>(buf);
  writeImpl(callback, &op, 1, unique_ptr<IOBuf>(), flags);
// Scatter write: forward the caller's iovec array to writeImpl() with no
// owning IOBuf (the caller retains buffer ownership).
void AsyncSocket::writev(WriteCallback* callback,
  writeImpl(callback, vec, count, unique_ptr<IOBuf>(), flags);
// Write an IOBuf chain, taking ownership of it. For short chains the iovec
// array lives on the stack (VLA where supported, else a fixed-size array);
// longer chains allocate it on the heap.
void AsyncSocket::writeChain(WriteCallback* callback, unique_ptr<IOBuf>&& buf,
  constexpr size_t kSmallSizeMax = 64;
  size_t count = buf->countChainElements();
  if (count <= kSmallSizeMax) {
    iovec vec[BOOST_PP_IF(FOLLY_HAVE_VLA, count, kSmallSizeMax)];
    writeChainImpl(callback, vec, count, std::move(buf), flags);
    // Chain too long for the stack buffer: heap-allocate the iovec array.
    iovec* vec = new iovec[count];
    writeChainImpl(callback, vec, count, std::move(buf), flags);
// Shared helper for writeChain(): fill `vec` from the IOBuf chain (fillIov
// returns the number of entries actually used) and hand off to writeImpl(),
// which takes ownership of the chain.
void AsyncSocket::writeChainImpl(WriteCallback* callback, iovec* vec,
    size_t count, unique_ptr<IOBuf>&& buf, WriteFlags flags) {
  size_t veclen = buf->fillIov(vec, count);
  writeImpl(callback, vec, veclen, std::move(buf), flags);
// Common implementation behind write()/writev()/writeChain(). If the socket
// is established and the queue is empty, try the write inline; anything not
// fully written becomes a queued BytesWriteRequest, with the WRITE event and
// (optionally) the send timeout armed.
void AsyncSocket::writeImpl(WriteCallback* callback, const iovec* vec,
    size_t count, unique_ptr<IOBuf>&& buf,
  VLOG(6) << "AsyncSocket::writev() this=" << this << ", fd=" << fd_
          << ", callback=" << callback << ", count=" << count
          << ", state=" << state_;
  DestructorGuard dg(this);
  // Take ownership of the chain immediately so it is released on any
  // early-return path.
  unique_ptr<IOBuf>ioBuf(std::move(buf));
  assert(eventBase_->isInEventBaseThread());

  if (shutdownFlags_ & (SHUT_WRITE | SHUT_WRITE_PENDING)) {
    // No new writes may be performed after the write side of the socket has
    // been shut down.
    //
    // We could just call callback->writeError() here to fail just this write.
    // However, fail hard and use invalidState() to fail all outstanding
    // callbacks and move the socket into the error state. There's most likely
    // a bug in the caller's code, so we abort everything rather than trying to
    // proceed as best we can.
    return invalidState(callback);

  uint32_t countWritten = 0;
  uint32_t partialWritten = 0;
  int bytesWritten = 0;
  bool mustRegister = false;
  if (state_ == StateEnum::ESTABLISHED && !connecting()) {
    if (writeReqHead_ == nullptr) {
      // If we are established and there are no other writes pending,
      // we can attempt to perform the write immediately.
      assert(writeReqTail_ == nullptr);
      assert((eventFlags_ & EventHandler::WRITE) == 0);
      performWrite(vec, count, flags, &countWritten, &partialWritten);
      bytesWritten = writeResult.writeReturn;
      if (bytesWritten < 0) {
        auto errnoCopy = errno;
        if (writeResult.exception) {
          return failWrite(__func__, callback, 0, *writeResult.exception);
        AsyncSocketException ex(
            AsyncSocketException::INTERNAL_ERROR,
            withAddr("writev failed"),
        return failWrite(__func__, callback, 0, ex);
      } else if (countWritten == count) {
        // We successfully wrote everything.
        // Invoke the callback and return.
        callback->writeSuccess();
      } else { // continue writing the next writeReq
        // Partial write: data is now buffered; notify if requested.
        if (bufferCallback_) {
          bufferCallback_->onEgressBuffered();
  } else if (!connecting()) {
    // Invalid state for writing
    return invalidState(callback);

  // Create a new WriteRequest to add to the queue
  req = BytesWriteRequest::newRequest(this, callback, vec + countWritten,
                                      count - countWritten, partialWritten,
                                      bytesWritten, std::move(ioBuf), flags);
  } catch (const std::exception& ex) {
    // we mainly expect to catch std::bad_alloc here
    AsyncSocketException tex(AsyncSocketException::INTERNAL_ERROR,
        withAddr(string("failed to append new WriteRequest: ") + ex.what()));
    return failWrite(__func__, callback, bytesWritten, tex);

  // Append the request to the (possibly empty) queue.
  if (writeReqTail_ == nullptr) {
    assert(writeReqHead_ == nullptr);
    writeReqHead_ = writeReqTail_ = req;
    writeReqTail_->append(req);

  // Register for write events if we are established and not currently
  // waiting on write events.
  assert(state_ == StateEnum::ESTABLISHED);
  assert((eventFlags_ & EventHandler::WRITE) == 0);
  if (!updateEventRegistration(EventHandler::WRITE, 0)) {
    assert(state_ == StateEnum::ERROR);
  if (sendTimeout_ > 0) {
    // Schedule a timeout to fire if the write takes too long.
    if (!writeTimeout_.scheduleTimeout(sendTimeout_)) {
      AsyncSocketException ex(AsyncSocketException::INTERNAL_ERROR,
          withAddr("failed to schedule send timeout"));
      return failWrite(__func__, ex);
// Append an externally-constructed WriteRequest to the pending-write queue
// (empty-queue case sets both head and tail; otherwise chain onto the tail).
void AsyncSocket::writeRequest(WriteRequest* req) {
  if (writeReqTail_ == nullptr) {
    assert(writeReqHead_ == nullptr);
    writeReqHead_ = writeReqTail_ = req;
    writeReqTail_->append(req);
// Graceful close: if writes are pending in CONNECTING/ESTABLISHED state,
// shut reads down now, mark SHUT_WRITE_PENDING, and defer the real close
// until queued writes drain; otherwise behaves like closeNow().
void AsyncSocket::close() {
  VLOG(5) << "AsyncSocket::close(): this=" << this << ", fd_=" << fd_
          << ", state=" << state_ << ", shutdownFlags="
          << std::hex << (int) shutdownFlags_;

  // close() is only different from closeNow() when there are pending writes
  // that need to drain before we can close. In all other cases, just call
  // closeNow().
  //
  // Note that writeReqHead_ can be non-nullptr even in STATE_CLOSED or
  // STATE_ERROR if close() is invoked while a previous closeNow() or failure
  // is still running. (e.g., If there are multiple pending writes, and we
  // call writeError() on the first one, it may call close(). In this case we
  // will already be in STATE_CLOSED or STATE_ERROR, but the remaining pending
  // writes will still be in the queue.)
  //
  // We only need to drain pending writes if we are still in STATE_CONNECTING
  // or STATE_ESTABLISHED
  if ((writeReqHead_ == nullptr) ||
      !(state_ == StateEnum::CONNECTING ||
        state_ == StateEnum::ESTABLISHED)) {

  // Declare a DestructorGuard to ensure that the AsyncSocket cannot be
  // destroyed until close() returns.
  DestructorGuard dg(this);
  assert(eventBase_->isInEventBaseThread());

  // Since there are write requests pending, we have to set the
  // SHUT_WRITE_PENDING flag, and wait to perform the real close until the
  // connect finishes and we finish writing these requests.
  //
  // Set SHUT_READ to indicate that reads are shut down, and set the
  // SHUT_WRITE_PENDING flag to mark that we want to shutdown once the
  // pending writes complete.
  shutdownFlags_ |= (SHUT_READ | SHUT_WRITE_PENDING);

  // If a read callback is set, invoke readEOF() immediately to inform it that
  // the socket has been closed and no more data can be read.
  //
  // Disable reads if they are enabled
  if (!updateEventRegistration(0, EventHandler::READ)) {
    // We're now in the error state; callbacks have been cleaned up
    assert(state_ == StateEnum::ERROR);
    assert(readCallback_ == nullptr);
  ReadCallback* callback = readCallback_;
  readCallback_ = nullptr;
// Immediate close: tear down timers and event registrations, fail the
// pending connect and all queued writes, and deliver readEOF. Safe to call
// from any state, including recursively from callbacks.
void AsyncSocket::closeNow() {
  VLOG(5) << "AsyncSocket::closeNow(): this=" << this << ", fd_=" << fd_
          << ", state=" << state_ << ", shutdownFlags="
          << std::hex << (int) shutdownFlags_;
  DestructorGuard dg(this);
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());

    case StateEnum::ESTABLISHED:
    case StateEnum::CONNECTING:
      shutdownFlags_ |= (SHUT_READ | SHUT_WRITE);
      state_ = StateEnum::CLOSED;

      // If the write timeout was set, cancel it.
      writeTimeout_.cancelTimeout();

      // If we are registered for I/O events, unregister.
      if (eventFlags_ != EventHandler::NONE) {
        eventFlags_ = EventHandler::NONE;
        if (!updateEventRegistration()) {
          // We will have been moved into the error state.
          assert(state_ == StateEnum::ERROR);

      // Cancel any immediate-read loop callback still scheduled.
      if (immediateReadHandler_.isLoopCallbackScheduled()) {
        immediateReadHandler_.cancelLoopCallback();

      // Detach the handler from the fd before closing it.
      ioHandler_.changeHandlerFD(-1);

      // Fail the pending connect (if any) and all queued writes.
      invokeConnectErr(socketClosedLocallyEx);
      failAllWrites(socketClosedLocallyEx);

      // Deliver EOF to the read callback, clearing it first so a
      // re-entrant setReadCB() is safe.
      ReadCallback* callback = readCallback_;
      readCallback_ = nullptr;
    case StateEnum::CLOSED:
      // Do nothing. It's possible that we are being called recursively
      // from inside a callback that we invoked inside another call to close()
      // that is still running.
    case StateEnum::ERROR:
      // Do nothing. The error handling code has performed (or is performing)
      // cleanup already.
    case StateEnum::UNINIT:
      assert(eventFlags_ == EventHandler::NONE);
      assert(connectCallback_ == nullptr);
      assert(readCallback_ == nullptr);
      assert(writeReqHead_ == nullptr);
      shutdownFlags_ |= (SHUT_READ | SHUT_WRITE);
      state_ = StateEnum::CLOSED;

  LOG(DFATAL) << "AsyncSocket::closeNow() (this=" << this << ", fd=" << fd_
              << ") called in unknown state " << state_;
// Close the connection with a TCP RST rather than a FIN, by enabling
// SO_LINGER with a zero timeout before closing.
void AsyncSocket::closeWithReset() {
  // Enable SO_LINGER, with the linger timeout set to 0.
  // This will trigger a TCP reset when we close the socket.
  // {l_onoff = 1, l_linger = 0}
  struct linger optLinger = {1, 0};
  if (setSockOpt(SOL_SOCKET, SO_LINGER, &optLinger) != 0) {
    // Non-fatal: fall through and close normally.
    VLOG(2) << "AsyncSocket::closeWithReset(): error setting SO_LINGER "
            << "on " << fd_ << ": errno=" << errno;

  // Then let closeNow() take care of the rest
// Graceful write-side shutdown: if no writes are pending this is identical
// to shutdownWriteNow(); otherwise just mark SHUT_WRITE_PENDING so the real
// shutdown happens after the queue drains.
void AsyncSocket::shutdownWrite() {
  VLOG(5) << "AsyncSocket::shutdownWrite(): this=" << this << ", fd=" << fd_
          << ", state=" << state_ << ", shutdownFlags="
          << std::hex << (int) shutdownFlags_;

  // If there are no pending writes, shutdownWrite() is identical to
  // shutdownWriteNow().
  if (writeReqHead_ == nullptr) {

  assert(eventBase_->isInEventBaseThread());

  // There are pending writes. Set SHUT_WRITE_PENDING so that the actual
  // shutdown will be performed once all writes complete.
  shutdownFlags_ |= SHUT_WRITE_PENDING;
// Immediate write-side shutdown: fail all queued writes, shutdown(SHUT_WR)
// the fd if established, or record SHUT_WRITE_PENDING for sockets that are
// still connecting / not yet started. Escalates to closeNow() when reads
// are already shut down.
void AsyncSocket::shutdownWriteNow() {
  VLOG(5) << "AsyncSocket::shutdownWriteNow(): this=" << this
          << ", fd=" << fd_ << ", state=" << state_
          << ", shutdownFlags=" << std::hex << (int) shutdownFlags_;

  if (shutdownFlags_ & SHUT_WRITE) {
    // Writes are already shutdown; nothing else to do.

  // If SHUT_READ is already set, just call closeNow() to completely
  // close the socket. This can happen if close() was called with writes
  // pending, and then shutdownWriteNow() is called before all pending writes
  // complete.
  if (shutdownFlags_ & SHUT_READ) {

  DestructorGuard dg(this);
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());

  switch (static_cast<StateEnum>(state_)) {
    case StateEnum::ESTABLISHED:
      shutdownFlags_ |= SHUT_WRITE;

      // If the write timeout was set, cancel it.
      writeTimeout_.cancelTimeout();

      // If we are registered for write events, unregister.
      if (!updateEventRegistration(0, EventHandler::WRITE)) {
        // We will have been moved into the error state.
        assert(state_ == StateEnum::ERROR);

      // Shutdown writes on the file descriptor
      ::shutdown(fd_, SHUT_WR);

      // Immediately fail all write requests
      failAllWrites(socketShutdownForWritesEx);
    case StateEnum::CONNECTING:
      // Set the SHUT_WRITE_PENDING flag.
      // When the connection completes, it will check this flag,
      // shutdown the write half of the socket, and then set SHUT_WRITE.
      shutdownFlags_ |= SHUT_WRITE_PENDING;

      // Immediately fail all write requests
      failAllWrites(socketShutdownForWritesEx);
    case StateEnum::UNINIT:
      // Callers normally shouldn't call shutdownWriteNow() before the socket
      // even starts connecting. Nonetheless, go ahead and set
      // SHUT_WRITE_PENDING. Once the socket eventually connects it will
      // immediately shut down the write side of the socket.
      shutdownFlags_ |= SHUT_WRITE_PENDING;
    case StateEnum::CLOSED:
    case StateEnum::ERROR:
      // We should never get here. SHUT_WRITE should always be set
      // in STATE_CLOSED and STATE_ERROR.
      VLOG(4) << "AsyncSocket::shutdownWriteNow() (this=" << this
              << ", fd=" << fd_ << ") in unexpected state " << state_
              << " with SHUT_WRITE not set ("
              << std::hex << (int) shutdownFlags_ << ")";

  LOG(DFATAL) << "AsyncSocket::shutdownWriteNow() (this=" << this << ", fd="
              << fd_ << ") called in unknown state " << state_;
// Non-blocking poll() to check whether data is available to read right now.
// NOTE(review): the fds[0].fd assignment and return statement are missing
// from this excerpt.
bool AsyncSocket::readable() const {
  struct pollfd fds[1];
  fds[0].events = POLLIN;
  // Zero timeout: poll returns immediately.
  int rc = poll(fds, 1, 0);
1029 bool AsyncSocket::isPending() const {
1030 return ioHandler_.isPending();
// Check whether the peer has hung up, using POLLRDHUP where available
// (Linux-only); on other platforms hangup cannot be distinguished this way.
// NOTE(review): the fds[0].fd assignment, the poll() call, and the #else
// branch are missing from this excerpt.
bool AsyncSocket::hangup() const {
  // sanity check, no one should ask for hangup if we are not connected.
#ifdef POLLRDHUP // Linux-only
  struct pollfd fds[1];
  fds[0].events = POLLRDHUP|POLLHUP;
  return (fds[0].revents & (POLLRDHUP|POLLHUP)) != 0;
1051 bool AsyncSocket::good() const {
1052 return ((state_ == StateEnum::CONNECTING ||
1053 state_ == StateEnum::ESTABLISHED) &&
1054 (shutdownFlags_ == 0) && (eventBase_ != nullptr));
1057 bool AsyncSocket::error() const {
1058 return (state_ == StateEnum::ERROR);
// Attach a previously-detached socket to an EventBase, re-binding the I/O
// handler and write timeout. Must be called from the new base's thread,
// and only while detached (eventBase_ == nullptr).
void AsyncSocket::attachEventBase(EventBase* eventBase) {
  VLOG(5) << "AsyncSocket::attachEventBase(this=" << this << ", fd=" << fd_
          << ", old evb=" << eventBase_ << ", new evb=" << eventBase
          << ", state=" << state_ << ", events="
          << std::hex << eventFlags_ << ")";
  assert(eventBase_ == nullptr);
  assert(eventBase->isInEventBaseThread());

  eventBase_ = eventBase;
  ioHandler_.attachEventBase(eventBase);
  writeTimeout_.attachEventBase(eventBase);
// Detach the socket from its EventBase so it can be moved to another
// thread. Must be called from the current base's thread; see isDetachable().
void AsyncSocket::detachEventBase() {
  VLOG(5) << "AsyncSocket::detachEventBase(this=" << this << ", fd=" << fd_
          << ", old evb=" << eventBase_ << ", state=" << state_
          << ", events=" << std::hex << eventFlags_ << ")";
  assert(eventBase_ != nullptr);
  assert(eventBase_->isInEventBaseThread());

  eventBase_ = nullptr;
  ioHandler_.detachEventBase();
  writeTimeout_.detachEventBase();
1086 bool AsyncSocket::isDetachable() const {
1087 DCHECK(eventBase_ != nullptr);
1088 DCHECK(eventBase_->isInEventBaseThread());
1090 return !ioHandler_.isHandlerRegistered() && !writeTimeout_.isScheduled();
1093 void AsyncSocket::getLocalAddress(folly::SocketAddress* address) const {
1094 if (!localAddr_.isInitialized()) {
1095 localAddr_.setFromLocalAddress(fd_);
1097 *address = localAddr_;
// Copy out the peer address, lazily resolving it from fd_ on first use.
// NOTE(review): the `*address = addr_;` copy-out line is elided in this
// excerpt.
1100 void AsyncSocket::getPeerAddress(folly::SocketAddress* address) const {
1101 if (!addr_.isInitialized()) {
1102 addr_.setFromPeerAddress(fd_);
// Enable/disable TCP_NODELAY (Nagle's algorithm off/on). Returns 0 on
// success; nonzero error paths (elided here) are taken when the socket is
// not open or setsockopt() fails.
1107 int AsyncSocket::setNoDelay(bool noDelay) {
1109 VLOG(4) << "AsyncSocket::setNoDelay() called on non-open socket "
1110 << this << "(state=" << state_ << ")";
1115 int value = noDelay ? 1 : 0;
1116 if (setsockopt(fd_, IPPROTO_TCP, TCP_NODELAY, &value, sizeof(value)) != 0) {
1117 int errnoCopy = errno;
1118 VLOG(2) << "failed to update TCP_NODELAY option on AsyncSocket "
1119 << this << " (fd=" << fd_ << ", state=" << state_ << "): "
1120 << strerror(errnoCopy);
// Select the TCP congestion-control algorithm (e.g. "cubic", "bbr") via
// TCP_CONGESTION. The +1 on the length includes the NUL terminator, as the
// kernel expects a C string. TCP_CONGESTION is defined locally (13) for
// older headers that lack it.
1127 int AsyncSocket::setCongestionFlavor(const std::string &cname) {
1129 #ifndef TCP_CONGESTION
1130 #define TCP_CONGESTION 13
1134 VLOG(4) << "AsyncSocket::setCongestionFlavor() called on non-open "
1135 << "socket " << this << "(state=" << state_ << ")";
1140 if (setsockopt(fd_, IPPROTO_TCP, TCP_CONGESTION, cname.c_str(),
1141 cname.length() + 1) != 0) {
1142 int errnoCopy = errno;
1143 VLOG(2) << "failed to update TCP_CONGESTION option on AsyncSocket "
1144 << this << "(fd=" << fd_ << ", state=" << state_ << "): "
1145 << strerror(errnoCopy);
// Enable/disable TCP_QUICKACK (Linux-only; immediately acknowledge
// segments instead of delaying ACKs). On platforms without TCP_QUICKACK
// the (elided) fallback path applies.
1152 int AsyncSocket::setQuickAck(bool quickack) {
1154 VLOG(4) << "AsyncSocket::setQuickAck() called on non-open socket "
1155 << this << "(state=" << state_ << ")";
1160 #ifdef TCP_QUICKACK // Linux-only
1161 int value = quickack ? 1 : 0;
1162 if (setsockopt(fd_, IPPROTO_TCP, TCP_QUICKACK, &value, sizeof(value)) != 0) {
1163 int errnoCopy = errno;
1164 VLOG(2) << "failed to update TCP_QUICKACK option on AsyncSocket"
1165 << this << "(fd=" << fd_ << ", state=" << state_ << "): "
1166 << strerror(errnoCopy);
// Set the kernel send buffer size (SO_SNDBUF) for this socket.
1176 int AsyncSocket::setSendBufSize(size_t bufsize) {
1178 VLOG(4) << "AsyncSocket::setSendBufSize() called on non-open socket "
1179 << this << "(state=" << state_ << ")";
1183 if (setsockopt(fd_, SOL_SOCKET, SO_SNDBUF, &bufsize, sizeof(bufsize)) !=0) {
1184 int errnoCopy = errno;
1185 VLOG(2) << "failed to update SO_SNDBUF option on AsyncSocket"
1186 << this << "(fd=" << fd_ << ", state=" << state_ << "): "
1187 << strerror(errnoCopy);
// Set the kernel receive buffer size (SO_RCVBUF) for this socket.
1194 int AsyncSocket::setRecvBufSize(size_t bufsize) {
1196 VLOG(4) << "AsyncSocket::setRecvBufSize() called on non-open socket "
1197 << this << "(state=" << state_ << ")";
1201 if (setsockopt(fd_, SOL_SOCKET, SO_RCVBUF, &bufsize, sizeof(bufsize)) !=0) {
1202 int errnoCopy = errno;
1203 VLOG(2) << "failed to update SO_RCVBUF option on AsyncSocket"
1204 << this << "(fd=" << fd_ << ", state=" << state_ << "): "
1205 << strerror(errnoCopy);
// Apply a TCP profile / namespace to the socket via the (non-standard)
// SO_SET_NAMESPACE option. NOTE(review): SO_SET_NAMESPACE appears to be a
// platform/vendor-specific option — confirm availability on the target OS.
1212 int AsyncSocket::setTCPProfile(int profd) {
1214 VLOG(4) << "AsyncSocket::setTCPProfile() called on non-open socket "
1215 << this << "(state=" << state_ << ")";
1219 if (setsockopt(fd_, SOL_SOCKET, SO_SET_NAMESPACE, &profd, sizeof(int)) !=0) {
1220 int errnoCopy = errno;
1221 VLOG(2) << "failed to set socket namespace option on AsyncSocket"
1222 << this << "(fd=" << fd_ << ", state=" << state_ << "): "
1223 << strerror(errnoCopy);
// EventHandler callback: dispatch READ and/or WRITE readiness to
// handleRead()/handleWrite(). Writes are processed first so that a write
// callback detaching us from the EventBase is noticed before reading.
// NOTE(review): the VLOG messages say "ioRead()" but this function is
// ioReady() — log-text fix left for a behavior-affecting change.
1230 void AsyncSocket::ioReady(uint16_t events) noexcept {
1231 VLOG(7) << "AsyncSocket::ioRead() this=" << this << ", fd" << fd_
1232 << ", events=" << std::hex << events << ", state=" << state_;
1233 DestructorGuard dg(this);
1234 assert(events & EventHandler::READ_WRITE);
1235 assert(eventBase_->isInEventBaseThread());
1237 uint16_t relevantEvents = events & EventHandler::READ_WRITE;
1238 if (relevantEvents == EventHandler::READ) {
1240 } else if (relevantEvents == EventHandler::WRITE) {
1242 } else if (relevantEvents == EventHandler::READ_WRITE) {
1243 EventBase* originalEventBase = eventBase_;
1244 // If both read and write events are ready, process writes first.
1247 // Return now if handleWrite() detached us from our EventBase
1248 if (eventBase_ != originalEventBase) {
1252 // Only call handleRead() if a read callback is still installed.
1253 // (It's possible that the read callback was uninstalled during
1255 if (readCallback_) {
1259 VLOG(4) << "AsyncSocket::ioRead() called with unexpected events "
1260 << std::hex << events << "(this=" << this << ")";
// Perform one non-blocking recv() into *buf. Returns the byte count on
// success, READ_BLOCKING when the call would block (EAGAIN/EWOULDBLOCK),
// or READ_ERROR on failure. MSG_PEEK is OR'd in when peeking is requested
// (condition elided in this excerpt). EOF handling lines are also elided.
1265 AsyncSocket::ReadResult
1266 AsyncSocket::performRead(void** buf, size_t* buflen, size_t* /* offset */) {
1267 VLOG(5) << "AsyncSocket::performRead() this=" << this << ", buf=" << *buf
1268 << ", buflen=" << *buflen;
1272 recvFlags |= MSG_PEEK;
1275 ssize_t bytes = recv(fd_, *buf, *buflen, MSG_DONTWAIT | recvFlags);
1277 if (errno == EAGAIN || errno == EWOULDBLOCK) {
1278 // No more data to read right now.
1279 return ReadResult(READ_BLOCKING);
1281 return ReadResult(READ_ERROR);
1284 appBytesReceived_ += bytes;
1285 return ReadResult(bytes);
// Ask the installed ReadCallback for a buffer to read into.
1289 void AsyncSocket::prepareReadBuffer(void** buf, size_t* buflen) noexcept {
1290 // no matter what, buffer should be prepared for non-ssl socket
1291 CHECK(readCallback_);
1292 readCallback_->getReadBuffer(buf, buflen);
// Read loop: repeatedly obtain a buffer from the ReadCallback, recv() into
// it, and deliver the data, until the read would block, the callback is
// uninstalled, maxReadsPerEvent_ is hit, or the socket is moved to another
// EventBase. Also handles EOF (shutdown read side, invoke readEOF()) and
// read errors (fail with an exception). NOTE(review): some lines — e.g.
// try/catch braces and loop `break`/`return` statements — are elided in
// this excerpt.
1295 void AsyncSocket::handleRead() noexcept {
1296 VLOG(5) << "AsyncSocket::handleRead() this=" << this << ", fd=" << fd_
1297 << ", state=" << state_;
1298 assert(state_ == StateEnum::ESTABLISHED);
1299 assert((shutdownFlags_ & SHUT_READ) == 0);
1300 assert(readCallback_ != nullptr);
1301 assert(eventFlags_ & EventHandler::READ);
1304 // - a read attempt would block
1305 // - readCallback_ is uninstalled
1306 // - the number of loop iterations exceeds the optional maximum
1307 // - this AsyncSocket is moved to another EventBase
1309 // When we invoke readDataAvailable() it may uninstall the readCallback_,
1310 // which is why need to check for it here.
1312 // The last bullet point is slightly subtle. readDataAvailable() may also
1313 // detach this socket from this EventBase. However, before
1314 // readDataAvailable() returns another thread may pick it up, attach it to
1315 // a different EventBase, and install another readCallback_. We need to
1316 // exit immediately after readDataAvailable() returns if the eventBase_ has
1317 // changed. (The caller must perform some sort of locking to transfer the
1318 // AsyncSocket between threads properly. This will be sufficient to ensure
1319 // that this thread sees the updated eventBase_ variable after
1320 // readDataAvailable() returns.)
1321 uint16_t numReads = 0;
1322 EventBase* originalEventBase = eventBase_;
1323 while (readCallback_ && eventBase_ == originalEventBase) {
1324 // Get the buffer to read into.
1325 void* buf = nullptr;
1326 size_t buflen = 0, offset = 0;
1328 prepareReadBuffer(&buf, &buflen);
1329 VLOG(5) << "prepareReadBuffer() buf=" << buf << ", buflen=" << buflen;
1330 } catch (const AsyncSocketException& ex) {
1331 return failRead(__func__, ex);
1332 } catch (const std::exception& ex) {
1333 AsyncSocketException tex(AsyncSocketException::BAD_ARGS,
1334 string("ReadCallback::getReadBuffer() "
1335 "threw exception: ") +
1337 return failRead(__func__, tex);
1339 AsyncSocketException ex(AsyncSocketException::BAD_ARGS,
1340 "ReadCallback::getReadBuffer() threw "
1341 "non-exception type");
1342 return failRead(__func__, ex);
1344 if (!isBufferMovable_ && (buf == nullptr || buflen == 0)) {
1345 AsyncSocketException ex(AsyncSocketException::BAD_ARGS,
1346 "ReadCallback::getReadBuffer() returned "
1348 return failRead(__func__, ex);
1352 auto readResult = performRead(&buf, &buflen, &offset);
1353 auto bytesRead = readResult.readReturn;
1354 VLOG(4) << "this=" << this << ", AsyncSocket::handleRead() got "
1355 << bytesRead << " bytes";
1356 if (bytesRead > 0) {
1357 if (!isBufferMovable_) {
1358 readCallback_->readDataAvailable(bytesRead);
1360 CHECK(kOpenSslModeMoveBufferOwnership);
1361 VLOG(5) << "this=" << this << ", AsyncSocket::handleRead() got "
1362 << "buf=" << buf << ", " << bytesRead << "/" << buflen
1363 << ", offset=" << offset;
1364 auto readBuf = folly::IOBuf::takeOwnership(buf, buflen);
1365 readBuf->trimStart(offset);
1366 readBuf->trimEnd(buflen - offset - bytesRead);
1367 readCallback_->readBufferAvailable(std::move(readBuf));
1370 // Fall through and continue around the loop if the read
1371 // completely filled the available buffer.
1372 // Note that readCallback_ may have been uninstalled or changed inside
1373 // readDataAvailable().
1374 if (size_t(bytesRead) < buflen) {
1377 } else if (bytesRead == READ_BLOCKING) {
1378 // No more data to read right now.
1380 } else if (bytesRead == READ_ERROR) {
1381 readErr_ = READ_ERROR;
1382 if (readResult.exception) {
1383 return failRead(__func__, *readResult.exception);
1385 auto errnoCopy = errno;
1386 AsyncSocketException ex(
1387 AsyncSocketException::INTERNAL_ERROR,
1388 withAddr("recv() failed"),
1390 return failRead(__func__, ex);
1392 assert(bytesRead == READ_EOF);
1393 readErr_ = READ_EOF;
1395 shutdownFlags_ |= SHUT_READ;
1396 if (!updateEventRegistration(0, EventHandler::READ)) {
1397 // we've already been moved into STATE_ERROR
1398 assert(state_ == StateEnum::ERROR);
1399 assert(readCallback_ == nullptr);
1403 ReadCallback* callback = readCallback_;
1404 readCallback_ = nullptr;
1405 callback->readEOF();
1408 if (maxReadsPerEvent_ && (++numReads >= maxReadsPerEvent_)) {
1409 if (readCallback_ != nullptr) {
1410 // We might still have data in the socket.
1411 // (e.g. see comment in AsyncSSLSocket::checkForImmediateRead)
1412 scheduleImmediateRead();
1420 * This function attempts to write as much data as possible, until no more data
1423 * - If it sends all available data, it unregisters for write events, and stops
1424 * the writeTimeout_.
1426 * - If not all of the data can be sent immediately, it reschedules
1427 * writeTimeout_ (if a non-zero timeout is set), and ensures the handler is
1428 * registered for write events.
// Write loop: drain writeReqHead_ as far as the kernel allows. On full
// completion it unregisters WRITE events and cancels the send timeout
// (possibly also performing a deferred shutdown); on partial progress it
// stays registered and reschedules the timeout. NOTE(review): some lines
// (closing braces, `return`/`break` statements, handleConnect() dispatch)
// are elided in this excerpt.
1430 void AsyncSocket::handleWrite() noexcept {
1431 VLOG(5) << "AsyncSocket::handleWrite() this=" << this << ", fd=" << fd_
1432 << ", state=" << state_;
1433 DestructorGuard dg(this);
1435 if (state_ == StateEnum::CONNECTING) {
1441 assert(state_ == StateEnum::ESTABLISHED);
1442 assert((shutdownFlags_ & SHUT_WRITE) == 0);
1443 assert(writeReqHead_ != nullptr);
1445 // Loop until we run out of write requests,
1446 // or until this socket is moved to another EventBase.
1447 // (See the comment in handleRead() explaining how this can happen.)
1448 EventBase* originalEventBase = eventBase_;
1449 while (writeReqHead_ != nullptr && eventBase_ == originalEventBase) {
1450 auto writeResult = writeReqHead_->performWrite();
1451 if (writeResult.writeReturn < 0) {
1452 if (writeResult.exception) {
1453 return failWrite(__func__, *writeResult.exception);
1455 auto errnoCopy = errno;
1456 AsyncSocketException ex(
1457 AsyncSocketException::INTERNAL_ERROR,
1458 withAddr("writev() failed"),
1460 return failWrite(__func__, ex);
1461 } else if (writeReqHead_->isComplete()) {
1462 // We finished this request
1463 WriteRequest* req = writeReqHead_;
1464 writeReqHead_ = req->getNext();
1466 if (writeReqHead_ == nullptr) {
1467 writeReqTail_ = nullptr;
1468 // This is the last write request.
1469 // Unregister for write events and cancel the send timer
1470 // before we invoke the callback. We have to update the state properly
1471 // before calling the callback, since it may want to detach us from
1473 if (eventFlags_ & EventHandler::WRITE) {
1474 if (!updateEventRegistration(0, EventHandler::WRITE)) {
1475 assert(state_ == StateEnum::ERROR);
1478 // Stop the send timeout
1479 writeTimeout_.cancelTimeout();
1481 assert(!writeTimeout_.isScheduled());
1483 // If SHUT_WRITE_PENDING is set, we should shutdown the socket after
1484 // we finish sending the last write request.
1486 // We have to do this before invoking writeSuccess(), since
1487 // writeSuccess() may detach us from our EventBase.
1488 if (shutdownFlags_ & SHUT_WRITE_PENDING) {
1489 assert(connectCallback_ == nullptr);
1490 shutdownFlags_ |= SHUT_WRITE;
1492 if (shutdownFlags_ & SHUT_READ) {
1493 // Reads have already been shutdown. Fully close the socket and
1494 // move to STATE_CLOSED.
1496 // Note: This code currently moves us to STATE_CLOSED even if
1497 // close() hasn't ever been called. This can occur if we have
1498 // received EOF from the peer and shutdownWrite() has been called
1499 // locally. Should we bother staying in STATE_ESTABLISHED in this
1500 // case, until close() is actually called? I can't think of a
1501 // reason why we would need to do so. No other operations besides
1502 // calling close() or destroying the socket can be performed at
1504 assert(readCallback_ == nullptr);
1505 state_ = StateEnum::CLOSED;
1507 ioHandler_.changeHandlerFD(-1);
1511 // Reads are still enabled, so we are only doing a half-shutdown
1512 ::shutdown(fd_, SHUT_WR);
1517 // Invoke the callback
1518 WriteCallback* callback = req->getCallback();
1521 callback->writeSuccess();
1523 // We'll continue around the loop, trying to write another request
1526 if (bufferCallback_) {
1527 bufferCallback_->onEgressBuffered();
1529 writeReqHead_->consume();
1530 // Stop after a partial write; it's highly likely that a subsequent write
1531 // attempt will just return EAGAIN.
1533 // Ensure that we are registered for write events.
1534 if ((eventFlags_ & EventHandler::WRITE) == 0) {
1535 if (!updateEventRegistration(EventHandler::WRITE, 0)) {
1536 assert(state_ == StateEnum::ERROR);
1541 // Reschedule the send timeout, since we have made some write progress.
1542 if (sendTimeout_ > 0) {
1543 if (!writeTimeout_.scheduleTimeout(sendTimeout_)) {
1544 AsyncSocketException ex(AsyncSocketException::INTERNAL_ERROR,
1545 withAddr("failed to reschedule write timeout"));
1546 return failWrite(__func__, ex);
1552 if (!writeReqHead_ && bufferCallback_) {
1553 bufferCallback_->onEgressBufferCleared();
// Intentionally a no-op in the base class; subclasses (e.g. AsyncSSLSocket)
// override it to drain data already buffered in userspace.
1557 void AsyncSocket::checkForImmediateRead() noexcept {
1558 // We currently don't attempt to perform optimistic reads in AsyncSocket.
1559 // (However, note that some subclasses do override this method.)
1561 // Simply calling handleRead() here would be bad, as this would call
1562 // readCallback_->getReadBuffer(), forcing the callback to allocate a read
1563 // buffer even though no data may be available. This would waste lots of
1564 // memory, since the buffer will sit around unused until the socket actually
1565 // becomes readable.
1567 // Checking if the socket is readable now also seems like it would probably
1568 // be a pessimism. In most cases it probably wouldn't be readable, and we
1569 // would just waste an extra system call. Even if it is readable, waiting to
1570 // find out from libevent on the next event loop doesn't seem that bad.
// After the connection is established (or the socket is adopted), sync the
// event registration with current state: enable READ if a callback is set,
// kick off pending writes, and unregister events that are no longer needed.
1573 void AsyncSocket::handleInitialReadWrite() noexcept {
1574 // Our callers should already be holding a DestructorGuard, but grab
1575 // one here just to make sure, in case one of our calling code paths ever
1577 DestructorGuard dg(this);
1579 // If we have a readCallback_, make sure we enable read events. We
1580 // may already be registered for reads if connectSuccess() set
1581 // the read callback.
1582 if (readCallback_ && !(eventFlags_ & EventHandler::READ)) {
1583 assert(state_ == StateEnum::ESTABLISHED);
1584 assert((shutdownFlags_ & SHUT_READ) == 0);
1585 if (!updateEventRegistration(EventHandler::READ, 0)) {
1586 assert(state_ == StateEnum::ERROR);
1589 checkForImmediateRead();
1590 } else if (readCallback_ == nullptr) {
1591 // Unregister for read events.
1592 updateEventRegistration(0, EventHandler::READ);
1595 // If we have write requests pending, try to send them immediately.
1596 // Since we just finished accepting, there is a very good chance that we can
1597 // write without blocking.
1599 // However, we only process them if EventHandler::WRITE is not already set,
1600 // which means that we're already blocked on a write attempt. (This can
1601 // happen if connectSuccess() called write() before returning.)
1602 if (writeReqHead_ && !(eventFlags_ & EventHandler::WRITE)) {
1603 // Call handleWrite() to perform write processing.
1605 } else if (writeReqHead_ == nullptr) {
1606 // Unregister for write event.
1607 updateEventRegistration(0, EventHandler::WRITE);
// Completion handler for a non-blocking connect(): check SO_ERROR, fail
// the connect on error, otherwise transition to ESTABLISHED, honor a
// deferred write-shutdown, invoke the connect callback, and finally sync
// read/write registration (unless the callback detached us).
// NOTE(review): the log literal `<< "successfully connected"` is missing a
// leading space after fd_ — a string-literal fix left for a
// behavior-affecting change.
1611 void AsyncSocket::handleConnect() noexcept {
1612 VLOG(5) << "AsyncSocket::handleConnect() this=" << this << ", fd=" << fd_
1613 << ", state=" << state_;
1614 assert(state_ == StateEnum::CONNECTING);
1615 // SHUT_WRITE can never be set while we are still connecting;
1616 // SHUT_WRITE_PENDING may be set, but we only set SHUT_WRITE once the connect
1618 assert((shutdownFlags_ & SHUT_WRITE) == 0);
1620 // In case we had a connect timeout, cancel the timeout
1621 writeTimeout_.cancelTimeout();
1622 // We don't use a persistent registration when waiting on a connect event,
1623 // so we have been automatically unregistered now. Update eventFlags_ to
1625 assert(eventFlags_ == EventHandler::WRITE);
1626 eventFlags_ = EventHandler::NONE;
1628 // Call getsockopt() to check if the connect succeeded
1630 socklen_t len = sizeof(error);
1631 int rv = getsockopt(fd_, SOL_SOCKET, SO_ERROR, &error, &len);
1633 auto errnoCopy = errno;
1634 AsyncSocketException ex(
1635 AsyncSocketException::INTERNAL_ERROR,
1636 withAddr("error calling getsockopt() after connect"),
1638 VLOG(4) << "AsyncSocket::handleConnect(this=" << this << ", fd="
1639 << fd_ << " host=" << addr_.describe()
1640 << ") exception:" << ex.what();
1641 return failConnect(__func__, ex);
1645 AsyncSocketException ex(AsyncSocketException::NOT_OPEN,
1646 "connect failed", error);
1647 VLOG(1) << "AsyncSocket::handleConnect(this=" << this << ", fd="
1648 << fd_ << " host=" << addr_.describe()
1649 << ") exception: " << ex.what();
1650 return failConnect(__func__, ex);
1653 // Move into STATE_ESTABLISHED
1654 state_ = StateEnum::ESTABLISHED;
1656 // If SHUT_WRITE_PENDING is set and we don't have any write requests to
1657 // perform, immediately shutdown the write half of the socket.
1658 if ((shutdownFlags_ & SHUT_WRITE_PENDING) && writeReqHead_ == nullptr) {
1659 // SHUT_READ shouldn't be set. If close() is called on the socket while we
1660 // are still connecting we just abort the connect rather than waiting for
1662 assert((shutdownFlags_ & SHUT_READ) == 0);
1663 ::shutdown(fd_, SHUT_WR);
1664 shutdownFlags_ |= SHUT_WRITE;
1667 VLOG(7) << "AsyncSocket " << this << ": fd " << fd_
1668 << "successfully connected; state=" << state_;
1670 // Remember the EventBase we are attached to, before we start invoking any
1671 // callbacks (since the callbacks may call detachEventBase()).
1672 EventBase* originalEventBase = eventBase_;
1674 invokeConnectSuccess();
1675 // Note that the connect callback may have changed our state.
1676 // (set or unset the read callback, called write(), closed the socket, etc.)
1677 // The following code needs to handle these situations correctly.
1679 // If the socket has been closed, readCallback_ and writeReqHead_ will
1680 // always be nullptr, so that will prevent us from trying to read or write.
1682 // The main thing to check for is if eventBase_ is still originalEventBase.
1683 // If not, we have been detached from this event base, so we shouldn't
1684 // perform any more operations.
1685 if (eventBase_ != originalEventBase) {
1689 handleInitialReadWrite();
// Write-timeout callback: a timeout during CONNECTING means connect()
// timed out; otherwise a pending write timed out. Both paths fail the
// socket with TIMED_OUT.
1692 void AsyncSocket::timeoutExpired() noexcept {
1693 VLOG(7) << "AsyncSocket " << this << ", fd " << fd_ << ": timeout expired: "
1694 << "state=" << state_ << ", events=" << std::hex << eventFlags_;
1695 DestructorGuard dg(this);
1696 assert(eventBase_->isInEventBaseThread());
1698 if (state_ == StateEnum::CONNECTING) {
1699 // connect() timed out
1700 // Unregister for I/O events.
1701 AsyncSocketException ex(AsyncSocketException::TIMED_OUT,
1702 "connect timed out");
1703 failConnect(__func__, ex);
1705 // a normal write operation timed out
1706 assert(state_ == StateEnum::ESTABLISHED);
1707 AsyncSocketException ex(AsyncSocketException::TIMED_OUT, "write timed out");
1708 failWrite(__func__, ex);
// Write an iovec array with a single sendmsg() call (capped at kIovMax
// entries). sendmsg() is used instead of writev() so MSG_NOSIGNAL can
// suppress SIGPIPE; CORK maps to MSG_MORE and EOR to MSG_EOR. On return,
// *countWritten is the number of fully-written iovecs and *partialWritten
// the bytes written into the next (partial) one. NOTE(review): the leading
// parameter lines (vec/count/flags) and some statements are elided in this
// excerpt.
1712 AsyncSocket::WriteResult AsyncSocket::performWrite(
1716 uint32_t* countWritten,
1717 uint32_t* partialWritten) {
1718 // We use sendmsg() instead of writev() so that we can pass in MSG_NOSIGNAL
1719 // We correctly handle EPIPE errors, so we never want to receive SIGPIPE
1720 // (since it may terminate the program if the main program doesn't explicitly
1723 msg.msg_name = nullptr;
1724 msg.msg_namelen = 0;
1725 msg.msg_iov = const_cast<iovec *>(vec);
1726 msg.msg_iovlen = std::min<size_t>(count, kIovMax);
1727 msg.msg_control = nullptr;
1728 msg.msg_controllen = 0;
1731 int msg_flags = MSG_DONTWAIT;
1733 #ifdef MSG_NOSIGNAL // Linux-only
1734 msg_flags |= MSG_NOSIGNAL;
1735 if (isSet(flags, WriteFlags::CORK)) {
1736 // MSG_MORE tells the kernel we have more data to send, so wait for us to
1737 // give it the rest of the data rather than immediately sending a partial
1738 // frame, even when TCP_NODELAY is enabled.
1739 msg_flags |= MSG_MORE;
1742 if (isSet(flags, WriteFlags::EOR)) {
1743 // marks that this is the last byte of a record (response)
1744 msg_flags |= MSG_EOR;
1746 ssize_t totalWritten = ::sendmsg(fd_, &msg, msg_flags);
1747 if (totalWritten < 0) {
1748 if (errno == EAGAIN) {
1749 // TCP buffer is full; we can't write any more data right now.
1751 *partialWritten = 0;
1752 return WriteResult(0);
1756 *partialWritten = 0;
1757 return WriteResult(WRITE_ERROR);
1760 appBytesWritten_ += totalWritten;
1762 uint32_t bytesWritten;
1764 for (bytesWritten = totalWritten, n = 0; n < count; ++n) {
1765 const iovec* v = vec + n;
1766 if (v->iov_len > bytesWritten) {
1767 // Partial write finished in the middle of this iovec
1769 *partialWritten = bytesWritten;
1770 return WriteResult(totalWritten);
1773 bytesWritten -= v->iov_len;
1776 assert(bytesWritten == 0);
1778 *partialWritten = 0;
1779 return WriteResult(totalWritten);
1783 * Re-register the EventHandler after eventFlags_ has changed.
1785 * If an error occurs, fail() is called to move the socket into the error state
1786 * and call all currently installed callbacks. After an error, the
1787 * AsyncSocket is completely unregistered.
1789  * @return Returns true on success, or false on error.
// Re-register the IO handler according to eventFlags_. NONE unregisters
// entirely; otherwise we register persistently so a callback doesn't have
// to re-register. On registration failure the socket is failed.
1791 bool AsyncSocket::updateEventRegistration() {
1792 VLOG(5) << "AsyncSocket::updateEventRegistration(this=" << this
1793 << ", fd=" << fd_ << ", evb=" << eventBase_ << ", state=" << state_
1794 << ", events=" << std::hex << eventFlags_;
1795 assert(eventBase_->isInEventBaseThread());
1796 if (eventFlags_ == EventHandler::NONE) {
1797 ioHandler_.unregisterHandler();
1801 // Always register for persistent events, so we don't have to re-register
1802 // after being called back.
1803 if (!ioHandler_.registerHandler(eventFlags_ | EventHandler::PERSIST)) {
1804 eventFlags_ = EventHandler::NONE; // we're not registered after error
1805 AsyncSocketException ex(AsyncSocketException::INTERNAL_ERROR,
1806 withAddr("failed to update AsyncSocket event registration"));
1807 fail("updateEventRegistration", ex);
// Convenience overload: set `enable` bits and clear `disable` bits in
// eventFlags_, then re-register only if the flags actually changed.
1814 bool AsyncSocket::updateEventRegistration(uint16_t enable,
1816 uint16_t oldFlags = eventFlags_;
1817 eventFlags_ |= enable;
1818 eventFlags_ &= ~disable;
1819 if (eventFlags_ == oldFlags) {
1822 return updateEventRegistration();
// Begin failure handling: enter ERROR state, reject all future reads and
// writes, unregister IO events, and stop the write timeout. Callers pair
// this with finishFail() after invoking their own error callbacks.
1826 void AsyncSocket::startFail() {
1827 // startFail() should only be called once
1828 assert(state_ != StateEnum::ERROR);
1829 assert(getDestructorGuardCount() > 0);
1830 state_ = StateEnum::ERROR;
1831 // Ensure that SHUT_READ and SHUT_WRITE are set,
1832 // so all future attempts to read or write will be rejected
1833 shutdownFlags_ |= (SHUT_READ | SHUT_WRITE);
1835 if (eventFlags_ != EventHandler::NONE) {
1836 eventFlags_ = EventHandler::NONE;
1837 ioHandler_.unregisterHandler();
1839 writeTimeout_.cancelTimeout();
1842 ioHandler_.changeHandlerFD(-1);
// Complete failure handling: notify the connect callback (if any) and the
// read callback with a generic INTERNAL_ERROR exception. Write callbacks
// are handled elsewhere (failAllWrites, elided from this excerpt).
1847 void AsyncSocket::finishFail() {
1848 assert(state_ == StateEnum::ERROR);
1849 assert(getDestructorGuardCount() > 0);
1851 AsyncSocketException ex(AsyncSocketException::INTERNAL_ERROR,
1852 withAddr("socket closing after error"));
1853 invokeConnectErr(ex);
1856 if (readCallback_) {
1857 ReadCallback* callback = readCallback_;
1858 readCallback_ = nullptr;
1859 callback->readErr(ex);
// Generic failure entry point: log and run the fail sequence
// (startFail/finishFail calls are elided in this excerpt).
1863 void AsyncSocket::fail(const char* fn, const AsyncSocketException& ex) {
1864 VLOG(4) << "AsyncSocket(this=" << this << ", fd=" << fd_ << ", state="
1865 << state_ << " host=" << addr_.describe()
1866 << "): failed in " << fn << "(): "
// Fail a pending connect: log, then deliver the error to the connect
// callback via invokeConnectErr() (surrounding fail sequence elided).
1872 void AsyncSocket::failConnect(const char* fn, const AsyncSocketException& ex) {
1873 VLOG(5) << "AsyncSocket(this=" << this << ", fd=" << fd_ << ", state="
1874 << state_ << " host=" << addr_.describe()
1875 << "): failed while connecting in " << fn << "(): "
1879 invokeConnectErr(ex);
// Fail during a read: log, uninstall the read callback, and deliver
// readErr() to it (surrounding fail sequence elided in this excerpt).
1883 void AsyncSocket::failRead(const char* fn, const AsyncSocketException& ex) {
1884 VLOG(5) << "AsyncSocket(this=" << this << ", fd=" << fd_ << ", state="
1885 << state_ << " host=" << addr_.describe()
1886 << "): failed while reading in " << fn << "(): "
1890 if (readCallback_ != nullptr) {
1891 ReadCallback* callback = readCallback_;
1892 readCallback_ = nullptr;
1893 callback->readErr(ex);
// Fail during a write: only the head write request's callback gets this
// specific error; remaining queued writes are failed later (in the general
// fail path) with a generic error.
1899 void AsyncSocket::failWrite(const char* fn, const AsyncSocketException& ex) {
1900 VLOG(5) << "AsyncSocket(this=" << this << ", fd=" << fd_ << ", state="
1901 << state_ << " host=" << addr_.describe()
1902 << "): failed while writing in " << fn << "(): "
1906 // Only invoke the first write callback, since the error occurred while
1907 // writing this request. Let any other pending write callbacks be invoked in
1909 if (writeReqHead_ != nullptr) {
1910 WriteRequest* req = writeReqHead_;
1911 writeReqHead_ = req->getNext();
1912 WriteCallback* callback = req->getCallback();
1913 uint32_t bytesWritten = req->getTotalBytesWritten();
1916 callback->writeErr(bytesWritten, ex);
// Overload used when the write failed before its request was queued on
// writeReqHead_: deliver writeErr() directly to the supplied callback.
1923 void AsyncSocket::failWrite(const char* fn, WriteCallback* callback,
1924 size_t bytesWritten,
1925 const AsyncSocketException& ex) {
1926 // This version of failWrite() is used when the failure occurs before
1927 // we've added the callback to writeReqHead_.
1928 VLOG(4) << "AsyncSocket(this=" << this << ", fd=" << fd_ << ", state="
1929 << state_ << " host=" << addr_.describe()
1930 <<"): failed while writing in " << fn << "(): "
1934 if (callback != nullptr) {
1935 callback->writeErr(bytesWritten, ex);
// Fail every queued write request with the given exception, draining the
// write queue. Used on forced shutdown or when an error occurs with
// writes still pending.
1941 void AsyncSocket::failAllWrites(const AsyncSocketException& ex) {
1942 // Invoke writeError() on all write callbacks.
1943 // This is used when writes are forcibly shutdown with write requests
1944 // pending, or when an error occurs with writes pending.
1945 while (writeReqHead_ != nullptr) {
1946 WriteRequest* req = writeReqHead_;
1947 writeReqHead_ = req->getNext();
1948 WriteCallback* callback = req->getCallback();
1950 callback->writeErr(req->getTotalBytesWritten(), ex);
// Reject a connect() made in an invalid socket state. Deliberately avoids
// the startFail()/finishFail() machinery (see comment below) and reports
// ALREADY_OPEN directly to the supplied callback.
1956 void AsyncSocket::invalidState(ConnectCallback* callback) {
1957 VLOG(5) << "AsyncSocket(this=" << this << ", fd=" << fd_
1958 << "): connect() called in invalid state " << state_;
1961 * The invalidState() methods don't use the normal failure mechanisms,
1962 * since we don't know what state we are in. We don't want to call
1963 * startFail()/finishFail() recursively if we are already in the middle of
1967 AsyncSocketException ex(AsyncSocketException::ALREADY_OPEN,
1968 "connect() called with socket in invalid state");
1969 connectEndTime_ = std::chrono::steady_clock::now();
1970 if (state_ == StateEnum::CLOSED || state_ == StateEnum::ERROR) {
1972 callback->connectErr(ex);
1975 // We can't use failConnect() here since connectCallback_
1976 // may already be set to another callback. Invoke this ConnectCallback
1977 // here; any other connectCallback_ will be invoked in finishFail()
1980 callback->connectErr(ex);
// Record the connect end time and deliver connectErr() to the connect
// callback (if installed), clearing it first so it fires exactly once.
1986 void AsyncSocket::invokeConnectErr(const AsyncSocketException& ex) {
1987 connectEndTime_ = std::chrono::steady_clock::now();
1988 if (connectCallback_) {
1989 ConnectCallback* callback = connectCallback_;
1990 connectCallback_ = nullptr;
1991 callback->connectErr(ex);
// Record the connect end time and deliver connectSuccess() to the connect
// callback (if installed), clearing it first so it fires exactly once.
1995 void AsyncSocket::invokeConnectSuccess() {
1996 connectEndTime_ = std::chrono::steady_clock::now();
1997 if (connectCallback_) {
1998 ConnectCallback* callback = connectCallback_;
1999 connectCallback_ = nullptr;
2000 callback->connectSuccess();
// Reject setReadCallback() made in an invalid socket state, delivering a
// NOT_OPEN error directly to the supplied callback (see the comment on the
// ConnectCallback overload for why the normal fail path is avoided).
2004 void AsyncSocket::invalidState(ReadCallback* callback) {
2005 VLOG(4) << "AsyncSocket(this=" << this << ", fd=" << fd_
2006 << "): setReadCallback(" << callback
2007 << ") called in invalid state " << state_;
2009 AsyncSocketException ex(AsyncSocketException::NOT_OPEN,
2010 "setReadCallback() called with socket in "
2012 if (state_ == StateEnum::CLOSED || state_ == StateEnum::ERROR) {
2014 callback->readErr(ex);
2019 callback->readErr(ex);
// Reject write() made in an invalid socket state, delivering a NOT_OPEN
// error (0 bytes written) directly to the supplied callback.
2025 void AsyncSocket::invalidState(WriteCallback* callback) {
2026 VLOG(4) << "AsyncSocket(this=" << this << ", fd=" << fd_
2027 << "): write() called in invalid state " << state_;
2029 AsyncSocketException ex(AsyncSocketException::NOT_OPEN,
2030 withAddr("write() called with socket in invalid state"));
2031 if (state_ == StateEnum::CLOSED || state_ == StateEnum::ERROR) {
2033 callback->writeErr(0, ex);
2038 callback->writeErr(0, ex);
// Close the underlying fd, routing through shutdownSocketSet_ when one is
// installed. NOTE(review): the plain ::close() branch and the fd_ reset
// are elided in this excerpt.
2044 void AsyncSocket::doClose() {
2045 if (fd_ == -1) return;
2046 if (shutdownSocketSet_) {
2047 shutdownSocketSet_->close(fd_);
// Stream a StateEnum as its underlying integer value (for logging).
// (The `return os;` line is elided in this excerpt.)
2054 std::ostream& operator << (std::ostream& os,
2055 const AsyncSocket::StateEnum& state) {
2056 os << static_cast<int>(state);
// Append "(peer=..., local=...)" to a message for error reporting. The
// addresses are fetched fresh (not from addr_) since the socket may have
// been constructed from a raw fd; lookup failures are tolerated (catch
// body elided in this excerpt) so the message is still produced.
2060 std::string AsyncSocket::withAddr(const std::string& s) {
2061 // Don't use addr_ directly because it may not be initialized
2062 // e.g. if constructed from fd
2063 folly::SocketAddress peer, local;
2065 getPeerAddress(&peer);
2066 getLocalAddress(&local);
2067 } catch (const std::exception&) {
2072 return s + " (peer=" + peer.describe() + ", local=" + local.describe() + ")";
2075 void AsyncSocket::setBufferCallback(BufferCallback* cb) {
2076 bufferCallback_ = cb;