2 * Copyright 2016 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #ifndef __STDC_FORMAT_MACROS
18 #define __STDC_FORMAT_MACROS
21 #include <folly/io/async/AsyncServerSocket.h>
23 #include <folly/FileUtil.h>
24 #include <folly/SocketAddress.h>
25 #include <folly/io/async/EventBase.h>
26 #include <folly/io/async/NotificationQueue.h>
30 #include <netinet/tcp.h>
32 #include <sys/socket.h>
33 #include <sys/types.h>
// Out-of-line definitions for the static constexpr members declared in the
// header.  Required to satisfy ODR-use before C++17 inline variables.
38 const uint32_t AsyncServerSocket::kDefaultMaxAcceptAtOnce;
39 const uint32_t AsyncServerSocket::kDefaultCallbackAcceptAtOnce;
40 const uint32_t AsyncServerSocket::kDefaultMaxMessagesInQueue;
/**
 * Set or clear the FD_CLOEXEC flag on a file descriptor.
 *
 * @param fd     The descriptor to modify.
 * @param value  Non-zero to set close-on-exec, zero to clear it.
 * @return 0 on success, or -1 on failure (errno is set by fcntl).
 */
int setCloseOnExec(int fd, int value) {
  // Read the current flags
  int old_flags = fcntl(fd, F_GETFD, 0);

  // If reading the flags failed, return error indication now
  if (old_flags < 0) {
    return -1;
  }

  // Set just the flag we want to set
  int new_flags;
  if (value != 0) {
    new_flags = old_flags | FD_CLOEXEC;
  } else {
    new_flags = old_flags & ~FD_CLOEXEC;
  }

  // Store modified flag word in the descriptor
  return fcntl(fd, F_SETFD, new_flags);
}
// Begin consuming accept notifications for this acceptor: set the per-wakeup
// read limit and the queue's maximum size, then hop to the target EventBase
// thread to announce acceptStarted() and start draining queue_.
// Throws std::invalid_argument if the start message cannot be scheduled on
// the target EventBase.
61 void AsyncServerSocket::RemoteAcceptor::start(
62 EventBase* eventBase, uint32_t maxAtOnce, uint32_t maxInQueue) {
63 setMaxReadAtOnce(maxAtOnce);
64 queue_.setMaxQueueSize(maxInQueue);
// The lambda captures `this` by value; it runs later in eventBase's thread.
66 if (!eventBase->runInEventBaseThread([=](){
67 callback_->acceptStarted();
68 this->startConsuming(eventBase, &queue_);
70 throw std::invalid_argument("unable to start waiting on accept "
71 "notification queue in the specified "
// Stop this acceptor: schedule acceptStopped() for the given callback in the
// acceptor's EventBase thread.  Throws std::invalid_argument if the stop
// message cannot be scheduled on the EventBase.
76 void AsyncServerSocket::RemoteAcceptor::stop(
77 EventBase* eventBase, AcceptCallback* callback) {
78 if (!eventBase->runInEventBaseThread([=](){
79 callback->acceptStopped();
82 throw std::invalid_argument("unable to start waiting on accept "
83 "notification queue in the specified "
// Runs in the acceptor's EventBase thread for each message pulled off the
// notification queue.  Dispatches on the message type:
//  - MSG_NEW_CONN: notify the connection-event observer (if any) that the
//    connection left the queue, then hand fd + address to the callback.
//  - MSG_ERROR: wrap the message text in std::runtime_error -> acceptError().
//  - anything else: log and report a runtime_error via acceptError().
88 void AsyncServerSocket::RemoteAcceptor::messageAvailable(
92 case MessageType::MSG_NEW_CONN:
94 if (connectionEventCallback_) {
95 connectionEventCallback_->onConnectionDequeuedByAcceptorCallback(
98 callback_->connectionAccepted(msg.fd, msg.address);
101 case MessageType::MSG_ERROR:
103 std::runtime_error ex(msg.msg);
104 callback_->acceptError(ex);
109 LOG(ERROR) << "invalid accept notification message type "
111 std::runtime_error ex(
112 "received invalid accept notification message type");
113 callback_->acceptError(ex);
119 * AsyncServerSocket::BackoffTimeout
// One-shot timer used while the server socket is in the accept-backoff
// state; when it fires it calls back into the owning socket so accepts can
// be re-enabled.
121 class AsyncServerSocket::BackoffTimeout : public AsyncTimeout {
123 // Disallow copy, move, and default constructors.
124 BackoffTimeout(BackoffTimeout&&) = delete;
// Bind the timeout to the socket's current EventBase.
125 BackoffTimeout(AsyncServerSocket* socket)
126 : AsyncTimeout(socket->getEventBase()), socket_(socket) {}
128 void timeoutExpired() noexcept override { socket_->backoffTimeoutExpired(); }
// Non-owning back-pointer; the socket owns this timer.
131 AsyncServerSocket* socket_;
135 * AsyncServerSocket methods
// Construct a server socket attached to `eventBase` (which may be nullptr
// and attached later via attachEventBase()).  Tunables start at the class
// defaults; no fds exist until bind()/useExistingSocket() is called.
138 AsyncServerSocket::AsyncServerSocket(EventBase* eventBase)
139 : eventBase_(eventBase),
141 maxAcceptAtOnce_(kDefaultMaxAcceptAtOnce),
142 maxNumMsgsInQueue_(kDefaultMaxMessagesInQueue),
143 acceptRateAdjustSpeed_(0),
145 lastAccepTimestamp_(std::chrono::steady_clock::now()),
146 numDroppedConnections_(0),
148 backoffTimeout_(nullptr),
150 keepAliveEnabled_(true),
152 shutdownSocketSet_(nullptr) {
// Swap in a new ShutdownSocketSet: remove every listening fd from the old
// set (if any), then add them all to the new one.  No-op when the set is
// unchanged.
155 void AsyncServerSocket::setShutdownSocketSet(ShutdownSocketSet* newSS) {
156 if (shutdownSocketSet_ == newSS) {
159 if (shutdownSocketSet_) {
160 for (auto& h : sockets_) {
161 shutdownSocketSet_->remove(h.socket_);
164 shutdownSocketSet_ = newSS;
165 if (shutdownSocketSet_) {
166 for (auto& h : sockets_) {
167 shutdownSocketSet_->add(h.socket_);
// All accept callbacks must already have been removed (see destroy()/
// stopAccepting()) before the destructor runs.
172 AsyncServerSocket::~AsyncServerSocket() {
173 assert(callbacks_.empty());
// Stop accepting and tear down every listening socket.  A non-negative
// shutdownFlags requests shutdown(2) with those flags (the fd close is
// deferred via pendingCloseSockets_ until destroy()); otherwise fds are
// closed immediately (or handed to the ShutdownSocketSet when one is set).
// Returns the result of the last shutdownNoInt() call, if any.
176 int AsyncServerSocket::stopAccepting(int shutdownFlags) {
178 for (auto& handler : sockets_) {
179 VLOG(10) << "AsyncServerSocket::stopAccepting " << this <<
182 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
184 // When destroy is called, unregister and close the socket immediately.
187 // Close the sockets in reverse order as they were opened to avoid
188 // the condition where another process concurrently tries to open
189 // the same port, succeed to bind the first socket but fails on the
190 // second because it hasn't been closed yet.
191 for (; !sockets_.empty(); sockets_.pop_back()) {
192 auto& handler = sockets_.back();
193 handler.unregisterHandler();
194 if (shutdownSocketSet_) {
195 shutdownSocketSet_->close(handler.socket_);
196 } else if (shutdownFlags >= 0) {
197 result = shutdownNoInt(handler.socket_, shutdownFlags);
// Defer the close: destroy() drains pendingCloseSockets_ later.
198 pendingCloseSockets_.push_back(handler.socket_);
200 closeNoInt(handler.socket_);
204 // Destroy the backoff timeout. This will cancel it if it is running.
205 delete backoffTimeout_;
206 backoffTimeout_ = nullptr;
208 // Close all of the callback queues to notify them that they are being
209 // destroyed. No one should access the AsyncServerSocket any more once
210 // destroy() is called. However, clear out callbacks_ before invoking the
211 // accept callbacks just in case. This will potentially help us detect the
212 // bug if one of the callbacks calls addAcceptCallback() or
213 // removeAcceptCallback().
214 std::vector<CallbackInfo> callbacksCopy;
215 callbacks_.swap(callbacksCopy);
216 for (std::vector<CallbackInfo>::iterator it = callbacksCopy.begin();
217 it != callbacksCopy.end();
219 it->consumer->stop(it->eventBase, it->callback);
// Public destruction entry point: close any fds whose close was deferred by
// stopAccepting(), then delegate to DelayedDestruction::destroy() which
// decides between immediate and deferred deletion.
225 void AsyncServerSocket::destroy() {
227 for (auto s : pendingCloseSockets_) {
230 // Then call DelayedDestruction::destroy() to take care of
231 // whether or not we need immediate or delayed destruction
232 DelayedDestruction::destroy();
// Re-attach to an EventBase after detachEventBase().  Must be called from
// the new EventBase's thread; every listening handler is re-attached too.
235 void AsyncServerSocket::attachEventBase(EventBase *eventBase) {
236 assert(eventBase_ == nullptr);
237 assert(eventBase->isInEventBaseThread());
239 eventBase_ = eventBase;
240 for (auto& handler : sockets_) {
241 handler.attachEventBase(eventBase);
// Detach from the current EventBase (must be called from its thread).
// Detaches every listening handler; attachEventBase() re-enables them.
245 void AsyncServerSocket::detachEventBase() {
246 assert(eventBase_ != nullptr);
247 assert(eventBase_->isInEventBaseThread());
250 eventBase_ = nullptr;
251 for (auto& handler : sockets_) {
252 handler.detachEventBase();
// Adopt a set of pre-existing socket fds.  Only valid before any socket has
// been created or bound on this object; each fd is configured through
// setupSocket() and wrapped in a handler entry in sockets_.
256 void AsyncServerSocket::useExistingSockets(const std::vector<int>& fds) {
257 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
259 if (sockets_.size() > 0) {
260 throw std::invalid_argument(
261 "cannot call useExistingSocket() on a "
262 "AsyncServerSocket that already has a socket");
266 // Set addressFamily_ from this socket.
267 // Note that the socket may not have been bound yet, but
268 // setFromLocalAddress() will still work and get the correct address family.
269 // We will update addressFamily_ again anyway if bind() is called later.
270 SocketAddress address;
271 address.setFromLocalAddress(fd);
273 setupSocket(fd, address.getFamily());
274 sockets_.emplace_back(eventBase_, fd, this, address.getFamily());
275 sockets_.back().changeHandlerFD(fd);
279 void AsyncServerSocket::useExistingSocket(int fd) {
280 useExistingSockets({fd});
// Bind `fd` to `address`.  On failure a system_error is thrown; on success a
// new handler entry is added to sockets_ only when we created the fd here —
// an adopted fd (isExistingSocket == true) already has one.
// NOTE(review): on the bind-failure path the !isExistingSocket branch
// presumably releases the fd we created before throwing — confirm.
283 void AsyncServerSocket::bindSocket(
285 const SocketAddress& address,
286 bool isExistingSocket) {
287 sockaddr_storage addrStorage;
288 address.getAddress(&addrStorage);
289 sockaddr* saddr = reinterpret_cast<sockaddr*>(&addrStorage);
290 if (::bind(fd, saddr, address.getActualSize()) != 0) {
291 if (!isExistingSocket) {
294 folly::throwSystemError(errno,
295 "failed to bind to async server socket: " +
299 // If we just created this socket, update the EventHandler and set socket_
300 if (!isExistingSocket) {
301 sockets_.emplace_back(eventBase_, fd, this, address.getFamily());
// Bind to a single address.  Reuses the fd adopted via useExistingSocket()
// when exactly one exists (its address family must match); creates a fresh
// socket otherwise.  More than one existing fd is an error for this
// overload.
305 void AsyncServerSocket::bind(const SocketAddress& address) {
306 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
308 // useExistingSocket() may have been called to initialize socket_ already.
309 // However, in the normal case we need to create a new socket now.
310 // Don't set socket_ yet, so that socket_ will remain uninitialized if an
313 if (sockets_.size() == 0) {
314 fd = createSocket(address.getFamily());
315 } else if (sockets_.size() == 1) {
316 if (address.getFamily() != sockets_[0].addressFamily_) {
317 throw std::invalid_argument(
318 "Attempted to bind address to socket with "
319 "different address family");
321 fd = sockets_[0].socket_;
323 throw std::invalid_argument(
324 "Attempted to bind to multiple fds");
// Treat the fd as pre-existing when it came from useExistingSocket().
327 bindSocket(fd, address, !sockets_.empty());
// Bind one new socket per IP address, all on the same port.  Rejects an
// empty address list and refuses to run on an object that already has
// sockets.  Throws if, after the loop, nothing was bound.
330 void AsyncServerSocket::bind(
331 const std::vector<IPAddress>& ipAddresses,
333 if (ipAddresses.empty()) {
334 throw std::invalid_argument("No ip addresses were provided");
336 if (!sockets_.empty()) {
337 throw std::invalid_argument("Cannot call bind on a AsyncServerSocket "
338 "that already has a socket.");
341 for (const IPAddress& ipAddress : ipAddresses) {
342 SocketAddress address(ipAddress.toFullyQualified(), port);
343 int fd = createSocket(address.getFamily());
345 bindSocket(fd, address, false);
347 if (sockets_.size() == 0) {
348 throw std::runtime_error(
349 "did not bind any async server socket for port and addresses");
// Bind to the given port on all local interfaces.  Resolves wildcard
// addresses with getaddrinfo(AI_PASSIVE), binds IPv6 first (see the RFC 3484
// note below), and — when port == 0 — retries up to kNumTries times to get
// the same ephemeral port for both IPv6 and IPv4.
353 void AsyncServerSocket::bind(uint16_t port) {
354 struct addrinfo hints, *res, *res0;
// Large enough for the longest decimal port string plus NUL.
355 char sport[sizeof("65536")];
357 memset(&hints, 0, sizeof(hints));
358 hints.ai_family = AF_UNSPEC;
359 hints.ai_socktype = SOCK_STREAM;
360 hints.ai_flags = AI_PASSIVE;
361 snprintf(sport, sizeof(sport), "%u", port);
363 if (getaddrinfo(nullptr, sport, &hints, &res0)) {
364 throw std::invalid_argument(
365 "Attempted to bind address to socket with "
// Ensure the addrinfo list is released on every exit path.
369 SCOPE_EXIT { freeaddrinfo(res0); };
// Helper: create, configure, and bind a socket for one addrinfo entry.
371 auto setupAddress = [&] (struct addrinfo* res) {
372 int s = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
373 // IPv6/IPv4 may not be supported by the kernel
374 if (s < 0 && errno == EAFNOSUPPORT) {
380 setupSocket(s, res->ai_family);
386 if (res->ai_family == AF_INET6) {
388 CHECK(0 == setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY,
389 &v6only, sizeof(v6only)));
392 SocketAddress address;
393 address.setFromLocalAddress(s);
395 sockets_.emplace_back(eventBase_, s, this, address.getFamily());
397 // Bind to the socket
398 if (::bind(s, res->ai_addr, res->ai_addrlen) != 0) {
399 folly::throwSystemError(
401 "failed to bind to async server socket for port ",
402 SocketAddress::getPortFrom(res->ai_addr),
404 SocketAddress::getFamilyNameFrom(res->ai_addr, "<unknown>"));
408 const int kNumTries = 25;
409 for (int tries = 1; true; tries++) {
410 // Prefer AF_INET6 addresses. RFC 3484 mandates that getaddrinfo
411 // should return IPv6 first and then IPv4 addresses, but glibc's
412 // getaddrinfo(nullptr) with AI_PASSIVE returns:
413 // - 0.0.0.0 (IPv4-only)
414 // - :: (IPv6+IPv4) in this order
415 // See: https://sourceware.org/bugzilla/show_bug.cgi?id=9981
416 for (res = res0; res; res = res->ai_next) {
417 if (res->ai_family == AF_INET6) {
422 // If port == 0, then we should try to bind to the same port on ipv4 and
423 // ipv6. So if we did bind to ipv6, figure out that port and use it.
424 if (sockets_.size() == 1 && port == 0) {
425 SocketAddress address;
426 address.setFromLocalAddress(sockets_.back().socket_);
427 snprintf(sport, sizeof(sport), "%u", address.getPort());
429 CHECK_EQ(0, getaddrinfo(nullptr, sport, &hints, &res0));
// Second pass: bind the non-IPv6 (i.e. IPv4) addresses.
433 for (res = res0; res; res = res->ai_next) {
434 if (res->ai_family != AF_INET6) {
438 } catch (const std::system_error& e) {
439 // If we can't bind to the same port on ipv4 as ipv6 when using
440 // port=0 then we will retry again before giving up after
441 // kNumTries attempts. We do this by closing the sockets that
442 // were opened, then restarting from scratch.
443 if (port == 0 && !sockets_.empty() && tries != kNumTries) {
444 for (const auto& socket : sockets_) {
445 if (socket.socket_ <= 0) {
447 } else if (shutdownSocketSet_) {
448 shutdownSocketSet_->close(socket.socket_);
450 closeNoInt(socket.socket_);
// Reset the port string and re-resolve for the next attempt.
454 snprintf(sport, sizeof(sport), "%u", port);
456 CHECK_EQ(0, getaddrinfo(nullptr, sport, &hints, &res0));
466 if (sockets_.size() == 0) {
467 throw std::runtime_error(
468 "did not bind any async server socket for port");
// Put every bound socket into the listening state with the given backlog.
// Throws a system_error on the first listen(2) failure.
472 void AsyncServerSocket::listen(int backlog) {
473 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
476 for (auto& handler : sockets_) {
477 if (::listen(handler.socket_, backlog) == -1) {
478 folly::throwSystemError(errno,
479 "failed to listen on async server socket");
// Return the local address of the first listening socket.  Logs a warning
// when more than one socket exists, since only the first is reported; use
// getAddresses() to see them all.
484 void AsyncServerSocket::getAddress(SocketAddress* addressReturn) const {
485 CHECK(sockets_.size() >= 1);
486 VLOG_IF(2, sockets_.size() > 1)
487 << "Warning: getAddress() called and multiple addresses available ("
488 << sockets_.size() << "). Returning only the first one.";
490 addressReturn->setFromLocalAddress(sockets_[0].socket_);
// Return the local address of every listening socket, in the order the
// sockets were created.
493 std::vector<SocketAddress> AsyncServerSocket::getAddresses()
495 CHECK(sockets_.size() >= 1);
496 auto tsaVec = std::vector<SocketAddress>(sockets_.size());
497 auto tsaIter = tsaVec.begin();
498 for (const auto& socket : sockets_) {
499 (tsaIter++)->setFromLocalAddress(socket.socket_);
// Register an AcceptCallback.  A nullptr eventBase means "run in the server
// socket's own EventBase"; a non-null one gets a RemoteAcceptor that relays
// accepted connections over a notification queue to that EventBase.  If this
// is the first callback and accepting was already requested, accepting
// actually starts here.
504 void AsyncServerSocket::addAcceptCallback(AcceptCallback *callback,
505 EventBase *eventBase,
506 uint32_t maxAtOnce) {
507 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
509 // If this is the first accept callback and we are supposed to be accepting,
510 // start accepting once the callback is installed.
511 bool runStartAccepting = accepting_ && callbacks_.empty();
514 eventBase = eventBase_; // Run in AsyncServerSocket's eventbase
517 callbacks_.emplace_back(callback, eventBase);
519 // Start the remote acceptor.
521 // It would be nice if we could avoid starting the remote acceptor if
522 // eventBase == eventBase_. However, that would cause issues if
523 // detachEventBase() and attachEventBase() were ever used to change the
524 // primary EventBase for the server socket. Therefore we require the caller
525 // to specify a nullptr EventBase if they want to ensure that the callback is
526 // always invoked in the primary EventBase, and to be able to invoke that
527 // callback more efficiently without having to use a notification queue.
528 RemoteAcceptor* acceptor = nullptr;
530 acceptor = new RemoteAcceptor(callback, connectionEventCallback_);
531 acceptor->start(eventBase, maxAtOnce, maxNumMsgsInQueue_);
// Roll back the callbacks_ entry if starting the acceptor failed.
533 callbacks_.pop_back();
537 callbacks_.back().consumer = acceptor;
539 // If this is the first accept callback and we are supposed to be accepting,
541 if (runStartAccepting) {
// Unregister an AcceptCallback previously added with addAcceptCallback().
// A nullptr eventBase matches any entry for `callback`.  Throws
// runtime_error if no matching entry exists.  If this was the last callback
// while accepting, the handlers are unregistered until a callback returns.
546 void AsyncServerSocket::removeAcceptCallback(AcceptCallback *callback,
547 EventBase *eventBase) {
548 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
550 // Find the matching AcceptCallback.
551 // We just do a simple linear search; we don't expect removeAcceptCallback()
552 // to be called frequently, and we expect there to only be a small number of
554 std::vector<CallbackInfo>::iterator it = callbacks_.begin();
557 if (it == callbacks_.end()) {
558 throw std::runtime_error("AsyncServerSocket::removeAcceptCallback(): "
559 "accept callback not found");
561 if (it->callback == callback &&
562 (it->eventBase == eventBase || eventBase == nullptr)) {
569 // Remove this callback from callbacks_.
571 // Do this before invoking the acceptStopped() callback, in case
572 // acceptStopped() invokes one of our methods that examines callbacks_.
574 // Save a copy of the CallbackInfo first.
575 CallbackInfo info(*it);
576 callbacks_.erase(it);
577 if (n < callbackIndex_) {
578 // We removed an element before callbackIndex_. Move callbackIndex_ back
579 // one step, since things after n have been shifted back by 1.
582 // We removed something at or after callbackIndex_.
583 // If we removed the last element and callbackIndex_ was pointing at it,
584 // we need to reset callbackIndex_ to 0.
585 if (callbackIndex_ >= callbacks_.size()) {
// Stop the consumer (if remote) and deliver acceptStopped().
590 info.consumer->stop(info.eventBase, info.callback);
592 // If we are supposed to be accepting but the last accept callback
593 // was removed, unregister for events until a callback is added.
594 if (accepting_ && callbacks_.empty()) {
595 for (auto& handler : sockets_) {
596 handler.unregisterHandler();
// Request that accepting begin.  If no callbacks are installed yet this only
// records the intent (addAcceptCallback() will start for real); otherwise
// every listening handler is registered for persistent READ events.
601 void AsyncServerSocket::startAccepting() {
602 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
605 if (callbacks_.empty()) {
606 // We can't actually begin accepting if no callbacks are defined.
607 // Wait until a callback is added to start accepting.
611 for (auto& handler : sockets_) {
612 if (!handler.registerHandler(
613 EventHandler::READ | EventHandler::PERSIST)) {
614 throw std::runtime_error("failed to register for accept events");
// Temporarily stop accepting: unregister every listening handler and cancel
// any pending backoff timer.  Callbacks stay installed; startAccepting()
// resumes.
619 void AsyncServerSocket::pauseAccepting() {
620 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
622 for (auto& handler : sockets_) {
623 handler. unregisterHandler();
626 // If we were in the accept backoff state, disable the backoff timeout
627 if (backoffTimeout_) {
628 backoffTimeout_->cancelTimeout();
// Create and configure (via setupSocket) a new stream socket for the given
// address family.  Throws a system_error if socket(2) fails.
632 int AsyncServerSocket::createSocket(int family) {
633 int fd = socket(family, SOCK_STREAM, 0);
635 folly::throwSystemError(errno, "error creating async server socket");
639 setupSocket(fd, family);
// Apply the standard listening-socket options to `fd`:
// non-blocking, SO_REUSEADDR, optional SO_REUSEPORT, SO_KEEPALIVE per
// keepAliveEnabled_, FD_CLOEXEC per closeOnExec_, TCP_NODELAY for non-UNIX
// families, and registration with the ShutdownSocketSet when one is set.
// Only the non-blocking fcntl is fatal; most option failures just log.
647 void AsyncServerSocket::setupSocket(int fd, int family) {
648 // Put the socket in non-blocking mode
649 if (fcntl(fd, F_SETFL, O_NONBLOCK) != 0) {
650 folly::throwSystemError(errno,
651 "failed to put socket in non-blocking mode");
654 // Set reuseaddr to avoid 2MSL delay on server restart
656 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) != 0) {
657 // This isn't a fatal error; just log an error message and continue
658 LOG(ERROR) << "failed to set SO_REUSEADDR on async server socket " << errno;
661 // Set reuseport to support multiple accept threads
663 if (reusePortEnabled_ &&
664 setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(int)) != 0) {
665 LOG(ERROR) << "failed to set SO_REUSEPORT on async server socket "
// NOTE(review): this error path reports the local address in the thrown
// message; surrounding lines are elided here — confirm the intended flow.
668 folly::throwSystemError(errno, "failed to bind to the async server socket");
670 SocketAddress address;
671 address.setFromLocalAddress(fd);
672 folly::throwSystemError(errno,
673 "failed to bind to async server socket: " +
678 // Set keepalive as desired
679 if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE,
680 (keepAliveEnabled_) ? &one : &zero, sizeof(int)) != 0) {
681 LOG(ERROR) << "failed to set SO_KEEPALIVE on async server socket: " <<
685 // Setup FD_CLOEXEC flag
687 (-1 == folly::setCloseOnExec(fd, closeOnExec_))) {
688 LOG(ERROR) << "failed to set FD_CLOEXEC on async server socket: " <<
692 // Set TCP nodelay if available, MAC OS X Hack
693 // See http://lists.danga.com/pipermail/memcached/2005-March/001240.html
695 if (family != AF_UNIX) {
696 if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) != 0) {
697 // This isn't a fatal error; just log an error message and continue
698 LOG(ERROR) << "failed to set TCP_NODELAY on async server socket: " <<
704 if (shutdownSocketSet_) {
705 shutdownSocketSet_->add(fd);
// Libevent callback: the listening fd is readable.  Accepts up to
// maxAcceptAtOnce_ connections, applies the accept-rate throttle (randomly
// dropping connections while acceptRate_ < 1), handles accept errors
// (EAGAIN -> done; EMFILE/ENFILE -> enter backoff), and dispatches each
// accepted fd to a callback.
709 void AsyncServerSocket::handlerReady(uint16_t /* events */,
711 sa_family_t addressFamily) noexcept {
712 assert(!callbacks_.empty());
// Guard against destruction from within a callback during this loop.
713 DestructorGuard dg(this);
715 // Only accept up to maxAcceptAtOnce_ connections at a time,
716 // to avoid starving other I/O handlers using this EventBase.
717 for (uint32_t n = 0; n < maxAcceptAtOnce_; ++n) {
718 SocketAddress address;
720 sockaddr_storage addrStorage;
721 socklen_t addrLen = sizeof(addrStorage);
722 sockaddr* saddr = reinterpret_cast<sockaddr*>(&addrStorage);
724 // In some cases, accept() doesn't seem to update these correctly.
725 saddr->sa_family = addressFamily;
726 if (addressFamily == AF_UNIX) {
727 addrLen = sizeof(struct sockaddr_un);
730 // Accept a new client socket
// accept4 with SOCK_NONBLOCK is used where available; plain accept plus a
// later fcntl(O_NONBLOCK) otherwise (see the #ifndef SOCK_NONBLOCK below).
732 int clientSocket = accept4(fd, saddr, &addrLen, SOCK_NONBLOCK);
734 int clientSocket = accept(fd, saddr, &addrLen);
737 address.setFromSockaddr(saddr, addrLen);
739 if (clientSocket >= 0 && connectionEventCallback_) {
740 connectionEventCallback_->onConnectionAccepted(clientSocket, address);
// Accept-rate throttling: recover acceptRate_ toward 1 based on the time
// since the previous accept, and randomly drop while it is below 1.
743 std::chrono::time_point<std::chrono::steady_clock> nowMs =
744 std::chrono::steady_clock::now();
745 auto timeSinceLastAccept = std::max<int64_t>(
747 nowMs.time_since_epoch().count() -
748 lastAccepTimestamp_.time_since_epoch().count());
749 lastAccepTimestamp_ = nowMs;
750 if (acceptRate_ < 1) {
751 acceptRate_ *= 1 + acceptRateAdjustSpeed_ * timeSinceLastAccept;
752 if (acceptRate_ >= 1) {
754 } else if (rand() > acceptRate_ * RAND_MAX) {
755 ++numDroppedConnections_;
756 if (clientSocket >= 0) {
757 closeNoInt(clientSocket);
758 if (connectionEventCallback_) {
759 connectionEventCallback_->onConnectionDropped(clientSocket,
767 if (clientSocket < 0) {
768 if (errno == EAGAIN) {
769 // No more sockets to accept right now.
770 // Check for this code first, since it's the most common.
772 } else if (errno == EMFILE || errno == ENFILE) {
773 // We're out of file descriptors. Perhaps we're accepting connections
774 // too quickly. Pause accepting briefly to back off and give the server
775 // a chance to recover.
776 LOG(ERROR) << "accept failed: out of file descriptors; entering accept "
780 // Dispatch the error message
781 dispatchError("accept() failed", errno);
783 dispatchError("accept() failed", errno);
785 if (connectionEventCallback_) {
786 connectionEventCallback_->onConnectionAcceptError(errno);
791 #ifndef SOCK_NONBLOCK
792 // Explicitly set the new connection to non-blocking mode
793 if (fcntl(clientSocket, F_SETFL, O_NONBLOCK) != 0) {
794 closeNoInt(clientSocket);
795 dispatchError("failed to set accepted socket to non-blocking mode",
797 if (connectionEventCallback_) {
798 connectionEventCallback_->onConnectionDropped(clientSocket, address);
804 // Inform the callback about the new connection
805 dispatchSocket(clientSocket, std::move(address));
807 // If we aren't accepting any more, break out of the loop
808 if (!accepting_ || callbacks_.empty()) {
// Hand an accepted socket to the next callback (round-robin).  A callback
// with a nullptr EventBase is invoked synchronously; otherwise the fd is
// enqueued on the callback's RemoteAcceptor queue.  If every queue is full
// after a complete round-robin pass, the connection is dropped and — when
// rate adjustment is enabled — the accept rate is aggressively reduced.
814 void AsyncServerSocket::dispatchSocket(int socket,
815 SocketAddress&& address) {
816 uint32_t startingIndex = callbackIndex_;
818 // Short circuit if the callback is in the primary EventBase thread
820 CallbackInfo *info = nextCallback();
821 if (info->eventBase == nullptr) {
822 info->callback->connectionAccepted(socket, address);
// Keep a copy: `address` is moved into the queued message below but is
// still needed for the drop/enqueue notifications.
826 const SocketAddress addr(address);
827 // Create a message to send over the notification queue
829 msg.type = MessageType::MSG_NEW_CONN;
830 msg.address = std::move(address);
833 // Loop until we find a free queue to write to
835 if (info->consumer->getQueue()->tryPutMessageNoThrow(std::move(msg))) {
836 if (connectionEventCallback_) {
837 connectionEventCallback_->onConnectionEnqueuedForAcceptorCallback(
845 // We couldn't add to queue. Fall through to below
847 ++numDroppedConnections_;
848 if (acceptRateAdjustSpeed_ > 0) {
849 // aggressively decrease accept rate when in trouble
850 static const double kAcceptRateDecreaseSpeed = 0.1;
851 acceptRate_ *= 1 - kAcceptRateDecreaseSpeed;
855 if (callbackIndex_ == startingIndex) {
856 // The notification queue was full
857 // We can't really do anything at this point other than close the socket.
859 // This should only happen if a user's service is behaving extremely
860 // badly and none of the EventBase threads are looping fast enough to
861 // process the incoming connections. If the service is overloaded, it
862 // should use pauseAccepting() to temporarily back off accepting new
863 // connections, before they reach the point where their threads can't
864 // even accept new messages.
865 LOG(ERROR) << "failed to dispatch newly accepted socket:"
866 << " all accept callback queues are full";
868 if (connectionEventCallback_) {
869 connectionEventCallback_->onConnectionDropped(socket, addr);
// Advance to the next callback and retry the enqueue.
874 info = nextCallback();
// Deliver an accept error to the next callback (round-robin).  Same dispatch
// scheme as dispatchSocket(): synchronous for a nullptr-EventBase callback,
// queued otherwise; if every queue is full after a full pass the error is
// only logged.
878 void AsyncServerSocket::dispatchError(const char *msgstr, int errnoValue) {
879 uint32_t startingIndex = callbackIndex_;
880 CallbackInfo *info = nextCallback();
882 // Create a message to send over the notification queue
884 msg.type = MessageType::MSG_ERROR;
885 msg.err = errnoValue;
886 msg.msg = std::move(msgstr);
889 // Short circuit if the callback is in the primary EventBase thread
890 if (info->eventBase == nullptr) {
891 std::runtime_error ex(
892 std::string(msgstr) + folly::to<std::string>(errnoValue));
893 info->callback->acceptError(ex);
897 if (info->consumer->getQueue()->tryPutMessageNoThrow(std::move(msg))) {
900 // Fall through and try another callback
902 if (callbackIndex_ == startingIndex) {
903 // The notification queues for all of the callbacks were full.
904 // We can't really do anything at this point.
905 LOG(ERROR) << "failed to dispatch accept error: all accept callback "
906 "queues are full: error msg: " <<
907 msg.msg.c_str() << errnoValue;
// Advance to the next callback and retry.
910 info = nextCallback();
// Enter the accept-backoff state after EMFILE/ENFILE: lazily allocate the
// BackoffTimeout, schedule it for 1 second, and unregister the handlers so
// no accepts happen until it fires.  accepting_ stays true — it tracks the
// user's desired state, not the transient backoff.  Allocation or scheduling
// failure keeps accepting (and notifies onBackoffError()) since we could not
// re-enable ourselves later.
914 void AsyncServerSocket::enterBackoff() {
915 // If this is the first time we have entered the backoff state,
916 // allocate backoffTimeout_.
917 if (backoffTimeout_ == nullptr) {
919 backoffTimeout_ = new BackoffTimeout(this);
920 } catch (const std::bad_alloc& ex) {
921 // Man, we couldn't even allocate the timer to re-enable accepts.
922 // We must be in pretty bad shape. Don't pause accepting for now,
923 // since we won't be able to re-enable ourselves later.
924 LOG(ERROR) << "failed to allocate AsyncServerSocket backoff"
925 << " timer; unable to temporarly pause accepting";
926 if (connectionEventCallback_) {
927 connectionEventCallback_->onBackoffError();
933 // For now, we simply pause accepting for 1 second.
935 // We could add some smarter backoff calculation here in the future. (e.g.,
936 // start sleeping for longer if we keep hitting the backoff frequently.)
937 // Typically the user needs to figure out why the server is overloaded and
938 // fix it in some other way, though. The backoff timer is just a simple
939 // mechanism to try and give the connection processing code a little bit of
940 // breathing room to catch up, and to avoid just spinning and failing to
941 // accept over and over again.
942 const uint32_t timeoutMS = 1000;
943 if (!backoffTimeout_->scheduleTimeout(timeoutMS)) {
944 LOG(ERROR) << "failed to schedule AsyncServerSocket backoff timer;"
945 << "unable to temporarly pause accepting";
946 if (connectionEventCallback_) {
947 connectionEventCallback_->onBackoffError();
952 // The backoff timer is scheduled to re-enable accepts.
953 // Go ahead and disable accepts for now. We leave accepting_ set to true,
954 // since that tracks the desired state requested by the user.
955 for (auto& handler : sockets_) {
956 handler.unregisterHandler();
958 if (connectionEventCallback_) {
959 connectionEventCallback_->onBackoffStarted();
// Called when the backoff timer fires: leave the backoff state and, unless
// all callbacks were removed in the meantime, re-register every handler for
// READ|PERSIST events.  Registration failure here is treated as fatal (see
// the comment below) since we could otherwise loop retrying forever.
963 void AsyncServerSocket::backoffTimeoutExpired() {
964 // accepting_ should still be true.
965 // If pauseAccepting() was called while in the backoff state it will cancel
966 // the backoff timeout.
968 // We can't be detached from the EventBase without being paused
969 assert(eventBase_ != nullptr && eventBase_->isInEventBaseThread());
971 // If all of the callbacks were removed, we shouldn't re-enable accepts
972 if (callbacks_.empty()) {
973 if (connectionEventCallback_) {
974 connectionEventCallback_->onBackoffEnded();
979 // Register the handler.
980 for (auto& handler : sockets_) {
981 if (!handler.registerHandler(
982 EventHandler::READ | EventHandler::PERSIST)) {
983 // We're hosed. We could just re-schedule backoffTimeout_ to
984 // re-try again after a little bit. However, we don't want to
985 // loop retrying forever if we can't re-enable accepts. Just
986 // abort the entire program in this state; things are really bad
987 // and restarting the entire server is probably the best remedy.
989 << "failed to re-enable AsyncServerSocket accepts after backoff; "
994 if (connectionEventCallback_) {
995 connectionEventCallback_->onBackoffEnded();