/*
 * Copyright 2014 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #ifndef __STDC_FORMAT_MACROS
18 #define __STDC_FORMAT_MACROS
21 #include <folly/io/async/AsyncServerSocket.h>
23 #include <folly/io/async/EventBase.h>
24 #include <folly/io/async/NotificationQueue.h>
25 #include <folly/SocketAddress.h>
31 #include <sys/types.h>
32 #include <sys/socket.h>
33 #include <netinet/tcp.h>
37 const uint32_t AsyncServerSocket::kDefaultMaxAcceptAtOnce;
38 const uint32_t AsyncServerSocket::kDefaultCallbackAcceptAtOnce;
39 const uint32_t AsyncServerSocket::kDefaultMaxMessagesInQueue;
/**
 * Set or clear the FD_CLOEXEC flag on a file descriptor.
 *
 * @param fd     Descriptor to modify.
 * @param value  Non-zero to set FD_CLOEXEC, zero to clear it.
 * @return 0 on success, -1 on error (errno is set by fcntl()).
 */
int setCloseOnExec(int fd, int value) {
  // Read the current flags
  int old_flags = fcntl(fd, F_GETFD, 0);

  // If reading the flags failed, return error indication now
  if (old_flags < 0) {
    return -1;
  }

  // Set just the flag we want to set
  int new_flags;
  if (value != 0) {
    new_flags = old_flags | FD_CLOEXEC;
  } else {
    new_flags = old_flags & ~FD_CLOEXEC;
  }

  // Store modified flag word in the descriptor
  return fcntl(fd, F_SETFD, new_flags);
}
60 void AsyncServerSocket::RemoteAcceptor::start(
61 EventBase* eventBase, uint32_t maxAtOnce, uint32_t maxInQueue) {
62 setMaxReadAtOnce(maxAtOnce);
63 queue_.setMaxQueueSize(maxInQueue);
65 if (!eventBase->runInEventBaseThread([=](){
66 callback_->acceptStarted();
67 this->startConsuming(eventBase, &queue_);
69 throw std::invalid_argument("unable to start waiting on accept "
70 "notification queue in the specified "
75 void AsyncServerSocket::RemoteAcceptor::stop(
76 EventBase* eventBase, AcceptCallback* callback) {
77 if (!eventBase->runInEventBaseThread([=](){
78 callback->acceptStopped();
81 throw std::invalid_argument("unable to start waiting on accept "
82 "notification queue in the specified "
87 void AsyncServerSocket::RemoteAcceptor::messageAvailable(
91 case MessageType::MSG_NEW_CONN:
93 callback_->connectionAccepted(msg.fd, msg.address);
96 case MessageType::MSG_ERROR:
98 std::runtime_error ex(msg.msg);
99 callback_->acceptError(ex);
104 LOG(ERROR) << "invalid accept notification message type "
106 std::runtime_error ex(
107 "received invalid accept notification message type");
108 callback_->acceptError(ex);
/**
 * AsyncServerSocket::BackoffTimeout
 */
116 class AsyncServerSocket::BackoffTimeout : public AsyncTimeout {
118 BackoffTimeout(AsyncServerSocket* socket)
119 : AsyncTimeout(socket->getEventBase()),
122 virtual void timeoutExpired() noexcept {
123 socket_->backoffTimeoutExpired();
127 AsyncServerSocket* socket_;
/**
 * AsyncServerSocket methods
 */
134 AsyncServerSocket::AsyncServerSocket(EventBase* eventBase)
135 : eventBase_(eventBase),
137 maxAcceptAtOnce_(kDefaultMaxAcceptAtOnce),
138 maxNumMsgsInQueue_(kDefaultMaxMessagesInQueue),
139 acceptRateAdjustSpeed_(0),
141 lastAccepTimestamp_(std::chrono::steady_clock::now()),
142 numDroppedConnections_(0),
144 backoffTimeout_(nullptr),
146 keepAliveEnabled_(true),
148 shutdownSocketSet_(nullptr) {
151 void AsyncServerSocket::setShutdownSocketSet(ShutdownSocketSet* newSS) {
152 if (shutdownSocketSet_ == newSS) {
155 if (shutdownSocketSet_) {
156 for (auto& h : sockets_) {
157 shutdownSocketSet_->remove(h.socket_);
160 shutdownSocketSet_ = newSS;
161 if (shutdownSocketSet_) {
162 for (auto& h : sockets_) {
163 shutdownSocketSet_->add(h.socket_);
168 AsyncServerSocket::~AsyncServerSocket() {
169 assert(callbacks_.empty());
172 int AsyncServerSocket::stopAccepting(int shutdownFlags) {
174 for (auto& handler : sockets_) {
175 VLOG(10) << "AsyncServerSocket::stopAccepting " << this <<
178 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
180 // When destroy is called, unregister and close the socket immediately
183 for (auto& handler : sockets_) {
184 handler.unregisterHandler();
185 if (shutdownSocketSet_) {
186 shutdownSocketSet_->close(handler.socket_);
187 } else if (shutdownFlags >= 0) {
188 result = ::shutdown(handler.socket_, shutdownFlags);
189 pendingCloseSockets_.push_back(handler.socket_);
191 ::close(handler.socket_);
196 // Destroy the backoff timout. This will cancel it if it is running.
197 delete backoffTimeout_;
198 backoffTimeout_ = nullptr;
200 // Close all of the callback queues to notify them that they are being
201 // destroyed. No one should access the AsyncServerSocket any more once
202 // destroy() is called. However, clear out callbacks_ before invoking the
203 // accept callbacks just in case. This will potentially help us detect the
204 // bug if one of the callbacks calls addAcceptCallback() or
205 // removeAcceptCallback().
206 std::vector<CallbackInfo> callbacksCopy;
207 callbacks_.swap(callbacksCopy);
208 for (std::vector<CallbackInfo>::iterator it = callbacksCopy.begin();
209 it != callbacksCopy.end();
211 it->consumer->stop(it->eventBase, it->callback);
217 void AsyncServerSocket::destroy() {
219 for (auto s: pendingCloseSockets_) {
222 // Then call DelayedDestruction::destroy() to take care of
223 // whether or not we need immediate or delayed destruction
224 DelayedDestruction::destroy();
227 void AsyncServerSocket::attachEventBase(EventBase *eventBase) {
228 assert(eventBase_ == nullptr);
229 assert(eventBase->isInEventBaseThread());
231 eventBase_ = eventBase;
232 for (auto& handler : sockets_) {
233 handler.attachEventBase(eventBase);
237 void AsyncServerSocket::detachEventBase() {
238 assert(eventBase_ != nullptr);
239 assert(eventBase_->isInEventBaseThread());
242 eventBase_ = nullptr;
243 for (auto& handler : sockets_) {
244 handler.detachEventBase();
248 void AsyncServerSocket::useExistingSockets(const std::vector<int>& fds) {
249 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
251 if (sockets_.size() > 0) {
252 throw std::invalid_argument(
253 "cannot call useExistingSocket() on a "
254 "AsyncServerSocket that already has a socket");
258 // Set addressFamily_ from this socket.
259 // Note that the socket may not have been bound yet, but
260 // setFromLocalAddress() will still work and get the correct address family.
261 // We will update addressFamily_ again anyway if bind() is called later.
262 SocketAddress address;
263 address.setFromLocalAddress(fd);
267 ServerEventHandler(eventBase_, fd, this, address.getFamily()));
268 sockets_.back().changeHandlerFD(fd);
272 void AsyncServerSocket::useExistingSocket(int fd) {
273 useExistingSockets({fd});
276 void AsyncServerSocket::bindSocket(
278 const SocketAddress& address,
279 bool isExistingSocket) {
280 sockaddr_storage addrStorage;
281 address.getAddress(&addrStorage);
282 sockaddr* saddr = reinterpret_cast<sockaddr*>(&addrStorage);
283 if (::bind(fd, saddr, address.getActualSize()) != 0) {
284 if (!isExistingSocket) {
287 folly::throwSystemError(errno,
288 "failed to bind to async server socket: " +
292 // If we just created this socket, update the EventHandler and set socket_
293 if (!isExistingSocket) {
295 ServerEventHandler(eventBase_, fd, this, address.getFamily()));
299 void AsyncServerSocket::bind(const SocketAddress& address) {
300 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
302 // useExistingSocket() may have been called to initialize socket_ already.
303 // However, in the normal case we need to create a new socket now.
304 // Don't set socket_ yet, so that socket_ will remain uninitialized if an
307 if (sockets_.size() == 0) {
308 fd = createSocket(address.getFamily());
309 } else if (sockets_.size() == 1) {
310 if (address.getFamily() != sockets_[0].addressFamily_) {
311 throw std::invalid_argument(
312 "Attempted to bind address to socket with "
313 "different address family");
315 fd = sockets_[0].socket_;
317 throw std::invalid_argument(
318 "Attempted to bind to multiple fds");
321 bindSocket(fd, address, !sockets_.empty());
324 void AsyncServerSocket::bind(
325 const std::vector<IPAddress>& ipAddresses,
327 if (ipAddresses.empty()) {
328 throw std::invalid_argument("No ip addresses were provided");
330 if (!sockets_.empty()) {
331 throw std::invalid_argument("Cannot call bind on a AsyncServerSocket "
332 "that already has a socket.");
335 for (const IPAddress& ipAddress : ipAddresses) {
336 SocketAddress address(ipAddress.toFullyQualified(), port);
337 int fd = createSocket(address.getFamily());
339 bindSocket(fd, address, false);
341 if (sockets_.size() == 0) {
342 throw std::runtime_error(
343 "did not bind any async server socket for port and addresses");
347 void AsyncServerSocket::bind(uint16_t port) {
348 struct addrinfo hints, *res, *res0;
349 char sport[sizeof("65536")];
351 memset(&hints, 0, sizeof(hints));
352 hints.ai_family = AF_UNSPEC;
353 hints.ai_socktype = SOCK_STREAM;
354 hints.ai_flags = AI_PASSIVE;
355 snprintf(sport, sizeof(sport), "%u", port);
357 if (getaddrinfo(nullptr, sport, &hints, &res0)) {
358 throw std::invalid_argument(
359 "Attempted to bind address to socket with "
363 folly::ScopeGuard guard = folly::makeGuard([&]{
368 auto setupAddress = [&] (struct addrinfo* res) {
369 int s = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
370 // IPv6/IPv4 may not be supported by the kernel
371 if (s < 0 && errno == EAFNOSUPPORT) {
383 if (res->ai_family == AF_INET6) {
385 CHECK(0 == setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY,
386 &v6only, sizeof(v6only)));
389 SocketAddress address;
390 address.setFromLocalAddress(s);
393 ServerEventHandler(eventBase_, s, this, address.getFamily()));
395 // Bind to the socket
396 if (::bind(s, res->ai_addr, res->ai_addrlen) != 0) {
397 folly::throwSystemError(
399 "failed to bind to async server socket for port");
403 // Prefer AF_INET6 addresses. RFC 3484 mandates that getaddrinfo
404 // should return IPv6 first and then IPv4 addresses, but glibc's
405 // getaddrinfo(nullptr) with AI_PASSIVE returns:
406 // - 0.0.0.0 (IPv4-only)
407 // - :: (IPv6+IPv4) in this order
408 // See: https://sourceware.org/bugzilla/show_bug.cgi?id=9981
409 for (res = res0; res; res = res->ai_next) {
410 if (res->ai_family == AF_INET6) {
415 for (res = res0; res; res = res->ai_next) {
416 if (res->ai_family != AF_INET6) {
421 if (sockets_.size() == 0) {
422 throw std::runtime_error(
423 "did not bind any async server socket for port");
427 void AsyncServerSocket::listen(int backlog) {
428 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
431 for (auto& handler : sockets_) {
432 if (::listen(handler.socket_, backlog) == -1) {
433 folly::throwSystemError(errno,
434 "failed to listen on async server socket");
439 void AsyncServerSocket::getAddress(SocketAddress* addressReturn) const {
440 CHECK(sockets_.size() >= 1);
441 VLOG_IF(2, sockets_.size() > 1)
442 << "Warning: getAddress() called and multiple addresses available ("
443 << sockets_.size() << "). Returning only the first one.";
445 addressReturn->setFromLocalAddress(sockets_[0].socket_);
448 std::vector<SocketAddress> AsyncServerSocket::getAddresses()
450 CHECK(sockets_.size() >= 1);
451 auto tsaVec = std::vector<SocketAddress>(sockets_.size());
452 auto tsaIter = tsaVec.begin();
453 for (const auto& socket : sockets_) {
454 (tsaIter++)->setFromLocalAddress(socket.socket_);
459 void AsyncServerSocket::addAcceptCallback(AcceptCallback *callback,
460 EventBase *eventBase,
461 uint32_t maxAtOnce) {
462 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
464 // If this is the first accept callback and we are supposed to be accepting,
465 // start accepting once the callback is installed.
466 bool runStartAccepting = accepting_ && callbacks_.empty();
469 eventBase = eventBase_; // Run in AsyncServerSocket's eventbase
472 callbacks_.push_back(CallbackInfo(callback, eventBase));
474 // Start the remote acceptor.
476 // It would be nice if we could avoid starting the remote acceptor if
477 // eventBase == eventBase_. However, that would cause issues if
478 // detachEventBase() and attachEventBase() were ever used to change the
479 // primary EventBase for the server socket. Therefore we require the caller
480 // to specify a nullptr EventBase if they want to ensure that the callback is
481 // always invoked in the primary EventBase, and to be able to invoke that
482 // callback more efficiently without having to use a notification queue.
483 RemoteAcceptor* acceptor = nullptr;
485 acceptor = new RemoteAcceptor(callback);
486 acceptor->start(eventBase, maxAtOnce, maxNumMsgsInQueue_);
488 callbacks_.pop_back();
492 callbacks_.back().consumer = acceptor;
494 // If this is the first accept callback and we are supposed to be accepting,
496 if (runStartAccepting) {
501 void AsyncServerSocket::removeAcceptCallback(AcceptCallback *callback,
502 EventBase *eventBase) {
503 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
505 // Find the matching AcceptCallback.
506 // We just do a simple linear search; we don't expect removeAcceptCallback()
507 // to be called frequently, and we expect there to only be a small number of
509 std::vector<CallbackInfo>::iterator it = callbacks_.begin();
512 if (it == callbacks_.end()) {
513 throw std::runtime_error("AsyncServerSocket::removeAcceptCallback(): "
514 "accept callback not found");
516 if (it->callback == callback &&
517 (it->eventBase == eventBase || eventBase == nullptr)) {
524 // Remove this callback from callbacks_.
526 // Do this before invoking the acceptStopped() callback, in case
527 // acceptStopped() invokes one of our methods that examines callbacks_.
529 // Save a copy of the CallbackInfo first.
530 CallbackInfo info(*it);
531 callbacks_.erase(it);
532 if (n < callbackIndex_) {
533 // We removed an element before callbackIndex_. Move callbackIndex_ back
534 // one step, since things after n have been shifted back by 1.
537 // We removed something at or after callbackIndex_.
538 // If we removed the last element and callbackIndex_ was pointing at it,
539 // we need to reset callbackIndex_ to 0.
540 if (callbackIndex_ >= callbacks_.size()) {
545 info.consumer->stop(info.eventBase, info.callback);
547 // If we are supposed to be accepting but the last accept callback
548 // was removed, unregister for events until a callback is added.
549 if (accepting_ && callbacks_.empty()) {
550 for (auto& handler : sockets_) {
551 handler.unregisterHandler();
556 void AsyncServerSocket::startAccepting() {
557 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
560 if (callbacks_.empty()) {
561 // We can't actually begin accepting if no callbacks are defined.
562 // Wait until a callback is added to start accepting.
566 for (auto& handler : sockets_) {
567 if (!handler.registerHandler(
568 EventHandler::READ | EventHandler::PERSIST)) {
569 throw std::runtime_error("failed to register for accept events");
574 void AsyncServerSocket::pauseAccepting() {
575 assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
577 for (auto& handler : sockets_) {
578 handler. unregisterHandler();
581 // If we were in the accept backoff state, disable the backoff timeout
582 if (backoffTimeout_) {
583 backoffTimeout_->cancelTimeout();
587 int AsyncServerSocket::createSocket(int family) {
588 int fd = socket(family, SOCK_STREAM, 0);
590 folly::throwSystemError(errno, "error creating async server socket");
602 void AsyncServerSocket::setupSocket(int fd) {
603 // Get the address family
604 SocketAddress address;
605 address.setFromLocalAddress(fd);
606 auto family = address.getFamily();
608 // Put the socket in non-blocking mode
609 if (fcntl(fd, F_SETFL, O_NONBLOCK) != 0) {
610 folly::throwSystemError(errno,
611 "failed to put socket in non-blocking mode");
614 // Set reuseaddr to avoid 2MSL delay on server restart
616 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) != 0) {
617 // This isn't a fatal error; just log an error message and continue
618 LOG(ERROR) << "failed to set SO_REUSEADDR on async server socket " << errno;
621 // Set reuseport to support multiple accept threads
623 if (reusePortEnabled_ &&
624 setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(int)) != 0) {
625 LOG(ERROR) << "failed to set SO_REUSEPORT on async server socket "
627 folly::throwSystemError(errno,
628 "failed to bind to async server socket: " +
632 // Set keepalive as desired
633 if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE,
634 (keepAliveEnabled_) ? &one : &zero, sizeof(int)) != 0) {
635 LOG(ERROR) << "failed to set SO_KEEPALIVE on async server socket: " <<
639 // Setup FD_CLOEXEC flag
641 (-1 == folly::setCloseOnExec(fd, closeOnExec_))) {
642 LOG(ERROR) << "failed to set FD_CLOEXEC on async server socket: " <<
646 // Set TCP nodelay if available, MAC OS X Hack
647 // See http://lists.danga.com/pipermail/memcached/2005-March/001240.html
649 if (family != AF_UNIX) {
650 if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) != 0) {
651 // This isn't a fatal error; just log an error message and continue
652 LOG(ERROR) << "failed to set TCP_NODELAY on async server socket: " <<
658 if (shutdownSocketSet_) {
659 shutdownSocketSet_->add(fd);
663 void AsyncServerSocket::handlerReady(
664 uint16_t events, int fd, sa_family_t addressFamily) noexcept {
665 assert(!callbacks_.empty());
666 DestructorGuard dg(this);
668 // Only accept up to maxAcceptAtOnce_ connections at a time,
669 // to avoid starving other I/O handlers using this EventBase.
670 for (uint32_t n = 0; n < maxAcceptAtOnce_; ++n) {
671 SocketAddress address;
673 sockaddr_storage addrStorage;
674 socklen_t addrLen = sizeof(addrStorage);
675 sockaddr* saddr = reinterpret_cast<sockaddr*>(&addrStorage);
677 // In some cases, accept() doesn't seem to update these correctly.
678 saddr->sa_family = addressFamily;
679 if (addressFamily == AF_UNIX) {
680 addrLen = sizeof(struct sockaddr_un);
683 // Accept a new client socket
685 int clientSocket = accept4(fd, saddr, &addrLen, SOCK_NONBLOCK);
687 int clientSocket = accept(fd, saddr, &addrLen);
690 address.setFromSockaddr(saddr, addrLen);
692 std::chrono::time_point<std::chrono::steady_clock> nowMs =
693 std::chrono::steady_clock::now();
694 int64_t timeSinceLastAccept = std::max(
696 nowMs.time_since_epoch().count() -
697 lastAccepTimestamp_.time_since_epoch().count());
698 lastAccepTimestamp_ = nowMs;
699 if (acceptRate_ < 1) {
700 acceptRate_ *= 1 + acceptRateAdjustSpeed_ * timeSinceLastAccept;
701 if (acceptRate_ >= 1) {
703 } else if (rand() > acceptRate_ * RAND_MAX) {
704 ++numDroppedConnections_;
705 if (clientSocket >= 0) {
706 ::close(clientSocket);
712 if (clientSocket < 0) {
713 if (errno == EAGAIN) {
714 // No more sockets to accept right now.
715 // Check for this code first, since it's the most common.
717 } else if (errno == EMFILE || errno == ENFILE) {
718 // We're out of file descriptors. Perhaps we're accepting connections
719 // too quickly. Pause accepting briefly to back off and give the server
720 // a chance to recover.
721 LOG(ERROR) << "accept failed: out of file descriptors; entering accept "
725 // Dispatch the error message
726 dispatchError("accept() failed", errno);
728 dispatchError("accept() failed", errno);
733 #ifndef SOCK_NONBLOCK
734 // Explicitly set the new connection to non-blocking mode
735 if (fcntl(clientSocket, F_SETFL, O_NONBLOCK) != 0) {
736 ::close(clientSocket);
737 dispatchError("failed to set accepted socket to non-blocking mode",
743 // Inform the callback about the new connection
744 dispatchSocket(clientSocket, std::move(address));
746 // If we aren't accepting any more, break out of the loop
747 if (!accepting_ || callbacks_.empty()) {
753 void AsyncServerSocket::dispatchSocket(int socket,
754 SocketAddress&& address) {
755 uint32_t startingIndex = callbackIndex_;
757 // Short circuit if the callback is in the primary EventBase thread
759 CallbackInfo *info = nextCallback();
760 if (info->eventBase == nullptr) {
761 info->callback->connectionAccepted(socket, address);
765 // Create a message to send over the notification queue
767 msg.type = MessageType::MSG_NEW_CONN;
768 msg.address = std::move(address);
771 // Loop until we find a free queue to write to
773 if (info->consumer->getQueue()->tryPutMessageNoThrow(std::move(msg))) {
778 // We couldn't add to queue. Fall through to below
780 ++numDroppedConnections_;
781 if (acceptRateAdjustSpeed_ > 0) {
782 // aggressively decrease accept rate when in trouble
783 static const double kAcceptRateDecreaseSpeed = 0.1;
784 acceptRate_ *= 1 - kAcceptRateDecreaseSpeed;
788 if (callbackIndex_ == startingIndex) {
789 // The notification queue was full
790 // We can't really do anything at this point other than close the socket.
792 // This should only happen if a user's service is behaving extremely
793 // badly and none of the EventBase threads are looping fast enough to
794 // process the incoming connections. If the service is overloaded, it
795 // should use pauseAccepting() to temporarily back off accepting new
796 // connections, before they reach the point where their threads can't
797 // even accept new messages.
798 LOG(ERROR) << "failed to dispatch newly accepted socket:"
799 << " all accept callback queues are full";
804 info = nextCallback();
808 void AsyncServerSocket::dispatchError(const char *msgstr, int errnoValue) {
809 uint32_t startingIndex = callbackIndex_;
810 CallbackInfo *info = nextCallback();
812 // Create a message to send over the notification queue
814 msg.type = MessageType::MSG_ERROR;
815 msg.err = errnoValue;
816 msg.msg = std::move(msgstr);
819 // Short circuit if the callback is in the primary EventBase thread
820 if (info->eventBase == nullptr) {
821 std::runtime_error ex(
822 std::string(msgstr) + folly::to<std::string>(errnoValue));
823 info->callback->acceptError(ex);
827 if (info->consumer->getQueue()->tryPutMessageNoThrow(std::move(msg))) {
830 // Fall through and try another callback
832 if (callbackIndex_ == startingIndex) {
833 // The notification queues for all of the callbacks were full.
834 // We can't really do anything at this point.
835 LOG(ERROR) << "failed to dispatch accept error: all accept callback "
836 "queues are full: error msg: " <<
837 msg.msg.c_str() << errnoValue;
840 info = nextCallback();
844 void AsyncServerSocket::enterBackoff() {
845 // If this is the first time we have entered the backoff state,
846 // allocate backoffTimeout_.
847 if (backoffTimeout_ == nullptr) {
849 backoffTimeout_ = new BackoffTimeout(this);
850 } catch (const std::bad_alloc& ex) {
851 // Man, we couldn't even allocate the timer to re-enable accepts.
852 // We must be in pretty bad shape. Don't pause accepting for now,
853 // since we won't be able to re-enable ourselves later.
854 LOG(ERROR) << "failed to allocate AsyncServerSocket backoff"
855 << " timer; unable to temporarly pause accepting";
860 // For now, we simply pause accepting for 1 second.
862 // We could add some smarter backoff calculation here in the future. (e.g.,
863 // start sleeping for longer if we keep hitting the backoff frequently.)
864 // Typically the user needs to figure out why the server is overloaded and
865 // fix it in some other way, though. The backoff timer is just a simple
866 // mechanism to try and give the connection processing code a little bit of
867 // breathing room to catch up, and to avoid just spinning and failing to
868 // accept over and over again.
869 const uint32_t timeoutMS = 1000;
870 if (!backoffTimeout_->scheduleTimeout(timeoutMS)) {
871 LOG(ERROR) << "failed to schedule AsyncServerSocket backoff timer;"
872 << "unable to temporarly pause accepting";
876 // The backoff timer is scheduled to re-enable accepts.
877 // Go ahead and disable accepts for now. We leave accepting_ set to true,
878 // since that tracks the desired state requested by the user.
879 for (auto& handler : sockets_) {
880 handler.unregisterHandler();
// Re-enables accept events when the back-off timer fires.
//
// NOTE(review): this listing appears truncated/garbled — original line
// numbers are fused into each line and several lines are missing (e.g. the
// gaps at 888, 891, 894-896 and 906 presumably held `assert(accepting_);`,
// a `return;` for the empty-callbacks case, and the `LOG(FATAL)` line), and
// the function is cut off mid-statement at line 907.  Restore the missing
// lines from upstream before building — TODO confirm against the original
// source.
884 void AsyncServerSocket::backoffTimeoutExpired() {
885 // accepting_ should still be true.
886 // If pauseAccepting() was called while in the backoff state it will cancel
887 // the backoff timeout.
889 // We can't be detached from the EventBase without being paused
890 assert(eventBase_ != nullptr && eventBase_->isInEventBaseThread());
892 // If all of the callbacks were removed, we shouldn't re-enable accepts
893 if (callbacks_.empty()) {
897 // Register the handler.
898 for (auto& handler : sockets_) {
899 if (!handler.registerHandler(
900 EventHandler::READ | EventHandler::PERSIST)) {
// We cannot recover if re-registering fails: see rationale below.
901 // We're hosed. We could just re-schedule backoffTimeout_ to
902 // re-try again after a little bit. However, we don't want to
903 // loop retrying forever if we can't re-enable accepts. Just
904 // abort the entire program in this state; things are really bad
905 // and restarting the entire server is probably the best remedy.
907 << "failed to re-enable AsyncServerSocket accepts after backoff; "