2 * Copyright 2015 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include <folly/wangle/bootstrap/ServerBootstrap-inl.h>
19 #include <folly/Baton.h>
20 #include <folly/wangle/channel/ChannelPipeline.h>
24 typedef folly::wangle::ChannelPipeline<
25 folly::IOBufQueue&, std::unique_ptr<folly::IOBuf>> DefaultPipeline;
28 * ServerBootstrap is a parent class intended to set up a
29 * high-performance TCP accepting server. It will manage a pool of
30 * accepting threads, any number of accepting sockets, a pool of
31 * IO-worker threads, and a connection pool for each IO thread for you.
33 * The output is given as a ChannelPipeline template: given a
34 * PipelineFactory, it will create a new pipeline for each connection,
35 * and your server can handle the incoming bytes.
37 * BACKWARDS COMPATIBILITY: for servers already taking a pool of
38 * Acceptor objects, an AcceptorFactory can be given directly instead
39 * of a pipeline factory.
41 template <typename Pipeline>
42 class ServerBootstrap {
45 ServerBootstrap(const ServerBootstrap& that) = delete;
46 ServerBootstrap(ServerBootstrap&& that) = default;
55 typedef wangle::ChannelPipeline<
57 std::exception> AcceptPipeline;
59 * Pipeline used to add connections to event bases.
60 * This is used for UDP or for load balancing
61 * TCP connections to IO threads explicitly
63 ServerBootstrap* pipeline(
64 std::shared_ptr<PipelineFactory<AcceptPipeline>> factory) {
69 ServerBootstrap* channelFactory(
70 std::shared_ptr<ServerSocketFactory> factory) {
71 socketFactory_ = factory;
76 * BACKWARDS COMPATIBILITY - an acceptor factory can be set. Your
77 * Acceptor is responsible for managing the connection pool.
79 * @param childHandler - acceptor factory to call for each IO thread
81 ServerBootstrap* childHandler(std::shared_ptr<AcceptorFactory> h) {
87 * Set a pipeline factory that will be called for each new connection
89 * @param factory pipeline factory to use for each new connection
91 ServerBootstrap* childPipeline(
92 std::shared_ptr<PipelineFactory<Pipeline>> factory) {
93 childPipelineFactory_ = factory;
98 * Set the IO executor. If not set, a default one will be created
99 * with one thread per core.
101 * @param io_group - io executor to use for IO threads.
103 ServerBootstrap* group(
104 std::shared_ptr<folly::wangle::IOThreadPoolExecutor> io_group) {
105 return group(nullptr, io_group);
109 * Set the acceptor executor, and IO executor.
111 * If no acceptor executor is set, a single thread will be created for accepts
112 * If no IO executor is set, a default of one thread per core will be created
114 * @param group - acceptor executor to use for acceptor threads.
115 * @param io_group - io executor to use for IO threads.
117 ServerBootstrap* group(
118 std::shared_ptr<folly::wangle::IOThreadPoolExecutor> accept_group,
119 std::shared_ptr<wangle::IOThreadPoolExecutor> io_group) {
121 accept_group = std::make_shared<folly::wangle::IOThreadPoolExecutor>(
122 1, std::make_shared<wangle::NamedThreadFactory>("Acceptor Thread"));
125 io_group = std::make_shared<folly::wangle::IOThreadPoolExecutor>(
126 32, std::make_shared<wangle::NamedThreadFactory>("IO Thread"));
129 // TODO better config checking
130 // CHECK(acceptorFactory_ || childPipelineFactory_);
131 CHECK(!(acceptorFactory_ && childPipelineFactory_));
133 if (acceptorFactory_) {
134 workerFactory_ = std::make_shared<ServerWorkerPool>(
135 acceptorFactory_, io_group.get(), &sockets_, socketFactory_);
137 workerFactory_ = std::make_shared<ServerWorkerPool>(
138 std::make_shared<ServerAcceptorFactory<Pipeline>>(
139 childPipelineFactory_,
141 io_group.get(), &sockets_, socketFactory_);
144 io_group->addObserver(workerFactory_);
146 acceptor_group_ = accept_group;
147 io_group_ = io_group;
153 * Bind to an existing socket
155 * @param sock Existing socket to use for accepting
157 void bind(folly::AsyncServerSocket::UniquePtr s) {
158 if (!workerFactory_) {
162 // Since only a single socket is given,
163 // we can only accept on a single thread
164 CHECK(acceptor_group_->numThreads() == 1);
166 std::shared_ptr<folly::AsyncServerSocket> socket(
167 s.release(), DelayedDestruction::Destructor());
169 folly::Baton<> barrier;
170 acceptor_group_->add([&](){
171 socket->attachEventBase(EventBaseManager::get()->getEventBase());
172 socket->listen(socketConfig.acceptBacklog);
173 socket->startAccepting();
178 // Startup all the threads
179 workerFactory_->forEachWorker([this, socket](Acceptor* worker){
180 socket->getEventBase()->runInEventBaseThreadAndWait(
181 [this, worker, socket](){
182 socketFactory_->addAcceptCB(socket, worker, worker->getEventBase());
186 sockets_.push_back(socket);
189 void bind(folly::SocketAddress& address) {
190 bindImpl(-1, address);
194 * Bind to a port and start listening.
195 * One of childPipeline or childHandler must be called before bind
197 * @param port Port to listen on
199 void bind(int port) {
201 folly::SocketAddress address;
202 bindImpl(port, address);
205 void bindImpl(int port, folly::SocketAddress& address) {
206 if (!workerFactory_) {
210 bool reusePort = false;
211 if (acceptor_group_->numThreads() > 1) {
215 std::mutex sock_lock;
216 std::vector<std::shared_ptr<folly::AsyncSocketBase>> new_sockets;
219 std::exception_ptr exn;
221 auto startupFunc = [&](std::shared_ptr<folly::Baton<>> barrier){
224 auto socket = socketFactory_->newSocket(
225 port, address, socketConfig.acceptBacklog, reusePort, socketConfig);
228 new_sockets.push_back(socket);
232 socket->getAddress(&address);
233 port = address.getPort();
238 exn = std::current_exception();
248 auto wait0 = std::make_shared<folly::Baton<>>();
249 acceptor_group_->add(std::bind(startupFunc, wait0));
252 for (size_t i = 1; i < acceptor_group_->numThreads(); i++) {
253 auto barrier = std::make_shared<folly::Baton<>>();
254 acceptor_group_->add(std::bind(startupFunc, barrier));
259 std::rethrow_exception(exn);
262 for (auto& socket : new_sockets) {
263 // Startup all the threads
264 workerFactory_->forEachWorker([this, socket](Acceptor* worker){
265 socket->getEventBase()->runInEventBaseThreadAndWait([this, worker, socket](){
266 socketFactory_->addAcceptCB(socket, worker, worker->getEventBase());
270 sockets_.push_back(socket);
275 * Stop listening on all sockets.
278 for (auto socket : sockets_) {
279 folly::Baton<> barrier;
280 socket->getEventBase()->runInEventBaseThread([&]() mutable {
281 socketFactory_->stopSocket(socket);
290 if (acceptor_group_) {
291 acceptor_group_->join();
299 * Get the list of listening sockets
301 const std::vector<std::shared_ptr<folly::AsyncSocketBase>>&
306 std::shared_ptr<wangle::IOThreadPoolExecutor> getIOGroup() const {
310 template <typename F>
311 void forEachWorker(F&& f) const {
312 workerFactory_->forEachWorker(f);
315 ServerSocketConfig socketConfig;
318 std::shared_ptr<wangle::IOThreadPoolExecutor> acceptor_group_;
319 std::shared_ptr<wangle::IOThreadPoolExecutor> io_group_;
321 std::shared_ptr<ServerWorkerPool> workerFactory_;
322 std::vector<std::shared_ptr<folly::AsyncSocketBase>> sockets_;
324 std::shared_ptr<AcceptorFactory> acceptorFactory_;
325 std::shared_ptr<PipelineFactory<Pipeline>> childPipelineFactory_;
326 std::shared_ptr<PipelineFactory<AcceptPipeline>> pipeline_{
327 std::make_shared<DefaultAcceptPipelineFactory>()};
328 std::shared_ptr<ServerSocketFactory> socketFactory_{
329 std::make_shared<AsyncServerSocketFactory>()};