2 * Copyright 2017 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
21 #include <folly/io/IOBuf.h>
22 #include <folly/io/async/AsyncSocketBase.h>
23 #include <folly/io/async/DelayedDestruction.h>
24 #include <folly/io/async/EventBase.h>
25 #include <folly/io/async/ssl/OpenSSLPtrTypes.h>
26 #include <folly/portability/SysUio.h>
28 #include <openssl/ssl.h>
30 constexpr bool kOpenSslModeMoveBufferOwnership =
31 #ifdef SSL_MODE_MOVE_BUFFER_OWNERSHIP
40 class AsyncSocketException;
45 * flags given by the application for write* calls
47 enum class WriteFlags : uint32_t {
50 * Whether to delay the output until a subsequent non-corked write.
51 * (Note: may not be supported in all subclasses or on all platforms.)
55 * for a socket that has ACK latency enabled, it will cause the kernel
56 * to fire a TCP ESTATS event when the last byte of the given write call
57 * will be acknowledged.
61 * this indicates that only the write side of socket should be shutdown
63 WRITE_SHUTDOWN = 0x04,
69 inline WriteFlags operator|(WriteFlags a, WriteFlags b) {
70 return static_cast<WriteFlags>(
71 static_cast<uint32_t>(a) | static_cast<uint32_t>(b));
75 * compound assignment union operator
77 inline WriteFlags& operator|=(WriteFlags& a, WriteFlags b) {
83 * intersection operator
85 inline WriteFlags operator&(WriteFlags a, WriteFlags b) {
86 return static_cast<WriteFlags>(
87 static_cast<uint32_t>(a) & static_cast<uint32_t>(b));
91 * compound assignment intersection operator
93 inline WriteFlags& operator&=(WriteFlags& a, WriteFlags b) {
101 inline WriteFlags operator~(WriteFlags a) {
102 return static_cast<WriteFlags>(~static_cast<uint32_t>(a));
108 inline WriteFlags unSet(WriteFlags a, WriteFlags b) {
115 inline bool isSet(WriteFlags a, WriteFlags b) {
121 * AsyncTransport defines an asynchronous API for streaming I/O.
 * This class provides an API for asynchronously waiting for data
124 * on a streaming transport, and for asynchronously sending data.
126 * The APIs for reading and writing are intentionally asymmetric. Waiting for
127 * data to read is a persistent API: a callback is installed, and is notified
128 * whenever new data is available. It continues to be notified of new events
129 * until it is uninstalled.
131 * AsyncTransport does not provide read timeout functionality, because it
132 * typically cannot determine when the timeout should be active. Generally, a
133 * timeout should only be enabled when processing is blocked waiting on data
134 * from the remote endpoint. For server-side applications, the timeout should
135 * not be active if the server is currently processing one or more outstanding
136 * requests on this transport. For client-side applications, the timeout
137 * should not be active if there are no requests pending on the transport.
 * Additionally, if a client has multiple pending requests, it will usually
139 * want a separate timeout for each request, rather than a single read timeout.
141 * The write API is fairly intuitive: a user can request to send a block of
142 * data, and a callback will be informed once the entire block has been
143 * transferred to the kernel, or on error. AsyncTransport does provide a send
144 * timeout, since most callers want to give up if the remote end stops
145 * responding and no further progress can be made sending the data.
147 class AsyncTransport : public DelayedDestruction, public AsyncSocketBase {
149 typedef std::unique_ptr<AsyncTransport, Destructor> UniquePtr;
152 * Close the transport.
154 * This gracefully closes the transport, waiting for all pending write
155 * requests to complete before actually closing the underlying transport.
157 * If a read callback is set, readEOF() will be called immediately. If there
158 * are outstanding write requests, the close will be delayed until all
159 * remaining writes have completed. No new writes may be started after
160 * close() has been called.
162 virtual void close() = 0;
165 * Close the transport immediately.
167 * This closes the transport immediately, dropping any outstanding data
168 * waiting to be written.
170 * If a read callback is set, readEOF() will be called immediately.
171 * If there are outstanding write requests, these requests will be aborted
172 * and writeError() will be invoked immediately on all outstanding write
175 virtual void closeNow() = 0;
178 * Reset the transport immediately.
180 * This closes the transport immediately, sending a reset to the remote peer
181 * if possible to indicate abnormal shutdown.
183 * Note that not all subclasses implement this reset functionality: some
184 * subclasses may treat reset() the same as closeNow(). Subclasses that use
185 * TCP transports should terminate the connection with a TCP reset.
187 virtual void closeWithReset() {
192 * Perform a half-shutdown of the write side of the transport.
194 * The caller should not make any more calls to write() or writev() after
195 * shutdownWrite() is called. Any future write attempts will fail
198 * Not all transport types support half-shutdown. If the underlying
199 * transport does not support half-shutdown, it will fully shutdown both the
200 * read and write sides of the transport. (Fully shutting down the socket is
201 * better than doing nothing at all, since the caller may rely on the
202 * shutdownWrite() call to notify the other end of the connection that no
203 * more data can be read.)
205 * If there is pending data still waiting to be written on the transport,
206 * the actual shutdown will be delayed until the pending data has been
209 * Note: There is no corresponding shutdownRead() equivalent. Simply
210 * uninstall the read callback if you wish to stop reading. (On TCP sockets
211 * at least, shutting down the read side of the socket is a no-op anyway.)
213 virtual void shutdownWrite() = 0;
216 * Perform a half-shutdown of the write side of the transport.
218 * shutdownWriteNow() is identical to shutdownWrite(), except that it
219 * immediately performs the shutdown, rather than waiting for pending writes
220 * to complete. Any pending write requests will be immediately failed when
221 * shutdownWriteNow() is called.
223 virtual void shutdownWriteNow() = 0;
226 * Determine if transport is open and ready to read or write.
228 * Note that this function returns false on EOF; you must also call error()
229 * to distinguish between an EOF and an error.
231 * @return true iff the transport is open and ready, false otherwise.
233 virtual bool good() const = 0;
236 * Determine if the transport is readable or not.
238 * @return true iff the transport is readable, false otherwise.
240 virtual bool readable() const = 0;
243 * Determine if the there is pending data on the transport.
 * @return true iff there is pending data, false otherwise.
247 virtual bool isPending() const {
252 * Determine if transport is connected to the endpoint
254 * @return false iff the transport is connected, otherwise true
256 virtual bool connecting() const = 0;
259 * Determine if an error has occurred with this transport.
261 * @return true iff an error has occurred (not EOF).
263 virtual bool error() const = 0;
266 * Attach the transport to a EventBase.
268 * This may only be called if the transport is not currently attached to a
269 * EventBase (by an earlier call to detachEventBase()).
271 * This method must be invoked in the EventBase's thread.
273 virtual void attachEventBase(EventBase* eventBase) = 0;
276 * Detach the transport from its EventBase.
278 * This may only be called when the transport is idle and has no reads or
279 * writes pending. Once detached, the transport may not be used again until
280 * it is re-attached to a EventBase by calling attachEventBase().
282 * This method must be called from the current EventBase's thread.
284 virtual void detachEventBase() = 0;
287 * Determine if the transport can be detached.
289 * This method must be called from the current EventBase's thread.
291 virtual bool isDetachable() const = 0;
294 * Set the send timeout.
296 * If write requests do not make any progress for more than the specified
297 * number of milliseconds, fail all pending writes and close the transport.
299 * If write requests are currently pending when setSendTimeout() is called,
300 * the timeout interval is immediately restarted using the new value.
302 * @param milliseconds The timeout duration, in milliseconds. If 0, no
303 * timeout will be used.
305 virtual void setSendTimeout(uint32_t milliseconds) = 0;
308 * Get the send timeout.
310 * @return Returns the current send timeout, in milliseconds. A return value
311 * of 0 indicates that no timeout is set.
313 virtual uint32_t getSendTimeout() const = 0;
316 * Get the address of the local endpoint of this transport.
318 * This function may throw AsyncSocketException on error.
320 * @param address The local address will be stored in the specified
323 virtual void getLocalAddress(SocketAddress* address) const = 0;
325 virtual void getAddress(SocketAddress* address) const {
326 getLocalAddress(address);
330 * Get the address of the remote endpoint to which this transport is
333 * This function may throw AsyncSocketException on error.
335 * @param address The remote endpoint's address will be stored in the
336 * specified SocketAddress.
338 virtual void getPeerAddress(SocketAddress* address) const = 0;
341 * Get the certificate used to authenticate the peer.
343 virtual ssl::X509UniquePtr getPeerCert() const { return nullptr; }
346 * The local certificate used for this connection. May be null
348 virtual const X509* getSelfCert() const {
353 * @return True iff end of record tracking is enabled
355 virtual bool isEorTrackingEnabled() const = 0;
357 virtual void setEorTracking(bool track) = 0;
359 virtual size_t getAppBytesWritten() const = 0;
360 virtual size_t getRawBytesWritten() const = 0;
361 virtual size_t getAppBytesReceived() const = 0;
362 virtual size_t getRawBytesReceived() const = 0;
364 class BufferCallback {
366 virtual ~BufferCallback() {}
367 virtual void onEgressBuffered() = 0;
368 virtual void onEgressBufferCleared() = 0;
372 * Callback class to signal when a transport that did not have replay
373 * protection gains replay protection. This is needed for 0-RTT security
376 class ReplaySafetyCallback {
378 virtual ~ReplaySafetyCallback() = default;
381 * Called when the transport becomes replay safe.
383 virtual void onReplaySafe() = 0;
387 * False if the transport does not have replay protection, but will in the
390 virtual bool isReplaySafe() const { return true; }
393 * Set the ReplaySafeCallback on this transport.
395 * This should only be called if isReplaySafe() returns false.
397 virtual void setReplaySafetyCallback(ReplaySafetyCallback* callback) {
399 CHECK(false) << "setReplaySafetyCallback() not supported";
404 virtual ~AsyncTransport() = default;
411 virtual ~ReadCallback() = default;
414 * When data becomes available, getReadBuffer() will be invoked to get the
415 * buffer into which data should be read.
417 * This method allows the ReadCallback to delay buffer allocation until
418 * data becomes available. This allows applications to manage large
419 * numbers of idle connections, without having to maintain a separate read
420 * buffer for each idle connection.
422 * It is possible that in some cases, getReadBuffer() may be called
423 * multiple times before readDataAvailable() is invoked. In this case, the
 * data will be written to the buffer returned from the most recent call to
 * getReadBuffer(). If the previous calls to getReadBuffer()
426 * returned different buffers, the ReadCallback is responsible for ensuring
427 * that they are not leaked.
429 * If getReadBuffer() throws an exception, returns a nullptr buffer, or
430 * returns a 0 length, the ReadCallback will be uninstalled and its
431 * readError() method will be invoked.
433 * getReadBuffer() is not allowed to change the transport state before it
434 * returns. (For example, it should never uninstall the read callback, or
435 * set a different read callback.)
437 * @param bufReturn getReadBuffer() should update *bufReturn to contain the
438 * address of the read buffer. This parameter will never
440 * @param lenReturn getReadBuffer() should update *lenReturn to contain the
441 * maximum number of bytes that may be written to the read
442 * buffer. This parameter will never be nullptr.
444 virtual void getReadBuffer(void** bufReturn, size_t* lenReturn) = 0;
447 * readDataAvailable() will be invoked when data has been successfully read
448 * into the buffer returned by the last call to getReadBuffer().
450 * The read callback remains installed after readDataAvailable() returns.
451 * It must be explicitly uninstalled to stop receiving read events.
452 * getReadBuffer() will be called at least once before each call to
453 * readDataAvailable(). getReadBuffer() will also be called before any
456 * @param len The number of bytes placed in the buffer.
459 virtual void readDataAvailable(size_t len) noexcept = 0;
462 * When data becomes available, isBufferMovable() will be invoked to figure
463 * out which API will be used, readBufferAvailable() or
464 * readDataAvailable(). If isBufferMovable() returns true, that means
465 * ReadCallback supports the IOBuf ownership transfer and
466 * readBufferAvailable() will be used. Otherwise, not.
468 * By default, isBufferMovable() always return false. If
469 * readBufferAvailable() is implemented and to be invoked, You should
470 * overwrite isBufferMovable() and return true in the inherited class.
472 * This method allows the AsyncSocket/AsyncSSLSocket do buffer allocation by
473 * itself until data becomes available. Compared with the pre/post buffer
474 * allocation in getReadBuffer()/readDataAvailabe(), readBufferAvailable()
475 * has two advantages. First, this can avoid memcpy. E.g., in
476 * AsyncSSLSocket, the decrypted data was copied from the openssl internal
477 * buffer to the readbuf buffer. With the buffer ownership transfer, the
478 * internal buffer can be directly "moved" to ReadCallback. Second, the
479 * memory allocation can be more precise. The reason is
480 * AsyncSocket/AsyncSSLSocket can allocate the memory of precise size
481 * because they have more context about the available data than
482 * ReadCallback. Think about the getReadBuffer() pre-allocate 4072 bytes
483 * buffer, but the available data is always 16KB (max OpenSSL record size).
486 virtual bool isBufferMovable() noexcept {
491 * Suggested buffer size, allocated for read operations,
492 * if callback is movable and supports folly::IOBuf
495 virtual size_t maxBufferSize() const {
496 return 64 * 1024; // 64K
500 * readBufferAvailable() will be invoked when data has been successfully
503 * Note that only either readBufferAvailable() or readDataAvailable() will
504 * be invoked according to the return value of isBufferMovable(). The timing
505 * and aftereffect of readBufferAvailable() are the same as
506 * readDataAvailable()
508 * @param readBuf The unique pointer of read buffer.
511 virtual void readBufferAvailable(std::unique_ptr<IOBuf> /*readBuf*/)
515 * readEOF() will be invoked when the transport is closed.
517 * The read callback will be automatically uninstalled immediately before
518 * readEOF() is invoked.
520 virtual void readEOF() noexcept = 0;
523 * readError() will be invoked if an error occurs reading from the
526 * The read callback will be automatically uninstalled immediately before
527 * readError() is invoked.
529 * @param ex An exception describing the error that occurred.
531 virtual void readErr(const AsyncSocketException& ex) noexcept = 0;
534 // Read methods that aren't part of AsyncTransport.
535 virtual void setReadCB(ReadCallback* callback) = 0;
536 virtual ReadCallback* getReadCallback() const = 0;
539 virtual ~AsyncReader() = default;
544 class WriteCallback {
546 virtual ~WriteCallback() = default;
549 * writeSuccess() will be invoked when all of the data has been
550 * successfully written.
552 * Note that this mainly signals that the buffer containing the data to
553 * write is no longer needed and may be freed or re-used. It does not
554 * guarantee that the data has been fully transmitted to the remote
555 * endpoint. For example, on socket-based transports, writeSuccess() only
556 * indicates that the data has been given to the kernel for eventual
559 virtual void writeSuccess() noexcept = 0;
562 * writeError() will be invoked if an error occurs writing the data.
 * @param bytesWritten The number of bytes that were successfully written
565 * @param ex An exception describing the error that occurred.
567 virtual void writeErr(size_t bytesWritten,
568 const AsyncSocketException& ex) noexcept = 0;
571 // Write methods that aren't part of AsyncTransport
572 virtual void write(WriteCallback* callback, const void* buf, size_t bytes,
573 WriteFlags flags = WriteFlags::NONE) = 0;
574 virtual void writev(WriteCallback* callback, const iovec* vec, size_t count,
575 WriteFlags flags = WriteFlags::NONE) = 0;
576 virtual void writeChain(WriteCallback* callback,
577 std::unique_ptr<IOBuf>&& buf,
578 WriteFlags flags = WriteFlags::NONE) = 0;
581 virtual ~AsyncWriter() = default;
584 // Transitional intermediate interface. This is deprecated.
585 // Wrapper around folly::AsyncTransport, that includes read/write callbacks
586 class AsyncTransportWrapper : virtual public AsyncTransport,
587 virtual public AsyncReader,
588 virtual public AsyncWriter {
590 using UniquePtr = std::unique_ptr<AsyncTransportWrapper, Destructor>;
592 // Alias for inherited members from AsyncReader and AsyncWriter
593 // to keep compatibility.
594 using ReadCallback = AsyncReader::ReadCallback;
595 using WriteCallback = AsyncWriter::WriteCallback;
596 virtual void setReadCB(ReadCallback* callback) override = 0;
597 virtual ReadCallback* getReadCallback() const override = 0;
598 virtual void write(WriteCallback* callback, const void* buf, size_t bytes,
599 WriteFlags flags = WriteFlags::NONE) override = 0;
600 virtual void writev(WriteCallback* callback, const iovec* vec, size_t count,
601 WriteFlags flags = WriteFlags::NONE) override = 0;
602 virtual void writeChain(WriteCallback* callback,
603 std::unique_ptr<IOBuf>&& buf,
604 WriteFlags flags = WriteFlags::NONE) override = 0;
606 * The transport wrapper may wrap another transport. This returns the
607 * transport that is wrapped. It returns nullptr if there is no wrapped
610 virtual const AsyncTransportWrapper* getWrappedTransport() const {
615 * In many cases when we need to set socket properties or otherwise access the
616 * underlying transport from a wrapped transport. This method allows access to
617 * the derived classes of the underlying transport.
620 const T* getUnderlyingTransport() const {
621 const AsyncTransportWrapper* current = this;
623 auto sock = dynamic_cast<const T*>(current);
627 current = current->getWrappedTransport();
633 T* getUnderlyingTransport() {
634 return const_cast<T*>(static_cast<const AsyncTransportWrapper*>(this)
635 ->getUnderlyingTransport<T>());
639 * Return the application protocol being used by the underlying transport
640 * protocol. This is useful for transports which are used to tunnel other
643 virtual std::string getApplicationProtocol() noexcept {
648 * Returns the name of the security protocol being used.
650 virtual std::string getSecurityProtocol() const { return ""; }