/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>

#include <openssl/ssl.h>

#include <folly/io/IOBuf.h>
#include <folly/io/async/AsyncSocketBase.h>
#include <folly/io/async/DelayedDestruction.h>
#include <folly/io/async/EventBase.h>
#include <folly/io/async/ssl/OpenSSLPtrTypes.h>
#include <folly/portability/SysUio.h>
/**
 * True iff the linked OpenSSL build supports SSL_MODE_MOVE_BUFFER_OWNERSHIP
 * (i.e. the write path may take ownership of caller buffers).
 */
constexpr bool kOpenSslModeMoveBufferOwnership =
#ifdef SSL_MODE_MOVE_BUFFER_OWNERSHIP
    true
#else
    false
#endif
    ;
40 class AsyncSocketException;
/*
 * flags given by the application for write* calls
 */
enum class WriteFlags : uint32_t {
  NONE = 0x00,
  /*
   * Whether to delay the output until a subsequent non-corked write.
   * (Note: may not be supported in all subclasses or on all platforms.)
   */
  CORK = 0x01,
  /*
   * for a socket that has ACK latency enabled, it will cause the kernel
   * to fire a TCP ESTATS event when the last byte of the given write call
   * will be acknowledged.
   */
  EOR = 0x02,
  /*
   * this indicates that only the write side of socket should be shutdown
   */
  WRITE_SHUTDOWN = 0x04,
};

/*
 * union operator
 */
inline WriteFlags operator|(WriteFlags a, WriteFlags b) {
  return static_cast<WriteFlags>(
      static_cast<uint32_t>(a) | static_cast<uint32_t>(b));
}

/*
 * compound assignment union operator
 */
inline WriteFlags& operator|=(WriteFlags& a, WriteFlags b) {
  a = a | b;
  return a;
}

/*
 * intersection operator
 */
inline WriteFlags operator&(WriteFlags a, WriteFlags b) {
  return static_cast<WriteFlags>(
      static_cast<uint32_t>(a) & static_cast<uint32_t>(b));
}

/*
 * compound assignment intersection operator
 */
inline WriteFlags& operator&=(WriteFlags& a, WriteFlags b) {
  a = a & b;
  return a;
}

/*
 * complement operator
 */
inline WriteFlags operator~(WriteFlags a) {
  return static_cast<WriteFlags>(~static_cast<uint32_t>(a));
}

/*
 * unset operator: clear the bits of b from a
 */
inline WriteFlags unSet(WriteFlags a, WriteFlags b) {
  return a & ~b;
}

/*
 * inclusion operator: true iff all bits of b are set in a
 */
inline bool isSet(WriteFlags a, WriteFlags b) {
  return (a & b) == b;
}
/**
 * AsyncTransport defines an asynchronous API for streaming I/O.
 *
 * This class provides an API for asynchronously waiting for data
 * on a streaming transport, and for asynchronously sending data.
 *
 * The APIs for reading and writing are intentionally asymmetric. Waiting for
 * data to read is a persistent API: a callback is installed, and is notified
 * whenever new data is available. It continues to be notified of new events
 * until it is uninstalled.
 *
 * AsyncTransport does not provide read timeout functionality, because it
 * typically cannot determine when the timeout should be active. Generally, a
 * timeout should only be enabled when processing is blocked waiting on data
 * from the remote endpoint. For server-side applications, the timeout should
 * not be active if the server is currently processing one or more outstanding
 * requests on this transport. For client-side applications, the timeout
 * should not be active if there are no requests pending on the transport.
 * Additionally, if a client has multiple pending requests, it will usually
 * want a separate timeout for each request, rather than a single read timeout.
 *
 * The write API is fairly intuitive: a user can request to send a block of
 * data, and a callback will be informed once the entire block has been
 * transferred to the kernel, or on error. AsyncTransport does provide a send
 * timeout, since most callers want to give up if the remote end stops
 * responding and no further progress can be made sending the data.
 */
147 class AsyncTransport : public DelayedDestruction, public AsyncSocketBase {
149 typedef std::unique_ptr<AsyncTransport, Destructor> UniquePtr;
152 * Close the transport.
154 * This gracefully closes the transport, waiting for all pending write
155 * requests to complete before actually closing the underlying transport.
157 * If a read callback is set, readEOF() will be called immediately. If there
158 * are outstanding write requests, the close will be delayed until all
159 * remaining writes have completed. No new writes may be started after
160 * close() has been called.
162 virtual void close() = 0;
165 * Close the transport immediately.
167 * This closes the transport immediately, dropping any outstanding data
168 * waiting to be written.
170 * If a read callback is set, readEOF() will be called immediately.
171 * If there are outstanding write requests, these requests will be aborted
172 * and writeError() will be invoked immediately on all outstanding write
175 virtual void closeNow() = 0;
178 * Reset the transport immediately.
180 * This closes the transport immediately, sending a reset to the remote peer
181 * if possible to indicate abnormal shutdown.
183 * Note that not all subclasses implement this reset functionality: some
184 * subclasses may treat reset() the same as closeNow(). Subclasses that use
185 * TCP transports should terminate the connection with a TCP reset.
187 virtual void closeWithReset() {
192 * Perform a half-shutdown of the write side of the transport.
194 * The caller should not make any more calls to write() or writev() after
195 * shutdownWrite() is called. Any future write attempts will fail
198 * Not all transport types support half-shutdown. If the underlying
199 * transport does not support half-shutdown, it will fully shutdown both the
200 * read and write sides of the transport. (Fully shutting down the socket is
201 * better than doing nothing at all, since the caller may rely on the
202 * shutdownWrite() call to notify the other end of the connection that no
203 * more data can be read.)
205 * If there is pending data still waiting to be written on the transport,
206 * the actual shutdown will be delayed until the pending data has been
209 * Note: There is no corresponding shutdownRead() equivalent. Simply
210 * uninstall the read callback if you wish to stop reading. (On TCP sockets
211 * at least, shutting down the read side of the socket is a no-op anyway.)
213 virtual void shutdownWrite() = 0;
216 * Perform a half-shutdown of the write side of the transport.
218 * shutdownWriteNow() is identical to shutdownWrite(), except that it
219 * immediately performs the shutdown, rather than waiting for pending writes
220 * to complete. Any pending write requests will be immediately failed when
221 * shutdownWriteNow() is called.
223 virtual void shutdownWriteNow() = 0;
226 * Determine if transport is open and ready to read or write.
228 * Note that this function returns false on EOF; you must also call error()
229 * to distinguish between an EOF and an error.
231 * @return true iff the transport is open and ready, false otherwise.
233 virtual bool good() const = 0;
236 * Determine if the transport is readable or not.
238 * @return true iff the transport is readable, false otherwise.
240 virtual bool readable() const = 0;
243 * Determine if the there is pending data on the transport.
245 * @return true iff the if the there is pending data, false otherwise.
247 virtual bool isPending() const {
252 * Determine if transport is connected to the endpoint
254 * @return false iff the transport is connected, otherwise true
256 virtual bool connecting() const = 0;
259 * Determine if an error has occurred with this transport.
261 * @return true iff an error has occurred (not EOF).
263 virtual bool error() const = 0;
266 * Attach the transport to a EventBase.
268 * This may only be called if the transport is not currently attached to a
269 * EventBase (by an earlier call to detachEventBase()).
271 * This method must be invoked in the EventBase's thread.
273 virtual void attachEventBase(EventBase* eventBase) = 0;
276 * Detach the transport from its EventBase.
278 * This may only be called when the transport is idle and has no reads or
279 * writes pending. Once detached, the transport may not be used again until
280 * it is re-attached to a EventBase by calling attachEventBase().
282 * This method must be called from the current EventBase's thread.
284 virtual void detachEventBase() = 0;
287 * Determine if the transport can be detached.
289 * This method must be called from the current EventBase's thread.
291 virtual bool isDetachable() const = 0;
294 * Set the send timeout.
296 * If write requests do not make any progress for more than the specified
297 * number of milliseconds, fail all pending writes and close the transport.
299 * If write requests are currently pending when setSendTimeout() is called,
300 * the timeout interval is immediately restarted using the new value.
302 * @param milliseconds The timeout duration, in milliseconds. If 0, no
303 * timeout will be used.
305 virtual void setSendTimeout(uint32_t milliseconds) = 0;
308 * Get the send timeout.
310 * @return Returns the current send timeout, in milliseconds. A return value
311 * of 0 indicates that no timeout is set.
313 virtual uint32_t getSendTimeout() const = 0;
316 * Get the address of the local endpoint of this transport.
318 * This function may throw AsyncSocketException on error.
320 * @param address The local address will be stored in the specified
323 virtual void getLocalAddress(SocketAddress* address) const = 0;
326 * Get the address of the remote endpoint to which this transport is
329 * This function may throw AsyncSocketException on error.
331 * @return Return the local address
333 SocketAddress getLocalAddress() const {
335 getLocalAddress(&addr);
339 virtual void getAddress(SocketAddress* address) const {
340 getLocalAddress(address);
344 * Get the address of the remote endpoint to which this transport is
347 * This function may throw AsyncSocketException on error.
349 * @param address The remote endpoint's address will be stored in the
350 * specified SocketAddress.
352 virtual void getPeerAddress(SocketAddress* address) const = 0;
355 * Get the address of the remote endpoint to which this transport is
358 * This function may throw AsyncSocketException on error.
360 * @return Return the remote endpoint's address
362 SocketAddress getPeerAddress() const {
364 getPeerAddress(&addr);
369 * Get the certificate used to authenticate the peer.
371 virtual ssl::X509UniquePtr getPeerCert() const { return nullptr; }
374 * The local certificate used for this connection. May be null
376 virtual const X509* getSelfCert() const {
381 * @return True iff end of record tracking is enabled
383 virtual bool isEorTrackingEnabled() const = 0;
385 virtual void setEorTracking(bool track) = 0;
387 virtual size_t getAppBytesWritten() const = 0;
388 virtual size_t getRawBytesWritten() const = 0;
389 virtual size_t getAppBytesReceived() const = 0;
390 virtual size_t getRawBytesReceived() const = 0;
392 class BufferCallback {
394 virtual ~BufferCallback() {}
395 virtual void onEgressBuffered() = 0;
396 virtual void onEgressBufferCleared() = 0;
400 * Callback class to signal when a transport that did not have replay
401 * protection gains replay protection. This is needed for 0-RTT security
404 class ReplaySafetyCallback {
406 virtual ~ReplaySafetyCallback() = default;
409 * Called when the transport becomes replay safe.
411 virtual void onReplaySafe() = 0;
415 * False if the transport does not have replay protection, but will in the
418 virtual bool isReplaySafe() const { return true; }
421 * Set the ReplaySafeCallback on this transport.
423 * This should only be called if isReplaySafe() returns false.
425 virtual void setReplaySafetyCallback(ReplaySafetyCallback* callback) {
427 CHECK(false) << "setReplaySafetyCallback() not supported";
432 virtual ~AsyncTransport() = default;
439 virtual ~ReadCallback() = default;
442 * When data becomes available, getReadBuffer() will be invoked to get the
443 * buffer into which data should be read.
445 * This method allows the ReadCallback to delay buffer allocation until
446 * data becomes available. This allows applications to manage large
447 * numbers of idle connections, without having to maintain a separate read
448 * buffer for each idle connection.
450 * It is possible that in some cases, getReadBuffer() may be called
451 * multiple times before readDataAvailable() is invoked. In this case, the
452 * data will be written to the buffer returned from the most recent call to
453 * readDataAvailable(). If the previous calls to readDataAvailable()
454 * returned different buffers, the ReadCallback is responsible for ensuring
455 * that they are not leaked.
457 * If getReadBuffer() throws an exception, returns a nullptr buffer, or
458 * returns a 0 length, the ReadCallback will be uninstalled and its
459 * readError() method will be invoked.
461 * getReadBuffer() is not allowed to change the transport state before it
462 * returns. (For example, it should never uninstall the read callback, or
463 * set a different read callback.)
465 * @param bufReturn getReadBuffer() should update *bufReturn to contain the
466 * address of the read buffer. This parameter will never
468 * @param lenReturn getReadBuffer() should update *lenReturn to contain the
469 * maximum number of bytes that may be written to the read
470 * buffer. This parameter will never be nullptr.
472 virtual void getReadBuffer(void** bufReturn, size_t* lenReturn) = 0;
475 * readDataAvailable() will be invoked when data has been successfully read
476 * into the buffer returned by the last call to getReadBuffer().
478 * The read callback remains installed after readDataAvailable() returns.
479 * It must be explicitly uninstalled to stop receiving read events.
480 * getReadBuffer() will be called at least once before each call to
481 * readDataAvailable(). getReadBuffer() will also be called before any
484 * @param len The number of bytes placed in the buffer.
487 virtual void readDataAvailable(size_t len) noexcept = 0;
490 * When data becomes available, isBufferMovable() will be invoked to figure
491 * out which API will be used, readBufferAvailable() or
492 * readDataAvailable(). If isBufferMovable() returns true, that means
493 * ReadCallback supports the IOBuf ownership transfer and
494 * readBufferAvailable() will be used. Otherwise, not.
496 * By default, isBufferMovable() always return false. If
497 * readBufferAvailable() is implemented and to be invoked, You should
498 * overwrite isBufferMovable() and return true in the inherited class.
500 * This method allows the AsyncSocket/AsyncSSLSocket do buffer allocation by
501 * itself until data becomes available. Compared with the pre/post buffer
502 * allocation in getReadBuffer()/readDataAvailabe(), readBufferAvailable()
503 * has two advantages. First, this can avoid memcpy. E.g., in
504 * AsyncSSLSocket, the decrypted data was copied from the openssl internal
505 * buffer to the readbuf buffer. With the buffer ownership transfer, the
506 * internal buffer can be directly "moved" to ReadCallback. Second, the
507 * memory allocation can be more precise. The reason is
508 * AsyncSocket/AsyncSSLSocket can allocate the memory of precise size
509 * because they have more context about the available data than
510 * ReadCallback. Think about the getReadBuffer() pre-allocate 4072 bytes
511 * buffer, but the available data is always 16KB (max OpenSSL record size).
514 virtual bool isBufferMovable() noexcept {
519 * Suggested buffer size, allocated for read operations,
520 * if callback is movable and supports folly::IOBuf
523 virtual size_t maxBufferSize() const {
524 return 64 * 1024; // 64K
528 * readBufferAvailable() will be invoked when data has been successfully
531 * Note that only either readBufferAvailable() or readDataAvailable() will
532 * be invoked according to the return value of isBufferMovable(). The timing
533 * and aftereffect of readBufferAvailable() are the same as
534 * readDataAvailable()
536 * @param readBuf The unique pointer of read buffer.
539 virtual void readBufferAvailable(std::unique_ptr<IOBuf> /*readBuf*/)
543 * readEOF() will be invoked when the transport is closed.
545 * The read callback will be automatically uninstalled immediately before
546 * readEOF() is invoked.
548 virtual void readEOF() noexcept = 0;
551 * readError() will be invoked if an error occurs reading from the
554 * The read callback will be automatically uninstalled immediately before
555 * readError() is invoked.
557 * @param ex An exception describing the error that occurred.
559 virtual void readErr(const AsyncSocketException& ex) noexcept = 0;
562 // Read methods that aren't part of AsyncTransport.
563 virtual void setReadCB(ReadCallback* callback) = 0;
564 virtual ReadCallback* getReadCallback() const = 0;
567 virtual ~AsyncReader() = default;
572 class WriteCallback {
574 virtual ~WriteCallback() = default;
577 * writeSuccess() will be invoked when all of the data has been
578 * successfully written.
580 * Note that this mainly signals that the buffer containing the data to
581 * write is no longer needed and may be freed or re-used. It does not
582 * guarantee that the data has been fully transmitted to the remote
583 * endpoint. For example, on socket-based transports, writeSuccess() only
584 * indicates that the data has been given to the kernel for eventual
587 virtual void writeSuccess() noexcept = 0;
590 * writeError() will be invoked if an error occurs writing the data.
592 * @param bytesWritten The number of bytes that were successfull
593 * @param ex An exception describing the error that occurred.
595 virtual void writeErr(size_t bytesWritten,
596 const AsyncSocketException& ex) noexcept = 0;
599 // Write methods that aren't part of AsyncTransport
600 virtual void write(WriteCallback* callback, const void* buf, size_t bytes,
601 WriteFlags flags = WriteFlags::NONE) = 0;
602 virtual void writev(WriteCallback* callback, const iovec* vec, size_t count,
603 WriteFlags flags = WriteFlags::NONE) = 0;
604 virtual void writeChain(WriteCallback* callback,
605 std::unique_ptr<IOBuf>&& buf,
606 WriteFlags flags = WriteFlags::NONE) = 0;
609 virtual ~AsyncWriter() = default;
612 // Transitional intermediate interface. This is deprecated.
613 // Wrapper around folly::AsyncTransport, that includes read/write callbacks
614 class AsyncTransportWrapper : virtual public AsyncTransport,
615 virtual public AsyncReader,
616 virtual public AsyncWriter {
618 using UniquePtr = std::unique_ptr<AsyncTransportWrapper, Destructor>;
620 // Alias for inherited members from AsyncReader and AsyncWriter
621 // to keep compatibility.
622 using ReadCallback = AsyncReader::ReadCallback;
623 using WriteCallback = AsyncWriter::WriteCallback;
624 virtual void setReadCB(ReadCallback* callback) override = 0;
625 virtual ReadCallback* getReadCallback() const override = 0;
626 virtual void write(WriteCallback* callback, const void* buf, size_t bytes,
627 WriteFlags flags = WriteFlags::NONE) override = 0;
628 virtual void writev(WriteCallback* callback, const iovec* vec, size_t count,
629 WriteFlags flags = WriteFlags::NONE) override = 0;
630 virtual void writeChain(WriteCallback* callback,
631 std::unique_ptr<IOBuf>&& buf,
632 WriteFlags flags = WriteFlags::NONE) override = 0;
634 * The transport wrapper may wrap another transport. This returns the
635 * transport that is wrapped. It returns nullptr if there is no wrapped
638 virtual const AsyncTransportWrapper* getWrappedTransport() const {
643 * In many cases when we need to set socket properties or otherwise access the
644 * underlying transport from a wrapped transport. This method allows access to
645 * the derived classes of the underlying transport.
648 const T* getUnderlyingTransport() const {
649 const AsyncTransportWrapper* current = this;
651 auto sock = dynamic_cast<const T*>(current);
655 current = current->getWrappedTransport();
661 T* getUnderlyingTransport() {
662 return const_cast<T*>(static_cast<const AsyncTransportWrapper*>(this)
663 ->getUnderlyingTransport<T>());
667 * Return the application protocol being used by the underlying transport
668 * protocol. This is useful for transports which are used to tunnel other
671 virtual std::string getApplicationProtocol() noexcept {
676 * Returns the name of the security protocol being used.
678 virtual std::string getSecurityProtocol() const { return ""; }