From 5cd9755e39ad63a2707633346c20fd3d8ff9a2a4 Mon Sep 17 00:00:00 2001
From: O2
Date: Tue, 12 Sep 2023 22:09:02 +0800
Subject: [PATCH 1/5] Update run_envoy_docker.sh (#29448)

Signed-off-by: O2
---
 ci/run_envoy_docker.sh | 1 -
 1 file changed, 1 deletion(-)

diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh
index 06f6920b479a..2224f5c3f48a 100755
--- a/ci/run_envoy_docker.sh
+++ b/ci/run_envoy_docker.sh
@@ -161,6 +161,5 @@ docker run --rm \
     -e ENVOY_BUILD_ARCH \
     -e SYSTEM_STAGEDISPLAYNAME \
     -e SYSTEM_JOBDISPLAYNAME \
-    -e SYSTEM_PULLREQUEST_PULLREQUESTNUMBER \
     "${ENVOY_BUILD_IMAGE}" \
     "${START_COMMAND[@]}"

From 5cd9a5d601de47a185cc43b244233fd7c8a8795f Mon Sep 17 00:00:00 2001
From: ohadvano <49730675+ohadvano@users.noreply.github.com>
Date: Tue, 12 Sep 2023 18:29:44 +0300
Subject: [PATCH 2/5] dfp_cluster: use requestStreamInfo() directly (#29539)

---------

Signed-off-by: ohadvano
Signed-off-by: ohadvano <49730675+ohadvano@users.noreply.github.com>
---
 source/common/tcp_proxy/tcp_proxy.h               |  4 ++++
 .../clusters/dynamic_forward_proxy/cluster.cc     | 16 ++++++----------
 .../dynamic_forward_proxy/cluster_test.cc         |  5 +++--
 3 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/source/common/tcp_proxy/tcp_proxy.h b/source/common/tcp_proxy/tcp_proxy.h
index 929953d273c5..ae360352de07 100644
--- a/source/common/tcp_proxy/tcp_proxy.h
+++ b/source/common/tcp_proxy/tcp_proxy.h
@@ -423,6 +423,10 @@ class Filter : public Network::ReadFilter,
     return &read_callbacks_->connection();
   }

+  const StreamInfo::StreamInfo* requestStreamInfo() const override {
+    return &read_callbacks_->connection().streamInfo();
+  }
+
   Network::TransportSocketOptionsConstSharedPtr upstreamTransportSocketOptions() const override {
     return transport_socket_options_;
   }

diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc
index fea3b34f11d0..939a77cf5726 100644
--- a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc
+++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc
@@ -343,12 +343,10 @@ Cluster::LoadBalancer::chooseHost(Upstream::LoadBalancerContext* context) {
   }

   const Router::StringAccessor* dynamic_host_filter_state = nullptr;
-  if (context->downstreamConnection()) {
+  if (context->requestStreamInfo()) {
     dynamic_host_filter_state =
-        context->downstreamConnection()
-            ->streamInfo()
-            .filterState()
-            .getDataReadOnly<Router::StringAccessor>(DynamicHostFilterStateKey);
+        context->requestStreamInfo()->filterState().getDataReadOnly<Router::StringAccessor>(
+            DynamicHostFilterStateKey);
   }

   absl::string_view raw_host;
@@ -370,12 +368,10 @@ Cluster::LoadBalancer::chooseHost(Upstream::LoadBalancerContext* context) {
                            .resolve(nullptr)
                            .factory_.implementsSecureTransport();
   uint32_t port = is_secure ? 443 : 80;
-  if (context->downstreamConnection()) {
+  if (context->requestStreamInfo()) {
     const StreamInfo::UInt32Accessor* dynamic_port_filter_state =
-        context->downstreamConnection()
-            ->streamInfo()
-            .filterState()
-            .getDataReadOnly<StreamInfo::UInt32Accessor>(DynamicPortFilterStateKey);
+        context->requestStreamInfo()->filterState().getDataReadOnly<StreamInfo::UInt32Accessor>(
+            DynamicPortFilterStateKey);
     if (dynamic_port_filter_state != nullptr && dynamic_port_filter_state->value() > 0 &&
         dynamic_port_filter_state->value() <= 65535) {
       port = dynamic_port_filter_state->value();

diff --git a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc
index 0c72656c0668..bbe4ab7ed27e 100644
--- a/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc
+++ b/test/extensions/clusters/dynamic_forward_proxy/cluster_test.cc
@@ -64,6 +64,7 @@ class ClusterTest : public testing::Test,
     ON_CALL(lb_context_, downstreamHeaders()).WillByDefault(Return(&downstream_headers_));
     ON_CALL(connection_, streamInfo()).WillByDefault(ReturnRef(stream_info_));
+    ON_CALL(lb_context_, requestStreamInfo()).WillByDefault(Return(&stream_info_));
     ON_CALL(lb_context_, downstreamConnection()).WillByDefault(Return(&connection_));

     member_update_cb_ = cluster_->prioritySet().addMemberUpdateCb(
@@ -118,8 +119,8 @@ class ClusterTest : public testing::Test,
   }

   Upstream::MockLoadBalancerContext* setFilterStateHostAndReturnContext(const std::string& host) {
-    StreamInfo::FilterState& filter_state = const_cast<StreamInfo::FilterState&>(
-        lb_context_.downstreamConnection()->streamInfo().filterState());
+    StreamInfo::FilterState& filter_state =
+        const_cast<StreamInfo::FilterState&>(lb_context_.requestStreamInfo()->filterState());

     filter_state.setData(
         "envoy.upstream.dynamic_host", std::make_shared<Router::StringAccessorImpl>(host),

From 8f42e907b6fdb9c578dc0978baa0bf8402eb27e6 Mon Sep 17 00:00:00 2001
From: botengyao
Date: Tue, 12 Sep 2023 11:44:31 -0400
Subject: [PATCH 3/5] TCP connection: detect and send RST (#28817)

This PR adds support for detecting a TCP RST from the peer and for sending an
RST from Envoy, both controlled by a flag. A filter or application can request
an RST close through the new ConnectionCloseType::AbortReset close type,
similar in usage to enableHalfClose(), and can inspect how a connection was
closed through the new detectedCloseType() accessor. In addition, the whole
feature is guarded by the runtime flag
envoy_reloadable_features_detect_and_raise_rst_tcp_connection.
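As an informal illustration of the new API (a minimal sketch, not part of this
patch: the filter class and its read_callbacks_ member are hypothetical, while
the close type, event, and accessor are the ones added here), a network filter
could request an RST close and later distinguish a peer-initiated RST:

    // Ask Envoy to abort the connection with a TCP RST instead of a normal
    // FIN close. If the runtime guard
    // envoy.reloadable_features.detect_and_raise_rst_tcp_connection is
    // disabled, this is converted to ConnectionCloseType::Abort.
    read_callbacks_->connection().close(Network::ConnectionCloseType::AbortReset);

    // In a Network::ConnectionCallbacks implementation, check how the
    // connection was actually closed once a close event fires.
    void onEvent(Network::ConnectionEvent event) override {
      if (event == Network::ConnectionEvent::RemoteClose &&
          read_callbacks_->connection().detectedCloseType() ==
              Network::DetectedCloseType::RemoteReset) {
        // The peer reset the connection: an RST was detected on read/write.
      }
    }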
---------

Signed-off-by: Boteng Yao
---
 changelogs/current.yaml                            |   4 +
 contrib/golang/common/go/api/type.go               |   5 +
 envoy/network/BUILD                                |   1 +
 envoy/network/connection.h                         |  18 +-
 envoy/network/transport_socket.h                   |  15 +
 envoy/stream_info/stream_info.h                    |   2 +
 envoy/tcp/async_tcp_client.h                       |   5 +
 source/common/network/BUILD                        |   3 +
 source/common/network/connection_impl.cc           |  58 +++
 source/common/network/connection_impl.h            |   6 +
 .../network/multi_connection_base_impl.cc          |   4 +
 .../network/multi_connection_base_impl.h           |   1 +
 source/common/network/raw_buffer_socket.cc         |  16 +-
 .../common/network/socket_option_factory.cc        |  12 +
 source/common/network/socket_option_factory.h      |   1 +
 .../quic_filter_manager_connection_impl.h          |   4 +
 source/common/runtime/runtime_features.cc          |   1 +
 source/common/tcp/async_tcp_client_impl.cc         |   4 +-
 source/common/tcp/async_tcp_client_impl.h          |   3 +
 source/common/tcp_proxy/tcp_proxy.cc               |   2 +
 source/common/tcp_proxy/upstream.cc                |   1 +
 source/server/api_listener_impl.h                  |   3 +
 test/common/network/connection_impl_test.cc        | 459 +++++++++++++++++-
 .../network/socket_option_factory_test.cc          |  15 +
 test/common/tcp/async_tcp_client_impl_test.cc      |  49 +-
 .../proxy_protocol/proxy_protocol_test.cc          |  28 ++
 test/integration/fake_upstream.cc                  |  28 ++
 test/integration/fake_upstream.h                   |  18 +
 .../filters/test_network_async_tcp_filter.cc       |  51 +-
 test/integration/integration_tcp_client.cc         |   4 +
 test/integration/integration_tcp_client.h          |   1 +
 .../tcp_async_client_integration_test.cc           | 106 ++++
 test/mocks/network/connection.cc                   |  11 +-
 test/mocks/network/connection.h                    |   3 +
 test/mocks/network/transport_socket.cc             |   6 +
 35 files changed, 908 insertions(+), 40 deletions(-)

diff --git a/changelogs/current.yaml b/changelogs/current.yaml
index c45710a1c4e0..5bbbdf7377a8 100644
--- a/changelogs/current.yaml
+++ b/changelogs/current.yaml
@@ -240,6 +240,10 @@ new_features:
   change: |
     added :ref:`record_headers_received_time ` to control writing request and response headers
     received time in trace output.
+- area: tcp
+  change: |
+    added support to detect and send TCP RST for raw buffer socket based connections. This is currently supported on Linux only.
+    It can be disabled by the runtime guard ``envoy_reloadable_features_detect_and_raise_rst_tcp_connection``.
 - area: upstream
   change: |

diff --git a/contrib/golang/common/go/api/type.go b/contrib/golang/common/go/api/type.go
index f2a0fa7dfb11..46ee34d055ca 100644
--- a/contrib/golang/common/go/api/type.go
+++ b/contrib/golang/common/go/api/type.go
@@ -349,6 +349,9 @@ const (
 	FlushWriteAndDelay ConnectionCloseType = 2
 	// Do not write/flush any pending data and immediately raise ConnectionEvent::LocalClose
 	Abort ConnectionCloseType = 3
+	// Do not write/flush any pending data and immediately raise
+	// ConnectionEvent::LocalClose. Envoy will try to close the connection with the RST flag.
+	AbortReset ConnectionCloseType = 4
 )

 func (t ConnectionCloseType) String() string {
@@ -361,6 +364,8 @@ func (t ConnectionCloseType) String() string {
 		return "FlushWriteAndDelay"
 	case Abort:
 		return "Abort"
+	case AbortReset:
+		return "AbortReset"
 	}
 	return "unknown"
 }

diff --git a/envoy/network/BUILD b/envoy/network/BUILD
index 1e98d0e2cd14..8144f9eab260 100644
--- a/envoy/network/BUILD
+++ b/envoy/network/BUILD
@@ -184,6 +184,7 @@ envoy_cc_library(
         ":io_handle_interface",
         ":post_io_action_interface",
         ":proxy_protocol_options_lib",
+        "//envoy/api:io_error_interface",
        "//envoy/buffer:buffer_interface",
        "//envoy/network:listen_socket_interface",
        "//envoy/ssl:connection_interface",

diff --git a/envoy/network/connection.h b/envoy/network/connection.h
index 8c48af9f0928..f5b6f5b885e0 100644
--- a/envoy/network/connection.h
+++ b/envoy/network/connection.h
@@ -72,7 +72,18 @@ enum class ConnectionCloseType {
                      // raise ConnectionEvent::LocalClose
   FlushWriteAndDelay, // Flush pending write data and delay raising a ConnectionEvent::LocalClose
                       // until the delayed_close_timeout expires
-  Abort // Do not write/flush any pending data and immediately raise ConnectionEvent::LocalClose
+  Abort,     // Do not write/flush any pending data and immediately raise ConnectionEvent::LocalClose
+  AbortReset // Do not write/flush any pending data and immediately raise
+             // ConnectionEvent::LocalClose. Envoy will try to close the connection with the RST flag.
+};
+
+/**
+ * Type of connection close which is detected from the socket.
+ */
+enum class DetectedCloseType {
+  Normal,      // The normal socket close from Envoy's connection perspective.
+  LocalReset,  // The local reset initiated from Envoy.
+  RemoteReset, // The peer reset detected by the connection.
 };

 /**
@@ -153,6 +164,11 @@ class Connection : public Event::DeferredDeletable,
    */
   virtual void close(ConnectionCloseType type, absl::string_view details) PURE;

+  /**
+   * @return the detected close type from the socket.
+   */
+  virtual DetectedCloseType detectedCloseType() const PURE;
+
   /**
    * @return Event::Dispatcher& the dispatcher backing this connection.
    */

diff --git a/envoy/network/transport_socket.h b/envoy/network/transport_socket.h
index e5bdc1bf1b14..36d9edca1084 100644
--- a/envoy/network/transport_socket.h
+++ b/envoy/network/transport_socket.h
@@ -2,6 +2,7 @@

 #include <memory>

+#include "envoy/api/io_error.h"
 #include "envoy/buffer/buffer.h"
 #include "envoy/common/optref.h"
 #include "envoy/common/pure.h"
@@ -37,6 +38,15 @@ enum class ConnectionEvent;
 /**
  * Result of each I/O event.
  */
 struct IoResult {
+  IoResult(PostIoAction action, uint64_t bytes_processed, bool end_stream_read)
+      : action_(action), bytes_processed_(bytes_processed), end_stream_read_(end_stream_read),
+        err_code_(absl::nullopt) {}
+
+  IoResult(PostIoAction action, uint64_t bytes_processed, bool end_stream_read,
+           absl::optional<Api::IoError::IoErrorCode> err_code)
+      : action_(action), bytes_processed_(bytes_processed), end_stream_read_(end_stream_read),
+        err_code_(err_code) {}
+
   PostIoAction action_;

   /**
@@ -49,6 +59,11 @@ struct IoResult {
    * can only be true for read operations.
    */
   bool end_stream_read_;
+
+  /**
+   * The underlying I/O error code.
+   */
+  absl::optional<Api::IoError::IoErrorCode> err_code_;
 };

 /**

diff --git a/envoy/stream_info/stream_info.h b/envoy/stream_info/stream_info.h
index 9196813fd4ef..a4f2faca84ca 100644
--- a/envoy/stream_info/stream_info.h
+++ b/envoy/stream_info/stream_info.h
@@ -231,6 +231,8 @@ struct LocalCloseReasonValues {
       "closing_upstream_tcp_connection_due_to_downstream_remote_close";
   const std::string ClosingUpstreamTcpDueToDownstreamLocalClose =
       "closing_upstream_tcp_connection_due_to_downstream_local_close";
+  const std::string ClosingUpstreamTcpDueToDownstreamResetClose =
+      "closing_upstream_tcp_connection_due_to_downstream_reset_close";
   const std::string NonPooledTcpConnectionHostHealthFailure =
       "non_pooled_tcp_connection_host_health_failure";
 };

diff --git a/envoy/tcp/async_tcp_client.h b/envoy/tcp/async_tcp_client.h
index 0c8aa5c9efb6..684dc3aa47a7 100644
--- a/envoy/tcp/async_tcp_client.h
+++ b/envoy/tcp/async_tcp_client.h
@@ -52,6 +52,11 @@ class AsyncTcpClient {
    */
   virtual void close(Network::ConnectionCloseType type) PURE;

+  /**
+   * @return the detected close type from the socket.
+   */
+  virtual Network::DetectedCloseType detectedCloseType() const PURE;
+
   /**
    * Write data through the client.
    * @param data the buffered data.

diff --git a/source/common/network/BUILD b/source/common/network/BUILD
index af359e896eda..1c87ec7b34e3 100644
--- a/source/common/network/BUILD
+++ b/source/common/network/BUILD
@@ -108,6 +108,7 @@ envoy_cc_library(
         "//envoy/event:timer_interface",
         "//envoy/network:connection_interface",
         "//envoy/network:filter_interface",
+        "//envoy/network:socket_interface",
         "//envoy/server/overload:thread_local_overload_state",
         "//source/common/buffer:buffer_lib",
         "//source/common/buffer:watermark_buffer_lib",
@@ -117,6 +118,8 @@ envoy_cc_library(
         "//source/common/common:minimal_logger_lib",
         "//source/common/event:libevent_lib",
         "//source/common/network:listen_socket_lib",
+        "//source/common/network:socket_option_factory_lib",
+        "//source/common/runtime:runtime_features_lib",
         "//source/common/stream_info:stream_info_lib",
         "@envoy_api//envoy/config/core/v3:pkg_cc_proto",
     ],

diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc
index 8ed55494fe07..d3f7634d7f8b 100644
--- a/source/common/network/connection_impl.cc
+++ b/source/common/network/connection_impl.cc
@@ -20,7 +20,10 @@
 #include "source/common/network/address_impl.h"
 #include "source/common/network/listen_socket_impl.h"
 #include "source/common/network/raw_buffer_socket.h"
+#include "source/common/network/socket_option_factory.h"
+#include "source/common/network/socket_option_impl.h"
 #include "source/common/network/utility.h"
+#include "source/common/runtime/runtime_features.h"

 namespace Envoy {
 namespace Network {
@@ -82,6 +85,10 @@ ConnectionImpl::ConnectionImpl(Event::Dispatcher& dispatcher, ConnectionSocketPt
       write_end_stream_(false), current_write_end_stream_(false), dispatch_buffered_data_(false),
       transport_wants_read_(false) {
+  // Cache the runtime feature check in a bool flag to avoid repeated runtime lookups.
+  enable_rst_detect_send_ = Runtime::runtimeFeatureEnabled(
+      "envoy.reloadable_features.detect_and_raise_rst_tcp_connection");
+
   if (!connected) {
     connecting_ = true;
   }
@@ -137,6 +144,23 @@ void ConnectionImpl::close(ConnectionCloseType type) {
   uint64_t data_to_write = write_buffer_->length();
   ENVOY_CONN_LOG_EVENT(debug, "connection_closing", "closing data_to_write={} type={}", *this,
                        data_to_write, enumToInt(type));
+
+  // RST will be sent only if enable_rst_detect_send_ is true; otherwise the close is converted
+  // to a normal ConnectionCloseType::Abort.
+  if (!enable_rst_detect_send_ && type == ConnectionCloseType::AbortReset) {
+    type = ConnectionCloseType::Abort;
+  }
+
+  // Envoy closes the connection by sending an RST; the socket is closed immediately.
+  if (type == ConnectionCloseType::AbortReset) {
+    ENVOY_CONN_LOG(
+        trace, "connection closing type=AbortReset, setting LocalReset to the detected close type.",
+        *this);
+    setDetectedCloseType(DetectedCloseType::LocalReset);
+    closeSocket(ConnectionEvent::LocalClose);
+    return;
+  }
+
   const bool delayed_close_timeout_set = delayed_close_timeout_.count() > 0;
   if (data_to_write == 0 || type == ConnectionCloseType::NoFlush ||
       type == ConnectionCloseType::Abort || !transport_socket_->canFlushClose()) {
@@ -236,6 +260,10 @@ bool ConnectionImpl::filterChainWantsData() {
          (read_disable_count_ == 1 && read_buffer_->highWatermarkTriggered());
 }

+void ConnectionImpl::setDetectedCloseType(DetectedCloseType close_type) {
+  detected_close_type_ = close_type;
+}
+
 void ConnectionImpl::closeSocket(ConnectionEvent close_type) {
   if (!ConnectionImpl::ioHandle().isOpen()) {
     return;
   }
@@ -262,6 +290,17 @@ void ConnectionImpl::closeSocket(ConnectionEvent close_type) {

   connection_stats_.reset();

+  if (enable_rst_detect_send_ && (detected_close_type_ == DetectedCloseType::RemoteReset ||
+                                  detected_close_type_ == DetectedCloseType::LocalReset)) {
+    const bool ok = Network::Socket::applyOptions(
+        Network::SocketOptionFactory::buildZeroSoLingerOptions(), *socket_,
+        envoy::config::core::v3::SocketOption::STATE_LISTENING);
+    if (!ok) {
+      ENVOY_LOG_EVERY_POW_2(error, "rst setting so_linger=0 failed on connection {}", id());
+    }
+  }
+
+  // It is safe to call close() since there is an IO handle check.
   socket_->close();

   // Call the base class directly as close() is called in the destructor.
@@ -642,6 +681,15 @@ void ConnectionImpl::onReadReady() {
   uint64_t new_buffer_size = read_buffer_->length();
   updateReadBufferStats(result.bytes_processed_, new_buffer_size);

+  // The socket is closed immediately when an RST is received.
+  if (enable_rst_detect_send_ && result.err_code_.has_value() &&
+      result.err_code_ == Api::IoError::IoErrorCode::ConnectionReset) {
+    ENVOY_CONN_LOG(trace, "read: rst close from peer", *this);
+    setDetectedCloseType(DetectedCloseType::RemoteReset);
+    closeSocket(ConnectionEvent::RemoteClose);
+    return;
+  }
+
   // If this connection doesn't have half-close semantics, translate end_stream into
   // a connection close.
   if ((!enable_half_close_ && result.end_stream_read_)) {
@@ -714,6 +762,16 @@ void ConnectionImpl::onWriteReady() {
   uint64_t new_buffer_size = write_buffer_->length();
   updateWriteBufferStats(result.bytes_processed_, new_buffer_size);

+  // The socket is closed immediately when an RST is detected.
+  if (enable_rst_detect_send_ && result.err_code_.has_value() &&
+      result.err_code_ == Api::IoError::IoErrorCode::ConnectionReset) {
+    // Discard anything in the buffer.
+    ENVOY_CONN_LOG(debug, "write: rst close from peer.", *this);
+    setDetectedCloseType(DetectedCloseType::RemoteReset);
+    closeSocket(ConnectionEvent::RemoteClose);
+    return;
+  }
+
   // NOTE: If the delayed_close_timer_ is set, it must only trigger after a delayed_close_timeout_
   // period of inactivity from the last write event. Therefore, the timer must be reset to its
   // original timeout value unless the socket is going to be closed as a result of the doWrite().

diff --git a/source/common/network/connection_impl.h b/source/common/network/connection_impl.h
index 44a2a4efb74c..14279f51f192 100644
--- a/source/common/network/connection_impl.h
+++ b/source/common/network/connection_impl.h
@@ -148,6 +148,7 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback
   // ScopeTrackedObject
   void dumpState(std::ostream& os, int indent_level) const override;
+  DetectedCloseType detectedCloseType() const override { return detected_close_type_; }

 protected:
   // A convenience function which returns true if
@@ -213,6 +214,9 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback
   // Returns true iff end of stream has been both written and read.
   bool bothSidesHalfClosed();

+  // Set the detected close type for this connection.
+  void setDetectedCloseType(DetectedCloseType close_type);
+
   static std::atomic<uint64_t> next_global_id_;

   std::list<BytesSentCb> bytes_sent_callbacks_;
@@ -225,6 +229,7 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback
   uint64_t last_write_buffer_size_{};
   Buffer::Instance* current_write_buffer_{};
   uint32_t read_disable_count_{0};
+  DetectedCloseType detected_close_type_{DetectedCloseType::Normal};
   bool write_buffer_above_high_watermark_ : 1;
   bool detect_early_close_ : 1;
   bool enable_half_close_ : 1;
@@ -239,6 +244,7 @@ class ConnectionImpl : public ConnectionImplBase, public TransportSocketCallback
   // read_disable_count_ == 0 to ensure that read resumption happens when remaining bytes are held
   // in transport socket internal buffers.
bool transport_wants_read_ : 1; + bool enable_rst_detect_send_ : 1; }; class ServerConnectionImpl : public ConnectionImpl, virtual public ServerConnection { diff --git a/source/common/network/multi_connection_base_impl.cc b/source/common/network/multi_connection_base_impl.cc index dfd149f2b918..55ab41e973cb 100644 --- a/source/common/network/multi_connection_base_impl.cc +++ b/source/common/network/multi_connection_base_impl.cc @@ -358,6 +358,10 @@ void MultiConnectionBaseImpl::close(ConnectionCloseType type, absl::string_view connections_[0]->close(type, details); } +DetectedCloseType MultiConnectionBaseImpl::detectedCloseType() const { + return connections_[0]->detectedCloseType(); +}; + Event::Dispatcher& MultiConnectionBaseImpl::dispatcher() const { ASSERT(&dispatcher_ == &connections_[0]->dispatcher()); return connections_[0]->dispatcher(); diff --git a/source/common/network/multi_connection_base_impl.h b/source/common/network/multi_connection_base_impl.h index c88bc9924609..6bb373114809 100644 --- a/source/common/network/multi_connection_base_impl.h +++ b/source/common/network/multi_connection_base_impl.h @@ -127,6 +127,7 @@ class MultiConnectionBaseImpl : public ClientConnection, Event::Dispatcher& dispatcher() const override; void close(ConnectionCloseType type) override { close(type, ""); } void close(ConnectionCloseType type, absl::string_view details) override; + DetectedCloseType detectedCloseType() const override; bool readEnabled() const override; bool aboveHighWatermark() const override; void hashKey(std::vector& hash_key) const override; diff --git a/source/common/network/raw_buffer_socket.cc b/source/common/network/raw_buffer_socket.cc index 6ad896d095ae..f873ebaa2c90 100644 --- a/source/common/network/raw_buffer_socket.cc +++ b/source/common/network/raw_buffer_socket.cc @@ -17,6 +17,7 @@ IoResult RawBufferSocket::doRead(Buffer::Instance& buffer) { PostIoAction action = PostIoAction::KeepOpen; uint64_t bytes_read = 0; bool end_stream = false; + absl::optional err = absl::nullopt; do { Api::IoCallUint64Result result = callbacks_->ioHandle().read(buffer, absl::nullopt); @@ -34,21 +35,23 @@ IoResult RawBufferSocket::doRead(Buffer::Instance& buffer) { } } else { // Remote error (might be no data). 
- ENVOY_CONN_LOG(trace, "read error: {}", callbacks_->connection(), - result.err_->getErrorDetails()); + ENVOY_CONN_LOG(trace, "read error: {}, code: {}", callbacks_->connection(), + result.err_->getErrorDetails(), static_cast(result.err_->getErrorCode())); if (result.err_->getErrorCode() != Api::IoError::IoErrorCode::Again) { action = PostIoAction::Close; + err = result.err_->getErrorCode(); } break; } } while (true); - return {action, bytes_read, end_stream}; + return {action, bytes_read, end_stream, err}; } IoResult RawBufferSocket::doWrite(Buffer::Instance& buffer, bool end_stream) { PostIoAction action; uint64_t bytes_written = 0; + absl::optional err = absl::nullopt; ASSERT(!shutdown_ || buffer.length() == 0); do { if (buffer.length() == 0) { @@ -67,18 +70,19 @@ IoResult RawBufferSocket::doWrite(Buffer::Instance& buffer, bool end_stream) { ENVOY_CONN_LOG(trace, "write returns: {}", callbacks_->connection(), result.return_value_); bytes_written += result.return_value_; } else { - ENVOY_CONN_LOG(trace, "write error: {}", callbacks_->connection(), - result.err_->getErrorDetails()); + ENVOY_CONN_LOG(trace, "write error: {}, code: {}", callbacks_->connection(), + result.err_->getErrorDetails(), static_cast(result.err_->getErrorCode())); if (result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) { action = PostIoAction::KeepOpen; } else { + err = result.err_->getErrorCode(); action = PostIoAction::Close; } break; } } while (true); - return {action, bytes_written, false}; + return {action, bytes_written, false, err}; } std::string RawBufferSocket::protocol() const { return EMPTY_STRING; } diff --git a/source/common/network/socket_option_factory.cc b/source/common/network/socket_option_factory.cc index 4c9e144e859e..18ff956fa505 100644 --- a/source/common/network/socket_option_factory.cc +++ b/source/common/network/socket_option_factory.cc @@ -149,5 +149,17 @@ std::unique_ptr SocketOptionFactory::buildUdpGroOptions() { return options; } +std::unique_ptr SocketOptionFactory::buildZeroSoLingerOptions() { + std::unique_ptr options = std::make_unique(); + struct linger linger; + linger.l_onoff = 1; + linger.l_linger = 0; + absl::string_view linger_bstr{reinterpret_cast(&linger), sizeof(struct linger)}; + options->push_back(std::make_shared( + envoy::config::core::v3::SocketOption::STATE_LISTENING, + ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_LINGER), linger_bstr)); + return options; +} + } // namespace Network } // namespace Envoy diff --git a/source/common/network/socket_option_factory.h b/source/common/network/socket_option_factory.h index e40730167a1b..f0de148cbd66 100644 --- a/source/common/network/socket_option_factory.h +++ b/source/common/network/socket_option_factory.h @@ -36,6 +36,7 @@ class SocketOptionFactory : Logger::Loggable { static std::unique_ptr buildRxQueueOverFlowOptions(); static std::unique_ptr buildReusePortOptions(); static std::unique_ptr buildUdpGroOptions(); + static std::unique_ptr buildZeroSoLingerOptions(); }; } // namespace Network } // namespace Envoy diff --git a/source/common/quic/quic_filter_manager_connection_impl.h b/source/common/quic/quic_filter_manager_connection_impl.h index 154123b64160..b44515cea102 100644 --- a/source/common/quic/quic_filter_manager_connection_impl.h +++ b/source/common/quic/quic_filter_manager_connection_impl.h @@ -59,6 +59,10 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase, } close(type); } + + Network::DetectedCloseType detectedCloseType() const override { + return 
Network::DetectedCloseType::Normal; + } Event::Dispatcher& dispatcher() const override { return dispatcher_; } std::string nextProtocol() const override { return EMPTY_STRING; } // No-op. TCP_NODELAY doesn't apply to UDP. diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 95d1bfb84beb..71847248bb51 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -37,6 +37,7 @@ RUNTIME_GUARD(envoy_reloadable_features_check_mep_on_first_eject); RUNTIME_GUARD(envoy_reloadable_features_conn_pool_delete_when_idle); RUNTIME_GUARD(envoy_reloadable_features_copy_response_code_to_downstream_stream_info); RUNTIME_GUARD(envoy_reloadable_features_count_unused_mapped_pages_as_free); +RUNTIME_GUARD(envoy_reloadable_features_detect_and_raise_rst_tcp_connection); RUNTIME_GUARD(envoy_reloadable_features_dfp_mixed_scheme); RUNTIME_GUARD(envoy_reloadable_features_enable_aws_credentials_file); RUNTIME_GUARD(envoy_reloadable_features_enable_compression_bomb_protection); diff --git a/source/common/tcp/async_tcp_client_impl.cc b/source/common/tcp/async_tcp_client_impl.cc index 49f6bba350d8..93b3dc4c188b 100644 --- a/source/common/tcp/async_tcp_client_impl.cc +++ b/source/common/tcp/async_tcp_client_impl.cc @@ -109,10 +109,12 @@ void AsyncTcpClientImpl::onEvent(Network::ConnectionEvent event) { conn_length_ms_->complete(); conn_length_ms_.reset(); } - disableConnectTimeout(); reportConnectionDestroy(event); disconnected_ = true; + if (connection_) { + detected_close_ = connection_->detectedCloseType(); + } dispatcher_.deferredDelete(std::move(connection_)); if (callbacks_) { diff --git a/source/common/tcp/async_tcp_client_impl.h b/source/common/tcp/async_tcp_client_impl.h index 9494ae47a1db..1786cfaaf589 100644 --- a/source/common/tcp/async_tcp_client_impl.h +++ b/source/common/tcp/async_tcp_client_impl.h @@ -31,6 +31,8 @@ class AsyncTcpClientImpl : public AsyncTcpClient, void close(Network::ConnectionCloseType type) override; + Network::DetectedCloseType detectedCloseType() const override { return detected_close_; } + /** * @return true means a host is successfully picked from a Cluster. * This doesn't mean the connection is established. @@ -95,6 +97,7 @@ class AsyncTcpClientImpl : public AsyncTcpClient, Stats::TimespanPtr conn_length_ms_; Event::TimerPtr connect_timer_; AsyncTcpClientCallbacks* callbacks_{}; + Network::DetectedCloseType detected_close_{Network::DetectedCloseType::Normal}; bool disconnected_{true}; bool enable_half_close_{false}; }; diff --git a/source/common/tcp_proxy/tcp_proxy.cc b/source/common/tcp_proxy/tcp_proxy.cc index fcd09356ad14..0b07d56cd641 100644 --- a/source/common/tcp_proxy/tcp_proxy.cc +++ b/source/common/tcp_proxy/tcp_proxy.cc @@ -750,6 +750,7 @@ void Filter::onDownstreamEvent(Network::ConnectionEvent event) { disableIdleTimer(); } } + if (generic_conn_pool_) { if (event == Network::ConnectionEvent::LocalClose || event == Network::ConnectionEvent::RemoteClose) { @@ -794,6 +795,7 @@ void Filter::onUpstreamEvent(Network::ConnectionEvent event) { establishUpstreamConnection(); } } else { + // TODO(botengyao): propagate RST back to downstream connection if RST is received. 
if (read_callbacks_->connection().state() == Network::Connection::State::Open) { read_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); } diff --git a/source/common/tcp_proxy/upstream.cc b/source/common/tcp_proxy/upstream.cc index ea9e2ba73eb1..82c389d6faf0 100644 --- a/source/common/tcp_proxy/upstream.cc +++ b/source/common/tcp_proxy/upstream.cc @@ -58,6 +58,7 @@ Ssl::ConnectionInfoConstSharedPtr TcpUpstream::getUpstreamConnectionSslInfo() { Tcp::ConnectionPool::ConnectionData* TcpUpstream::onDownstreamEvent(Network::ConnectionEvent event) { + // TODO(botengyao): propagate RST back to upstream connection if RST is received from downstream. if (event == Network::ConnectionEvent::RemoteClose) { // The close call may result in this object being deleted. Latch the // connection locally so it can be returned for potential draining. diff --git a/source/server/api_listener_impl.h b/source/server/api_listener_impl.h index 8c795124e7cf..ee2038d454b3 100644 --- a/source/server/api_listener_impl.h +++ b/source/server/api_listener_impl.h @@ -122,6 +122,9 @@ class ApiListenerImplBase : public ApiListener, } void close(Network::ConnectionCloseType) override {} void close(Network::ConnectionCloseType, absl::string_view) override {} + Network::DetectedCloseType detectedCloseType() const override { + return Network::DetectedCloseType::Normal; + }; Event::Dispatcher& dispatcher() const override { return dispatcher_; } uint64_t id() const override { return 12345; } void hashKey(std::vector&) const override {} diff --git a/test/common/network/connection_impl_test.cc b/test/common/network/connection_impl_test.cc index 27f558208237..310019fdfdde 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -189,7 +189,7 @@ class ConnectionImplTestBase { })); EXPECT_CALL(listener_callbacks_, recordConnectionsAcceptedOnSocketEvent(_)); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::Connected)) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { + .WillOnce(InvokeWithoutArgs([&]() -> void { expected_callbacks--; if (expected_callbacks == 0) { dispatcher_->exit(); @@ -209,7 +209,7 @@ class ConnectionImplTestBase { client_connection_->close(ConnectionCloseType::NoFlush); if (wait_for_remote_close) { EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose)) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); })); + .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); })); dispatcher_->run(Event::Dispatcher::RunType::Block); } else { dispatcher_->run(Event::Dispatcher::RunType::NonBlock); @@ -368,9 +368,8 @@ TEST_P(ConnectionImplTest, CloseDuringConnectCallback) { std::make_shared>(); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::Connected)) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { - client_connection_->close(ConnectionCloseType::NoFlush); - })); + .WillOnce(InvokeWithoutArgs( + [&]() -> void { client_connection_->close(ConnectionCloseType::NoFlush); })); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)); EXPECT_CALL(later_callbacks, onEvent(ConnectionEvent::LocalClose)); @@ -388,7 +387,7 @@ TEST_P(ConnectionImplTest, CloseDuringConnectCallback) { EXPECT_CALL(listener_callbacks_, recordConnectionsAcceptedOnSocketEvent(_)); EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose)) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); })); + .WillOnce(InvokeWithoutArgs([&]() -> void { 
dispatcher_->exit(); })); dispatcher_->run(Event::Dispatcher::RunType::Block); } @@ -418,7 +417,7 @@ TEST_P(ConnectionImplTest, UnregisterRegisterDuringConnectCallback) { })); EXPECT_CALL(listener_callbacks_, recordConnectionsAcceptedOnSocketEvent(_)); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::Connected)) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { + .WillOnce(InvokeWithoutArgs([&]() -> void { expected_callbacks--; // Register the new callback. It should immediately get the Connected // event without an extra dispatch loop. @@ -461,7 +460,7 @@ TEST_P(ConnectionImplTest, ImmediateConnectError) { // Verify that also the immediate connect errors generate a remote close event. EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); })); + .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); })); dispatcher_->run(Event::Dispatcher::RunType::Block); EXPECT_THAT(client_connection_->transportFailureReason(), StartsWith("immediate connect error")); @@ -552,9 +551,8 @@ TEST_P(ConnectionImplTest, SocketOptions) { client_connection_->connect(); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::Connected)) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { - client_connection_->close(ConnectionCloseType::NoFlush); - })); + .WillOnce(InvokeWithoutArgs( + [&]() -> void { client_connection_->close(ConnectionCloseType::NoFlush); })); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)); read_filter_ = std::make_shared>(); @@ -578,7 +576,7 @@ TEST_P(ConnectionImplTest, SocketOptions) { EXPECT_CALL(listener_callbacks_, recordConnectionsAcceptedOnSocketEvent(_)); EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose)) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { + .WillOnce(InvokeWithoutArgs([&]() -> void { upstream_connection_->close(ConnectionCloseType::NoFlush); dispatcher_->exit(); })); @@ -602,9 +600,8 @@ TEST_P(ConnectionImplTest, SocketOptionsFailureTest) { client_connection_->connect(); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::Connected)) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { - client_connection_->close(ConnectionCloseType::NoFlush); - })); + .WillOnce(InvokeWithoutArgs( + [&]() -> void { client_connection_->close(ConnectionCloseType::NoFlush); })); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)); read_filter_ = std::make_shared>(); @@ -631,7 +628,7 @@ TEST_P(ConnectionImplTest, SocketOptionsFailureTest) { EXPECT_CALL(upstream_callbacks_, onEvent(ConnectionEvent::LocalClose)); EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose)) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { + .WillOnce(InvokeWithoutArgs([&]() -> void { upstream_connection_->close(ConnectionCloseType::NoFlush); dispatcher_->exit(); })); @@ -639,6 +636,153 @@ TEST_P(ConnectionImplTest, SocketOptionsFailureTest) { dispatcher_->run(Event::Dispatcher::RunType::Block); } +// Test that connection is AbortReset closed during callback. 
+TEST_P(ConnectionImplTest, ClientAbortResetDuringCallback) { + Network::ClientConnectionPtr upstream_connection_; + StrictMock upstream_callbacks_; + setUpBasicConnection(); + + Buffer::OwnedImpl buffer("hello world"); + client_connection_->write(buffer, false); + client_connection_->connect(); + + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::Connected)) + .WillOnce(InvokeWithoutArgs( + [&]() -> void { client_connection_->close(ConnectionCloseType::AbortReset); })); + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(client_connection_->detectedCloseType(), DetectedCloseType::LocalReset); + })); + + read_filter_ = std::make_shared>(); + + EXPECT_CALL(listener_callbacks_, onAccept_(_)) + .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { + server_connection_ = dispatcher_->createServerConnection( + std::move(socket), Network::Test::createRawBufferSocket(), stream_info_); + server_connection_->addConnectionCallbacks(server_callbacks_); + server_connection_->addReadFilter(read_filter_); + + upstream_connection_ = dispatcher_->createClientConnection( + socket_->connectionInfoProvider().localAddress(), source_address_, + Network::Test::createRawBufferSocket(), server_connection_->socketOptions(), nullptr); + upstream_connection_->addConnectionCallbacks(upstream_callbacks_); + })); + EXPECT_CALL(listener_callbacks_, recordConnectionsAcceptedOnSocketEvent(_)); + + EXPECT_CALL(upstream_callbacks_, onEvent(ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(upstream_connection_->detectedCloseType(), DetectedCloseType::LocalReset); + })); + + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(server_connection_->detectedCloseType(), DetectedCloseType::RemoteReset); + upstream_connection_->close(ConnectionCloseType::AbortReset); + dispatcher_->exit(); + })); + + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +// Test the connection is AbortReset closed and RST is detected for +// normal close event chain. 
+TEST_P(ConnectionImplTest, ClientAbortResetAfterCallback) { + Network::ClientConnectionPtr upstream_connection_; + StrictMock upstream_callbacks_; + + setUpBasicConnection(); + + Buffer::OwnedImpl buffer("hello world"); + client_connection_->write(buffer, false); + client_connection_->connect(); + + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::Connected)) + .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); })); + ; + + read_filter_ = std::make_shared>(); + + EXPECT_CALL(listener_callbacks_, onAccept_(_)) + .WillOnce(Invoke([&](Network::ConnectionSocketPtr& socket) -> void { + server_connection_ = dispatcher_->createServerConnection( + std::move(socket), Network::Test::createRawBufferSocket(), stream_info_); + server_connection_->addConnectionCallbacks(server_callbacks_); + server_connection_->addReadFilter(read_filter_); + + upstream_connection_ = dispatcher_->createClientConnection( + socket_->connectionInfoProvider().localAddress(), source_address_, + Network::Test::createRawBufferSocket(), server_connection_->socketOptions(), nullptr); + upstream_connection_->addConnectionCallbacks(upstream_callbacks_); + })); + + EXPECT_CALL(listener_callbacks_, recordConnectionsAcceptedOnSocketEvent(_)); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(client_connection_->detectedCloseType(), DetectedCloseType::LocalReset); + })); + client_connection_->close(ConnectionCloseType::AbortReset); + + EXPECT_CALL(upstream_callbacks_, onEvent(ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(upstream_connection_->detectedCloseType(), DetectedCloseType::LocalReset); + })); + + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(server_connection_->detectedCloseType(), DetectedCloseType::RemoteReset); + upstream_connection_->close(ConnectionCloseType::AbortReset); + dispatcher_->exit(); + })); + + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +// Test the server connection is AbortReset closed and RST is detected +// from the client. +TEST_P(ConnectionImplTest, ServerResetClose) { + setUpBasicConnection(); + connect(); + + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(client_connection_->detectedCloseType(), DetectedCloseType::RemoteReset); + dispatcher_->exit(); + })); + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(server_connection_->detectedCloseType(), DetectedCloseType::LocalReset); + })); + + server_connection_->close(ConnectionCloseType::AbortReset); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +// Test the RST close and detection feature is disabled by runtime_guard. 
+TEST_P(ConnectionImplTest, ServerResetCloseRuntimeDisabled) { + TestScopedRuntime scoped_runtime; + scoped_runtime.mergeValues( + {{"envoy.reloadable_features.detect_and_raise_rst_tcp_connection", "false"}}); + setUpBasicConnection(); + connect(); + + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(client_connection_->detectedCloseType(), DetectedCloseType::Normal); + dispatcher_->exit(); + })); + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(server_connection_->detectedCloseType(), DetectedCloseType::Normal); + })); + + // Originally it should close the connection by RST. + server_connection_->close(ConnectionCloseType::AbortReset); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + struct MockConnectionStats { Connection::ConnectionStats toBufferStats() { return {rx_total_, rx_current_, tx_total_, @@ -739,7 +883,7 @@ TEST_P(ConnectionImplTest, ConnectionStats) { })); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); })); + .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); })); Buffer::OwnedImpl data("1234"); client_connection_->write(data, false); @@ -963,6 +1107,84 @@ TEST_P(ConnectionImplTest, CloseOnReadDisableWithoutCloseDetection) { dispatcher_->run(Event::Dispatcher::RunType::Block); } +// Test normal RST close without readDisable. +TEST_P(ConnectionImplTest, RstCloseOnNotReadDisabledConnection) { + setUpBasicConnection(); + connect(); + + // Connection is not readDisabled, and detect_early_close_ is true. + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(client_connection_->detectedCloseType(), DetectedCloseType::RemoteReset); + dispatcher_->exit(); + })); + + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(server_connection_->detectedCloseType(), DetectedCloseType::LocalReset); + })); + server_connection_->close(ConnectionCloseType::AbortReset); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +// Test normal RST close with readDisable=true. +TEST_P(ConnectionImplTest, RstCloseOnReadDisabledConnectionThenWrite) { + setUpBasicConnection(); + connect(); + + // Connection is readDisabled, and detect_early_close_ is true. + EXPECT_EQ(Connection::ReadDisableStatus::TransitionedToReadDisabled, + client_connection_->readDisable(true)); + + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)).Times(0); + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(server_connection_->detectedCloseType(), DetectedCloseType::LocalReset); + })); + server_connection_->close(ConnectionCloseType::AbortReset); + + // The reset error is triggered by write event. + // write error: Connection reset by peer, code: 9 + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(client_connection_->detectedCloseType(), DetectedCloseType::RemoteReset); + dispatcher_->exit(); + })); + Buffer::OwnedImpl buffer("data"); + client_connection_->write(buffer, false); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +// Test reset close with readDisable=true. 
detect_early_close_ will not +// impact the read and write behaviour for RST. +TEST_P(ConnectionImplTest, RstCloseOnReadEarlyCloseDisabledThenWrite) { + setUpBasicConnection(); + connect(); + + // Connection is readDisabled, and detect_early_close_ is false. + client_connection_->detectEarlyCloseWhenReadDisabled(false); + EXPECT_EQ(Connection::ReadDisableStatus::TransitionedToReadDisabled, + client_connection_->readDisable(true)); + + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)).Times(0); + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(server_connection_->detectedCloseType(), DetectedCloseType::LocalReset); + })); + server_connection_->close(ConnectionCloseType::AbortReset); + + // The reset error is triggered by write event. + // write error: Connection reset by peer, code: 9 + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(client_connection_->detectedCloseType(), DetectedCloseType::RemoteReset); + dispatcher_->exit(); + })); + Buffer::OwnedImpl buffer("data"); + client_connection_->write(buffer, false); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + // Test that connection half-close is sent and received properly. TEST_P(ConnectionImplTest, HalfClose) { setUpBasicConnection(); @@ -1003,6 +1225,182 @@ TEST_P(ConnectionImplTest, HalfClose) { dispatcher_->run(Event::Dispatcher::RunType::Block); } +// Test that connection is immediately closed when RST is detected even +// half-close is enabled +TEST_P(ConnectionImplTest, HalfCloseResetClose) { + setUpBasicConnection(); + connect(); + + server_connection_->enableHalfClose(true); + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(server_connection_->detectedCloseType(), DetectedCloseType::RemoteReset); + dispatcher_->exit(); + })); + EXPECT_CALL(*read_filter_, onData(_, _)).Times(0); + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(client_connection_->detectedCloseType(), DetectedCloseType::LocalReset); + dispatcher_->exit(); + })); + client_connection_->close(ConnectionCloseType::AbortReset); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +// Test that no remote close event will be propagated back to peer, when a connection is +// half-closed and then the connection is normally closed. This is the current behavior. +TEST_P(ConnectionImplTest, HalfCloseThenNormallClose) { + setUpBasicConnection(); + connect(); + + std::shared_ptr client_read_filter(new NiceMock()); + server_connection_->enableHalfClose(true); + client_connection_->enableHalfClose(true); + client_connection_->addReadFilter(client_read_filter); + + EXPECT_CALL(*read_filter_, onData(_, true)).WillOnce(InvokeWithoutArgs([&]() -> FilterStatus { + dispatcher_->exit(); + return FilterStatus::StopIteration; + })); + + Buffer::OwnedImpl empty_buffer; + // Client is half closed at first. 
+ client_connection_->write(empty_buffer, true); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + Buffer::OwnedImpl buffer("data"); + server_connection_->write(buffer, false); + EXPECT_CALL(*client_read_filter, onData(BufferStringEqual("data"), false)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> FilterStatus { + buffer.drain(buffer.length()); + dispatcher_->exit(); + return FilterStatus::StopIteration; + })); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); })); + + // After the half closed from one way, no event will be propagate back to server connection. + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose)).Times(0); + // Then client is going to normally close the connection. + client_connection_->close(ConnectionCloseType::NoFlush); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); })); + server_connection_->write(empty_buffer, true); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +// Test that no remote close event will be propagated back to peer, when a connection is +// half-closed and then the connection is RST closed. This is same as other close type. +TEST_P(ConnectionImplTest, HalfCloseThenResetClose) { + setUpBasicConnection(); + connect(); + + std::shared_ptr client_read_filter(new NiceMock()); + server_connection_->enableHalfClose(true); + client_connection_->enableHalfClose(true); + client_connection_->addReadFilter(client_read_filter); + + EXPECT_CALL(*read_filter_, onData(_, true)).WillOnce(InvokeWithoutArgs([&]() -> FilterStatus { + dispatcher_->exit(); + return FilterStatus::StopIteration; + })); + + Buffer::OwnedImpl empty_buffer; + // Client is half closed at first. + client_connection_->write(empty_buffer, true); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + Buffer::OwnedImpl buffer("data"); + server_connection_->write(buffer, false); + EXPECT_CALL(*client_read_filter, onData(BufferStringEqual("data"), false)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> FilterStatus { + buffer.drain(buffer.length()); + dispatcher_->exit(); + return FilterStatus::StopIteration; + })); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(client_connection_->detectedCloseType(), DetectedCloseType::LocalReset); + dispatcher_->exit(); + })); + + // After the half closed from one way, no event will be propagate back to server connection. + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose)).Times(0); + // Then client is going to reset the connection. + client_connection_->close(ConnectionCloseType::AbortReset); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); })); + server_connection_->write(empty_buffer, true); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} + +#if !defined(WIN32) +// Test that no remote close event will be propagated back to peer, when a connection is +// half-closed and then the connection is RST closed. 
Writing data to the half closed and then +// reset connection will lead to broken pipe error rather than reset error. +// This behavior is different for windows. +TEST_P(ConnectionImplTest, HalfCloseThenResetCloseThenWriteData) { + setUpBasicConnection(); + connect(); + + std::shared_ptr client_read_filter(new NiceMock()); + server_connection_->enableHalfClose(true); + client_connection_->enableHalfClose(true); + client_connection_->addReadFilter(client_read_filter); + + EXPECT_CALL(*read_filter_, onData(_, true)).WillOnce(InvokeWithoutArgs([&]() -> FilterStatus { + dispatcher_->exit(); + return FilterStatus::StopIteration; + })); + + Buffer::OwnedImpl empty_buffer; + // Client is half closed at first. + client_connection_->write(empty_buffer, true); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + Buffer::OwnedImpl buffer("data"); + server_connection_->write(buffer, false); + EXPECT_CALL(*client_read_filter, onData(BufferStringEqual("data"), false)) + .WillOnce(Invoke([&](Buffer::Instance& buffer, bool) -> FilterStatus { + buffer.drain(buffer.length()); + dispatcher_->exit(); + return FilterStatus::StopIteration; + })); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(client_connection_->detectedCloseType(), DetectedCloseType::LocalReset); + dispatcher_->exit(); + })); + + // After the half closed from one way, no event will be propagate back to server connection. + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose)).Times(0); + // Then client is going to reset the connection. + client_connection_->close(ConnectionCloseType::AbortReset); + dispatcher_->run(Event::Dispatcher::RunType::Block); + + // Write error: Broken pipe, code: 12 rather than the reset error. + EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(server_connection_->detectedCloseType(), DetectedCloseType::Normal); + dispatcher_->exit(); + })); + // Tring to write data to the closed connection. 
+ Buffer::OwnedImpl server_buffer("data"); + server_connection_->write(server_buffer, false); + dispatcher_->run(Event::Dispatcher::RunType::Block); +} +#endif + // Test that connections do not detect early close when half-close is enabled TEST_P(ConnectionImplTest, HalfCloseNoEarlyCloseDetection) { setUpBasicConnection(); @@ -1506,7 +1904,7 @@ TEST_P(ConnectionImplTest, ReadOnCloseTest) { })); EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose)) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); })); + .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); })); dispatcher_->run(Event::Dispatcher::RunType::Block); } @@ -1612,7 +2010,7 @@ TEST_P(ConnectionImplTest, FlushWriteAndDelayCloseTest) { EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::LocalClose)); EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::RemoteClose)) .Times(1) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); })); + .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); })); server_connection_->close(ConnectionCloseType::FlushWriteAndDelay); dispatcher_->run(Event::Dispatcher::RunType::Block); } @@ -1653,7 +2051,7 @@ TEST_P(ConnectionImplTest, FlushWriteAndDelayCloseTimerTriggerTest) { EXPECT_CALL(server_callbacks_, onEvent(ConnectionEvent::LocalClose)); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)) .Times(1) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); })); + .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); })); dispatcher_->run(Event::Dispatcher::RunType::Block); } @@ -1697,7 +2095,7 @@ TEST_P(ConnectionImplTest, FlushWriteAfterFlushWriteAndDelayWithPendingWrite) { })); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)) .Times(1) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); })); + .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); })); dispatcher_->run(Event::Dispatcher::RunType::Block); } @@ -1736,7 +2134,7 @@ TEST_P(ConnectionImplTest, FlushWriteAfterFlushWriteAndDelayWithoutPendingWrite) EXPECT_CALL(stats.delayed_close_timeouts_, inc()).Times(0); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)) .Times(1) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); })); + .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); })); dispatcher_->run(Event::Dispatcher::RunType::Block); } @@ -2958,6 +3356,19 @@ TEST_F(PostCloseConnectionImplTest, CloseAbort) { connection_->close(ConnectionCloseType::Abort); } +// Test that close(ConnectionCloseType::AbortReset) won't write and flush pending data. +TEST_F(PostCloseConnectionImplTest, AbortReset) { + InSequence s; + initialize(); + writeSomeData(); + + // Connection abort. We have data written above in writeSomeData(), + // it won't be written and flushed due to ``ConnectionCloseType::AbortReset``. 
+ EXPECT_CALL(*transport_socket_, doWrite(_, true)).Times(0); + EXPECT_CALL(*transport_socket_, closeSocket(_)); + connection_->close(ConnectionCloseType::AbortReset); +} + class ReadBufferLimitTest : public ConnectionImplTest { public: void readBufferLimitTest(uint32_t read_buffer_limit, uint32_t expected_chunk_size) { @@ -3003,11 +3414,11 @@ class ReadBufferLimitTest : public ConnectionImplTest { })); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::Connected)) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { dispatcher_->exit(); })); + .WillOnce(InvokeWithoutArgs([&]() -> void { dispatcher_->exit(); })); dispatcher_->run(Event::Dispatcher::RunType::Block); EXPECT_CALL(client_callbacks_, onEvent(ConnectionEvent::RemoteClose)) - .WillOnce(Invoke([&](Network::ConnectionEvent) -> void { + .WillOnce(InvokeWithoutArgs([&]() -> void { EXPECT_EQ(buffer_size, filter_seen); dispatcher_->exit(); })); diff --git a/test/common/network/socket_option_factory_test.cc b/test/common/network/socket_option_factory_test.cc index e89c89514216..993dd98fdc25 100644 --- a/test/common/network/socket_option_factory_test.cc +++ b/test/common/network/socket_option_factory_test.cc @@ -178,6 +178,21 @@ TEST_F(SocketOptionFactoryTest, TestBuildLiteralOptions) { EXPECT_EQ(value_bstr, option_details->value_); } +TEST_F(SocketOptionFactoryTest, TestBuildZeroSoLingerOptions) { + struct linger expected_linger; + expected_linger.l_onoff = 1; + expected_linger.l_linger = 0; + absl::string_view linger_bstr{reinterpret_cast(&expected_linger), + sizeof(struct linger)}; + auto socket_options = SocketOptionFactory::buildZeroSoLingerOptions(); + auto option_details = socket_options->at(0)->getOptionDetails( + socket_mock_, envoy::config::core::v3::SocketOption::STATE_LISTENING); + EXPECT_TRUE(option_details.has_value()); + EXPECT_EQ(SOL_SOCKET, option_details->name_.level()); + EXPECT_EQ(SO_LINGER, option_details->name_.option()); + EXPECT_EQ(linger_bstr, option_details->value_); +} + } // namespace } // namespace Network } // namespace Envoy diff --git a/test/common/tcp/async_tcp_client_impl_test.cc b/test/common/tcp/async_tcp_client_impl_test.cc index 26298161919a..57f53cb3f3b8 100644 --- a/test/common/tcp/async_tcp_client_impl_test.cc +++ b/test/common/tcp/async_tcp_client_impl_test.cc @@ -11,6 +11,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::InvokeWithoutArgs; using testing::NiceMock; using testing::Return; @@ -19,7 +20,9 @@ namespace Tcp { class AsyncTcpClientImplTest : public Event::TestUsingSimulatedTime, public testing::Test { public: - AsyncTcpClientImplTest() { + AsyncTcpClientImplTest() {} + + void setUpClient() { cluster_manager_.initializeClusters({"fake_cluster"}, {}); cluster_manager_.initializeThreadLocalClusters({"fake_cluster"}); connect_timer_ = new NiceMock(&dispatcher_); @@ -61,6 +64,7 @@ class AsyncTcpClientImplTest : public Event::TestUsingSimulatedTime, public test }; TEST_F(AsyncTcpClientImplTest, BasicWrite) { + setUpClient(); expectCreateConnection(); EXPECT_CALL(*connection_, write(BufferStringEqual("test data"), _)); @@ -73,7 +77,23 @@ TEST_F(AsyncTcpClientImplTest, BasicWrite) { ASSERT_FALSE(client_->connected()); } +TEST_F(AsyncTcpClientImplTest, RstClose) { + setUpClient(); + expectCreateConnection(); + + EXPECT_CALL(callbacks_, onEvent(Network::ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(client_->detectedCloseType(), Network::DetectedCloseType::LocalReset); + })); + EXPECT_CALL(dispatcher_, 
deferredDelete_(_)).WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(client_->detectedCloseType(), Network::DetectedCloseType::LocalReset); + })); + client_->close(Network::ConnectionCloseType::AbortReset); + ASSERT_FALSE(client_->connected()); +} + TEST_F(AsyncTcpClientImplTest, WaterMark) { + setUpClient(); expectCreateConnection(); EXPECT_CALL(callbacks_, onAboveWriteBufferHighWatermark()); @@ -88,6 +108,7 @@ TEST_F(AsyncTcpClientImplTest, WaterMark) { } TEST_F(AsyncTcpClientImplTest, NoAvaiableConnection) { + setUpClient(); Upstream::MockHost::MockCreateConnectionData conn_info; conn_info.connection_ = nullptr; EXPECT_CALL(cluster_manager_.thread_local_cluster_, tcpConn_(_)).WillOnce(Return(conn_info)); @@ -95,6 +116,7 @@ TEST_F(AsyncTcpClientImplTest, NoAvaiableConnection) { } TEST_F(AsyncTcpClientImplTest, TestReadDisable) { + setUpClient(); expectCreateConnection(); EXPECT_CALL(*connection_, readDisable(true)); client_->readDisable(true); @@ -109,15 +131,32 @@ TEST_F(AsyncTcpClientImplTest, TestReadDisable) { } TEST_F(AsyncTcpClientImplTest, TestCloseType) { + setUpClient(); expectCreateConnection(); - EXPECT_CALL(callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); + EXPECT_CALL(callbacks_, onEvent(Network::ConnectionEvent::LocalClose)) + .WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(client_->detectedCloseType(), Network::DetectedCloseType::Normal); + })); EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::Abort)); - EXPECT_CALL(dispatcher_, deferredDelete_(_)); + EXPECT_CALL(dispatcher_, deferredDelete_(_)).WillOnce(InvokeWithoutArgs([&]() -> void { + EXPECT_EQ(client_->detectedCloseType(), Network::DetectedCloseType::Normal); + })); client_->close(Network::ConnectionCloseType::Abort); ASSERT_FALSE(client_->connected()); } +TEST_F(AsyncTcpClientImplTest, TestGetDispatcher) { + setUpClient(); + expectCreateConnection(); + EXPECT_CALL(callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); + EXPECT_CALL(dispatcher_, deferredDelete_(_)); + EXPECT_EQ(&dispatcher_, &client_->dispatcher()); + client_->close(Network::ConnectionCloseType::NoFlush); + ASSERT_FALSE(client_->connected()); +} + TEST_F(AsyncTcpClientImplTest, TestTimingStats) { + setUpClient(); expectCreateConnection(); EXPECT_CALL(callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); EXPECT_CALL( @@ -128,6 +167,7 @@ TEST_F(AsyncTcpClientImplTest, TestTimingStats) { } TEST_F(AsyncTcpClientImplTest, TestCounterStats) { + setUpClient(); expectCreateConnection(); EXPECT_CALL(callbacks_, onEvent(Network::ConnectionEvent::LocalClose)); Buffer::OwnedImpl buff("test data"); @@ -145,6 +185,7 @@ TEST_F(AsyncTcpClientImplTest, TestCounterStats) { } TEST_F(AsyncTcpClientImplTest, TestFailStats) { + setUpClient(); expectCreateConnection(false); connect_timer_->invokeCallback(); EXPECT_EQ(1UL, cluster_manager_.thread_local_cluster_.cluster_.info_->traffic_stats_ @@ -154,6 +195,7 @@ TEST_F(AsyncTcpClientImplTest, TestFailStats) { } TEST_F(AsyncTcpClientImplTest, TestCxDestroyRemoteClose) { + setUpClient(); expectCreateConnection(); EXPECT_CALL(callbacks_, onEvent(Network::ConnectionEvent::RemoteClose)); connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); @@ -165,6 +207,7 @@ TEST_F(AsyncTcpClientImplTest, TestCxDestroyRemoteClose) { } TEST_F(AsyncTcpClientImplTest, TestActiveCx) { + setUpClient(); expectCreateConnection(); EXPECT_EQ(1UL, cluster_manager_.thread_local_cluster_.cluster_.info_->traffic_stats_ ->upstream_cx_active_.value()); diff --git 
a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc
index 07eeaf2591ab..38efd6256d4c 100644
--- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc
+++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc
@@ -858,6 +858,13 @@ TEST_P(ProxyProtocolTest, V2ParseExtensionsRecvError) {
       .WillRepeatedly(Invoke([this](os_fd_t sockfd, Api::EnvoyTcpInfo* tcp_info) {
         return os_sys_calls_actual_.socketTcpInfo(sockfd, tcp_info);
       }));
+  EXPECT_CALL(os_sys_calls, setsockopt_(_, SOL_SOCKET, SO_LINGER, _, _))
+      .Times(AnyNumber())
+      .WillRepeatedly(Invoke([this](os_fd_t sockfd, int level, int optname, const void* optval,
+                                    socklen_t optlen) -> int {
+        return os_sys_calls_actual_.setsockopt(sockfd, level, optname, optval, optlen)
+            .return_value_;
+      }));
   connect(false);
   write(buffer, sizeof(buffer));
   dispatcher_->run(Event::Dispatcher::RunType::NonBlock);
@@ -1069,6 +1076,13 @@ TEST_P(ProxyProtocolTest, V2Fragmented4Error) {
       .WillRepeatedly(Invoke([this](os_fd_t sockfd, Api::EnvoyTcpInfo* tcp_info) {
         return os_sys_calls_actual_.socketTcpInfo(sockfd, tcp_info);
       }));
+  EXPECT_CALL(os_sys_calls, setsockopt_(_, SOL_SOCKET, SO_LINGER, _, _))
+      .Times(AnyNumber())
+      .WillRepeatedly(Invoke([this](os_fd_t sockfd, int level, int optname, const void* optval,
+                                    socklen_t optlen) -> int {
+        return os_sys_calls_actual_.setsockopt(sockfd, level, optname, optval, optlen)
+            .return_value_;
+      }));
   connect(false);
   write(buffer, 17);
   dispatcher_->run(Event::Dispatcher::RunType::NonBlock);
@@ -1172,6 +1186,13 @@ TEST_P(ProxyProtocolTest, V2Fragmented5Error) {
       .WillRepeatedly(Invoke([this](os_fd_t sockfd, Api::EnvoyTcpInfo* tcp_info) {
         return os_sys_calls_actual_.socketTcpInfo(sockfd, tcp_info);
       }));
+  EXPECT_CALL(os_sys_calls, setsockopt_(_, SOL_SOCKET, SO_LINGER, _, _))
+      .Times(AnyNumber())
+      .WillRepeatedly(Invoke([this](os_fd_t sockfd, int level, int optname, const void* optval,
+                                    socklen_t optlen) -> int {
+        return os_sys_calls_actual_.setsockopt(sockfd, level, optname, optval, optlen)
+            .return_value_;
+      }));
   connect(false);
   write(buffer, 10);
   dispatcher_->run(Event::Dispatcher::RunType::NonBlock);
@@ -1922,6 +1943,13 @@ TEST_P(ProxyProtocolTest, DrainError) {
      .WillRepeatedly(Invoke([this](os_fd_t sockfd, Api::EnvoyTcpInfo* tcp_info) {
        return os_sys_calls_actual_.socketTcpInfo(sockfd, tcp_info);
      }));
+  EXPECT_CALL(os_sys_calls, setsockopt_(_, SOL_SOCKET, SO_LINGER, _, _))
+      .Times(AnyNumber())
+      .WillRepeatedly(Invoke([this](os_fd_t sockfd, int level, int optname, const void* optval,
+                                    socklen_t optlen) -> int {
+        return os_sys_calls_actual_.setsockopt(sockfd, level, optname, optval, optlen)
+            .return_value_;
+      }));
   connect(false);
   write("PROXY TCP4 1.2.3.4 253.253.253.253 65535 1234\r\nmore data");

diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc
index 2804061fefed..deb3d515b250 100644
--- a/test/integration/fake_upstream.cc
+++ b/test/integration/fake_upstream.cc
@@ -425,6 +425,16 @@ AssertionResult FakeConnectionBase::close(std::chrono::milliseconds timeout) {
                                           timeout);
 }

+AssertionResult FakeConnectionBase::close(Network::ConnectionCloseType close_type,
+                                          std::chrono::milliseconds timeout) {
+  ENVOY_LOG(trace, "FakeConnectionBase close type={}", static_cast<int>(close_type));
+  if (!shared_connection_.connected()) {
+    return AssertionSuccess();
+  }
+  return shared_connection_.executeOnDispatcher(
+      [&close_type](Network::Connection& connection) {
connection.close(close_type); }, timeout); +} + AssertionResult FakeConnectionBase::readDisable(bool disable, std::chrono::milliseconds timeout) { return shared_connection_.executeOnDispatcher( [disable](Network::Connection& connection) { connection.readDisable(disable); }, timeout); @@ -520,6 +530,24 @@ AssertionResult FakeConnectionBase::waitForDisconnect(milliseconds timeout) { return AssertionSuccess(); } +AssertionResult FakeConnectionBase::waitForRstDisconnect(std::chrono::milliseconds timeout) { + ENVOY_LOG(trace, "FakeConnectionBase waiting for RST disconnect"); + absl::MutexLock lock(&lock_); + const auto reached = [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(lock_) { + return shared_connection_.rstDisconnected(); + }; + + if (!time_system_.waitFor(lock_, absl::Condition(&reached), timeout)) { + if (timeout == TestUtility::DefaultTimeout) { + ADD_FAILURE() + << "Please don't waitForRstDisconnect with a 5s timeout if failure is expected\n"; + } + return AssertionFailure() << "Timed out waiting for RST disconnect."; + } + ENVOY_LOG(trace, "FakeConnectionBase done waiting for RST disconnect"); + return AssertionSuccess(); +} + AssertionResult FakeConnectionBase::waitForHalfClose(milliseconds timeout) { absl::MutexLock lock(&lock_); if (!time_system_.waitFor(lock_, absl::Condition(&half_closed_), timeout)) { diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index 3a01e944d51b..3f0e5bc9ae26 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -300,6 +300,10 @@ class SharedConnectionWrapper : public Network::ConnectionCallbacks, absl::MutexLock lock(&lock_); if (event == Network::ConnectionEvent::RemoteClose || event == Network::ConnectionEvent::LocalClose) { + if (connection_.detectedCloseType() == Network::DetectedCloseType::RemoteReset || + connection_.detectedCloseType() == Network::DetectedCloseType::LocalReset) { + rst_disconnected_ = true; + } disconnected_ = true; } } @@ -321,6 +325,11 @@ class SharedConnectionWrapper : public Network::ConnectionCallbacks, return !disconnected_; } + bool rstDisconnected() { + lock_.AssertReaderHeld(); + return rst_disconnected_; + } + // This provides direct access to the underlying connection, but only to const methods. // Stateful connection related methods should happen on the connection's dispatcher via // executeOnDispatcher. 
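The waitForRstDisconnect() helper added above follows the same guarded-flag idiom as the existing waitFor* helpers: the connection-event callback records a flag under the wrapper's mutex, and the waiting thread blocks on an absl::Condition until the flag flips or the timeout fires. As a rough standalone sketch of that idiom (a hypothetical RstWaiter class, using plain absl::Mutex::AwaitWithTimeout rather than the simulated-time waitFor the test framework uses above):

#include "absl/base/thread_annotations.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"

class RstWaiter {
public:
  // Called from the connection-event thread when an RST close is detected.
  void onRstDetected() {
    absl::MutexLock lock(&mu_);
    rst_disconnected_ = true;
  }

  // Called from the waiting thread; returns false if the timeout expires first.
  bool waitForRst(absl::Duration timeout) {
    absl::MutexLock lock(&mu_);
    const auto reached = [this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) { return rst_disconnected_; };
    return mu_.AwaitWithTimeout(absl::Condition(&reached), timeout);
  }

private:
  absl::Mutex mu_;
  bool rst_disconnected_ ABSL_GUARDED_BY(mu_){false};
};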
@@ -383,6 +392,7 @@ class SharedConnectionWrapper : public Network::ConnectionCallbacks,
   absl::Mutex lock_;
   bool parented_ ABSL_GUARDED_BY(lock_){};
   bool disconnected_ ABSL_GUARDED_BY(lock_){};
+  bool rst_disconnected_ ABSL_GUARDED_BY(lock_){};
 };

 using SharedConnectionWrapperPtr = std::unique_ptr<SharedConnectionWrapper>;
@@ -401,6 +411,10 @@ class FakeConnectionBase : public Logger::Loggable<Logger::Id::testing> {
   ABSL_MUST_USE_RESULT
   testing::AssertionResult close(std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);

+  ABSL_MUST_USE_RESULT
+  testing::AssertionResult close(Network::ConnectionCloseType close_type,
+                                 std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);
+
   ABSL_MUST_USE_RESULT
   testing::AssertionResult readDisable(bool disable,
                                        std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);
@@ -409,6 +423,10 @@ class FakeConnectionBase : public Logger::Loggable<Logger::Id::testing> {
   testing::AssertionResult
   waitForDisconnect(std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);

+  ABSL_MUST_USE_RESULT
+  testing::AssertionResult
+  waitForRstDisconnect(std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);
+
   ABSL_MUST_USE_RESULT
   testing::AssertionResult
   waitForHalfClose(std::chrono::milliseconds timeout = TestUtility::DefaultTimeout);
diff --git a/test/integration/filters/test_network_async_tcp_filter.cc b/test/integration/filters/test_network_async_tcp_filter.cc
index a9123fdeab7e..610a8ffcf468 100644
--- a/test/integration/filters/test_network_async_tcp_filter.cc
+++ b/test/integration/filters/test_network_async_tcp_filter.cc
@@ -51,7 +51,7 @@
   Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override {
     stats_.on_data_.inc();
-    ENVOY_LOG_MISC(debug, "Downstream onData: {}, length: {} back to down", data.toString(),
+    ENVOY_LOG_MISC(debug, "Downstream onData: {}, length: {} sending to upstream", data.toString(),
                    data.length());

     client_->write(data, end_stream);
@@ -71,16 +71,45 @@ class TestNetworkAsyncTcpFilter : public Network::ReadFilter {
   void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override {
     read_callbacks_ = &callbacks;
+    downstream_callbacks_ = std::make_unique<DownstreamCallbacks>(*this);
     read_callbacks_->connection().enableHalfClose(true);
+    read_callbacks_->connection().addConnectionCallbacks(*downstream_callbacks_);
   }

 private:
+  struct DownstreamCallbacks : public Envoy::Network::ConnectionCallbacks {
+    explicit DownstreamCallbacks(TestNetworkAsyncTcpFilter& parent) : parent_(parent) {}
+    ~DownstreamCallbacks() override = default;
+    void onEvent(Network::ConnectionEvent event) override {
+      ENVOY_LOG_MISC(debug, "tcp client test filter downstream callback onEvent: {}",
+                     static_cast<int>(event));
+      if (event != Network::ConnectionEvent::RemoteClose) {
+        return;
+      }
+
+      ENVOY_LOG_MISC(debug, "tcp client test filter downstream detected close type: {}.",
+                     static_cast<int>(parent_.read_callbacks_->connection().detectedCloseType()));
+
+      if (parent_.read_callbacks_->connection().detectedCloseType() ==
+          Network::DetectedCloseType::RemoteReset) {
+        parent_.client_->close(Network::ConnectionCloseType::AbortReset);
+      } else {
+        parent_.client_->close(Network::ConnectionCloseType::NoFlush);
+      }
+    };
+
+    void onAboveWriteBufferHighWatermark() override{};
+    void onBelowWriteBufferLowWatermark() override{};
+
+    TestNetworkAsyncTcpFilter& parent_;
+  };
+
   struct RequestAsyncCallbacks : public Tcp::AsyncTcpClientCallbacks {
     RequestAsyncCallbacks(TestNetworkAsyncTcpFilter& parent) : parent_(parent) {}

     void
onData(Buffer::Instance& data, bool end_stream) override {
       parent_.stats_.on_receive_async_data_.inc();
-      ENVOY_LOG_MISC(debug, "Async onData from peer: {}, length: {} back to down", data.toString(),
+      ENVOY_LOG_MISC(debug, "async onData from peer: {}, length: {} back to downstream", data.toString(),
                      data.length());
       parent_.read_callbacks_->connection().write(data, end_stream);
       if (end_stream) {
@@ -89,7 +118,22 @@ class TestNetworkAsyncTcpFilter : public Network::ReadFilter {
     }

     // Network::ConnectionCallbacks
-    void onEvent(Network::ConnectionEvent) override{};
+    void onEvent(Network::ConnectionEvent event) override {
+      ENVOY_LOG_MISC(debug, "tcp client test filter upstream callback onEvent: {}",
+                     static_cast<int>(event));
+      if (event != Network::ConnectionEvent::RemoteClose) {
+        return;
+      }
+
+      ENVOY_LOG_MISC(debug, "tcp client test filter upstream detected close type: {}.",
+                     static_cast<int>(parent_.client_->detectedCloseType()));
+
+      if (parent_.client_->detectedCloseType() == Network::DetectedCloseType::RemoteReset) {
+        parent_.read_callbacks_->connection().close(Network::ConnectionCloseType::AbortReset);
+      } else {
+        parent_.read_callbacks_->connection().close(Network::ConnectionCloseType::NoFlush);
+      }
+    };

     void onAboveWriteBufferHighWatermark() override {
       parent_.read_callbacks_->connection().readDisable(true);
@@ -110,6 +154,7 @@ class TestNetworkAsyncTcpFilter : public Network::ReadFilter {
   Tcp::AsyncTcpClientPtr client_;
   absl::string_view cluster_name_;
   std::unique_ptr<RequestAsyncCallbacks> request_callbacks_;
+  std::unique_ptr<DownstreamCallbacks> downstream_callbacks_;
   Upstream::ClusterManager& cluster_manager_;
   Tcp::AsyncTcpClientOptionsConstSharedPtr options_;
 };
diff --git a/test/integration/integration_tcp_client.cc b/test/integration/integration_tcp_client.cc
index e564ad68057f..719727d6cd62 100644
--- a/test/integration/integration_tcp_client.cc
+++ b/test/integration/integration_tcp_client.cc
@@ -74,6 +74,10 @@ IntegrationTcpClient::IntegrationTcpClient(

 void IntegrationTcpClient::close() { connection_->close(Network::ConnectionCloseType::NoFlush); }

+void IntegrationTcpClient::close(Network::ConnectionCloseType close_type) {
+  connection_->close(close_type);
+}
+
 void IntegrationTcpClient::waitForData(const std::string& data, bool exact_match) {
   auto found = payload_reader_->data().find(data);
   if (found == 0 || (!exact_match && found != std::string::npos)) {
diff --git a/test/integration/integration_tcp_client.h b/test/integration/integration_tcp_client.h
index f29561d300b9..a79c8b32b445 100644
--- a/test/integration/integration_tcp_client.h
+++ b/test/integration/integration_tcp_client.h
@@ -33,6 +33,7 @@ class IntegrationTcpClient {
                  absl::string_view destination_address = "");

   void close();
+  void close(Network::ConnectionCloseType close_type);
   void waitForData(const std::string& data, bool exact_match = true);
   // wait for at least `length` bytes to be received
   ABSL_MUST_USE_RESULT AssertionResult
diff --git a/test/integration/tcp_async_client_integration_test.cc b/test/integration/tcp_async_client_integration_test.cc
index 4384589fbbd1..8550b9b601f0 100644
--- a/test/integration/tcp_async_client_integration_test.cc
+++ b/test/integration/tcp_async_client_integration_test.cc
@@ -111,5 +111,111 @@ TEST_P(TcpAsyncClientIntegrationTest, MultipleResponseFrames) {
   tcp_client->close();
 }

+// Test that an RST close from downstream is detected and that the upstream is then closed by RST.
+TEST_P(TcpAsyncClientIntegrationTest, TestClientCloseRST) { + enableHalfClose(true); + initialize(); + + std::string request("request"); + std::string response("response"); + + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); + test_server_->waitForCounterEq("test_network_async_tcp_filter.on_new_connection", 1); + test_server_->waitForGaugeEq("cluster.cluster_0.upstream_cx_active", 1); + test_server_->waitForNumHistogramSamplesGe("cluster.cluster_0.upstream_cx_connect_ms", 1); + ASSERT_TRUE(tcp_client->write(request, false)); + test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_tx_bytes_total", request.size()); + FakeRawConnectionPtr fake_upstream_connection; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); + ASSERT_TRUE(fake_upstream_connection->waitForData(request.size())); + ASSERT_TRUE(fake_upstream_connection->write(response, false)); + test_server_->waitForCounterGe("test_network_async_tcp_filter.on_receive_async_data", 1); + test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_rx_bytes_total", response.size()); + ASSERT_TRUE(tcp_client->waitForData(response.size())); + + tcp_client->close(Network::ConnectionCloseType::AbortReset); + + test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_destroy_local", 1); + test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_destroy", 1); + test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_total", 1); + test_server_->waitForGaugeEq("cluster.cluster_0.upstream_cx_active", 0); + test_server_->waitForNumHistogramSamplesGe("cluster.cluster_0.upstream_cx_length_ms", 1); + ASSERT_TRUE(fake_upstream_connection->waitForRstDisconnect()); +} + +// Test if RST close can be detected from upstream. +TEST_P(TcpAsyncClientIntegrationTest, TestUpstreamCloseRST) { + enableHalfClose(true); + initialize(); + + std::string request("request"); + std::string response("response"); + + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); + test_server_->waitForCounterEq("test_network_async_tcp_filter.on_new_connection", 1); + test_server_->waitForGaugeEq("cluster.cluster_0.upstream_cx_active", 1); + test_server_->waitForNumHistogramSamplesGe("cluster.cluster_0.upstream_cx_connect_ms", 1); + ASSERT_TRUE(tcp_client->write(request, false)); + test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_tx_bytes_total", request.size()); + FakeRawConnectionPtr fake_upstream_connection; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); + ASSERT_TRUE(fake_upstream_connection->waitForData(request.size())); + ASSERT_TRUE(fake_upstream_connection->write(response, false)); + test_server_->waitForCounterGe("test_network_async_tcp_filter.on_receive_async_data", 1); + test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_rx_bytes_total", response.size()); + ASSERT_TRUE(tcp_client->waitForData(response.size())); + + ASSERT_TRUE(fake_upstream_connection->close(Network::ConnectionCloseType::AbortReset)); + + test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_destroy_remote", 1); + test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_destroy", 1); + test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_total", 1); + test_server_->waitForGaugeEq("cluster.cluster_0.upstream_cx_active", 0); + test_server_->waitForNumHistogramSamplesGe("cluster.cluster_0.upstream_cx_length_ms", 1); + tcp_client->waitForDisconnect(); +} + +#if !defined(WIN32) +// Test the behaviour when the 
connection is half closed and then reset by
+// the client. The behavior is different on Windows: since RST is only supported on
+// Unix-like systems, the test is disabled for Windows.
+TEST_P(TcpAsyncClientIntegrationTest, TestDownstreamHalfClosedThenRST) {
+  enableHalfClose(true);
+  initialize();
+
+  std::string request("request");
+  std::string response("response");
+
+  IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0"));
+
+  // The downstream connection is half-closed.
+  ASSERT_TRUE(tcp_client->write(request, true));
+  test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_tx_bytes_total", request.size());
+  FakeRawConnectionPtr fake_upstream_connection;
+  ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection));
+  ASSERT_TRUE(fake_upstream_connection->waitForData(request.size()));
+
+  // Then the downstream connection is closed by RST. The listener socket will not try to
+  // read the I/O since it is half closed.
+  tcp_client->close(Network::ConnectionCloseType::AbortReset);
+
+  // When the server tries to write to the downstream, it will get a Broken Pipe error, which
+  // raises a RemoteClose event from the downstream rather than RemoteReset.
+  ASSERT_TRUE(fake_upstream_connection->write(response, false));
+
+  test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_destroy_local", 1);
+  test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_destroy", 1);
+  test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_total", 1);
+  test_server_->waitForGaugeEq("cluster.cluster_0.upstream_cx_active", 0);
+  test_server_->waitForNumHistogramSamplesGe("cluster.cluster_0.upstream_cx_length_ms", 1);
+
+  // As part of the basic half-close process, the connection is already half closed in Envoy,
+  // so a normal close will not trigger a remote close event for the upstream connection.
+  // This matches the normal half-close process without RST detection.
+  ASSERT_TRUE(fake_upstream_connection->write(" ", true));
+  ASSERT_TRUE(fake_upstream_connection->waitForDisconnect());
+}
+#endif
+
 } // namespace
 } // namespace Envoy
diff --git a/test/mocks/network/connection.cc b/test/mocks/network/connection.cc
index 4a7d27f2b108..2e88faa3b697 100644
--- a/test/mocks/network/connection.cc
+++ b/test/mocks/network/connection.cc
@@ -79,8 +79,15 @@ template <class T> static void initializeMockConnection(T& connection) {
       .WillByDefault(Invoke([&connection](Network::Connection::BytesSentCb cb) {
         connection.bytes_sent_callbacks_.emplace_back(cb);
       }));
-  ON_CALL(connection, close(_)).WillByDefault(Invoke([&connection](ConnectionCloseType) -> void {
-    connection.raiseEvent(Network::ConnectionEvent::LocalClose);
+  ON_CALL(connection, close(_))
+      .WillByDefault(Invoke([&connection](ConnectionCloseType type) -> void {
+        if (type == ConnectionCloseType::AbortReset) {
+          connection.detected_close_type_ = DetectedCloseType::LocalReset;
+        }
+        connection.raiseEvent(Network::ConnectionEvent::LocalClose);
+      }));
+  ON_CALL(connection, detectedCloseType()).WillByDefault(Invoke([&connection]() {
+    return connection.detected_close_type_;
   }));
   ON_CALL(connection, close(_, _))
       .WillByDefault(Invoke([&connection](ConnectionCloseType, absl::string_view details) -> void {
diff --git a/test/mocks/network/connection.h b/test/mocks/network/connection.h
index 2bba420f051c..351789613af0 100644
--- a/test/mocks/network/connection.h
+++ b/test/mocks/network/connection.h
@@ -43,6 +43,7 @@ class MockConnectionBase {
   testing::NiceMock<StreamInfo::MockStreamInfo> stream_info_;
   std::string local_close_reason_{"unset_local_close_reason"};
   Connection::State state_{Connection::State::Open};
+  DetectedCloseType detected_close_type_{DetectedCloseType::Normal};
 };

 #define DEFINE_MOCK_CONNECTION_MOCK_METHODS                                                        \
@@ -58,6 +59,7 @@ class MockConnectionBase {
   MOCK_METHOD(bool, isHalfCloseEnabled, (), (const));                                              \
   MOCK_METHOD(void, close, (ConnectionCloseType type));                                            \
   MOCK_METHOD(void, close, (ConnectionCloseType type, absl::string_view details));                 \
+  MOCK_METHOD(DetectedCloseType, detectedCloseType, (), (const));                                  \
   MOCK_METHOD(Event::Dispatcher&, dispatcher, (), (const));                                        \
   MOCK_METHOD(uint64_t, id, (), (const));                                                          \
   MOCK_METHOD(void, hashKey, (std::vector<uint8_t>&), (const));                                    \
@@ -99,6 +101,7 @@ class MockConnection : public Connection, public MockConnectionBase {
 public:
   MockConnection();
   ~MockConnection() override;
+
   DEFINE_MOCK_CONNECTION_MOCK_METHODS;
 };
diff --git a/test/mocks/network/transport_socket.cc b/test/mocks/network/transport_socket.cc
index 669bf60f7e80..9c6a2710a7f7 100644
--- a/test/mocks/network/transport_socket.cc
+++ b/test/mocks/network/transport_socket.cc
@@ -9,6 +9,7 @@

 using testing::_;
 using testing::Invoke;
+using testing::Return;

 namespace Envoy {
 namespace Network {
@@ -19,7 +20,12 @@ MockTransportSocket::MockTransportSocket() {
   ON_CALL(*this, connect(_)).WillByDefault(Invoke([&](Network::ConnectionSocket& socket) {
     return TransportSocket::connect(socket);
   }));
+  ON_CALL(*this, doRead(_))
+      .WillByDefault(Return(IoResult{PostIoAction::KeepOpen, 0, false, absl::nullopt}));
+  ON_CALL(*this, doWrite(_, _))
+      .WillByDefault(Return(IoResult{PostIoAction::KeepOpen, 0, false, absl::nullopt}));
 }
+
 MockTransportSocket::~MockTransportSocket() = default;

 MockTransportSocketFactory::MockTransportSocketFactory() = default;

From be9974a8e71e594c6513baee2c747fdac47e779c Mon Sep 17 00:00:00 2001
From: phlax
Date: Tue, 12 Sep 2023 17:30:52 +0100
Subject: [PATCH 4/5] git/hooks: Prevent pre-push from looping format check (#29580)
Signed-off-by: Ryan Northey
---
 support/hooks/pre-push            | 19 ++++++++++---------
 tools/code_format/check_format.py |  2 +-
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/support/hooks/pre-push b/support/hooks/pre-push
index cd9687dbb651..beeb8c34eb49 100755
--- a/support/hooks/pre-push
+++ b/support/hooks/pre-push
@@ -70,16 +70,17 @@ do
   # `$CLANG_FORMAT` and `$BUILDIFY` are defined, or that the default values it
   # assumes for these variables correspond to real binaries on the system. If
   # either of these things aren't true, the check fails.
-  for i in $(git diff --name-only "$RANGE" --diff-filter=ACMR --ignore-submodules=all 2>&1); do
-    echo -ne "  Checking format for $i - "
-    bazel run //tools/code_format:check_format -- check "$i" || exit 1
-    # TODO(phlax): It seems this is not running in CI anymore and is now finding issues
-    #   in merged PRs. Unify this hook and format checks in CI when the new format tool is rolled
-    #   out.
-    # echo "  Checking spelling for $i"
-    # "$SCRIPT_DIR"/spelling/check_spelling_pedantic.py check "$i" || exit 1
-  done
+  _CHANGES=$(git diff --name-only "$RANGE" --diff-filter=ACMR --ignore-submodules=all 2>&1 | tr '\n' ' ')
+  IFS=' ' read -ra CHANGES <<< "$_CHANGES"
+
+  echo -ne "  Checking format for ${CHANGES[*]} - "
+  bazel run //tools/code_format:check_format -- check "${CHANGES[@]}" || exit 1
+  # TODO(phlax): It seems this is not running in CI anymore and is now finding issues
+  #   in merged PRs. Unify this hook and format checks in CI when the new format tool is rolled
+  #   out.
+  # echo "  Checking spelling for ${CHANGES[*]}"
+  # "$SCRIPT_DIR"/spelling/check_spelling_pedantic.py check "${CHANGES[@]}" || exit 1

   # TODO(mattklein123): Optimally we would be able to do this on a per-file basis.
   "$SCRIPT_DIR"/proto_format/proto_format.sh check || exit 1
diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py
index bd6e1b1be5be..7add4a392217 100755
--- a/tools/code_format/check_format.py
+++ b/tools/code_format/check_format.py
@@ -151,7 +151,7 @@ def excluded_prefixes(self):
             self.config.paths["excluded"] + tuple(self.args.add_excluded_prefixes)
             if self.args.add_excluded_prefixes else self.config.paths["excluded"])

-    @property
+    @cached_property
     def error_messages(self):
         return []

From f01b74666abe13082a154610b7cdcfdfe7842f8c Mon Sep 17 00:00:00 2001
From: alyssawilk
Date: Tue, 12 Sep 2023 12:47:57 -0400
Subject: [PATCH 5/5] matchers: removing switch default (#29554)

In general, for Envoy config enums we don't allow a default: label (which
makes it easy to add new enum values without implementing handling for
them), and we PANIC on completely invalid values. This brings 2 files in
line with that convention.
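For illustration, the convention reads roughly like the sketch below (a hypothetical SpecifierCase enum and handler; PANIC here is a self-contained stand-in for Envoy's real macro). Because there is no default: label, compilers warn (-Wswitch) as soon as a new enum value is added without being handled, while genuinely corrupt values still fall through to the panic:

#include <cstdio>
#include <cstdlib>

// Stand-in for Envoy's PANIC macro, for a self-contained sketch only.
#define PANIC(MSG) do { fprintf(stderr, "panic: %s\n", MSG); abort(); } while (0)

enum class SpecifierCase { kTcp, kUdp, SPECIFIER_NOT_SET };

const char* describe(SpecifierCase c) {
  switch (c) {
  case SpecifierCase::kTcp:
    return "tcp";
  case SpecifierCase::kUdp:
    return "udp";
  case SpecifierCase::SPECIFIER_NOT_SET:
    break; // Fall through to PANIC.
  }
  // Reached only for values outside the enum's declared range.
  PANIC("unexpected specifier case");
}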
Risk Level: low
Testing: n/a (can't test panic)
Docs Changes: n/a
Release Notes: n/a
envoyproxy/envoy-mobile#176

Signed-off-by: Alyssa Wilk
---
 source/common/common/matchers.cc                       | 5 +++--
 source/extensions/stat_sinks/graphite_statsd/config.cc | 2 +-
 source/extensions/stat_sinks/statsd/config.cc          | 2 +-
 test/per_file_coverage.sh                              | 4 ++--
 4 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/source/common/common/matchers.cc b/source/common/common/matchers.cc
index 45c18a463ca6..1026016fdd51 100644
--- a/source/common/common/matchers.cc
+++ b/source/common/common/matchers.cc
@@ -31,9 +31,10 @@ ValueMatcherConstSharedPtr ValueMatcher::create(const envoy::type::matcher::v3::
     return std::make_shared<const PresentMatcher>(v.present_match());
   case envoy::type::matcher::v3::ValueMatcher::MatchPatternCase::kListMatch:
     return std::make_shared<const ListMatcher>(v.list_match());
-  default:
-    throw EnvoyException("Uncaught default");
+  case envoy::type::matcher::v3::ValueMatcher::MatchPatternCase::MATCH_PATTERN_NOT_SET:
+    break; // Fall through to PANIC.
   }
+  PANIC("unexpected");
 }

 bool NullMatcher::match(const ProtobufWkt::Value& value) const {
diff --git a/source/extensions/stat_sinks/graphite_statsd/config.cc b/source/extensions/stat_sinks/graphite_statsd/config.cc
index 34cd1035b40d..7320993e035e 100644
--- a/source/extensions/stat_sinks/graphite_statsd/config.cc
+++ b/source/extensions/stat_sinks/graphite_statsd/config.cc
@@ -40,7 +40,7 @@ GraphiteStatsdSinkFactory::createStatsSink(const Protobuf::Message& config,
       STATSD_SPECIFIER_NOT_SET:
     break;
   }
-  throw EnvoyException("unexpected statsd specifier enum");
+  PANIC("unexpected statsd specifier enum");
 }

 ProtobufTypes::MessagePtr GraphiteStatsdSinkFactory::createEmptyConfigProto() {
diff --git a/source/extensions/stat_sinks/statsd/config.cc b/source/extensions/stat_sinks/statsd/config.cc
index aefe14770725..84670fa1308b 100644
--- a/source/extensions/stat_sinks/statsd/config.cc
+++ b/source/extensions/stat_sinks/statsd/config.cc
@@ -37,7 +37,7 @@ StatsdSinkFactory::createStatsSink(const Protobuf::Message& config,
   case envoy::config::metrics::v3::StatsdSink::StatsdSpecifierCase::STATSD_SPECIFIER_NOT_SET:
     break; // Fall through to PANIC
   }
-  throw EnvoyException("unexpected statsd specifier case num");
+  PANIC("unexpected statsd specifier case num");
 }

 ProtobufTypes::MessagePtr StatsdSinkFactory::createEmptyConfigProto() {
diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh
index 9148e22d89a8..fb5c51271122 100755
--- a/test/per_file_coverage.sh
+++ b/test/per_file_coverage.sh
@@ -45,8 +45,8 @@ declare -a KNOWN_LOW_COVERAGE=(
 "source/extensions/http/cache/simple_http_cache:95.9"
 "source/extensions/rate_limit_descriptors:95.0"
 "source/extensions/rate_limit_descriptors/expr:95.0"
-"source/extensions/stat_sinks/graphite_statsd:82.1"
-"source/extensions/stat_sinks/statsd:84.6"
+"source/extensions/stat_sinks/graphite_statsd:78.6" # Death tests don't report LCOV
+"source/extensions/stat_sinks/statsd:80.8" # Death tests don't report LCOV
 "source/extensions/tracers:95.8"
 "source/extensions/tracers/common:73.8"
 "source/extensions/tracers/common/ot:71.8"
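A closing note on the RST mechanics exercised earlier in this series: buildZeroSoLingerOptions() and ConnectionCloseType::AbortReset rest on the classic zero-linger trick, which is why the proxy_protocol tests above now expect SO_LINGER setsockopt calls. A rough POSIX-only sketch of the underlying mechanism follows (a hypothetical closeWithRst() helper, not Envoy code; as the WIN32 guards above indicate, this behavior is not portable to Windows). With SO_LINGER enabled and a zero timeout, close() discards unsent data and sends a TCP RST instead of the normal FIN sequence:

#include <sys/socket.h>
#include <unistd.h>

// Abortive close: discard any unsent data and send RST instead of FIN.
inline int closeWithRst(int fd) {
  struct linger lin;
  lin.l_onoff = 1;  // Enable SO_LINGER behavior...
  lin.l_linger = 0; // ...with a zero timeout, which makes close() abortive.
  if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin)) != 0) {
    return -1;
  }
  return close(fd);
}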