diff --git a/.bazelrc b/.bazelrc index 90b9b112b170..efb8e171dc51 100644 --- a/.bazelrc +++ b/.bazelrc @@ -260,14 +260,14 @@ build:rbe-toolchain-clang --platforms=@envoy_build_tools//toolchains:rbe_linux_c build:rbe-toolchain-clang --host_platform=@envoy_build_tools//toolchains:rbe_linux_clang_platform build:rbe-toolchain-clang --crosstool_top=@envoy_build_tools//toolchains/configs/linux/clang/cc:toolchain build:rbe-toolchain-clang --extra_toolchains=@envoy_build_tools//toolchains/configs/linux/clang/config:cc-toolchain -build:rbe-toolchain-clang --action_env=CC=clang --action_env=CXX=clang++ --action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin +build:rbe-toolchain-clang --action_env=CC=clang --action_env=CXX=clang++ build:rbe-toolchain-clang-libc++ --config=rbe-toolchain build:rbe-toolchain-clang-libc++ --platforms=@envoy_build_tools//toolchains:rbe_linux_clang_libcxx_platform build:rbe-toolchain-clang-libc++ --host_platform=@envoy_build_tools//toolchains:rbe_linux_clang_libcxx_platform build:rbe-toolchain-clang-libc++ --crosstool_top=@envoy_build_tools//toolchains/configs/linux/clang_libcxx/cc:toolchain build:rbe-toolchain-clang-libc++ --extra_toolchains=@envoy_build_tools//toolchains/configs/linux/clang_libcxx/config:cc-toolchain -build:rbe-toolchain-clang-libc++ --action_env=CC=clang --action_env=CXX=clang++ --action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin +build:rbe-toolchain-clang-libc++ --action_env=CC=clang --action_env=CXX=clang++ build:rbe-toolchain-clang-libc++ --action_env=CXXFLAGS=-stdlib=libc++ build:rbe-toolchain-clang-libc++ --action_env=LDFLAGS=-stdlib=libc++ build:rbe-toolchain-clang-libc++ --define force_libcpp=enabled diff --git a/.github/workflows/_publish_build.yml b/.github/workflows/_publish_build.yml index 857a2cf56b49..efc08f290254 100644 --- a/.github/workflows/_publish_build.yml +++ b/.github/workflows/_publish_build.yml @@ -6,6 +6,10 @@ permissions: on: workflow_call: secrets: + dockerhub-username: + required: false + dockerhub-password: + required: false gpg-key: required: true gpg-key-password: @@ -113,6 +117,9 @@ jobs: permissions: contents: read packages: read + secrets: + dockerhub-username: ${{ secrets.dockerhub-username }} + dockerhub-password: ${{ secrets.dockerhub-password }} name: ${{ matrix.name || matrix.target }} needs: - binary diff --git a/.github/workflows/_run.yml b/.github/workflows/_run.yml index 91667dc2f93d..2dc2b72850f5 100644 --- a/.github/workflows/_run.yml +++ b/.github/workflows/_run.yml @@ -8,6 +8,8 @@ on: secrets: app-id: app-key: + dockerhub-username: + dockerhub-password: gpg-key: gpg-key-password: rbe-key: @@ -279,6 +281,8 @@ jobs: working-directory: ${{ inputs.working-directory }} env: GITHUB_TOKEN: ${{ inputs.trusted && steps.appauth.outputs.token || github.token }} + DOCKERHUB_USERNAME: ${{ secrets.dockerhub-username }} + DOCKERHUB_PASSWORD: ${{ secrets.dockerhub-password }} ENVOY_DOCKER_BUILD_DIR: ${{ runner.temp }} ENVOY_RBE: ${{ inputs.rbe == true && 1 || '' }} RBE_KEY: ${{ secrets.rbe-key }} @@ -287,6 +291,10 @@ jobs: ${{ inputs.bazel-extra }} ${{ inputs.rbe == true && format('--jobs={0}', inputs.bazel-rbe-jobs) || '' }} BAZEL_FAKE_SCM_REVISION: ${{ github.event_name == 'pull_request' && 'e3b4a6e9570da15ac1caffdded17a8bebdc7dfc9' || '' }} + CI_BRANCH: >- + ${{ inputs.trusted + && format('refs/heads/{0}', fromJSON(inputs.request).request.target-branch) + || '' }} CI_SHA1: ${{ github.sha }} CI_TARGET_BRANCH: ${{ fromJSON(inputs.request).request.target-branch }} MOUNT_GPG_HOME: ${{ inputs.import-gpg && 1 
|| '' }} diff --git a/.github/workflows/envoy-publish.yml b/.github/workflows/envoy-publish.yml index d4703d50bc1d..5d4b5fa1af6d 100644 --- a/.github/workflows/envoy-publish.yml +++ b/.github/workflows/envoy-publish.yml @@ -55,6 +55,14 @@ jobs: contents: read packages: read secrets: + dockerhub-username: >- + ${{ fromJSON(needs.load.outputs.trusted) + && secrets.DOCKERHUB_USERNAME + || '' }} + dockerhub-password: >- + ${{ fromJSON(needs.load.outputs.trusted) + && secrets.DOCKERHUB_PASSWORD + || '' }} gpg-key: ${{ fromJSON(needs.load.outputs.trusted) && secrets.ENVOY_GPG_MAINTAINER_KEY || secrets.ENVOY_GPG_SNAKEOIL_KEY }} gpg-key-password: >- ${{ fromJSON(needs.load.outputs.trusted) diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index bca48856e4a8..4919f9c9325e 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -169,11 +169,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_desc = "Common Expression Language -- specification and binary representation", project_url = "https://github.com/google/cel-spec", strip_prefix = "cel-spec-{version}", - sha256 = "3ee09eb69dbe77722e9dee23dc48dc2cd9f765869fcf5ffb1226587c81791a0b", - version = "0.15.0", + sha256 = "24fd9b5aa218044f2923b8bcfccbf996eb024f05d1acbe1b27aca554f2720ac6", + version = "0.16.1", urls = ["https://github.com/google/cel-spec/archive/v{version}.tar.gz"], use_category = ["api"], - release_date = "2024-03-27", + release_date = "2024-08-28", ), envoy_toolshed = dict( project_name = "envoy_toolshed", diff --git a/api/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto b/api/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto index eec230317943..c497637b5de4 100644 --- a/api/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto +++ b/api/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto @@ -28,11 +28,6 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // The External Processing filter allows an external service to act on HTTP traffic in a flexible way. -// **Current Implementation Status:** -// All options and processing modes are implemented except for the following: -// -// * "async mode" is not implemented. 
- // The filter communicates with an external gRPC service called an "external processor" // that can do a variety of things with the request and response: // diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index 5f8bc42cfdb8..ad11894e3918 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -3898,12 +3898,14 @@ envoy_quic_cc_library( "quiche/quic/core/chlo_extractor.cc", "quiche/quic/core/quic_buffered_packet_store.cc", "quiche/quic/core/quic_dispatcher.cc", + "quiche/quic/core/quic_dispatcher_stats.cc", "quiche/quic/core/tls_chlo_extractor.cc", ], hdrs = [ "quiche/quic/core/chlo_extractor.h", "quiche/quic/core/quic_buffered_packet_store.h", "quiche/quic/core/quic_dispatcher.h", + "quiche/quic/core/quic_dispatcher_stats.h", "quiche/quic/core/tls_chlo_extractor.h", ], deps = [ diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 191c51bc995d..9fab5c89d323 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -96,12 +96,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "envoy_examples", project_desc = "Envoy proxy examples", project_url = "https://github.com/envoyproxy/examples", - version = "0.0.4", - sha256 = "a90a00ebea98a06e521d1c80c137085b542ff5c5ed6fec23afcb1fc165cc4b62", + version = "0.0.5", + sha256 = "2660070645623edbf4136d3a47109249bd53ffd8ff99ea13159439ad0be757cc", strip_prefix = "examples-{version}", urls = ["https://github.com/envoyproxy/examples/archive/v{version}.tar.gz"], use_category = ["test_only"], - release_date = "2024-08-22", + release_date = "2024-09-05", cpe = "N/A", license = "Apache-2.0", license_url = "https://github.com/envoyproxy/examples/blob/v{version}/LICENSE", @@ -1221,12 +1221,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "QUICHE", project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google‘s implementation of QUIC and related protocols", project_url = "https://github.com/google/quiche", - version = "36723962ef5c9f3f9f42093ff9cbe057bc7a80c4", - sha256 = "8735afd08104215a8487cc9f2ffff1adc16e6168dc61c4e65127a3fb23d90c54", + version = "8da3bbb0b08b151d410de69a6fbe73b1974a044e", + sha256 = "a183fe0516e601f2f4568ef68a9da462742f240be5035ef0cbd7e2ff4a40a40c", urls = ["https://github.com/google/quiche/archive/{version}.tar.gz"], strip_prefix = "quiche-{version}", use_category = ["controlplane", "dataplane_core"], - release_date = "2024-08-11", + release_date = "2024-09-03", cpe = "N/A", license = "BSD-3-Clause", license_url = "https://github.com/google/quiche/blob/{version}/LICENSE", diff --git a/bazel/setup_clang.sh b/bazel/setup_clang.sh index 46308db868b4..1b49ad94861d 100755 --- a/bazel/setup_clang.sh +++ b/bazel/setup_clang.sh @@ -5,27 +5,25 @@ set -e BAZELRC_FILE="${BAZELRC_FILE:-./clang.bazelrc}" LLVM_PREFIX=$1 +LLVM_CONFIG="${LLVM_PREFIX}/bin/llvm-config" -if [[ ! -e "${LLVM_PREFIX}/bin/llvm-config" ]]; then +if [[ ! -e "${LLVM_CONFIG}" ]]; then echo "Error: cannot find local llvm-config in ${LLVM_PREFIX}." exit 1 fi -PATH="$("${LLVM_PREFIX}"/bin/llvm-config --bindir):${PATH}" -export PATH - -LLVM_VERSION="$(llvm-config --version)" -LLVM_LIBDIR="$(llvm-config --libdir)" -LLVM_TARGET="$(llvm-config --host-target)" +LLVM_VERSION="$("${LLVM_CONFIG}" --version)" +LLVM_LIBDIR="$("${LLVM_CONFIG}" --libdir)" +LLVM_TARGET="$("${LLVM_CONFIG}" --host-target)" RT_LIBRARY_PATH="${LLVM_LIBDIR}/clang/${LLVM_VERSION}/lib/${LLVM_TARGET}" -echo "# Generated file, do not edit. If you want to disable clang, just delete this file. 
-build:clang --action_env='PATH=${PATH}' --host_action_env='PATH=${PATH}' -build:clang --action_env='LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config' --host_action_env='LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config' -build:clang --repo_env='LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config' -build:clang --linkopt='-L$(llvm-config --libdir)' -build:clang --linkopt='-Wl,-rpath,$(llvm-config --libdir)' +cat < "${BAZELRC_FILE}" +# Generated file, do not edit. If you want to disable clang, just delete this file. +build:clang --action_env=LLVM_CONFIG=${LLVM_CONFIG} --host_action_env=LLVM_CONFIG=${LLVM_CONFIG} +build:clang --repo_env=LLVM_CONFIG=${LLVM_CONFIG} +build:clang --linkopt=-L${LLVM_LIBDIR} +build:clang --linkopt=-Wl,-rpath,${LLVM_LIBDIR} -build:clang-asan --linkopt='-L${RT_LIBRARY_PATH}' -" >"${BAZELRC_FILE}" +build:clang-asan --linkopt=-L${RT_LIBRARY_PATH} +EOF diff --git a/changelogs/current.yaml b/changelogs/current.yaml index f788f20f7a86..92c5ba02c0d2 100644 --- a/changelogs/current.yaml +++ b/changelogs/current.yaml @@ -113,6 +113,12 @@ bug_fixes: change: | RBAC will now allow stat prefixes configured in per-route config to override the base config's stat prefix. +- area: http3 + change: | + Fixed a bug where an empty trailers block could be sent. This would occur if a filter removed + the last trailer - a likely occurrence with the ``grpc_web_filter``. This change makes HTTP/3 codec + behave the same way HTTP/2 codec does, converting an empty trailers block to no trailers. + This behavior can be reverted by setting the runtime guard ``envoy.reloadable_features.http3_remove_empty_trailers`` to ``false``. - area: http change: | Fixed a bug where an incomplete request (missing body or trailers) may be proxied to the upstream when the limit on diff --git a/ci/Dockerfile-envoy b/ci/Dockerfile-envoy index 216df38370ac..67135ed6404b 100644 --- a/ci/Dockerfile-envoy +++ b/ci/Dockerfile-envoy @@ -58,7 +58,7 @@ COPY --chown=0:0 --chmod=755 \ # STAGE: envoy-distroless -FROM gcr.io/distroless/base-nossl-debian12:nonroot@sha256:9652482d535c6e2b68823b7dbd9175eefe33edf12e86c02ab8e68fb38fd159b4 AS envoy-distroless +FROM gcr.io/distroless/base-nossl-debian12:nonroot@sha256:fb10a979880367004a93467d9dad87eea1af67c6adda0a0060d2e785a8c1a0e6 AS envoy-distroless EXPOSE 10000 ENTRYPOINT ["/usr/local/bin/envoy"] CMD ["-c", "/etc/envoy/envoy.yaml"] diff --git a/ci/format_pre.sh b/ci/format_pre.sh index ad7054ca2c17..daa6e164b458 100755 --- a/ci/format_pre.sh +++ b/ci/format_pre.sh @@ -58,12 +58,6 @@ bazel "${BAZEL_STARTUP_OPTIONS[@]}" run "${BAZEL_BUILD_OPTIONS[@]}" //configs:ex CURRENT=spelling "${ENVOY_SRCDIR}/tools/spelling/check_spelling_pedantic.py" --mark check -# TODO(phlax): move clang/buildifier checks to bazel rules (/aspects) -if [[ -n "$CI_BRANCH" ]]; then - CURRENT=check_format_test - "${ENVOY_SRCDIR}/tools/code_format/check_format_test_helper.sh" --log=WARN -fi - CURRENT=check_format bazel "${BAZEL_STARTUP_OPTIONS[@]}" run "${BAZEL_BUILD_OPTIONS[@]}" //tools/code_format:check_format -- fix --fail_on_diff diff --git a/docs/root/start/sandboxes/setup.rst b/docs/root/start/sandboxes/setup.rst index 24b13c957ae5..cbb4245ff32c 100644 --- a/docs/root/start/sandboxes/setup.rst +++ b/docs/root/start/sandboxes/setup.rst @@ -70,21 +70,21 @@ You can `find instructions for installing Git on various operating systems here .. 
_start_sandboxes_setup_envoy: -Clone the Envoy repository --------------------------- +Clone the Envoy examples repository +----------------------------------- -If you have not cloned the `Envoy repository `_ already, +If you have not cloned the `Envoy examples repository `_ already, clone it with: .. tabs:: .. code-tab:: console SSH - git clone git@github.com:envoyproxy/envoy + git clone git@github.com:envoyproxy/examples .. code-tab:: console HTTPS - git clone https://github.com/envoyproxy/envoy.git + git clone https://github.com/envoyproxy/examples.git .. _start_sandboxes_setup_additional: diff --git a/mobile/.bazelrc b/mobile/.bazelrc index fe912379dda7..f9a13c743f98 100644 --- a/mobile/.bazelrc +++ b/mobile/.bazelrc @@ -245,17 +245,6 @@ test:mobile-remote-ci-android --config=mobile-test-android build:mobile-remote-ci-cc --config=mobile-remote-ci test:mobile-remote-ci-cc --action_env=LD_LIBRARY_PATH -# TODO(alyssar) remove in a follow-up PR -build:mobile-remote-ci-cc-no-exceptions --config=mobile-remote-ci-cc -build:mobile-remote-ci-cc-no-exceptions --define envoy_exceptions=disabled -build:mobile-remote-ci-cc-no-exceptions --copt=-fno-exceptions - -build:mobile-remote-ci-cc-full-protos-enabled --config=mobile-remote-ci-cc -test:mobile-remote-ci-cc-full-protos-enabled --config=mobile-remote-ci-cc - -build:mobile-remote-ci-macos-kotlin --config=mobile-remote-ci-macos -build:mobile-remote-ci-macos-kotlin --fat_apk_cpu=x86_64 - build:mobile-remote-ci-core --config=mobile-remote-ci test:mobile-remote-ci-core --action_env=LD_LIBRARY_PATH diff --git a/mobile/library/cc/engine_builder.cc b/mobile/library/cc/engine_builder.cc index fa8a104d4dc2..0d1ffc57a1b9 100644 --- a/mobile/library/cc/engine_builder.cc +++ b/mobile/library/cc/engine_builder.cc @@ -321,6 +321,11 @@ EngineBuilder& EngineBuilder::addRuntimeGuard(std::string guard, bool value) { return *this; } +EngineBuilder& EngineBuilder::addRestartRuntimeGuard(std::string guard, bool value) { + restart_runtime_guards_.emplace_back(std::move(guard), value); + return *this; +} + #if defined(__APPLE__) EngineBuilder& EngineBuilder::respectSystemProxySettings(bool value) { respect_system_proxy_settings_ = value; @@ -818,6 +823,10 @@ std::unique_ptr EngineBuilder::generate // needed to be merged with the default off due to unresolved test issues. Once those are fixed, // and the default for `allow_client_socket_creation_failure` is true, we can remove this. (*restart_features.mutable_fields())["allow_client_socket_creation_failure"].set_bool_value(true); + for (auto& guard_and_value : restart_runtime_guards_) { + (*restart_features.mutable_fields())[guard_and_value.first].set_bool_value( + guard_and_value.second); + } (*runtime_values.mutable_fields())["disallow_global_stats"].set_bool_value(true); ProtobufWkt::Struct& overload_values = diff --git a/mobile/library/cc/engine_builder.h b/mobile/library/cc/engine_builder.h index 3d111e0d3e6d..cb2c0d06b9d8 100644 --- a/mobile/library/cc/engine_builder.h +++ b/mobile/library/cc/engine_builder.h @@ -89,6 +89,11 @@ class EngineBuilder { // For example if the runtime guard is `envoy.reloadable_features.use_foo`, the guard name is // `use_foo`. EngineBuilder& addRuntimeGuard(std::string guard, bool value); + // Adds a runtime guard for the `envoy.restart_features.`. Restart features cannot be + // changed after the Envoy applicable has started and initialized. + // For example if the runtime guard is `envoy.restart_features.use_foo`, the guard name is + // `use_foo`. 
+ EngineBuilder& addRestartRuntimeGuard(std::string guard, bool value); // These functions don't affect the Bootstrap configuration but instead perform registrations. EngineBuilder& addKeyValueStore(std::string name, KeyValueStoreSharedPtr key_value_store); @@ -183,6 +188,7 @@ class EngineBuilder { std::vector> dns_preresolve_hostnames_; std::vector> runtime_guards_; + std::vector> restart_runtime_guards_; absl::flat_hash_map string_accessors_; bool use_gro_if_available_ = false; diff --git a/mobile/library/common/internal_engine.cc b/mobile/library/common/internal_engine.cc index c5b774cda157..708daf1112e8 100644 --- a/mobile/library/common/internal_engine.cc +++ b/mobile/library/common/internal_engine.cc @@ -281,10 +281,14 @@ envoy_status_t InternalEngine::resetConnectivityState() { return dispatcher_->post([&]() -> void { connectivity_manager_->resetConnectivityState(); }); } -envoy_status_t InternalEngine::setPreferredNetwork(NetworkType network) { - return dispatcher_->post([&, network]() -> void { - envoy_netconf_t configuration_key = - Network::ConnectivityManagerImpl::setPreferredNetwork(network); +void InternalEngine::onDefaultNetworkAvailable() { + ENVOY_LOG_MISC(trace, "Calling the default network available callback"); +} + +void InternalEngine::onDefaultNetworkChanged(NetworkType network) { + ENVOY_LOG_MISC(trace, "Calling the default network changed callback"); + dispatcher_->post([&, network]() -> void { + envoy_netconf_t configuration = Network::ConnectivityManagerImpl::setPreferredNetwork(network); if (Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.dns_cache_set_ip_version_to_remove")) { // The IP version to remove flag must be set first before refreshing the DNS cache so that @@ -305,10 +309,15 @@ envoy_status_t InternalEngine::setPreferredNetwork(NetworkType network) { [](Http::HttpServerPropertiesCache& cache) { cache.resetBrokenness(); }; cache_manager.forEachThreadLocalCache(clear_brokenness); } - connectivity_manager_->refreshDns(configuration_key, true); + connectivity_manager_->refreshDns(configuration, true); }); } +void InternalEngine::onDefaultNetworkUnavailable() { + ENVOY_LOG_MISC(trace, "Calling the default network unavailable callback"); + dispatcher_->post([&]() -> void { connectivity_manager_->dnsCache()->stop(); }); +} + envoy_status_t InternalEngine::recordCounterInc(absl::string_view elements, envoy_stats_tags tags, uint64_t count) { return dispatcher_->post( diff --git a/mobile/library/common/internal_engine.h b/mobile/library/common/internal_engine.h index 4b7ab545ccf6..7541e88373f5 100644 --- a/mobile/library/common/internal_engine.h +++ b/mobile/library/common/internal_engine.h @@ -106,9 +106,15 @@ class InternalEngine : public Logger::Loggable { // to networkConnectivityManager after doing a dispatcher post (thread context switch) envoy_status_t setProxySettings(const char* host, const uint16_t port); envoy_status_t resetConnectivityState(); + + /** + * This function is called when the default network is available. This function is currently + * no-op. + */ + void onDefaultNetworkAvailable(); + /** - * This function does the following on a network change event (such as switching from WiFI to - * cellular, WIFi A to WiFI B, etc.). + * This function does the following when the default network configuration was changed. * * - Sets the preferred network. * - Check for IPv6 connectivity. 
If there is no IPv6 no connectivity, it will call @@ -117,7 +123,15 @@ class InternalEngine : public Logger::Loggable { * - Force refresh the hosts in the DNS cache (will take `setIpVersionToRemove` into account). * - Optionally (if configured) clear HTTP/3 broken status. */ - envoy_status_t setPreferredNetwork(NetworkType network); + void onDefaultNetworkChanged(NetworkType network); + + /** + * This functions does the following when the default network is unavailable. + * + * - Cancel the DNS pending queries. + * - Stop the DNS timeout and refresh timers. + */ + void onDefaultNetworkUnavailable(); /** * Increment a counter with a given string of elements and by the given count. diff --git a/mobile/library/java/io/envoyproxy/envoymobile/engine/AndroidEngineImpl.java b/mobile/library/java/io/envoyproxy/envoymobile/engine/AndroidEngineImpl.java index 7fad68820255..561ea2cc2e6a 100644 --- a/mobile/library/java/io/envoyproxy/envoymobile/engine/AndroidEngineImpl.java +++ b/mobile/library/java/io/envoyproxy/envoymobile/engine/AndroidEngineImpl.java @@ -1,6 +1,8 @@ package io.envoyproxy.envoymobile.engine; import android.content.Context; +import android.net.ConnectivityManager; + import io.envoyproxy.envoymobile.engine.types.EnvoyEventTracker; import io.envoyproxy.envoymobile.engine.types.EnvoyHTTPCallbacks; import io.envoyproxy.envoymobile.engine.types.EnvoyLogger; @@ -15,6 +17,7 @@ /* Android-specific implementation of the `EnvoyEngine` interface. */ public class AndroidEngineImpl implements EnvoyEngine { private final EnvoyEngine envoyEngine; + private final Context context; /** * @param runningCallback Called when the engine finishes its async startup and begins running. @@ -22,6 +25,7 @@ public class AndroidEngineImpl implements EnvoyEngine { public AndroidEngineImpl(Context context, EnvoyOnEngineRunning runningCallback, EnvoyLogger logger, EnvoyEventTracker eventTracker, Boolean enableProxying) { + this.context = context; this.envoyEngine = new EnvoyEngineImpl(runningCallback, logger, eventTracker); if (ContextUtils.getApplicationContext() == null) { ContextUtils.initApplicationContext(context.getApplicationContext()); @@ -44,6 +48,10 @@ public void performRegistration(EnvoyConfiguration envoyConfiguration) { @Override public EnvoyStatus runWithConfig(EnvoyConfiguration envoyConfiguration, String logLevel) { + if (envoyConfiguration.useCares) { + JniLibrary.initCares( + (ConnectivityManager)context.getSystemService(Context.CONNECTIVITY_SERVICE)); + } return envoyEngine.runWithConfig(envoyConfiguration, logLevel); } @@ -73,8 +81,18 @@ public void resetConnectivityState() { } @Override - public void setPreferredNetwork(EnvoyNetworkType network) { - envoyEngine.setPreferredNetwork(network); + public void onDefaultNetworkAvailable() { + envoyEngine.onDefaultNetworkAvailable(); + } + + @Override + public void onDefaultNetworkChanged(EnvoyNetworkType network) { + envoyEngine.onDefaultNetworkChanged(network); + } + + @Override + public void onDefaultNetworkUnavailable() { + envoyEngine.onDefaultNetworkUnavailable(); } public void setProxySettings(String host, int port) { envoyEngine.setProxySettings(host, port); } diff --git a/mobile/library/java/io/envoyproxy/envoymobile/engine/AndroidNetworkMonitor.java b/mobile/library/java/io/envoyproxy/envoymobile/engine/AndroidNetworkMonitor.java index 5cad82964844..708ab67b640f 100644 --- a/mobile/library/java/io/envoyproxy/envoymobile/engine/AndroidNetworkMonitor.java +++ b/mobile/library/java/io/envoyproxy/envoymobile/engine/AndroidNetworkMonitor.java @@ 
-3,41 +3,37 @@ import io.envoyproxy.envoymobile.engine.types.EnvoyNetworkType; import android.Manifest; -import android.annotation.TargetApi; -import android.content.BroadcastReceiver; import android.content.Context; -import android.content.Intent; -import android.content.IntentFilter; import android.content.pm.PackageManager; import android.net.ConnectivityManager; import android.net.ConnectivityManager.NetworkCallback; import android.net.Network; import android.net.NetworkCapabilities; -import android.net.NetworkInfo; -import android.net.NetworkRequest; -import android.os.Build; + +import androidx.annotation.NonNull; import androidx.annotation.VisibleForTesting; import androidx.core.content.ContextCompat; import java.util.Collections; /** - * This class makes use of some deprecated APIs, but it's only current purpose is to attempt to - * distill some notion of a preferred network from the OS, upon which we can assume new sockets will - * be opened. + * This class does the following. + *
+ * <ul>
+ * <li>When the internet is available: call the
+ * InternalEngine::onDefaultNetworkAvailable callback.</li>
+ *
+ * <li>When the internet is not available: call the
+ * InternalEngine::onDefaultNetworkUnavailable callback.</li>
+ *
+ * <li>When the capabilities are changed: call the
+ * EnvoyEngine::onDefaultNetworkChanged.</li>
+ * </ul>
*/ -@TargetApi(Build.VERSION_CODES.LOLLIPOP) -public class AndroidNetworkMonitor extends BroadcastReceiver { +public class AndroidNetworkMonitor { private static final String PERMISSION_DENIED_STATS_ELEMENT = "android_permissions.network_state_denied"; - private static volatile AndroidNetworkMonitor instance = null; - - private int previousNetworkType = ConnectivityManager.TYPE_DUMMY; - private EnvoyEngine envoyEngine; private ConnectivityManager connectivityManager; - private NetworkCallback networkCallback; - private NetworkRequest networkRequest; public static void load(Context context, EnvoyEngine envoyEngine) { if (instance != null) { @@ -61,6 +57,36 @@ public static void shutdown() { instance = null; } + private static class DefaultNetworkCallback extends NetworkCallback { + private final EnvoyEngine envoyEngine; + + private DefaultNetworkCallback(EnvoyEngine envoyEngine) { this.envoyEngine = envoyEngine; } + + @Override + public void onAvailable(@NonNull Network network) { + envoyEngine.onDefaultNetworkAvailable(); + } + + @Override + public void onCapabilitiesChanged(@NonNull Network network, + @NonNull NetworkCapabilities networkCapabilities) { + if (networkCapabilities.hasCapability(NetworkCapabilities.NET_CAPABILITY_INTERNET)) { + if (networkCapabilities.hasCapability(NetworkCapabilities.TRANSPORT_WIFI)) { + envoyEngine.onDefaultNetworkChanged(EnvoyNetworkType.WLAN); + } else if (networkCapabilities.hasCapability(NetworkCapabilities.TRANSPORT_CELLULAR)) { + envoyEngine.onDefaultNetworkChanged(EnvoyNetworkType.WWAN); + } else { + envoyEngine.onDefaultNetworkChanged(EnvoyNetworkType.GENERIC); + } + } + } + + @Override + public void onLost(@NonNull Network network) { + envoyEngine.onDefaultNetworkUnavailable(); + } + } + private AndroidNetworkMonitor(Context context, EnvoyEngine envoyEngine) { int permission = ContextCompat.checkSelfPermission(context, Manifest.permission.ACCESS_NETWORK_STATE); @@ -73,42 +99,9 @@ private AndroidNetworkMonitor(Context context, EnvoyEngine envoyEngine) { return; } - this.envoyEngine = envoyEngine; - connectivityManager = (ConnectivityManager)context.getSystemService(Context.CONNECTIVITY_SERVICE); - networkRequest = new NetworkRequest.Builder() - .addCapability(NetworkCapabilities.NET_CAPABILITY_INTERNET) - .build(); - - networkCallback = new NetworkCallback() { - @Override - public void onAvailable(Network network) { - handleNetworkChange(); - } - @Override - public void onCapabilitiesChanged(Network network, NetworkCapabilities networkCapabilities) { - handleNetworkChange(); - } - @Override - public void onLosing(Network network, int maxMsToLive) { - handleNetworkChange(); - } - @Override - public void onLost(final Network network) { - handleNetworkChange(); - } - }; - - try { - connectivityManager.registerNetworkCallback(networkRequest, networkCallback); - - context.registerReceiver(this, new IntentFilter() { - { addAction(ConnectivityManager.CONNECTIVITY_ACTION); } - }); - } catch (Throwable t) { - // no-op - } + connectivityManager.registerDefaultNetworkCallback(new DefaultNetworkCallback(envoyEngine)); } /** @returns The singleton instance of {@link AndroidNetworkMonitor}. 
*/ @@ -117,32 +110,14 @@ public static AndroidNetworkMonitor getInstance() { return instance; } - @Override - public void onReceive(Context context, Intent intent) { - handleNetworkChange(); - } - - /** @returns True if there is connectivity */ - public boolean isOnline() { return previousNetworkType != -1; } - - private void handleNetworkChange() { - NetworkInfo networkInfo = connectivityManager.getActiveNetworkInfo(); - int networkType = networkInfo == null ? -1 : networkInfo.getType(); - if (networkType == previousNetworkType) { - return; - } - previousNetworkType = networkType; - - switch (networkType) { - case ConnectivityManager.TYPE_MOBILE: - envoyEngine.setPreferredNetwork(EnvoyNetworkType.WWAN); - return; - case ConnectivityManager.TYPE_WIFI: - envoyEngine.setPreferredNetwork(EnvoyNetworkType.WLAN); - return; - default: - envoyEngine.setPreferredNetwork(EnvoyNetworkType.GENERIC); - } + /** + * Returns true if there is an internet connectivity. + */ + public boolean isOnline() { + NetworkCapabilities networkCapabilities = + connectivityManager.getNetworkCapabilities(connectivityManager.getActiveNetwork()); + return networkCapabilities != null && + networkCapabilities.hasCapability(NetworkCapabilities.NET_CAPABILITY_INTERNET); } /** Expose connectivityManager only for testing */ diff --git a/mobile/library/java/io/envoyproxy/envoymobile/engine/EnvoyEngine.java b/mobile/library/java/io/envoyproxy/envoymobile/engine/EnvoyEngine.java index b9ca5b1a93c0..4726662535ed 100644 --- a/mobile/library/java/io/envoyproxy/envoymobile/engine/EnvoyEngine.java +++ b/mobile/library/java/io/envoyproxy/envoymobile/engine/EnvoyEngine.java @@ -63,12 +63,19 @@ public interface EnvoyEngine { void resetConnectivityState(); /** - * Update the network interface to the preferred network for opening new - * streams. - * - * @param network The network to be preferred for new streams. + * A callback into the Envoy Engine when the default network is available. + */ + void onDefaultNetworkAvailable(); + + /** + * A callback into the Envoy Engine when the default network configuration was changed. + */ + void onDefaultNetworkChanged(EnvoyNetworkType network); + + /** + * A callback into the Envoy Engine when the default network is unavailable. */ - void setPreferredNetwork(EnvoyNetworkType network); + void onDefaultNetworkUnavailable(); /** * Update proxy settings. 
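For reference, a minimal usage sketch of the new default-network callbacks declared above. This is not part of the change itself; the class and method names below are hypothetical, and the call sequence simply mirrors what the updated tests in this diff (e.g. CronetHttp3Test.networkChangeAffectsBrokenness) emit when the device switches to Wi-Fi, replacing the former single setPreferredNetwork(WLAN) call.

    import io.envoyproxy.envoymobile.engine.EnvoyEngine;
    import io.envoyproxy.envoymobile.engine.types.EnvoyNetworkType;

    final class DefaultNetworkEventSketch {
      // Report that the old default network is gone, that a new (Wi-Fi) default
      // network was selected, and that it is now usable.
      static void simulateSwitchToWifi(EnvoyEngine engine) {
        engine.onDefaultNetworkUnavailable();
        engine.onDefaultNetworkChanged(EnvoyNetworkType.WLAN);
        engine.onDefaultNetworkAvailable();
      }
    }
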
diff --git a/mobile/library/java/io/envoyproxy/envoymobile/engine/EnvoyEngineImpl.java b/mobile/library/java/io/envoyproxy/envoymobile/engine/EnvoyEngineImpl.java index cabbcd4b8ccc..c8a4c0c04ec2 100644 --- a/mobile/library/java/io/envoyproxy/envoymobile/engine/EnvoyEngineImpl.java +++ b/mobile/library/java/io/envoyproxy/envoymobile/engine/EnvoyEngineImpl.java @@ -135,9 +135,21 @@ public void resetConnectivityState() { } @Override - public void setPreferredNetwork(EnvoyNetworkType network) { + public void onDefaultNetworkAvailable() { checkIsTerminated(); - JniLibrary.setPreferredNetwork(engineHandle, network.getValue()); + JniLibrary.onDefaultNetworkAvailable(engineHandle); + } + + @Override + public void onDefaultNetworkChanged(EnvoyNetworkType network) { + checkIsTerminated(); + JniLibrary.onDefaultNetworkChanged(engineHandle, network.getValue()); + } + + @Override + public void onDefaultNetworkUnavailable() { + checkIsTerminated(); + JniLibrary.onDefaultNetworkUnavailable(engineHandle); } public void setProxySettings(String host, int port) { diff --git a/mobile/library/java/io/envoyproxy/envoymobile/engine/JniLibrary.java b/mobile/library/java/io/envoyproxy/envoymobile/engine/JniLibrary.java index 8780613d443b..01f1704370c8 100644 --- a/mobile/library/java/io/envoyproxy/envoymobile/engine/JniLibrary.java +++ b/mobile/library/java/io/envoyproxy/envoymobile/engine/JniLibrary.java @@ -1,5 +1,7 @@ package io.envoyproxy.envoymobile.engine; +import android.net.ConnectivityManager; + import io.envoyproxy.envoymobile.engine.types.EnvoyEventTracker; import io.envoyproxy.envoymobile.engine.types.EnvoyHTTPCallbacks; import io.envoyproxy.envoymobile.engine.types.EnvoyLogger; @@ -223,14 +225,19 @@ protected static native int registerStringAccessor(String accessorName, protected static native int resetConnectivityState(long engine); /** - * Update the network interface to the preferred network for opening new - * streams. Note that this state is shared by all engines. - * - * @param engine Handle to the engine whose preferred network will be set. - * @param network the network to be preferred for new streams. - * @return The resulting status of the operation. + * A callback into the Envoy Engine when the default network is available. + */ + protected static native void onDefaultNetworkAvailable(long engine); + + /** + * A callback into the Envoy Engine when the default network configuration was changed. */ - protected static native int setPreferredNetwork(long engine, int network); + protected static native void onDefaultNetworkChanged(long engine, int networkType); + + /** + * A callback into the Envoy Engine when the default network is unavailable. + */ + protected static native void onDefaultNetworkUnavailable(long engine); /** * Update the proxy settings. @@ -305,4 +312,11 @@ public static native long createBootstrap( long maxConnectionsPerHost, long streamIdleTimeoutSeconds, long perTryIdleTimeoutSeconds, String appVersion, String appId, boolean trustChainVerification, byte[][] filterChain, boolean enablePlatformCertificatesValidation, String upstreamTlsSni, byte[][] runtimeGuards); + + /** + * Initializes c-ares. + * See ares_library_init_android. 
+ */ + public static native void initCares(ConnectivityManager connectivityManager); } diff --git a/mobile/library/jni/jni_impl.cc b/mobile/library/jni/jni_impl.cc index 902485c1557f..049be773eff5 100644 --- a/mobile/library/jni/jni_impl.cc +++ b/mobile/library/jni/jni_impl.cc @@ -3,6 +3,7 @@ #include "source/common/protobuf/protobuf.h" +#include "ares.h" #include "library/cc/engine_builder.h" #include "library/common/api/c_types.h" #include "library/common/bridge/utility.h" @@ -201,6 +202,19 @@ extern "C" JNIEXPORT void JNICALL Java_io_envoyproxy_envoymobile_engine_JniLibra delete internal_engine; } +extern "C" JNIEXPORT void JNICALL Java_io_envoyproxy_envoymobile_engine_JniLibrary_initCares( + JNIEnv* env, jclass, jobject connectivity_manager) { +#if defined(__ANDROID_API__) + Envoy::JNI::JniHelper jni_helper(env); + ares_library_init_jvm(jni_helper.getJavaVm()); + ares_library_init_android(connectivity_manager); +#else + // For suppressing unused parameters. + (void)env; + (void)connectivity_manager; +#endif +} + extern "C" JNIEXPORT jint JNICALL Java_io_envoyproxy_envoymobile_engine_JniLibrary_recordCounterInc( JNIEnv* env, jclass, // class @@ -1335,12 +1349,24 @@ Java_io_envoyproxy_envoymobile_engine_JniLibrary_resetConnectivityState(JNIEnv* return reinterpret_cast(engine)->resetConnectivityState(); } -extern "C" JNIEXPORT jint JNICALL -Java_io_envoyproxy_envoymobile_engine_JniLibrary_setPreferredNetwork(JNIEnv* /*env*/, - jclass, // class - jlong engine, jint network) { - return reinterpret_cast(engine)->setPreferredNetwork( - static_cast(network)); +extern "C" JNIEXPORT void JNICALL +Java_io_envoyproxy_envoymobile_engine_JniLibrary_onDefaultNetworkAvailable(JNIEnv*, jclass, + jlong engine) { + reinterpret_cast(engine)->onDefaultNetworkAvailable(); +} + +extern "C" JNIEXPORT void JNICALL +Java_io_envoyproxy_envoymobile_engine_JniLibrary_onDefaultNetworkChanged(JNIEnv*, jclass, + jlong engine, + jint network_type) { + reinterpret_cast(engine)->onDefaultNetworkChanged( + static_cast(network_type)); +} + +extern "C" JNIEXPORT void JNICALL +Java_io_envoyproxy_envoymobile_engine_JniLibrary_onDefaultNetworkUnavailable(JNIEnv*, jclass, + jlong engine) { + reinterpret_cast(engine)->onDefaultNetworkUnavailable(); } extern "C" JNIEXPORT jint JNICALL Java_io_envoyproxy_envoymobile_engine_JniLibrary_setProxySettings( diff --git a/mobile/library/objective-c/EnvoyNetworkMonitor.mm b/mobile/library/objective-c/EnvoyNetworkMonitor.mm index 999074522f88..93b7d374061b 100644 --- a/mobile/library/objective-c/EnvoyNetworkMonitor.mm +++ b/mobile/library/objective-c/EnvoyNetworkMonitor.mm @@ -66,7 +66,7 @@ - (void)startPathMonitor { if (network != previousNetworkType) { NSLog(@"[Envoy] setting preferred network to %d", network); - engine->setPreferredNetwork(network); + engine->onDefaultNetworkChanged(network); previousNetworkType = network; } @@ -135,8 +135,8 @@ static void _reachability_callback(SCNetworkReachabilityRef target, NSLog(@"[Envoy] setting preferred network to %@", isUsingWWAN ? @"WWAN" : @"WLAN"); EnvoyNetworkMonitor *monitor = (__bridge EnvoyNetworkMonitor *)info; - monitor->_engine->setPreferredNetwork(isUsingWWAN ? Envoy::NetworkType::WWAN - : Envoy::NetworkType::WLAN); + monitor->_engine->onDefaultNetworkChanged(isUsingWWAN ? 
Envoy::NetworkType::WWAN + : Envoy::NetworkType::WLAN); } @end diff --git a/mobile/test/common/integration/client_integration_test.cc b/mobile/test/common/integration/client_integration_test.cc index 499b86a8dfd5..4c47ed94cc6a 100644 --- a/mobile/test/common/integration/client_integration_test.cc +++ b/mobile/test/common/integration/client_integration_test.cc @@ -1339,7 +1339,7 @@ TEST_P(ClientIntegrationTest, TestProxyResolutionApi) { TEST_P(ClientIntegrationTest, OnNetworkChanged) { builder_.addRuntimeGuard("dns_cache_set_ip_version_to_remove", true); initialize(); - internalEngine()->setPreferredNetwork(NetworkType::WLAN); + internalEngine()->onDefaultNetworkChanged(NetworkType::WLAN); basicTest(); if (upstreamProtocol() == Http::CodecType::HTTP1) { ASSERT_EQ(cc_.on_complete_received_byte_count_, 67); diff --git a/mobile/test/common/integration/test_server.cc b/mobile/test/common/integration/test_server.cc index 6106161fe486..069125fa8398 100644 --- a/mobile/test/common/integration/test_server.cc +++ b/mobile/test/common/integration/test_server.cc @@ -169,12 +169,16 @@ void TestServer::start(TestServerType type, int port) { break; case TestServerType::HTTP2_WITH_TLS: upstream_config_.upstream_protocol_ = Http::CodecType::HTTP2; - factory = createUpstreamTlsContext(factory_context_); + factory = createUpstreamTlsContext(factory_context_, /* add_alpn= */ true); break; case TestServerType::HTTP1_WITHOUT_TLS: upstream_config_.upstream_protocol_ = Http::CodecType::HTTP1; factory = Network::Test::createRawBufferDownstreamSocketFactory(); break; + case TestServerType::HTTP1_WITH_TLS: + upstream_config_.upstream_protocol_ = Http::CodecType::HTTP1; + factory = createUpstreamTlsContext(factory_context_, /* add_alpn= */ false); + break; case TestServerType::HTTP_PROXY: { Server::forceRegisterDefaultListenerManagerFactoryImpl(); Extensions::TransportSockets::RawBuffer::forceRegisterDownstreamRawBufferSocketFactory(); @@ -327,7 +331,8 @@ Network::DownstreamTransportSocketFactoryPtr TestServer::createQuicUpstreamTlsCo } Network::DownstreamTransportSocketFactoryPtr TestServer::createUpstreamTlsContext( - testing::NiceMock& factory_context) { + testing::NiceMock& factory_context, + bool add_alpn) { envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; envoy::extensions::transport_sockets::tls::v3::TlsCertificate* certs = tls_context.mutable_common_tls_context()->add_tls_certificates(); @@ -338,7 +343,9 @@ Network::DownstreamTransportSocketFactoryPtr TestServer::createUpstreamTlsContex auto* ctx = tls_context.mutable_common_tls_context()->mutable_validation_context(); ctx->mutable_trusted_ca()->set_filename( TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcacert.pem")); - tls_context.mutable_common_tls_context()->add_alpn_protocols("h2"); + if (add_alpn) { + tls_context.mutable_common_tls_context()->add_alpn_protocols("h2"); + } auto cfg = *Extensions::TransportSockets::Tls::ServerContextConfigImpl::create( tls_context, factory_context, false); static auto* upstream_stats_store = new Stats::TestIsolatedStoreImpl(); diff --git a/mobile/test/common/integration/test_server.h b/mobile/test/common/integration/test_server.h index c4156aa667b8..ecfa6f54df6d 100644 --- a/mobile/test/common/integration/test_server.h +++ b/mobile/test/common/integration/test_server.h @@ -18,10 +18,11 @@ namespace Envoy { enum class TestServerType : int { HTTP1_WITHOUT_TLS = 0, - HTTP2_WITH_TLS = 1, - HTTP3 = 2, - HTTP_PROXY = 3, - HTTPS_PROXY = 4, + HTTP1_WITH_TLS = 1, + 
HTTP2_WITH_TLS = 2, + HTTP3 = 3, + HTTP_PROXY = 4, + HTTPS_PROXY = 5, }; class TestServer : public ListenerHooks { @@ -92,7 +93,7 @@ class TestServer : public ListenerHooks { testing::NiceMock&); Network::DownstreamTransportSocketFactoryPtr createUpstreamTlsContext( - testing::NiceMock&); + testing::NiceMock&, bool); }; } // namespace Envoy diff --git a/mobile/test/java/io/envoyproxy/envoymobile/engine/AndroidNetworkMonitorTest.java b/mobile/test/java/io/envoyproxy/envoymobile/engine/AndroidNetworkMonitorTest.java index 5fd1274901f6..ef1e4e799b1b 100644 --- a/mobile/test/java/io/envoyproxy/envoymobile/engine/AndroidNetworkMonitorTest.java +++ b/mobile/test/java/io/envoyproxy/envoymobile/engine/AndroidNetworkMonitorTest.java @@ -1,68 +1,126 @@ package io.envoyproxy.envoymobile.engine; +import static com.google.common.truth.Truth.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.robolectric.Shadows.shadowOf; import android.content.Context; -import android.content.Intent; import android.Manifest; import android.net.ConnectivityManager; -import android.net.NetworkInfo; -import androidx.test.filters.MediumTest; +import android.net.NetworkCapabilities; import androidx.test.platform.app.InstrumentationRegistry; import androidx.test.rule.GrantPermissionRule; -import androidx.test.annotation.UiThreadTest; -import io.envoyproxy.envoymobile.mocks.MockEnvoyEngine; -import org.junit.Assert; +import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; import org.robolectric.RobolectricTestRunner; -import org.robolectric.shadows.ShadowConnectivityManager; +import org.robolectric.shadows.ShadowNetwork; +import org.robolectric.shadows.ShadowNetworkCapabilities; + +import io.envoyproxy.envoymobile.engine.types.EnvoyNetworkType; /** * Tests functionality of AndroidNetworkMonitor */ @RunWith(RobolectricTestRunner.class) public class AndroidNetworkMonitorTest { - @Rule public GrantPermissionRule mRuntimePermissionRule = GrantPermissionRule.grant(Manifest.permission.ACCESS_NETWORK_STATE); private AndroidNetworkMonitor androidNetworkMonitor; - private ShadowConnectivityManager connectivityManager; - private Context context; + private ConnectivityManager connectivityManager; + private NetworkCapabilities networkCapabilities; + private final EnvoyEngine mockEnvoyEngine = mock(EnvoyEngine.class); @Before public void setUp() { - context = InstrumentationRegistry.getInstrumentation().getTargetContext(); - AndroidNetworkMonitor.load(context, new MockEnvoyEngine()); + Context context = InstrumentationRegistry.getInstrumentation().getTargetContext(); + AndroidNetworkMonitor.load(context, mockEnvoyEngine); androidNetworkMonitor = AndroidNetworkMonitor.getInstance(); - connectivityManager = shadowOf(androidNetworkMonitor.getConnectivityManager()); + connectivityManager = androidNetworkMonitor.getConnectivityManager(); + networkCapabilities = + connectivityManager.getNetworkCapabilities(connectivityManager.getActiveNetwork()); + } + + @After + public void tearDown() { + AndroidNetworkMonitor.shutdown(); } /** * Tests that isOnline() returns the correct result. 
*/ @Test - @MediumTest - @UiThreadTest - public void testAndroidNetworkMonitorIsOnline() { - Intent intent = new Intent(ConnectivityManager.CONNECTIVITY_ACTION); - // Set up network change - androidNetworkMonitor.onReceive(context, intent); - Assert.assertTrue(androidNetworkMonitor.isOnline()); - - // Save old networkInfo and simulate a no network scenerio - NetworkInfo networkInfo = androidNetworkMonitor.getConnectivityManager().getActiveNetworkInfo(); - connectivityManager.setActiveNetworkInfo(null); - androidNetworkMonitor.onReceive(context, intent); - Assert.assertFalse(androidNetworkMonitor.isOnline()); - - // Bring back online since the AndroidNetworkMonitor class is a singleton - connectivityManager.setActiveNetworkInfo(networkInfo); - androidNetworkMonitor.onReceive(context, intent); + public void testIsOnline() { + shadowOf(networkCapabilities).addCapability(NetworkCapabilities.NET_CAPABILITY_INTERNET); + assertThat(androidNetworkMonitor.isOnline()).isTrue(); + + shadowOf(networkCapabilities).removeCapability(NetworkCapabilities.NET_CAPABILITY_INTERNET); + assertThat(androidNetworkMonitor.isOnline()).isFalse(); + } + + //===================================================================================== + // TODO(fredyw): The ShadowConnectivityManager doesn't currently trigger + // ConnectivityManager.NetworkCallback, so we have to call the callbacks manually. This + // has been fixed in https://github.com/robolectric/robolectric/pull/9509 but it is + // not available in the current Roboelectric Shadows framework that we use. + //===================================================================================== + + @Test + public void testOnDefaultNetworkAvailable() { + shadowOf(connectivityManager) + .getNetworkCallbacks() + .forEach(callback -> callback.onAvailable(ShadowNetwork.newInstance(0))); + + verify(mockEnvoyEngine).onDefaultNetworkAvailable(); + } + + @Test + public void testOnDefaultNetworkUnavailable() { + shadowOf(connectivityManager) + .getNetworkCallbacks() + .forEach(callback -> callback.onLost(ShadowNetwork.newInstance(0))); + + verify(mockEnvoyEngine).onDefaultNetworkUnavailable(); + } + + @Test + public void testOnDefaultNetworkChangedWlan() { + shadowOf(connectivityManager).getNetworkCallbacks().forEach(callback -> { + NetworkCapabilities capabilities = ShadowNetworkCapabilities.newInstance(); + shadowOf(capabilities).addCapability(NetworkCapabilities.NET_CAPABILITY_INTERNET); + shadowOf(capabilities).addCapability(NetworkCapabilities.TRANSPORT_WIFI); + callback.onCapabilitiesChanged(ShadowNetwork.newInstance(0), capabilities); + }); + + verify(mockEnvoyEngine).onDefaultNetworkChanged(EnvoyNetworkType.WLAN); + } + + @Test + public void testOnDefaultNetworkChangedWwan() { + shadowOf(connectivityManager).getNetworkCallbacks().forEach(callback -> { + NetworkCapabilities capabilities = ShadowNetworkCapabilities.newInstance(); + shadowOf(capabilities).addCapability(NetworkCapabilities.NET_CAPABILITY_INTERNET); + shadowOf(capabilities).addCapability(NetworkCapabilities.TRANSPORT_CELLULAR); + callback.onCapabilitiesChanged(ShadowNetwork.newInstance(0), capabilities); + }); + + verify(mockEnvoyEngine).onDefaultNetworkChanged(EnvoyNetworkType.WWAN); + } + + @Test + public void testOnDefaultNetworkChangedGeneric() { + shadowOf(connectivityManager).getNetworkCallbacks().forEach(callback -> { + NetworkCapabilities capabilities = ShadowNetworkCapabilities.newInstance(); + shadowOf(capabilities).addCapability(NetworkCapabilities.NET_CAPABILITY_INTERNET); + 
callback.onCapabilitiesChanged(ShadowNetwork.newInstance(0), capabilities); + }); + + verify(mockEnvoyEngine).onDefaultNetworkChanged(EnvoyNetworkType.GENERIC); } } diff --git a/mobile/test/java/io/envoyproxy/envoymobile/engine/BUILD b/mobile/test/java/io/envoyproxy/envoymobile/engine/BUILD index adaddced6795..2b8a073afdfd 100644 --- a/mobile/test/java/io/envoyproxy/envoymobile/engine/BUILD +++ b/mobile/test/java/io/envoyproxy/envoymobile/engine/BUILD @@ -67,6 +67,7 @@ envoy_mobile_android_test( deps = [ "//library/java/io/envoyproxy/envoymobile/engine:envoy_base_engine_lib", "//library/java/io/envoyproxy/envoymobile/engine:envoy_engine_lib", + "//library/java/io/envoyproxy/envoymobile/engine/types:envoy_c_types_lib", "//library/kotlin/io/envoyproxy/envoymobile:envoy_interfaces_lib", "//test/kotlin/io/envoyproxy/envoymobile/mocks:mocks_lib", ], diff --git a/mobile/test/java/io/envoyproxy/envoymobile/engine/testing/HttpProxyTestServerFactory.java b/mobile/test/java/io/envoyproxy/envoymobile/engine/testing/HttpProxyTestServerFactory.java index 1d3bb4fb393f..4b8811615609 100644 --- a/mobile/test/java/io/envoyproxy/envoymobile/engine/testing/HttpProxyTestServerFactory.java +++ b/mobile/test/java/io/envoyproxy/envoymobile/engine/testing/HttpProxyTestServerFactory.java @@ -4,8 +4,8 @@ public final class HttpProxyTestServerFactory { /** The supported {@link HttpProxyTestServer} types. */ public static class Type { - public static final int HTTP_PROXY = 3; - public static final int HTTPS_PROXY = 4; + public static final int HTTP_PROXY = 4; + public static final int HTTPS_PROXY = 5; private Type() {} } diff --git a/mobile/test/java/io/envoyproxy/envoymobile/engine/testing/HttpTestServerFactory.java b/mobile/test/java/io/envoyproxy/envoymobile/engine/testing/HttpTestServerFactory.java index ee4e7f84d5a5..2569e174553f 100644 --- a/mobile/test/java/io/envoyproxy/envoymobile/engine/testing/HttpTestServerFactory.java +++ b/mobile/test/java/io/envoyproxy/envoymobile/engine/testing/HttpTestServerFactory.java @@ -8,8 +8,9 @@ public final class HttpTestServerFactory { /** The supported {@link HttpTestServer} types. 
*/ public static class Type { public static final int HTTP1_WITHOUT_TLS = 0; - public static final int HTTP2_WITH_TLS = 1; - public static final int HTTP3 = 2; + public static final int HTTP1_WITH_TLS = 1; + public static final int HTTP2_WITH_TLS = 2; + public static final int HTTP3 = 3; private Type() {} } diff --git a/mobile/test/java/org/chromium/net/CronetHttp3Test.java b/mobile/test/java/org/chromium/net/CronetHttp3Test.java index 41ad3f848fdc..709b6346023d 100644 --- a/mobile/test/java/org/chromium/net/CronetHttp3Test.java +++ b/mobile/test/java/org/chromium/net/CronetHttp3Test.java @@ -1,6 +1,5 @@ package org.chromium.net; -import static org.chromium.net.testing.CronetTestRule.getContext; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -11,17 +10,14 @@ import androidx.test.core.app.ApplicationProvider; import org.chromium.net.testing.TestUploadDataProvider; import androidx.test.filters.SmallTest; -import org.chromium.net.impl.CronvoyUrlRequestContext; + import org.chromium.net.impl.NativeCronvoyEngineBuilderImpl; import org.chromium.net.testing.CronetTestRule; -import org.chromium.net.testing.CronetTestRule.CronetTestFramework; -import org.chromium.net.testing.CronetTestRule.RequiresMinApi; import org.chromium.net.testing.Feature; import org.chromium.net.testing.TestUrlRequestCallback; -import org.chromium.net.testing.TestUrlRequestCallback.ResponseStep; + import io.envoyproxy.envoymobile.engine.JniLibrary; import org.junit.After; -import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; @@ -44,7 +40,7 @@ public class CronetHttp3Test { // If true, dump envoy logs on test completion. // Ideally we could override this from the command line but that's TBD. - private boolean printEnvoyLogs = false; + private boolean printEnvoyLogs = true; // The HTTP/2 server, set up to alt-svc to the HTTP/3 server private HttpTestServerFactory.HttpTestServer http2TestServer; // The HTTP/3 server @@ -138,7 +134,7 @@ private TestUrlRequestCallback doBasicPostRequest() { return callback; } - void doInitialHttp2Request() { + private void doInitialHttp2Request() { // Do a request to https://127.0.0.1:test_server_port/ TestUrlRequestCallback callback = doBasicGetRequest(); @@ -148,90 +144,90 @@ void doInitialHttp2Request() { assertEquals("h2", callback.mResponseInfo.getNegotiatedProtocol()); } - @Test - @SmallTest - @Feature({"Cronet"}) - public void basicHttp3Get() throws Exception { - // Ideally we could override this from the command line but that's TBD. - setUp(printEnvoyLogs); - - // Do the initial HTTP/2 request to get the alt-svc response. - doInitialHttp2Request(); - - // Set up a second request, which will hopefully go out over HTTP/3 due to alt-svc - // advertisement. - TestUrlRequestCallback callback = doBasicGetRequest(); - - // Verify the second request used HTTP/3 - assertEquals(200, callback.mResponseInfo.getHttpStatusCode()); - assertEquals("h3", callback.mResponseInfo.getNegotiatedProtocol()); - } - - @Test - @SmallTest - @Feature({"Cronet"}) - public void failToHttp2() throws Exception { - // Ideally we could override this from the command line but that's TBD. - setUp(printEnvoyLogs); - - // Do the initial HTTP/2 request to get the alt-svc response. - doInitialHttp2Request(); - - // Set up a second request, which will hopefully go out over HTTP/3 due to alt-svc - // advertisement. 
- TestUrlRequestCallback getCallback = doBasicGetRequest(); - - // Verify the second request used HTTP/3 - assertEquals(200, getCallback.mResponseInfo.getHttpStatusCode()); - assertEquals("h3", getCallback.mResponseInfo.getNegotiatedProtocol()); - - // Now stop the HTTP/3 server. - http3TestServer.shutdown(); - http3TestServer = null; - - // The next request will fail on HTTP2 but should succeed on HTTP/2 despite having a body. - TestUrlRequestCallback postCallback = doBasicPostRequest(); - assertEquals(200, postCallback.mResponseInfo.getHttpStatusCode()); - assertEquals("h2", postCallback.mResponseInfo.getNegotiatedProtocol()); - } - - @Test - @SmallTest - @Feature({"Cronet"}) - public void testNoRetryPostAfterHandshake() throws Exception { - setUp(printEnvoyLogs); - - // Do the initial HTTP/2 request to get the alt-svc response. - doInitialHttp2Request(); - - // Set up a second request, which will hopefully go out over HTTP/3 due to alt-svc - // advertisement. - TestUrlRequestCallback callback = new TestUrlRequestCallback(); - UrlRequest.Builder urlRequestBuilder = - cronvoyEngine.newUrlRequestBuilder(testServerUrl, callback, callback.getExecutor()); - // Set the upstream to reset after the request. - urlRequestBuilder.addHeader("reset_after_request", "yes"); - urlRequestBuilder.addHeader("content-type", "text"); - urlRequestBuilder.setHttpMethod("POST"); - TestUploadDataProvider dataProvider = new TestUploadDataProvider( - TestUploadDataProvider.SuccessCallbackMode.SYNC, callback.getExecutor()); - dataProvider.addRead("test".getBytes()); - urlRequestBuilder.setUploadDataProvider(dataProvider, callback.getExecutor()); - - urlRequestBuilder.build().start(); - callback.blockForDone(); - - // Both HTTP/3 and HTTP/2 servers will reset after the request. - assertTrue(callback.mOnErrorCalled); - // There are 2 requests - the initial HTTP/2 alt-svc request and the HTTP/3 request. - // By default, POST requests will not retry. - String stats = cronvoyEngine.getEnvoyEngine().dumpStats(); - assertTrue(stats.contains("cluster.base.upstream_rq_total: 2")); - } + // @Test + // @SmallTest + // @Feature({"Cronet"}) + // public void basicHttp3Get() throws Exception { + // // Ideally we could override this from the command line but that's TBD. + // setUp(printEnvoyLogs); + // + // // Do the initial HTTP/2 request to get the alt-svc response. + // doInitialHttp2Request(); + // + // // Set up a second request, which will hopefully go out over HTTP/3 due to alt-svc + // // advertisement. + // TestUrlRequestCallback callback = doBasicGetRequest(); + // + // // Verify the second request used HTTP/3 + // assertEquals(200, callback.mResponseInfo.getHttpStatusCode()); + // assertEquals("h3", callback.mResponseInfo.getNegotiatedProtocol()); + // } + + // @Test + // @SmallTest + // @Feature({"Cronet"}) + // public void failToHttp2() throws Exception { + // // Ideally we could override this from the command line but that's TBD. + // setUp(printEnvoyLogs); + // + // // Do the initial HTTP/2 request to get the alt-svc response. + // doInitialHttp2Request(); + // + // // Set up a second request, which will hopefully go out over HTTP/3 due to alt-svc + // // advertisement. + // TestUrlRequestCallback getCallback = doBasicGetRequest(); + // + // // Verify the second request used HTTP/3 + // assertEquals(200, getCallback.mResponseInfo.getHttpStatusCode()); + // assertEquals("h3", getCallback.mResponseInfo.getNegotiatedProtocol()); + // + // // Now stop the HTTP/3 server. 
+ // http3TestServer.shutdown(); + // http3TestServer = null; + // + // // The next request will fail on HTTP2 but should succeed on HTTP/2 despite having a body. + // TestUrlRequestCallback postCallback = doBasicPostRequest(); + // assertEquals(200, postCallback.mResponseInfo.getHttpStatusCode()); + // assertEquals("h2", postCallback.mResponseInfo.getNegotiatedProtocol()); + // } + + // @Test + // @SmallTest + // @Feature({"Cronet"}) + // public void testNoRetryPostAfterHandshake() throws Exception { + // setUp(printEnvoyLogs); + // + // // Do the initial HTTP/2 request to get the alt-svc response. + // doInitialHttp2Request(); + // + // // Set up a second request, which will hopefully go out over HTTP/3 due to alt-svc + // // advertisement. + // TestUrlRequestCallback callback = new TestUrlRequestCallback(); + // UrlRequest.Builder urlRequestBuilder = + // cronvoyEngine.newUrlRequestBuilder(testServerUrl, callback, callback.getExecutor()); + // // Set the upstream to reset after the request. + // urlRequestBuilder.addHeader("reset_after_request", "yes"); + // urlRequestBuilder.addHeader("content-type", "text"); + // urlRequestBuilder.setHttpMethod("POST"); + // TestUploadDataProvider dataProvider = new TestUploadDataProvider( + // TestUploadDataProvider.SuccessCallbackMode.SYNC, callback.getExecutor()); + // dataProvider.addRead("test".getBytes()); + // urlRequestBuilder.setUploadDataProvider(dataProvider, callback.getExecutor()); + // + // urlRequestBuilder.build().start(); + // callback.blockForDone(); + // + // // Both HTTP/3 and HTTP/2 servers will reset after the request. + // assertTrue(callback.mOnErrorCalled); + // // There are 2 requests - the initial HTTP/2 alt-svc request and the HTTP/3 request. + // // By default, POST requests will not retry. + // String stats = cronvoyEngine.getEnvoyEngine().dumpStats(); + // assertTrue(stats.contains("cluster.base.upstream_rq_total: 2")); + // } // Set up to use HTTP/3, then force HTTP/3 to fail post-handshake. The request should // be retried on HTTP/2 and HTTP/3 will be marked broken. - public void retryPostHandshake() throws Exception { + private void retryPostHandshake() throws Exception { // Do the initial HTTP/2 request to get the alt-svc response. doInitialHttp2Request(); @@ -265,14 +261,14 @@ public void retryPostHandshake() throws Exception { assertTrue(stats.contains("cluster.base.upstream_http3_broken: 1")); } - @Test - @SmallTest - @Feature({"Cronet"}) - public void testRetryPostHandshake() throws Exception { - setUp(printEnvoyLogs); - - retryPostHandshake(); - } + // @Test + // @SmallTest + // @Feature({"Cronet"}) + // public void testRetryPostHandshake() throws Exception { + // setUp(printEnvoyLogs); + // + // retryPostHandshake(); + // } @Test @SmallTest @@ -288,7 +284,9 @@ public void networkChangeAffectsBrokenness() throws Exception { assertTrue(preStats.contains("cluster.base.upstream_cx_http3_total: 1")); // This should change QUIC brokenness to "failed recently". - cronvoyEngine.getEnvoyEngine().setPreferredNetwork(EnvoyNetworkType.WLAN); + cronvoyEngine.getEnvoyEngine().onDefaultNetworkUnavailable(); + cronvoyEngine.getEnvoyEngine().onDefaultNetworkChanged(EnvoyNetworkType.WLAN); + cronvoyEngine.getEnvoyEngine().onDefaultNetworkAvailable(); // The next request may go out over HTTP/2 or HTTP/3 (depends on who wins the race) // but HTTP/3 will be tried. 
diff --git a/mobile/test/java/org/chromium/net/CronetUrlRequestTest.java b/mobile/test/java/org/chromium/net/CronetUrlRequestTest.java index e8a275327045..8b511e17df2c 100644 --- a/mobile/test/java/org/chromium/net/CronetUrlRequestTest.java +++ b/mobile/test/java/org/chromium/net/CronetUrlRequestTest.java @@ -13,6 +13,7 @@ import android.content.Intent; import android.Manifest; import android.net.ConnectivityManager; +import android.net.NetworkCapabilities; import android.net.NetworkInfo; import android.os.Build; import android.os.ConditionVariable; @@ -59,6 +60,7 @@ import org.junit.runner.RunWith; import org.robolectric.RobolectricTestRunner; import org.robolectric.shadows.ShadowConnectivityManager; +import org.robolectric.shadows.ShadowNetworkCapabilities; /** * Test functionality of CronetUrlRequest. @@ -85,6 +87,7 @@ public void setUp() { mMockUrlRequestJobFactory = new MockUrlRequestJobFactory(mTestRule.buildCronetTestFramework().mBuilder); assertTrue(NativeTestServer.startNativeTestServer(getContext())); + enableInternet(true); } @After @@ -98,6 +101,19 @@ public void tearDown() { AndroidNetworkMonitor.shutdown(); } + private static void enableInternet(boolean enabled) { + AndroidNetworkMonitor androidNetworkMonitor = AndroidNetworkMonitor.getInstance(); + ConnectivityManager connectivityManager = androidNetworkMonitor.getConnectivityManager(); + ShadowNetworkCapabilities shadowNetworkCapabilities = shadowOf( + connectivityManager.getNetworkCapabilities(connectivityManager.getActiveNetwork())); + + if (enabled) { + shadowNetworkCapabilities.addCapability(NetworkCapabilities.NET_CAPABILITY_INTERNET); + } else { + shadowNetworkCapabilities.removeCapability(NetworkCapabilities.NET_CAPABILITY_INTERNET); + } + } + private TestUrlRequestCallback startAndWaitForComplete(CronetEngine engine, String url) throws Exception { TestUrlRequestCallback callback = new TestUrlRequestCallback(); @@ -2083,26 +2099,12 @@ public void testErrorCodes() throws Exception { @SmallTest @Feature({"Cronet"}) public void testInternetDisconnectedError() throws Exception { - AndroidNetworkMonitor androidNetworkMonitor = AndroidNetworkMonitor.getInstance(); - Intent intent = new Intent(ConnectivityManager.CONNECTIVITY_ACTION); - // save old networkInfo before overriding - NetworkInfo networkInfo = androidNetworkMonitor.getConnectivityManager().getActiveNetworkInfo(); - - // simulate no network - ShadowConnectivityManager connectivityManager = - shadowOf(androidNetworkMonitor.getConnectivityManager()); - connectivityManager.setActiveNetworkInfo(null); - androidNetworkMonitor.onReceive(getContext(), intent); + enableInternet(false); - // send request and confirm errorcode checkSpecificErrorCode( EnvoyMobileError.DNS_RESOLUTION_FAILED, NetError.ERR_INTERNET_DISCONNECTED, NetworkException.ERROR_INTERNET_DISCONNECTED, "INTERNET_DISCONNECTED", false, /*error_details=*/"rc: 400|ec: 0|rsp_flags: 26|http: 1"); - - // bring back online since the AndroidNetworkMonitor class is a singleton - connectivityManager.setActiveNetworkInfo(networkInfo); - androidNetworkMonitor.onReceive(getContext(), intent); } /* diff --git a/mobile/test/kotlin/apps/experimental/MainActivity.kt b/mobile/test/kotlin/apps/experimental/MainActivity.kt index 7d8c119feb17..807bdff21ade 100644 --- a/mobile/test/kotlin/apps/experimental/MainActivity.kt +++ b/mobile/test/kotlin/apps/experimental/MainActivity.kt @@ -84,6 +84,7 @@ class MainActivity : Activity() { Log.d(TAG, "Event emitted: ${entry.key}, ${entry.value}") } }) + .useCares(true) .build() 
recyclerView = findViewById(R.id.recycler_view)!! diff --git a/mobile/test/kotlin/integration/proxying/BUILD b/mobile/test/kotlin/integration/proxying/BUILD index 2671823b3894..eea8dd00eb5c 100644 --- a/mobile/test/kotlin/integration/proxying/BUILD +++ b/mobile/test/kotlin/integration/proxying/BUILD @@ -10,9 +10,6 @@ envoy_mobile_android_test( srcs = [ "ProxyInfoIntentPerformHTTPRequestUsingProxyTest.kt", ], - exec_properties = { - "dockerNetwork": "standard", - }, native_deps = [ "//test/jni:libenvoy_jni_with_test_and_listener_extensions.so", ] + select({ @@ -28,6 +25,7 @@ envoy_mobile_android_test( "//library/kotlin/io/envoyproxy/envoymobile:envoy_lib", "//test/java/io/envoyproxy/envoymobile/engine/testing", "//test/java/io/envoyproxy/envoymobile/engine/testing:http_proxy_test_server_factory_lib", + "//test/java/io/envoyproxy/envoymobile/engine/testing:http_test_server_factory_lib", ], ) diff --git a/mobile/test/kotlin/integration/proxying/ProxyInfoIntentPerformHTTPRequestUsingProxyTest.kt b/mobile/test/kotlin/integration/proxying/ProxyInfoIntentPerformHTTPRequestUsingProxyTest.kt index 6b9a559598ed..e6a44edda8db 100644 --- a/mobile/test/kotlin/integration/proxying/ProxyInfoIntentPerformHTTPRequestUsingProxyTest.kt +++ b/mobile/test/kotlin/integration/proxying/ProxyInfoIntentPerformHTTPRequestUsingProxyTest.kt @@ -13,6 +13,7 @@ import io.envoyproxy.envoymobile.RequestHeadersBuilder import io.envoyproxy.envoymobile.RequestMethod import io.envoyproxy.envoymobile.engine.JniLibrary import io.envoyproxy.envoymobile.engine.testing.HttpProxyTestServerFactory +import io.envoyproxy.envoymobile.engine.testing.HttpTestServerFactory import java.util.concurrent.CountDownLatch import java.util.concurrent.Executors import java.util.concurrent.TimeUnit @@ -41,16 +42,19 @@ class ProxyInfoIntentPerformHTTPRequestUsingProxyTest { } private lateinit var httpProxyTestServer: HttpProxyTestServerFactory.HttpProxyTestServer + private lateinit var httpTestServer: HttpTestServerFactory.HttpTestServer @Before fun setUp() { httpProxyTestServer = HttpProxyTestServerFactory.start(HttpProxyTestServerFactory.Type.HTTP_PROXY) + httpTestServer = HttpTestServerFactory.start(HttpTestServerFactory.Type.HTTP1_WITHOUT_TLS) } @After fun tearDown() { httpProxyTestServer.shutdown() + httpTestServer.shutdown() } @Test @@ -86,8 +90,8 @@ class ProxyInfoIntentPerformHTTPRequestUsingProxyTest { RequestHeadersBuilder( method = RequestMethod.GET, scheme = "http", - authority = "api.lyft.com", - path = "/ping" + authority = httpTestServer.address, + path = "/" ) .build() @@ -96,7 +100,7 @@ class ProxyInfoIntentPerformHTTPRequestUsingProxyTest { .newStreamPrototype() .setOnResponseHeaders { responseHeaders, _, _ -> val status = responseHeaders.httpStatus ?: 0L - assertThat(status).isEqualTo(301) + assertThat(status).isEqualTo(200) assertThat(responseHeaders.value("x-proxy-response")).isEqualTo(listOf("true")) onResponseHeadersLatch.countDown() } diff --git a/mobile/test/kotlin/io/envoyproxy/envoymobile/mocks/MockEnvoyEngine.kt b/mobile/test/kotlin/io/envoyproxy/envoymobile/mocks/MockEnvoyEngine.kt index f39d033b5e13..542dfd2728fc 100644 --- a/mobile/test/kotlin/io/envoyproxy/envoymobile/mocks/MockEnvoyEngine.kt +++ b/mobile/test/kotlin/io/envoyproxy/envoymobile/mocks/MockEnvoyEngine.kt @@ -40,7 +40,11 @@ class MockEnvoyEngine : EnvoyEngine { override fun resetConnectivityState() = Unit - override fun setPreferredNetwork(network: EnvoyNetworkType) = Unit + override fun onDefaultNetworkAvailable() = Unit + + override fun 
onDefaultNetworkChanged(network: EnvoyNetworkType) = Unit + + override fun onDefaultNetworkUnavailable() = Unit override fun setProxySettings(host: String, port: Int) = Unit diff --git a/source/common/json/BUILD b/source/common/json/BUILD index d5d5af16dc51..9fd6cdf8a848 100644 --- a/source/common/json/BUILD +++ b/source/common/json/BUILD @@ -50,9 +50,18 @@ envoy_cc_library( name = "json_streamer_lib", hdrs = ["json_streamer.h"], deps = [ + ":constants_lib", ":json_sanitizer_lib", "//envoy/buffer:buffer_interface", "//source/common/buffer:buffer_util_lib", "//source/common/common:assert_lib", ], ) + +envoy_cc_library( + name = "constants_lib", + hdrs = ["constants.h"], + external_deps = [ + "abseil_strings", + ], +) diff --git a/source/common/json/constants.h b/source/common/json/constants.h new file mode 100644 index 000000000000..2207d6c7b5bb --- /dev/null +++ b/source/common/json/constants.h @@ -0,0 +1,17 @@ +#pragma once + +#include "absl/strings/string_view.h" + +namespace Envoy { +namespace Json { + +class Constants { +public: + // Constants for common JSON values. + static constexpr absl::string_view True = "true"; + static constexpr absl::string_view False = "false"; + static constexpr absl::string_view Null = "null"; +}; + +} // namespace Json +} // namespace Envoy diff --git a/source/common/json/json_sanitizer.cc b/source/common/json/json_sanitizer.cc index ab814c70e815..d45a7bdc8013 100644 --- a/source/common/json/json_sanitizer.cc +++ b/source/common/json/json_sanitizer.cc @@ -77,16 +77,26 @@ absl::string_view sanitize(std::string& buffer, absl::string_view str) { } END_TRY catch (std::exception&) { - // If Nlohmann throws an error, emit an octal escape for any character + // If Nlohmann throws an error, emit a hex escape for any character // requiring it. This can occur for invalid utf-8 sequences, and we don't // want to crash the server if such a sequence makes its way into a string // we need to serialize. For example, if admin endpoint /stats?format=json // is called, and a stat name was synthesized from dynamic content such as a // gRPC method. + // + // Note that JSON string escapes are always 4 digit hex. 3 digit octal would + // be more compact, and is legal JavaScript, but not legal JSON. See + // https://www.json.org/json-en.html for details. + // + // TODO(jmarantz): It would be better to use the compact JSON escapes for + // quotes, slashes, backspace, form-feed, linefeed, CR, and tab, in which + // case we'd also need to modify jsonEquivalentStrings in + // test/common/json/json_sanitizer_test_util.h. We don't expect to hit this + // often, so it isn't a priority to use these more compact encodings. buffer.clear(); for (char c : str) { if (needs_slow_sanitizer[static_cast<uint8_t>(c)]) { - buffer.append(absl::StrFormat("\\%03o", c)); + buffer.append(absl::StrFormat("\\u%04x", c)); } else { buffer.append(1, c); } diff --git a/source/common/json/json_streamer.h b/source/common/json/json_streamer.h index a1fd5f9dcb23..97d7ef424d54 100644 --- a/source/common/json/json_streamer.h +++ b/source/common/json/json_streamer.h @@ -9,6 +9,7 @@ #include "envoy/buffer/buffer.h" #include "source/common/buffer/buffer_util.h" +#include "source/common/json/constants.h" #include "source/common/json/json_sanitizer.h" #include "absl/strings/string_view.h" @@ -38,6 +39,18 @@ namespace Json { #define ASSERT_LEVELS_EMPTY ASSERT(this->levels_.empty()) #endif +// Simple abstraction that provides an output buffer for streaming JSON output.
+class BufferOutput { +public: + void add(absl::string_view a) { buffer_.addFragments({a}); } + void add(absl::string_view a, absl::string_view b, absl::string_view c) { + buffer_.addFragments({a, b, c}); + } + + explicit BufferOutput(Buffer::Instance& output) : buffer_(output) {} + Buffer::Instance& buffer_; +}; + /** * Provides an API for streaming JSON output, as an alternative to populating a * JSON structure with an image of what you want to serialize, or using a @@ -143,7 +156,7 @@ class Streamer { void addString(absl::string_view str) { ASSERT_THIS_IS_TOP_LEVEL; nextField(); - streamer_.addSanitized("\"", str, "\""); + streamer_.addString(str); } /** @@ -243,7 +256,7 @@ class Streamer { ASSERT_THIS_IS_TOP_LEVEL; ASSERT(!expecting_value_); nextField(); - this->streamer_.addSanitized(R"(")", key, R"(":)"); + this->streamer_.addSanitized("\"", key, "\":"); expecting_value_ = true; } @@ -331,32 +344,39 @@ class Streamer { */ void addSanitized(absl::string_view prefix, absl::string_view token, absl::string_view suffix) { absl::string_view sanitized = Json::sanitize(sanitize_buffer_, token); - response_.addFragments({prefix, sanitized, suffix}); + response_.add(prefix, sanitized, suffix); } + /** + * Serializes a string to the output stream. The input string value will be sanitized and + * surrounded by quotes. + * @param str the string to be serialized. + */ + void addString(absl::string_view str) { addSanitized("\"", str, "\""); } + /** * Serializes a number. */ void addNumber(double d) { if (std::isnan(d)) { - response_.addFragments({"null"}); + response_.add(Constants::Null); } else { - Buffer::Util::serializeDouble(d, response_); + Buffer::Util::serializeDouble(d, response_.buffer_); } } - void addNumber(uint64_t u) { response_.addFragments({absl::StrCat(u)}); } - void addNumber(int64_t i) { response_.addFragments({absl::StrCat(i)}); } + void addNumber(uint64_t u) { response_.add(absl::StrCat(u)); } + void addNumber(int64_t i) { response_.add(absl::StrCat(i)); } /** * Serializes a bool to the output stream. */ - void addBool(bool b) { response_.addFragments({b ? "true" : "false"}); } + void addBool(bool b) { response_.add(b ? Constants::True : Constants::False); } /** * Adds a constant string to the output stream. The string must outlive the * Streamer object, and is intended for literal strings such as punctuation. */ - void addConstantString(absl::string_view str) { response_.addFragments({str}); } + void addConstantString(absl::string_view str) { response_.add(str); } #ifndef NDEBUG /** @@ -379,7 +399,7 @@ class Streamer { #endif - Buffer::Instance& response_; + BufferOutput response_; std::string sanitize_buffer_; #ifndef NDEBUG diff --git a/source/common/quic/envoy_quic_server_stream.cc b/source/common/quic/envoy_quic_server_stream.cc index 522c7209d4c8..331495df292b 100644 --- a/source/common/quic/envoy_quic_server_stream.cc +++ b/source/common/quic/envoy_quic_server_stream.cc @@ -92,6 +92,15 @@ void EnvoyQuicServerStream::encodeHeaders(const Http::ResponseHeaderMap& headers } void EnvoyQuicServerStream::encodeTrailers(const Http::ResponseTrailerMap& trailers) { + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.http3_remove_empty_trailers")) { + if (trailers.empty()) { + ENVOY_STREAM_LOG(debug, "skipping submitting empty trailers", *this); + // Instead of submitting empty trailers, we send empty data with end_stream=true instead. 
+ Buffer::OwnedImpl empty_buffer; + encodeData(empty_buffer, true); + return; + } + } ENVOY_STREAM_LOG(debug, "encodeTrailers: {}.", *this, trailers); encodeTrailersImpl(envoyHeadersToHttp2HeaderBlock(trailers)); } diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index e1cc4cec1bf7..7f0378342a27 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -59,6 +59,7 @@ RUNTIME_GUARD(envoy_reloadable_features_http2_use_oghttp2); RUNTIME_GUARD(envoy_reloadable_features_http2_use_visitor_for_data); RUNTIME_GUARD(envoy_reloadable_features_http2_validate_authority_with_quiche); RUNTIME_GUARD(envoy_reloadable_features_http3_happy_eyeballs); +RUNTIME_GUARD(envoy_reloadable_features_http3_remove_empty_trailers); RUNTIME_GUARD(envoy_reloadable_features_http_filter_avoid_reentrant_local_reply); // Delay deprecation and decommission until UHV is enabled. RUNTIME_GUARD(envoy_reloadable_features_http_reject_path_with_fragment); diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache.h b/source/extensions/common/dynamic_forward_proxy/dns_cache.h index 50e2b0cbb486..d656efce9cec 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache.h @@ -278,6 +278,13 @@ class DnsCache { * addresses. */ virtual void setIpVersionToRemove(absl::optional ip_version) PURE; + + /** + * Stops the DNS cache background tasks by canceling the pending queries and stopping the timeout + * and refresh timers. This function can be useful when the network is unavailable, such as when + * a device is in airplane mode, etc. + */ + virtual void stop() PURE; }; using DnsCacheSharedPtr = std::shared_ptr; diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index 494df335ea64..b06e8ab4aee1 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -337,6 +337,27 @@ void DnsCacheImpl::setIpVersionToRemove(absl::optionalresetNetworking(); + + absl::ReaderMutexLock reader_lock{&primary_hosts_lock_}; + for (auto& primary_host : primary_hosts_) { + if (primary_host.second->active_query_ != nullptr) { + primary_host.second->active_query_->cancel( + Network::ActiveDnsQuery::CancelReason::QueryAbandoned); + primary_host.second->active_query_ = nullptr; + } + + primary_host.second->timeout_timer_->disableTimer(); + ASSERT(!primary_host.second->timeout_timer_->enabled()); + primary_host.second->refresh_timer_->disableTimer(); + ENVOY_LOG_EVENT(debug, "stop_host", "stop host='{}'", primary_host.first); + } +} + void DnsCacheImpl::startResolve(const std::string& host, PrimaryHostInfo& host_info) { ENVOY_LOG(debug, "starting main thread resolve for host='{}' dns='{}' port='{}' timeout='{}'", host, host_info.host_info_->resolvedHost(), host_info.port_, timeout_interval_.count()); diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h index a521d14ec458..fe8d6819447d 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.h @@ -70,6 +70,7 @@ class DnsCacheImpl : public DnsCache, Logger::Loggable ip_version) override; + void stop() override; private: DnsCacheImpl(Server::Configuration::GenericFactoryContext& 
context, diff --git a/source/extensions/dynamic_modules/sdk/README.md b/source/extensions/dynamic_modules/sdk/README.md new file mode 100644 index 000000000000..64d61baa62f2 --- /dev/null +++ b/source/extensions/dynamic_modules/sdk/README.md @@ -0,0 +1,4 @@ +## Dynamic Modules SDKs + +This directory contains the SDKs for the Dynamic Modules feature. Each SDK passes the same set of tests and +is guaranteed to provide the same functionality. diff --git a/source/extensions/dynamic_modules/sdk/rust/.gitignore b/source/extensions/dynamic_modules/sdk/rust/.gitignore new file mode 100644 index 000000000000..ea8c4bf7f35f --- /dev/null +++ b/source/extensions/dynamic_modules/sdk/rust/.gitignore @@ -0,0 +1 @@ +/target diff --git a/source/extensions/dynamic_modules/sdk/rust/BUILD b/source/extensions/dynamic_modules/sdk/rust/BUILD new file mode 100644 index 000000000000..480a9432eb58 --- /dev/null +++ b/source/extensions/dynamic_modules/sdk/rust/BUILD @@ -0,0 +1,15 @@ +load("@rules_rust//rust:defs.bzl", "rust_library") +load( + "//bazel:envoy_build_system.bzl", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +rust_library( + name = "envoy_proxy_dynamic_modules_rust_sdk", + srcs = glob(["src/**/*.rs"]), + edition = "2021", +) diff --git a/source/extensions/dynamic_modules/sdk/rust/Cargo.lock b/source/extensions/dynamic_modules/sdk/rust/Cargo.lock new file mode 100644 index 000000000000..f6f126e02005 --- /dev/null +++ b/source/extensions/dynamic_modules/sdk/rust/Cargo.lock @@ -0,0 +1,7 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "envoy-proxy-dynamic-modules-rust-sdk" +version = "0.1.0" diff --git a/source/extensions/dynamic_modules/sdk/rust/Cargo.toml b/source/extensions/dynamic_modules/sdk/rust/Cargo.toml new file mode 100644 index 000000000000..3874a8a3301b --- /dev/null +++ b/source/extensions/dynamic_modules/sdk/rust/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "envoy-proxy-dynamic-modules-rust-sdk" +version = "0.1.0" +edition = "2021" +authors = ["Envoy Proxy Authors "] +description = "Envoy Proxy Dynamic Modules Rust SDK" +license = "Apache-2.0" +repository = "https://github.com/envoyproxy/envoy" + +[dependencies] + +[lib] diff --git a/source/extensions/dynamic_modules/sdk/rust/README.md b/source/extensions/dynamic_modules/sdk/rust/README.md new file mode 100644 index 000000000000..9470ff7b65df --- /dev/null +++ b/source/extensions/dynamic_modules/sdk/rust/README.md @@ -0,0 +1,7 @@ +# Envoy Dynamic Modules Rust SDK + +This directory contains the Rust SDK for the Dynamic Modules feature. The SDK passes the same set of tests and is guaranteed to provide the same functionality as the other SDKs. This directory is organized in the way that it can be used as a standalone Rust crate. The SDK is basically the high-level abstraction layer for the Dynamic Modules ABI defined in the [abi.h](../../abi.h). + +Currently, the ABI binding ([src/abi.rs](./src/abi.rs)) is manually generated by [`bindgen`](https://github.com/rust-lang/rust-bindgen) for the ABI header. Ideally, we should be able to do the bindgen in the build.rs file. + +TODO(@mathetake): figure out how to properly setup the bindgen in build.rs with rules_rust. The most recommended way is to use [crate_universe](https://bazelbuild.github.io/rules_rust/crate_universe.html#setup) and use bindgen as a dev-dependency. 
However, it seems that crate_universe tries to use the underlying gcc system linker which we cannot assume always available. diff --git a/source/extensions/dynamic_modules/sdk/rust/src/abi.rs b/source/extensions/dynamic_modules/sdk/rust/src/abi.rs new file mode 100644 index 000000000000..d673d5b2c599 --- /dev/null +++ b/source/extensions/dynamic_modules/sdk/rust/src/abi.rs @@ -0,0 +1,17 @@ +/* automatically generated by rust-bindgen 0.70.1 */ + +pub type wchar_t = ::std::os::raw::c_int; +#[repr(C)] +#[repr(align(16))] +#[derive(Debug, Copy, Clone)] +pub struct max_align_t { + pub __clang_max_align_nonce1: ::std::os::raw::c_longlong, + pub __bindgen_padding_0: u64, + pub __clang_max_align_nonce2: u128, +} +#[doc = " envoy_dynamic_module_type_abi_version represents a null-terminated string that contains the ABI\n version of the dynamic module. This is used to ensure that the dynamic module is built against\n the compatible version of the ABI."] +pub type envoy_dynamic_module_type_abi_version = *const ::std::os::raw::c_char; +extern "C" { + #[doc = " envoy_dynamic_module_on_program_init is called by the main thread exactly when the module is\n loaded. The function returns the ABI version of the dynamic module. If null is returned, the\n module will be unloaded immediately.\n\n For Envoy, the return value will be used to check the compatibility of the dynamic module.\n\n For dynamic modules, this is useful when they need to perform some process-wide\n initialization or check if the module is compatible with the platform, such as CPU features.\n Note that initialization routines of a dynamic module can also be performed without this function\n through constructor functions in an object file. However, normal constructors cannot be used\n to check compatibility and gracefully fail the initialization because there is no way to\n report an error to Envoy.\n\n @return envoy_dynamic_module_type_abi_version is the ABI version of the dynamic module. Null\n means the error and the module will be unloaded immediately."] + pub fn envoy_dynamic_module_on_program_init() -> envoy_dynamic_module_type_abi_version; +} diff --git a/source/extensions/dynamic_modules/sdk/rust/src/lib.rs b/source/extensions/dynamic_modules/sdk/rust/src/lib.rs new file mode 100644 index 000000000000..e8aa578d6c95 --- /dev/null +++ b/source/extensions/dynamic_modules/sdk/rust/src/lib.rs @@ -0,0 +1,40 @@ +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] +#![allow(dead_code)] + +mod abi; + +/// Declare the init function for the dynamic module. This function is called when the dynamic module is loaded. +/// The function must return true on success, and false on failure. When it returns false, +/// the dynamic module will not be loaded. +/// +/// This is useful to perform any process-wide initialization that the dynamic module needs. +/// +/// # Example +/// +/// ``` +/// use envoy_proxy_dynamic_modules_rust_sdk::declare_program_init; +/// +/// declare_program_init!(my_program_init); +/// +/// fn my_program_init() -> bool { +/// true +/// } +/// ``` +#[macro_export] +macro_rules! 
declare_program_init { + ($f:ident) => { + #[no_mangle] + pub extern "C" fn envoy_dynamic_module_on_program_init() -> *const ::std::os::raw::c_char { + if ($f()) { + // This magic number is sha256 of the ABI headers which must match the + // value in abi_version.h + b"749b1e6bf97309b7d171009700a80e651ac61e35f9770c24a63460d765895a51\0".as_ptr() + as *const ::std::os::raw::c_char + } else { + ::std::ptr::null() + } + } + }; +} diff --git a/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.cc b/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.cc index 8ddb89670ea7..58ef86e3591f 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.cc @@ -63,7 +63,7 @@ bool HttpBodyUtils::parseMessageByFieldPath( } void HttpBodyUtils::appendHttpBodyEnvelope( - Buffer::Instance& output, const std::vector& request_body_field_path, + Buffer::Instance& output, const std::vector& request_body_field_path, std::string content_type, uint64_t content_length, const UnknownQueryParams& unknown_params) { // Manually encode the protobuf envelope for the body. // See https://developers.google.com/protocol-buffers/docs/encoding#embedded for wire format. diff --git a/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.h b/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.h index a0bd96aea8e9..37b550bde77a 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.h +++ b/source/extensions/filters/http/grpc_json_transcoder/http_body_utils.h @@ -18,7 +18,8 @@ class HttpBodyUtils { const std::vector& field_path, Protobuf::Message* message); static void appendHttpBodyEnvelope( - Buffer::Instance& output, const std::vector& request_body_field_path, + Buffer::Instance& output, + const std::vector& request_body_field_path, std::string content_type, uint64_t content_length, const envoy::extensions::filters::http::grpc_json_transcoder::v3::UnknownQueryParams& unknown_params); diff --git a/source/extensions/filters/http/rate_limit_quota/client_impl.cc b/source/extensions/filters/http/rate_limit_quota/client_impl.cc index 85fcab8725de..92390ea8a302 100644 --- a/source/extensions/filters/http/rate_limit_quota/client_impl.cc +++ b/source/extensions/filters/http/rate_limit_quota/client_impl.cc @@ -85,7 +85,7 @@ void RateLimitClientImpl::onReceiveMessage(RateLimitQuotaResponsePtr&& response) // Get the hash id value from BucketId in the response. 
const size_t bucket_id = MessageUtil::hash(action.bucket_id()); - ENVOY_LOG(trace, + ENVOY_LOG(debug, "Received a response for bucket id proto :\n {}, and generated " "the associated hashed bucket id: {}", action.bucket_id().DebugString(), bucket_id); @@ -97,10 +97,11 @@ void RateLimitClientImpl::onReceiveMessage(RateLimitQuotaResponsePtr&& response) switch (action.bucket_action_case()) { case envoy::service::rate_limit_quota::v3::RateLimitQuotaResponse_BucketAction:: kQuotaAssignmentAction: { - quota_buckets_[bucket_id]->bucket_action = action; - if (quota_buckets_[bucket_id]->bucket_action.has_quota_assignment_action()) { + quota_buckets_[bucket_id]->cached_action = action; + quota_buckets_[bucket_id]->current_assignment_time = time_source_.monotonicTime(); + if (quota_buckets_[bucket_id]->cached_action->has_quota_assignment_action()) { auto rate_limit_strategy = quota_buckets_[bucket_id] - ->bucket_action.quota_assignment_action() + ->cached_action->quota_assignment_action() .rate_limit_strategy(); if (rate_limit_strategy.has_token_bucket()) { @@ -127,6 +128,7 @@ void RateLimitClientImpl::onReceiveMessage(RateLimitQuotaResponsePtr&& response) case envoy::service::rate_limit_quota::v3::RateLimitQuotaResponse_BucketAction:: kAbandonAction: { quota_buckets_.erase(bucket_id); + ENVOY_LOG(debug, "Bucket id {} removed from the cache by abandon action.", bucket_id); break; } default: { @@ -136,6 +138,7 @@ void RateLimitClientImpl::onReceiveMessage(RateLimitQuotaResponsePtr&& response) } } } + ENVOY_LOG(debug, "Assignment cached for bucket id {}.", bucket_id); } // `rlqs_callback_` has been reset to nullptr for periodical report case. diff --git a/source/extensions/filters/http/rate_limit_quota/filter.cc b/source/extensions/filters/http/rate_limit_quota/filter.cc index 7cbb29fcdb4f..65535753c3e3 100644 --- a/source/extensions/filters/http/rate_limit_quota/filter.cc +++ b/source/extensions/filters/http/rate_limit_quota/filter.cc @@ -9,6 +9,8 @@ namespace Extensions { namespace HttpFilters { namespace RateLimitQuota { +using envoy::type::v3::RateLimitStrategy; + const char kBucketMetadataNamespace[] = "envoy.extensions.http_filters.rate_limit_quota.bucket"; Http::FilterHeadersStatus RateLimitQuotaFilter::decodeHeaders(Http::RequestHeaderMap& headers, @@ -23,7 +25,7 @@ Http::FilterHeadersStatus RateLimitQuotaFilter::decodeHeaders(Http::RequestHeade // allowed/denied, matching succeed/fail and so on. ENVOY_LOG(debug, "The request is not matched by any matchers: ", match_result.status().message()); - return Envoy::Http::FilterHeadersStatus::Continue; + return sendAllowResponse(); } // Second, generate the bucket id for this request based on match action when the request matching @@ -36,7 +38,7 @@ Http::FilterHeadersStatus RateLimitQuotaFilter::decodeHeaders(Http::RequestHeade // When it failed to generate the bucket id for this specific request, the request is ALLOWED by // default (i.e., fail-open). ENVOY_LOG(debug, "Unable to generate the bucket id: {}", ret.status().message()); - return Envoy::Http::FilterHeadersStatus::Continue; + return sendAllowResponse(); } const BucketId& bucket_id_proto = *ret; @@ -123,34 +125,36 @@ void RateLimitQuotaFilter::createNewBucket(const BucketId& bucket_id, // Create new bucket and store it into quota cache. std::unique_ptr new_bucket = std::make_unique(); - // The first matched request doesn't have quota assignment from the RLQS server yet, so the - // action is performed based on pre-configured strategy from no assignment behavior config. 
+ // The first matched request doesn't have quota assignment from the RLQS + // server yet, so the action is performed based on pre-configured strategy + // from no assignment behavior config. auto mutable_rate_limit_strategy = - new_bucket->bucket_action.mutable_quota_assignment_action()->mutable_rate_limit_strategy(); + new_bucket->default_action.mutable_quota_assignment_action()->mutable_rate_limit_strategy(); if (match_action.bucketSettings().has_no_assignment_behavior()) { *mutable_rate_limit_strategy = match_action.bucketSettings().no_assignment_behavior().fallback_rate_limit(); } else { - // When `no_assignment_behavior` is not configured, default blanket rule is set to ALLOW_ALL. - // (i.e., fail-open). - mutable_rate_limit_strategy->set_blanket_rule(envoy::type::v3::RateLimitStrategy::ALLOW_ALL); + // When `no_assignment_behavior` is not configured, default blanket rule is + // set to ALLOW_ALL. (i.e., fail-open). + mutable_rate_limit_strategy->set_blanket_rule(RateLimitStrategy::ALLOW_ALL); } // Set up the bucket id. new_bucket->bucket_id = bucket_id; - // Set up the first time assignment time. - new_bucket->first_assignment_time = time_source_.monotonicTime(); + // Mark the assignment time. + auto now = time_source_.monotonicTime(); + new_bucket->current_assignment_time = now; // Set up the quota usage. QuotaUsage quota_usage; - quota_usage.last_report = std::chrono::duration_cast( - time_source_.monotonicTime().time_since_epoch()); + quota_usage.last_report = + std::chrono::duration_cast(now.time_since_epoch()); switch (mutable_rate_limit_strategy->blanket_rule()) { PANIC_ON_PROTO_ENUM_SENTINEL_VALUES; - case envoy::type::v3::RateLimitStrategy::ALLOW_ALL: + case RateLimitStrategy::ALLOW_ALL: quota_usage.num_requests_allowed++; break; - case envoy::type::v3::RateLimitStrategy::DENY_ALL: + case RateLimitStrategy::DENY_ALL: quota_usage.num_requests_denied++; break; } @@ -159,6 +163,8 @@ void RateLimitQuotaFilter::createNewBucket(const BucketId& bucket_id, quota_buckets_[id] = std::move(new_bucket); } +// This function should not update QuotaUsage as that will have been handled +// when constructing the Report before this function is called. Http::FilterHeadersStatus RateLimitQuotaFilter::sendImmediateReport(const size_t bucket_id, const RateLimitOnMatchAction& match_action) { @@ -183,10 +189,9 @@ RateLimitQuotaFilter::sendImmediateReport(const size_t bucket_id, if (!status.ok()) { ENVOY_LOG(error, "Failed to start the gRPC stream: ", status.message()); // TODO(tyxia) Check `NoAssignmentBehavior` behavior instead of fail-open here. - return Envoy::Http::FilterHeadersStatus::Continue; - } else { - ENVOY_LOG(debug, "The gRPC stream is established and active"); + return sendAllowResponse(); } + ENVOY_LOG(debug, "The gRPC stream is established and active"); // Send the usage report to RLQS server immediately on the first time when the request is // matched. @@ -202,103 +207,172 @@ RateLimitQuotaFilter::sendImmediateReport(const size_t bucket_id, // bucket_matchers for the first time) should be already set based on no assignment behavior in // `createNewBucket` when the bucket is initially created. 
ASSERT(quota_buckets_.find(bucket_id) != quota_buckets_.end()); - if (quota_buckets_[bucket_id] - ->bucket_action.quota_assignment_action() - .rate_limit_strategy() - .blanket_rule() == envoy::type::v3::RateLimitStrategy::ALLOW_ALL) { - ENVOY_LOG( - trace, - "For first matched request with hashed bucket_id {}, it is allowed by ALLOW_ALL strategy.", - bucket_id); - return Http::FilterHeadersStatus::Continue; - } else { - // For the request that is rejected due to DENY_ALL no_assignment_behavior, immediate report is - // still sent to RLQS server above, and here the local reply with deny response is sent. - ENVOY_LOG( - trace, - "For first matched request with hashed bucket_id {}, it is throttled by DENY_ALL strategy.", - bucket_id); - sendDenyResponse(); - return Envoy::Http::FilterHeadersStatus::StopIteration; + // If not given a default blanket rule, the first matched request is allowed. + if (!quota_buckets_[bucket_id]->default_action.has_quota_assignment_action() || + !quota_buckets_[bucket_id] + ->default_action.quota_assignment_action() + .has_rate_limit_strategy() || + !quota_buckets_[bucket_id] + ->default_action.quota_assignment_action() + .rate_limit_strategy() + .has_blanket_rule()) { + ENVOY_LOG(trace, "Without a default blanket rule configured, the first matched " + "request with hashed bucket_id {} is allowed through.", bucket_id); + ENVOY_LOG(debug, "Default action for bucket_id {} does not contain a blanket action: {}", + bucket_id, quota_buckets_[bucket_id]->default_action.DebugString()); + return sendAllowResponse(); } + auto blanket_rule = quota_buckets_[bucket_id] + ->default_action.quota_assignment_action() + .rate_limit_strategy() + .blanket_rule(); + if (blanket_rule == RateLimitStrategy::DENY_ALL) { + // For the request that is rejected due to DENY_ALL + // no_assignment_behavior, immediate report is still sent to RLQS server + // above, and here the local reply with deny response is sent. + ENVOY_LOG(trace, + "For first matched request with hashed bucket_id {}, it is " + "throttled by DENY_ALL strategy.", + bucket_id); + ENVOY_LOG(debug, "Hit configured default DENY_ALL for bucket_id {}", bucket_id); + return sendDenyResponse(); + } + + ENVOY_LOG(trace, + "For first matched request with hashed bucket_id {}, it is " + "allowed by the configured default ALLOW_ALL strategy.", + bucket_id); + ENVOY_LOG(debug, "Hit configured default ALLOW_ALL for bucket_id {}", bucket_id); + return sendAllowResponse(); } Http::FilterHeadersStatus -RateLimitQuotaFilter::processCachedBucket(size_t bucket_id, - const RateLimitOnMatchAction& match_action) { +RateLimitQuotaFilter::setUsageAndResponseFromAction(const BucketAction& action, + const size_t bucket_id) { + if (!action.has_quota_assignment_action() || + !action.quota_assignment_action().has_rate_limit_strategy()) { + ENVOY_LOG(debug, + "Selected bucket action defaulting to ALLOW_ALL as it does not " + "have an assignment for bucket_id {}", + bucket_id); + return sendAllowResponse(&quota_buckets_[bucket_id]->quota_usage); + } + + // TODO(tyxia) Currently, blanket rule and token bucket strategies are + // implemented. Change to switch case when `RequestsPerTimeUnit` strategy is + // implemented. + auto rate_limit_strategy = action.quota_assignment_action().rate_limit_strategy(); + if (rate_limit_strategy.has_blanket_rule()) { + bool allow = (rate_limit_strategy.blanket_rule() != RateLimitStrategy::DENY_ALL); + ENVOY_LOG(trace, "Request with hashed bucket_id {} is {} by the selected blanket rule.", + bucket_id, allow ?
"allowed" : "denied"); + if (allow) { + return sendAllowResponse(&quota_buckets_[bucket_id]->quota_usage); + } + return sendDenyResponse(&quota_buckets_[bucket_id]->quota_usage); + } + + if (rate_limit_strategy.has_token_bucket()) { + auto token_bucket = quota_buckets_[bucket_id]->token_bucket_limiter.get(); + ASSERT(token_bucket); + + // Try to consume 1 token from the bucket. + if (token_bucket->consume(1, /*allow_partial=*/false)) { + // Request is allowed. + ENVOY_LOG(trace, + "Request with hashed bucket_id {} is allowed by token bucket " + "limiter.", + bucket_id); + ENVOY_LOG(debug, + "Allowing request as token bucket is not empty for bucket_id " + "{}. Initial assignment: {}.", + bucket_id, rate_limit_strategy.token_bucket().ShortDebugString()); + return sendAllowResponse(&quota_buckets_[bucket_id]->quota_usage); + } + // Request is throttled. + ENVOY_LOG(trace, + "Request with hashed bucket_id {} is throttled by token " + "bucket limiter", + bucket_id); + ENVOY_LOG(debug, + "Denying request as token bucket is exhausted for bucket_id {}. " + "Initial assignment: {}.", + bucket_id, rate_limit_strategy.token_bucket().ShortDebugString()); + return sendDenyResponse(&quota_buckets_[bucket_id]->quota_usage); + } + + ENVOY_LOG(error, + "Failing open as selected bucket action for bucket_id {} contains " + "an unsupported rate limit strategy: {}", + bucket_id, rate_limit_strategy.DebugString()); + return sendAllowResponse(&quota_buckets_[bucket_id]->quota_usage); +} + +bool isCachedActionExpired(TimeSource& time_source, const Bucket& bucket) { // First, check if assignment has expired nor not. auto now = std::chrono::duration_cast<std::chrono::nanoseconds>( - time_source_.monotonicTime().time_since_epoch()); + time_source.monotonicTime().time_since_epoch()); auto assignment_time_elapsed = Protobuf::util::TimeUtil::NanosecondsToDuration( (now - std::chrono::duration_cast<std::chrono::nanoseconds>( - quota_buckets_[bucket_id]->first_assignment_time.time_since_epoch())) + bucket.current_assignment_time.time_since_epoch())) .count()); - if (assignment_time_elapsed > quota_buckets_[bucket_id] - ->bucket_action.quota_assignment_action() - .assignment_time_to_live()) { - // If expired, remove the cache entry. - quota_buckets_.erase(bucket_id); - - // Default strategy is fail-Open (i.e., allow_all). - auto ret_status = Envoy::Http::FilterHeadersStatus::Continue; - // Check the expired assignment behavior if configured. - // Note, only fail-open and fail-close are supported, more advanced expired assignment can be - // supported as needed. - if (match_action.bucketSettings().has_expired_assignment_behavior()) { - if (match_action.bucketSettings() - .expired_assignment_behavior() - .fallback_rate_limit() - .blanket_rule() == envoy::type::v3::RateLimitStrategy::DENY_ALL) { - sendDenyResponse(); - ret_status = Envoy::Http::FilterHeadersStatus::StopIteration; - } - } + return (assignment_time_elapsed > + bucket.cached_action->quota_assignment_action().assignment_time_to_live()); +} + +Http::FilterHeadersStatus +RateLimitQuotaFilter::processCachedBucket(size_t bucket_id, + const RateLimitOnMatchAction& match_action) { + auto* cached_bucket = quota_buckets_[bucket_id].get(); + + // If no cached action, use the default action. + if (!cached_bucket->cached_action.has_value()) { + return setUsageAndResponseFromAction(cached_bucket->default_action, bucket_id); + } + + // If expired, remove the expired action & fallback.
+ if (isCachedActionExpired(time_source_, *cached_bucket)) { + Http::FilterHeadersStatus ret_status = processExpiredBucket(bucket_id, match_action); + cached_bucket->cached_action = std::nullopt; return ret_status; } - // Second, get the quota assignment (if exists) from the cached bucket action. - if (quota_buckets_[bucket_id]->bucket_action.has_quota_assignment_action()) { - auto rate_limit_strategy = - quota_buckets_[bucket_id]->bucket_action.quota_assignment_action().rate_limit_strategy(); - - // TODO(tyxia) Currently, blanket rule and token bucket strategies are implemented. - // Change to switch case when `RequestsPerTimeUnit` strategy is implemented. - if (rate_limit_strategy.has_blanket_rule()) { - if (rate_limit_strategy.blanket_rule() == envoy::type::v3::RateLimitStrategy::ALLOW_ALL) { - quota_buckets_[bucket_id]->quota_usage.num_requests_allowed += 1; - ENVOY_LOG(trace, - "Request with hashed bucket_id {} is allowed by cached ALLOW_ALL strategy.", - bucket_id); - } else if (rate_limit_strategy.blanket_rule() == - envoy::type::v3::RateLimitStrategy::DENY_ALL) { - quota_buckets_[bucket_id]->quota_usage.num_requests_denied += 1; - ENVOY_LOG(trace, - "Request with hashed bucket_id {} is throttled by cached DENY_ALL strategy.", - bucket_id); - sendDenyResponse(); - return Envoy::Http::FilterHeadersStatus::StopIteration; - } - } else if (rate_limit_strategy.has_token_bucket()) { - ASSERT(quota_buckets_[bucket_id]->token_bucket_limiter != nullptr); - TokenBucket* limiter = quota_buckets_[bucket_id]->token_bucket_limiter.get(); - // Try to consume 1 token from the bucket. - if (limiter->consume(1, /*allow_partial=*/false)) { - // Request is allowed. - quota_buckets_[bucket_id]->quota_usage.num_requests_allowed += 1; - ENVOY_LOG(trace, "Request with hashed bucket_id {} is allowed by token bucket limiter.", - bucket_id); - } else { - // Request is throttled. - quota_buckets_[bucket_id]->quota_usage.num_requests_denied += 1; - ENVOY_LOG(trace, "Request with hashed bucket_id {} is throttled by token bucket limiter", - bucket_id); - sendDenyResponse(); - return Envoy::Http::FilterHeadersStatus::StopIteration; - } - } + // If not expired, use the cached action. + return setUsageAndResponseFromAction(*cached_bucket->cached_action, bucket_id); +} + +// Note: does not remove the expired entity from the cache. 
+Http::FilterHeadersStatus +RateLimitQuotaFilter::processExpiredBucket(size_t bucket_id, + const RateLimitOnMatchAction& match_action) { + auto* cached_bucket = quota_buckets_[bucket_id].get(); + + if (!match_action.bucketSettings().has_expired_assignment_behavior() || + !match_action.bucketSettings().expired_assignment_behavior().has_fallback_rate_limit()) { + ENVOY_LOG(debug, + "Selecting default action for bucket_id as expiration " + "fallback assignment doesn't have a configured override {}", + match_action.bucketSettings().expired_assignment_behavior().DebugString()); + return setUsageAndResponseFromAction(cached_bucket->default_action, bucket_id); } - + + const RateLimitStrategy& fallback_rate_limit = + match_action.bucketSettings().expired_assignment_behavior().fallback_rate_limit(); + if (fallback_rate_limit.has_blanket_rule() && + fallback_rate_limit.blanket_rule() == RateLimitStrategy::DENY_ALL) { + ENVOY_LOG(debug, + "Expired action falling back to configured DENY_ALL for " + "bucket_id {}", + bucket_id); + return sendDenyResponse(&cached_bucket->quota_usage); + } + + ENVOY_LOG(debug, + "Expired action falling back to ALLOW_ALL for bucket_id {} with " + "fallback action {}", + bucket_id, fallback_rate_limit.DebugString()); + return sendAllowResponse(&cached_bucket->quota_usage); } } // namespace RateLimitQuota diff --git a/source/extensions/filters/http/rate_limit_quota/filter.h b/source/extensions/filters/http/rate_limit_quota/filter.h index e1302214affb..d6259999bf5b 100644 --- a/source/extensions/filters/http/rate_limit_quota/filter.h +++ b/source/extensions/filters/http/rate_limit_quota/filter.h @@ -93,14 +93,34 @@ class RateLimitQuotaFilter : public Http::PassThroughFilter, Http::FilterHeadersStatus sendImmediateReport(const size_t bucket_id, const RateLimitOnMatchAction& match_action); + Http::FilterHeadersStatus setUsageAndResponseFromAction(const BucketAction& action, + size_t bucket_id); + Http::FilterHeadersStatus processCachedBucket(size_t bucket_id, const RateLimitOnMatchAction& match_action); // TODO(tyxia) Build the customized response based on `DenyResponseSettings`. - void sendDenyResponse() { + // Send a deny response and update quota usage if provided. + Http::FilterHeadersStatus sendDenyResponse(QuotaUsage* quota_usage = nullptr) { callbacks_->sendLocalReply(Envoy::Http::Code::TooManyRequests, "", nullptr, absl::nullopt, ""); callbacks_->streamInfo().setResponseFlag(StreamInfo::CoreResponseFlag::RateLimited); + if (quota_usage) + quota_usage->num_requests_denied++; + return Http::FilterHeadersStatus::StopIteration; + } + + // Send an allow response and update quota usage if provided. + Http::FilterHeadersStatus sendAllowResponse(QuotaUsage* quota_usage = nullptr) { + if (quota_usage) + quota_usage->num_requests_allowed++; + return Http::FilterHeadersStatus::Continue; } + // Get the FilterHeadersStatus to return when a selected bucket has an expired + // assignment. Note: this does not actually remove the expired entity from the + // cache.
+ Http::FilterHeadersStatus processExpiredBucket(size_t bucket_id, + const RateLimitOnMatchAction& match_action); + FilterConfigConstSharedPtr config_; Grpc::GrpcServiceConfigWithHashKey config_with_hash_key_; Server::Configuration::FactoryContext& factory_context_; diff --git a/source/extensions/filters/http/rate_limit_quota/quota_bucket_cache.h b/source/extensions/filters/http/rate_limit_quota/quota_bucket_cache.h index e2923b9156d2..1896a1e24983 100644 --- a/source/extensions/filters/http/rate_limit_quota/quota_bucket_cache.h +++ b/source/extensions/filters/http/rate_limit_quota/quota_bucket_cache.h @@ -41,13 +41,17 @@ struct Bucket { // RLQS server. BucketId bucket_id; // Cached action from the response that was received from the RLQS server. - BucketAction bucket_action; + absl::optional cached_action = absl::nullopt; + // Default action defined by the bucket's no_assignment_behavior setting. Used + // when the bucket is waiting for an assigned action from the RLQS server + // (e.g. during initial bucket hits & after stale assignments expire). + BucketAction default_action; // Cache quota usage. QuotaUsage quota_usage; // Rate limiter based on token bucket algorithm. TokenBucketPtr token_bucket_limiter; - // First assignment time. - Envoy::MonotonicTime first_assignment_time = {}; + // Most recent assignment time. + Envoy::MonotonicTime current_assignment_time; }; using BucketsCache = absl::flat_hash_map>; diff --git a/source/extensions/http/header_validators/envoy_default/header_validator.cc b/source/extensions/http/header_validators/envoy_default/header_validator.cc index f0104cd9e0dc..9d1a848849b0 100644 --- a/source/extensions/http/header_validators/envoy_default/header_validator.cc +++ b/source/extensions/http/header_validators/envoy_default/header_validator.cc @@ -282,8 +282,6 @@ HeaderValidator::validateHostHeader(const HeaderString& value) { // // Host = uri-host [ ":" port ] // uri-host = IP-literal / IPv4address / reg-name - // - // TODO(#22859, #23314) - Fully implement IPv6 address validation const auto host = value.getStringView(); if (host.empty()) { return {HeaderValueValidationResult::Action::Reject, UhvResponseCodeDetail::get().InvalidHost}; @@ -351,12 +349,46 @@ HeaderValidator::validateHostHeaderIPv6(absl::string_view host) { // Get the trailing port substring const auto port_string = host.substr(closing_bracket + 1); // Validate the IPv6 address characters - bool is_valid = !address.empty(); - for (auto iter = address.begin(); iter != address.end() && is_valid; ++iter) { - is_valid &= testCharInTable(kHostIPv6AddressCharTable, *iter); + if (address.empty()) { + return HostHeaderValidationResult::reject(UhvResponseCodeDetail::get().InvalidHost); + } + if (address == "::") { + return HostHeaderValidationResult::success(address, port_string); + } + // Split address by (:) and validate: + // 1. there are no more than 8 parts + // 2. each part has only hex digit and is 16-bit + // 3. 
only one double colon is allowed + absl::InlinedVector address_components = absl::StrSplit(address, ':'); + if (address_components.size() > 8) { + return HostHeaderValidationResult::reject(UhvResponseCodeDetail::get().InvalidHost); + } + uint32_t empty_string_count = 0; + for (absl::string_view cur_component : address_components) { + // each part must be 16 bits + if (cur_component.size() > 4) { + return HostHeaderValidationResult::reject(UhvResponseCodeDetail::get().InvalidHost); + } + if (cur_component.empty()) { + empty_string_count++; + continue; + } + // Validate each char is hex digit + for (char c : cur_component) { + if (!testCharInTable(kHostIPv6AddressCharTable, c)) { + return HostHeaderValidationResult::reject(UhvResponseCodeDetail::get().InvalidHost); + } + } + } + // The address should never have more than 2 empty parts, except "::" + if (empty_string_count >= 3) { + return HostHeaderValidationResult::reject(UhvResponseCodeDetail::get().InvalidHost); } - if (!is_valid) { + // Double colon is allowed at the beginning or end + // Otherwise the address shouldn't have two empty parts + if (empty_string_count == 2 && + !(absl::StartsWith(address, "::") || absl::EndsWith(address, "::"))) { return HostHeaderValidationResult::reject(UhvResponseCodeDetail::get().InvalidHost); } diff --git a/test/common/http/conn_manager_impl_test_2.cc b/test/common/http/conn_manager_impl_test_2.cc index f7a2b55d1246..fbc37cddee1b 100644 --- a/test/common/http/conn_manager_impl_test_2.cc +++ b/test/common/http/conn_manager_impl_test_2.cc @@ -3975,9 +3975,10 @@ TEST_F(HttpConnectionManagerImplTest, HeaderValidatorRejectTrailersBeforeRespons TEST_F(HttpConnectionManagerImplTest, HeaderValidatorFailTrailersTransformationBeforeResponse) { codec_->protocol_ = Protocol::Http11; setup(); - expectUhvTrailerCheck(HeaderValidator::ValidationResult( - HeaderValidator::ValidationResult::Action::Reject, "bad_trailer_map"), - HeaderValidator::TransformationResult::success()); + expectUhvTrailerCheck( + HeaderValidator::ValidationResult::success(), + HeaderValidator::TransformationResult(HeaderValidator::TransformationResult::Action::Reject, + "bad_trailer_map")); EXPECT_CALL(*codec_, dispatch(_)).WillOnce(Invoke([&](Buffer::Instance& data) -> Http::Status { decoder_ = &conn_manager_->newStream(response_encoder_); diff --git a/test/common/json/json_sanitizer_test.cc b/test/common/json/json_sanitizer_test.cc index 23222b72930a..14434eefe60f 100644 --- a/test/common/json/json_sanitizer_test.cc +++ b/test/common/json/json_sanitizer_test.cc @@ -32,11 +32,9 @@ class JsonSanitizerTest : public testing::Test { } absl::string_view sanitizeAndCheckAgainstProtobufJson(absl::string_view str) { - EXPECT_TRUE(TestUtil::isProtoSerializableUtf8(str)) << "str=" << str; absl::string_view sanitized = sanitize(str); - if (TestUtil::isProtoSerializableUtf8(str)) { - EXPECT_UTF8_EQ(protoSanitize(str), sanitized, str); - } + EXPECT_TRUE(TestUtil::isProtoSerializableUtf8(str)) << "str=" << str; + EXPECT_UTF8_EQ(protoSanitize(str), sanitized, str); return sanitized; } @@ -53,9 +51,11 @@ class JsonSanitizerTest : public testing::Test { return corrupt_second_byte; } - absl::string_view sanitizeInvalid(absl::string_view str) { - EXPECT_EQ(Utf8::UnicodeSizePair(0, 0), decode(str)); - return sanitize(str); + absl::string_view sanitizeInvalidAndCheckEscapes(absl::string_view str) { + EXPECT_FALSE(TestUtil::isProtoSerializableUtf8(str)); + absl::string_view sanitized = sanitize(str); + EXPECT_JSON_STREQ(sanitized, str, str); + return sanitized; } 
std::pair decode(absl::string_view str) { @@ -165,6 +165,9 @@ TEST_F(JsonSanitizerTest, AllThreeByteUtf8) { EXPECT_EQ(3, consumed); EXPECT_UTF8_EQ(protoSanitize(utf8), sanitized, absl::StrFormat("0x%x(%d,%d,%d)", unicode, byte1, byte2, byte3)); + } else { + EXPECT_JSON_STREQ(sanitized, utf8, + absl::StrFormat("non-utf8(%d,%d,%d)", byte1, byte2, byte3)); } } } @@ -200,6 +203,9 @@ TEST_F(JsonSanitizerTest, AllFourByteUtf8) { EXPECT_UTF8_EQ( protoSanitize(utf8), sanitized, absl::StrFormat("0x%x(%d,%d,%d,%d)", unicode, byte1, byte2, byte3, byte4)); + } else { + EXPECT_JSON_STREQ(sanitized, utf8, + absl::StrFormat("non-utf8(%d,%d,%d,%d)", byte1, byte2, byte3, byte4)); } } } @@ -256,33 +262,44 @@ TEST_F(JsonSanitizerTest, High8Bit) { // exception, which Json::sanitizer catches and just escapes the characters so // we don't lose information in the encoding. All bytes with the high-bit set // are invalid utf-8 in isolation, so we fall through to escaping these. - EXPECT_EQ("\\200\\201\\202\\203\\204\\205\\206\\207\\210\\211\\212\\213\\214\\215\\216\\217" - "\\220\\221\\222\\223\\224\\225\\226\\227\\230\\231\\232\\233\\234\\235\\236\\237" - "\\240\\241\\242\\243\\244\\245\\246\\247\\250\\251\\252\\253\\254\\255\\256\\257" - "\\260\\261\\262\\263\\264\\265\\266\\267\\270\\271\\272\\273\\274\\275\\276\\277" - "\\300\\301\\302\\303\\304\\305\\306\\307\\310\\311\\312\\313\\314\\315\\316\\317" - "\\320\\321\\322\\323\\324\\325\\326\\327\\330\\331\\332\\333\\334\\335\\336\\337" - "\\340\\341\\342\\343\\344\\345\\346\\347\\350\\351\\352\\353\\354\\355\\356\\357" - "\\360\\361\\362\\363\\364\\365\\366\\367\\370\\371\\372\\373\\374\\375\\376\\377", + EXPECT_EQ("\\u0080\\u0081\\u0082\\u0083\\u0084\\u0085\\u0086\\u0087\\u0088\\u0089\\u008a" + "\\u008b\\u008c\\u008d\\u008e\\u008f\\u0090\\u0091\\u0092\\u0093\\u0094\\u0095" + "\\u0096\\u0097\\u0098\\u0099\\u009a\\u009b\\u009c\\u009d\\u009e\\u009f\\u00a0" + "\\u00a1\\u00a2\\u00a3\\u00a4\\u00a5\\u00a6\\u00a7\\u00a8\\u00a9\\u00aa\\u00ab" + "\\u00ac\\u00ad\\u00ae\\u00af\\u00b0\\u00b1\\u00b2\\u00b3\\u00b4\\u00b5\\u00b6" + "\\u00b7\\u00b8\\u00b9\\u00ba\\u00bb\\u00bc\\u00bd\\u00be\\u00bf\\u00c0\\u00c1" + "\\u00c2\\u00c3\\u00c4\\u00c5\\u00c6\\u00c7\\u00c8\\u00c9\\u00ca\\u00cb\\u00cc" + "\\u00cd\\u00ce\\u00cf\\u00d0\\u00d1\\u00d2\\u00d3\\u00d4\\u00d5\\u00d6\\u00d7" + "\\u00d8\\u00d9\\u00da\\u00db\\u00dc\\u00dd\\u00de\\u00df\\u00e0\\u00e1\\u00e2" + "\\u00e3\\u00e4\\u00e5\\u00e6\\u00e7\\u00e8\\u00e9\\u00ea\\u00eb\\u00ec\\u00ed" + "\\u00ee\\u00ef\\u00f0\\u00f1\\u00f2\\u00f3\\u00f4\\u00f5\\u00f6\\u00f7\\u00f8" + "\\u00f9\\u00fa\\u00fb\\u00fc\\u00fd\\u00fe\\u00ff", sanitize(x80_ff)); } TEST_F(JsonSanitizerTest, InvalidUtf8) { // 2 byte - EXPECT_EQ("\\316", sanitizeInvalid(truncate(LambdaUtf8))); - EXPECT_EQ("\\316\\373", sanitizeInvalid(corruptByte2(LambdaUtf8))); + EXPECT_EQ("\\u00ce", sanitizeInvalidAndCheckEscapes(truncate(LambdaUtf8))); + EXPECT_EQ("\\u00ce\\u00fb", sanitizeInvalidAndCheckEscapes(corruptByte2(LambdaUtf8))); // 3 byte - EXPECT_EQ("\\341\\275", sanitizeInvalid(truncate(OmicronUtf8))); - EXPECT_EQ("\\341\\375\\271", sanitizeInvalid(corruptByte2(OmicronUtf8))); + EXPECT_EQ("\\u00e1\\u00bd", sanitizeInvalidAndCheckEscapes(truncate(OmicronUtf8))); + EXPECT_EQ("\\u00e1\\u00fd\\u00b9", sanitizeInvalidAndCheckEscapes(corruptByte2(OmicronUtf8))); // 4 byte - EXPECT_EQ("\\360\\235\\204", sanitizeInvalid(truncate(TrebleClefUtf8))); - EXPECT_EQ("\\360\\375\\204\\236", sanitizeInvalid(corruptByte2(TrebleClefUtf8))); + EXPECT_EQ("\\u00f0\\u009d\\u0084", 
sanitizeInvalidAndCheckEscapes(truncate(TrebleClefUtf8))); + EXPECT_EQ("\\u00f0\\u00fd\\u0084\\u009e", + sanitizeInvalidAndCheckEscapes(corruptByte2(TrebleClefUtf8))); // Invalid input embedded in normal text. - EXPECT_EQ("Hello, \\360\\235\\204, World!", - sanitize(absl::StrCat("Hello, ", truncate(TrebleClefUtf8), ", World!"))); + EXPECT_EQ("Hello, \\u00f0\\u009d\\u0084, World!", + sanitizeInvalidAndCheckEscapes( + absl::StrCat("Hello, ", truncate(TrebleClefUtf8), ", World!"))); + + // Invalid input with leading slash. + EXPECT_EQ("\\u005cHello, \\u00f0\\u009d\\u0084, World!", + sanitizeInvalidAndCheckEscapes( + absl::StrCat("\\Hello, ", truncate(TrebleClefUtf8), ", World!"))); // Replicate a few other cases that were discovered during initial fuzzing, // to ensure we see these as invalid utf8 and avoid them in comparisons. diff --git a/test/common/json/json_sanitizer_test_util.cc b/test/common/json/json_sanitizer_test_util.cc index e19c154160c8..b2096cdd35e9 100644 --- a/test/common/json/json_sanitizer_test_util.cc +++ b/test/common/json/json_sanitizer_test_util.cc @@ -105,6 +105,13 @@ bool parseUnicode(absl::string_view str, uint32_t& hex_value) { return false; } +// Removes 'prefix_size' characters from the beginning of 'str'. +void removePrefix(absl::string_view& str, uint32_t prefix_size) { + ASSERT(prefix_size > 0); + ASSERT(prefix_size <= str.size()); + str = str.substr(prefix_size, str.size() - prefix_size); +} + // Compares a string that's possibly an escaped Unicode, e.g. \u1234, to // one that is utf8-encoded. bool compareUnicodeEscapeAgainstUtf8(absl::string_view& escaped, absl::string_view& utf8) { @@ -113,8 +120,8 @@ bool compareUnicodeEscapeAgainstUtf8(absl::string_view& escaped, absl::string_vi // If one side of the comparison is a Unicode escape, auto [unicode, consumed] = Utf8::decode(utf8); if (consumed != 0 && unicode == escaped_unicode) { - utf8 = utf8.substr(consumed, utf8.size() - consumed); - escaped = escaped.substr(UnicodeEscapeLength, escaped.size() - UnicodeEscapeLength); + removePrefix(utf8, consumed); + removePrefix(escaped, UnicodeEscapeLength); return true; } } @@ -137,8 +144,8 @@ bool utf8Equivalent(absl::string_view a, absl::string_view b, std::string& diffs diffs = absl::StrFormat("`%s' and `%s` have different lengths", a, b); return false; } else if (a[0] == b[0]) { - a = a.substr(1, a.size() - 1); - b = b.substr(1, b.size() - 1); + removePrefix(a, 1); + removePrefix(b, 1); } else if (!compareUnicodeEscapeAgainstUtf8(a, b) && !compareUnicodeEscapeAgainstUtf8(b, a)) { diffs = absl::StrFormat("%s != %s, [%d]%c(0x02%x, \\%03o) != [%d] %c(0x02%x, \\%03o)", all_a, all_b, a.data() - all_a.data(), a[0], a[0], a[0], @@ -148,6 +155,25 @@ bool utf8Equivalent(absl::string_view a, absl::string_view b, std::string& diffs } } +bool decodeEscapedJson(absl::string_view sanitized, std::string& decoded, std::string& errmsg) { + while (!sanitized.empty()) { + uint32_t hex; + if (sanitized.size() >= UnicodeEscapeLength && + parseUnicode(sanitized.substr(0, UnicodeEscapeLength), hex)) { + if (hex >= 256) { + errmsg = absl::StrFormat("Unexpected encoding >= 256: %u", hex); + return false; + } + decoded.append(1, hex); + removePrefix(sanitized, UnicodeEscapeLength); + } else { + decoded.append(1, sanitized[0]); + removePrefix(sanitized, 1); + } + } + return true; +} + } // namespace TestUtil } // namespace Json } // namespace Envoy diff --git a/test/common/json/json_sanitizer_test_util.h b/test/common/json/json_sanitizer_test_util.h index 3a2b950a8b75..713315d4d83f 100644 --- 
a/test/common/json/json_sanitizer_test_util.h +++ b/test/common/json/json_sanitizer_test_util.h @@ -23,6 +23,35 @@ bool utf8Equivalent(absl::string_view a, absl::string_view b, std::string& errms EXPECT_TRUE(TestUtil::utf8Equivalent(a, b, errmsg)) << context << "\n" << errmsg; \ } +/** + * Reverses the json escaping algorithm in sanitize(), which is used when the + * utf-8 serialization fails. Note that `sanitized` may be in any encoding, e.g. + * ascii, binary, utf-8, gb2312. `sanitized` is valid JSON with `\u` escapes for + * any characters that are not allowed in JSON strings, per + * https://www.json.org/json-en.html. We want to make sure our json encoding for + * `original` would be decoded into the same bytes. + * + * Writes the decoded version into `decoded`. + * + * Note that the `sanitized` argument does not accept arbitrary json string + * encodings, such as `\r`, as those are not currently generated by the + * exception handler in Json::sanitize(); only numeric escapes are generated + * so that's all this test helper accepts. + * + * @param sanitized the output of Json::sanitize(), when it is not utf-8 compliant. + * @param decoded the decoded form, undoing any escapes added by Json::sanitize(). + * @param errmsg details any error encountered during decoding. + * @return true if the decoding was successful. + */ +bool decodeEscapedJson(absl::string_view sanitized, std::string& decoded, std::string& errmsg); +#define EXPECT_JSON_STREQ(sanitized, original, context) \ + { \ + std::string decoded, errmsg; \ + EXPECT_TRUE(TestUtil::decodeEscapedJson(sanitized, decoded, errmsg)) \ + << context << ": " << errmsg; \ + EXPECT_EQ(decoded, original) << context; \ + } + } // namespace TestUtil } // namespace Json } // namespace Envoy diff --git a/test/common/quic/envoy_quic_client_session_test.cc b/test/common/quic/envoy_quic_client_session_test.cc index 0b03d78a5e9d..592b7782dd27 100644 --- a/test/common/quic/envoy_quic_client_session_test.cc +++ b/test/common/quic/envoy_quic_client_session_test.cc @@ -223,7 +223,6 @@ TEST_P(EnvoyQuicClientSessionTest, NewStream) { EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks); quic::QuicHeaderList headers; - headers.OnHeaderBlockStart(); headers.OnHeader(":status", "200"); headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0); // Response headers should be propagated to decoder. @@ -244,7 +243,6 @@ TEST_P(EnvoyQuicClientSessionTest, PacketLimits) { EnvoyQuicClientStream& stream = sendGetRequest(response_decoder, stream_callbacks); quic::QuicHeaderList headers; - headers.OnHeaderBlockStart(); headers.OnHeader(":status", "200"); headers.OnHeaderBlockEnd(/*uncompressed_header_bytes=*/0, /*compressed_header_bytes=*/0); // Response headers should be propagated to decoder. diff --git a/test/common/quic/envoy_quic_server_session_test.cc b/test/common/quic/envoy_quic_server_session_test.cc index b670a20d8702..b138c059bc5b 100644 --- a/test/common/quic/envoy_quic_server_session_test.cc +++ b/test/common/quic/envoy_quic_server_session_test.cc @@ -309,7 +309,6 @@ TEST_F(EnvoyQuicServerSessionTest, NewStream) { // Receive a GET request on created stream. quic::QuicHeaderList headers; - headers.OnHeaderBlockStart(); std::string host("www.abc.com"); headers.OnHeader(":authority", host); headers.OnHeader(":method", "GET"); @@ -575,7 +574,6 @@ TEST_F(EnvoyQuicServerSessionTest, WriteUpdatesDelayCloseTimer) { // Receive a GET request on created stream.
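Aside on the new decodeEscapedJson() helper documented above: a minimal, standalone sketch of the round-trip it checks, assuming only the well-formed \u00XX byte escapes that Json::sanitize() emits on its non-UTF-8 fallback path. It is not Envoy's implementation; decodeNumericEscapes() is an illustrative name.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>

// Undo numeric \u00XX escapes (byte-sized only) back into raw bytes; all other
// characters pass through unchanged. Escapes >= 0x100 are not produced by the
// sanitizer fallback path and are simply left untouched here.
std::string decodeNumericEscapes(const std::string& sanitized) {
  std::string decoded;
  for (std::size_t i = 0; i < sanitized.size();) {
    if (i + 6 <= sanitized.size() && sanitized[i] == '\\' && sanitized[i + 1] == 'u') {
      const std::uint32_t value = std::stoul(sanitized.substr(i + 2, 4), nullptr, 16);
      if (value < 256) {
        decoded.push_back(static_cast<char>(value));
        i += 6;
        continue;
      }
    }
    decoded.push_back(sanitized[i++]);
  }
  return decoded;
}

int main() {
  // A truncated UTF-8 treble clef (bytes 0xf0 0x9d 0x84) as escaped by the sanitizer.
  const std::string sanitized = "Hello, \\u00f0\\u009d\\u0084, World!";
  const std::string expected = "Hello, \xf0\x9d\x84, World!";
  std::cout << (decodeNumericEscapes(sanitized) == expected ? "match" : "mismatch") << "\n";
  return 0;
}

The EXPECT_JSON_STREQ macro above performs the same decode-then-compare, with the error details routed through errmsg.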
quic::QuicHeaderList request_headers; - request_headers.OnHeaderBlockStart(); std::string host("www.abc.com"); request_headers.OnHeader(":authority", host); request_headers.OnHeader(":method", "GET"); @@ -678,7 +676,6 @@ TEST_F(EnvoyQuicServerSessionTest, FlushCloseNoTimeout) { // Receive a GET request on created stream. quic::QuicHeaderList request_headers; - request_headers.OnHeaderBlockStart(); std::string host("www.abc.com"); request_headers.OnHeader(":authority", host); request_headers.OnHeader(":method", "GET"); @@ -923,7 +920,6 @@ TEST_F(EnvoyQuicServerSessionTest, SendBufferWatermark) { // Receive a GET request on created stream. quic::QuicHeaderList request_headers; - request_headers.OnHeaderBlockStart(); std::string host("www.abc.com"); request_headers.OnHeader(":authority", host); request_headers.OnHeader(":method", "GET"); diff --git a/test/common/quic/envoy_quic_server_stream_test.cc b/test/common/quic/envoy_quic_server_stream_test.cc index a1958f3afa2d..68d1346efbe8 100644 --- a/test/common/quic/envoy_quic_server_stream_test.cc +++ b/test/common/quic/envoy_quic_server_stream_test.cc @@ -912,7 +912,6 @@ TEST_F(EnvoyQuicServerStreamTest, RegularHeaderBeforePseudoHeader) { TEST_F(EnvoyQuicServerStreamTest, DuplicatedPathHeader) { quic::QuicHeaderList header_list; - header_list.OnHeaderBlockStart(); header_list.OnHeader(":authority", "www.google.com:4433"); header_list.OnHeader(":method", "GET"); header_list.OnHeader(":scheme", "https"); diff --git a/test/common/quic/envoy_quic_utils_test.cc b/test/common/quic/envoy_quic_utils_test.cc index 03d55b79b5c5..def41145ad1d 100644 --- a/test/common/quic/envoy_quic_utils_test.cc +++ b/test/common/quic/envoy_quic_utils_test.cc @@ -77,7 +77,6 @@ TEST(EnvoyQuicUtilsTest, HeadersConversion) { EXPECT_EQ(rst, quic::QUIC_REFUSED_STREAM); // With no error it will be untouched. quic::QuicHeaderList quic_headers; - quic_headers.OnHeaderBlockStart(); quic_headers.OnHeader(":authority", "www.google.com"); quic_headers.OnHeader(":path", "/index.hml"); quic_headers.OnHeader(":scheme", "https"); @@ -101,7 +100,6 @@ TEST(EnvoyQuicUtilsTest, HeadersConversion) { EXPECT_EQ(rst, quic::QUIC_REFUSED_STREAM); // With no error it will be untouched. 
quic::QuicHeaderList quic_headers2; - quic_headers2.OnHeaderBlockStart(); quic_headers2.OnHeader(":authority", "www.google.com"); quic_headers2.OnHeader(":path", "/index.hml"); quic_headers2.OnHeader(":scheme", "https"); @@ -226,7 +224,6 @@ TEST(EnvoyQuicUtilsTest, HeaderMapMaxSizeLimit) { absl::string_view details; quic::QuicRstStreamErrorCode rst = quic::QUIC_REFUSED_STREAM; quic::QuicHeaderList quic_headers; - quic_headers.OnHeaderBlockStart(); quic_headers.OnHeader(":authority", "www.google.com"); quic_headers.OnHeader(":path", "/index.hml"); quic_headers.OnHeader(":scheme", "https"); diff --git a/test/common/quic/platform/quic_platform_test.cc b/test/common/quic/platform/quic_platform_test.cc index 7209f33da4d5..755168b5eed1 100644 --- a/test/common/quic/platform/quic_platform_test.cc +++ b/test/common/quic/platform/quic_platform_test.cc @@ -363,7 +363,7 @@ TEST_F(QuicPlatformTest, QuicMutex) { mu.AssertReaderHeld(); mu.WriterUnlock(); { - QuicReaderMutexLock rmu(&mu); + quiche::QuicheReaderMutexLock rmu(&mu); mu.AssertReaderHeld(); } mu.WriterLock(); diff --git a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc index 85e4e72a53aa..a39c18d30433 100644 --- a/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc +++ b/test/extensions/common/dynamic_forward_proxy/dns_cache_impl_test.cc @@ -328,6 +328,45 @@ TEST_F(DnsCacheImplTest, ForceRefresh) { 1 /* added */, 0 /* removed */, 1 /* num hosts */); } +TEST_F(DnsCacheImplTest, Stop) { + initialize(); + InSequence s; + + // No hosts so should not do anything other than reset the resolver. + EXPECT_CALL(*resolver_, resetNetworking()); + dns_cache_->stop(); + checkStats(0 /* attempt */, 0 /* success */, 0 /* failure */, 0 /* address changed */, + 0 /* added */, 0 /* removed */, 0 /* num hosts */); + + MockLoadDnsCacheEntryCallbacks callbacks; + Network::DnsResolver::ResolveCb resolve_cb; + Event::MockTimer* resolve_timer = + new Event::MockTimer(&context_.server_factory_context_.dispatcher_); + Event::MockTimer* timeout_timer = + new Event::MockTimer(&context_.server_factory_context_.dispatcher_); + EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(5000), nullptr)); + EXPECT_CALL(*resolver_, resolve("foo.com", _, _)) + .WillOnce(DoAll(SaveArg<2>(&resolve_cb), Return(&resolver_->active_query_))); + auto result = dns_cache_->loadDnsCacheEntry("foo.com", 80, false, callbacks); + EXPECT_EQ(DnsCache::LoadDnsCacheEntryStatus::Loading, result.status_); + EXPECT_NE(result.handle_, nullptr); + EXPECT_EQ(absl::nullopt, result.host_info_); + + checkStats(1 /* attempt */, 0 /* success */, 0 /* failure */, 0 /* address changed */, + 1 /* added */, 0 /* removed */, 1 /* num hosts */); + + // Query in progress so should reset and then cancel. + EXPECT_CALL(*resolver_, resetNetworking()); + EXPECT_CALL(resolver_->active_query_, + cancel(Network::ActiveDnsQuery::CancelReason::QueryAbandoned)); + EXPECT_CALL(*timeout_timer, disableTimer()); + EXPECT_CALL(*timeout_timer, enabled()).Times(AtLeast(0)); + EXPECT_CALL(*resolve_timer, disableTimer()); + dns_cache_->stop(); + checkStats(1 /* attempt */, 0 /* success */, 0 /* failure */, 0 /* address changed */, + 1 /* added */, 0 /* removed */, 1 /* num hosts */); +} + // Ipv4 address. 
TEST_F(DnsCacheImplTest, Ipv4Address) { initialize(); diff --git a/test/extensions/common/dynamic_forward_proxy/mocks.h b/test/extensions/common/dynamic_forward_proxy/mocks.h index b0c895bedbd2..03dc81476d29 100644 --- a/test/extensions/common/dynamic_forward_proxy/mocks.h +++ b/test/extensions/common/dynamic_forward_proxy/mocks.h @@ -65,6 +65,7 @@ class MockDnsCache : public DnsCache { MOCK_METHOD(Upstream::ResourceAutoIncDec*, canCreateDnsRequest_, ()); MOCK_METHOD(void, forceRefreshHosts, ()); MOCK_METHOD(void, setIpVersionToRemove, (absl::optional)); + MOCK_METHOD(void, stop, ()); }; class MockLoadDnsCacheEntryHandle : public DnsCache::LoadDnsCacheEntryHandle { diff --git a/test/extensions/dynamic_modules/BUILD b/test/extensions/dynamic_modules/BUILD index 487553679f41..f136dd11269e 100644 --- a/test/extensions/dynamic_modules/BUILD +++ b/test/extensions/dynamic_modules/BUILD @@ -16,6 +16,10 @@ envoy_cc_test( "//test/extensions/dynamic_modules/test_data/c:no_op", "//test/extensions/dynamic_modules/test_data/c:no_program_init", "//test/extensions/dynamic_modules/test_data/c:program_init_fail", + "//test/extensions/dynamic_modules/test_data/rust:abi_version_mismatch", + "//test/extensions/dynamic_modules/test_data/rust:no_op", + "//test/extensions/dynamic_modules/test_data/rust:no_program_init", + "//test/extensions/dynamic_modules/test_data/rust:program_init_fail", ], deps = [ "//source/extensions/dynamic_modules:dynamic_modules_lib", diff --git a/test/extensions/dynamic_modules/abi_version_test.cc b/test/extensions/dynamic_modules/abi_version_test.cc index 3f7b4eb74834..958295281dde 100644 --- a/test/extensions/dynamic_modules/abi_version_test.cc +++ b/test/extensions/dynamic_modules/abi_version_test.cc @@ -15,7 +15,7 @@ namespace Envoy { namespace Extensions { namespace DynamicModules { -// This test ensure that abi_version.h contains the correct sha256 hash of ABI header files. +// This test ensures that abi_version.h contains the correct sha256 hash of ABI header files. TEST(DynamicModules, ABIVersionCheck) { const auto abi_header_path = TestEnvironment::substitute("{{ test_rundir }}/source/extensions/dynamic_modules/abi.h"); diff --git a/test/extensions/dynamic_modules/dynamic_modules_test.cc b/test/extensions/dynamic_modules/dynamic_modules_test.cc index 1c02d4e9d4fb..7368af924aa1 100644 --- a/test/extensions/dynamic_modules/dynamic_modules_test.cc +++ b/test/extensions/dynamic_modules/dynamic_modules_test.cc @@ -37,7 +37,7 @@ class DynamicModuleTestLanguages : public ::testing::TestWithParam }; INSTANTIATE_TEST_SUITE_P(LanguageTests, DynamicModuleTestLanguages, - testing::Values("c"), // TODO: Other languages. + testing::Values("c", "rust"), // TODO: add Go. 
DynamicModuleTestLanguages::languageParamToTestName); TEST_P(DynamicModuleTestLanguages, DoNotClose) { @@ -48,6 +48,7 @@ TEST_P(DynamicModuleTestLanguages, DoNotClose) { EXPECT_TRUE(module.ok()); const auto getSomeVariable = module->get()->getFunctionPointer("getSomeVariable"); + EXPECT_NE(getSomeVariable, nullptr); EXPECT_EQ(getSomeVariable(), 1); EXPECT_EQ(getSomeVariable(), 2); EXPECT_EQ(getSomeVariable(), 3); diff --git a/test/extensions/dynamic_modules/test_data/rust/.gitignore b/test/extensions/dynamic_modules/test_data/rust/.gitignore new file mode 100644 index 000000000000..96ef6c0b944e --- /dev/null +++ b/test/extensions/dynamic_modules/test_data/rust/.gitignore @@ -0,0 +1,2 @@ +/target +Cargo.lock diff --git a/test/extensions/dynamic_modules/test_data/rust/BUILD b/test/extensions/dynamic_modules/test_data/rust/BUILD new file mode 100644 index 000000000000..b9bf03ebf106 --- /dev/null +++ b/test/extensions/dynamic_modules/test_data/rust/BUILD @@ -0,0 +1,32 @@ +load("@rules_rust//rust:defs.bzl", "rust_shared_library") + +licenses(["notice"]) # Apache 2 + +package(default_visibility = ["//test/extensions/dynamic_modules:__pkg__"]) + +rust_shared_library( + name = "no_op", + srcs = ["no_op.rs"], + edition = "2021", + deps = [ + "//source/extensions/dynamic_modules/sdk/rust:envoy_proxy_dynamic_modules_rust_sdk", + ], +) + +rust_shared_library( + name = "no_program_init", + srcs = ["no_program_init.rs"], + edition = "2021", +) + +rust_shared_library( + name = "program_init_fail", + srcs = ["program_init_fail.rs"], + edition = "2021", +) + +rust_shared_library( + name = "abi_version_mismatch", + srcs = ["abi_version_mismatch.rs"], + edition = "2021", +) diff --git a/test/extensions/dynamic_modules/test_data/rust/Cargo.toml b/test/extensions/dynamic_modules/test_data/rust/Cargo.toml new file mode 100644 index 000000000000..592f3f84b52f --- /dev/null +++ b/test/extensions/dynamic_modules/test_data/rust/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "test-programs" +version = "0.1.0" +edition = "2021" +license = "Apache-2.0" +repository = "https://github.com/envoyproxy/envoy" + +[dependencies] +envoy-proxy-dynamic-modules-rust-sdk = { path = "../../../../../source/extensions/dynamic_modules/sdk/rust" } + +[[example]] +name = "no_op" +path = "no_op.rs" +crate-type = ["cdylib"] + +[[example]] +name = "no_program_init" +path = "no_program_init.rs" +crate-type = ["cdylib"] + +[[example]] +name = "program_init_fail" +path = "program_init_fail.rs" +crate-type = ["cdylib"] + +[[example]] +name = "abi_version_mismatch" +path = "abi_version_mismatch.rs" +crate-type = ["cdylib"] diff --git a/test/extensions/dynamic_modules/test_data/rust/abi_version_mismatch.rs b/test/extensions/dynamic_modules/test_data/rust/abi_version_mismatch.rs new file mode 100644 index 000000000000..8afbd0d965d8 --- /dev/null +++ b/test/extensions/dynamic_modules/test_data/rust/abi_version_mismatch.rs @@ -0,0 +1,4 @@ +#[no_mangle] +pub extern "C" fn envoy_dynamic_module_on_program_init() -> *const ::std::os::raw::c_char { + b"invalid-version-hash\0".as_ptr() as *const ::std::os::raw::c_char +} diff --git a/test/extensions/dynamic_modules/test_data/rust/no_op.rs b/test/extensions/dynamic_modules/test_data/rust/no_op.rs new file mode 100644 index 000000000000..309744dd86da --- /dev/null +++ b/test/extensions/dynamic_modules/test_data/rust/no_op.rs @@ -0,0 +1,15 @@ +use envoy_proxy_dynamic_modules_rust_sdk::declare_program_init; +use std::sync::atomic::{AtomicI32, Ordering}; + +declare_program_init!(init); + +fn init() -> bool { 
+ true +} + +static SOME_VARIABLE: AtomicI32 = AtomicI32::new(1); + +#[no_mangle] +pub extern "C" fn getSomeVariable() -> i32 { + SOME_VARIABLE.fetch_add(1, Ordering::SeqCst) +} diff --git a/test/extensions/dynamic_modules/test_data/rust/no_program_init.rs b/test/extensions/dynamic_modules/test_data/rust/no_program_init.rs new file mode 100644 index 000000000000..db872215ab1d --- /dev/null +++ b/test/extensions/dynamic_modules/test_data/rust/no_program_init.rs @@ -0,0 +1,3 @@ +pub extern "C" fn foo() -> i32 { + 0 +} diff --git a/test/extensions/dynamic_modules/test_data/rust/program_init_fail.rs b/test/extensions/dynamic_modules/test_data/rust/program_init_fail.rs new file mode 100644 index 000000000000..e9d7aed01306 --- /dev/null +++ b/test/extensions/dynamic_modules/test_data/rust/program_init_fail.rs @@ -0,0 +1,4 @@ +#[no_mangle] +pub extern "C" fn envoy_dynamic_module_on_program_init() -> *const ::std::os::raw::c_char { + ::std::ptr::null() +} diff --git a/test/extensions/filters/http/rate_limit_quota/integration_test.cc b/test/extensions/filters/http/rate_limit_quota/integration_test.cc index ec4e45a89113..dc37a9720bc4 100644 --- a/test/extensions/filters/http/rate_limit_quota/integration_test.cc +++ b/test/extensions/filters/http/rate_limit_quota/integration_test.cc @@ -28,6 +28,7 @@ enum class BlanketRule { struct ConfigOption { bool valid_rlqs_server = true; BlanketRule no_assignment_blanket_rule = BlanketRule::NOT_SPECIFIED; + bool unsupported_no_assignment_strategy = false; BlanketRule expired_assignment_blanket_rule = BlanketRule::NOT_SPECIFIED; }; @@ -110,6 +111,12 @@ class RateLimitQuotaIntegrationTest ->mutable_fallback_rate_limit() ->set_blanket_rule(envoy::type::v3::RateLimitStrategy::DENY_ALL); } + } else if (config_option.unsupported_no_assignment_strategy) { + auto* requests_per_time_unit = mutable_bucket_settings.mutable_no_assignment_behavior() + ->mutable_fallback_rate_limit() + ->mutable_requests_per_time_unit(); + requests_per_time_unit->set_requests_per_time_unit(100); + requests_per_time_unit->set_time_unit(envoy::type::v3::RateLimitUnit::SECOND); } if (config_option.expired_assignment_blanket_rule != BlanketRule::NOT_SPECIFIED) { @@ -122,6 +129,9 @@ class RateLimitQuotaIntegrationTest ->mutable_fallback_rate_limit() ->set_blanket_rule(envoy::type::v3::RateLimitStrategy::DENY_ALL); } + mutable_bucket_settings.mutable_expired_assignment_behavior() + ->mutable_expired_assignment_behavior_timeout() + ->set_seconds(15); } mutable_config->PackFrom(mutable_bucket_settings); @@ -169,12 +179,35 @@ class RateLimitQuotaIntegrationTest void TearDown() override { cleanUp(); } + bool expectAllowedRequest() { + if (!fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)) + return false; + if (!fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)) + return false; + if (!upstream_request_->waitForEndStream(*dispatcher_)) + return false; + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_->encodeData(100, true); + + // Verify the response to downstream. + if (!response_->waitForEndStream()) + return false; + EXPECT_TRUE(response_->complete()); + EXPECT_EQ(response_->headers().getStatusValue(), "200"); + + // Clean up the upstream and downstream resource but keep the gRPC + // connection to RLQS server open. 
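For orientation on the Rust fixtures above (no_op.rs, no_program_init.rs, program_init_fail.rs, abi_version_mismatch.rs): they exercise the same load-time handshake as the existing C fixtures. Below is a standalone POSIX sketch, using plain dlopen/dlsym, of the kind of checks being exercised; it is not Envoy's loader, and kExpectedAbiVersion is a placeholder rather than the real hash from abi_version.h.

#include <dlfcn.h>

#include <cstring>
#include <iostream>

// Placeholder only: the real host embeds a sha256 of the ABI headers (see abi_version_test.cc).
constexpr const char* kExpectedAbiVersion = "placeholder-abi-hash";

using ProgramInitFn = const char* (*)();
using GetSomeVariableFn = int (*)();

bool loadAndCheck(const char* path) {
  void* handle = ::dlopen(path, RTLD_NOW | RTLD_LOCAL);
  if (handle == nullptr) {
    std::cerr << "dlopen failed: " << ::dlerror() << "\n";
    return false;
  }
  // no_program_init.rs does not export the init symbol at all: reject.
  auto init =
      reinterpret_cast<ProgramInitFn>(::dlsym(handle, "envoy_dynamic_module_on_program_init"));
  if (init == nullptr) {
    ::dlclose(handle);
    return false;
  }
  const char* version = init();
  // program_init_fail.rs returns null; abi_version_mismatch.rs returns a wrong hash.
  if (version == nullptr || std::strcmp(version, kExpectedAbiVersion) != 0) {
    ::dlclose(handle);
    return false;
  }
  // no_op.rs additionally exports an atomic counter; the DoNotClose test calls it three times.
  auto counter = reinterpret_cast<GetSomeVariableFn>(::dlsym(handle, "getSomeVariable"));
  if (counter != nullptr) {
    for (int i = 0; i < 3; ++i) {
      std::cout << counter() << "\n"; // prints 1, 2, 3 for no_op.rs
    }
  }
  // Handle intentionally not closed, mirroring the do-not-close load mode used by the test.
  return true;
}

int main(int argc, char** argv) { return (argc > 1 && loadAndCheck(argv[1])) ? 0 : 1; }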
+ cleanupUpstreamAndDownstream(); + return true; + } + envoy::extensions::filters::http::rate_limit_quota::v3::RateLimitQuotaFilterConfig proto_config_{}; std::vector grpc_upstreams_; FakeHttpConnectionPtr rlqs_connection_; FakeStreamPtr rlqs_stream_; IntegrationStreamDecoderPtr response_; + int report_interval_sec = 60; }; INSTANTIATE_TEST_SUITE_P( @@ -676,9 +709,6 @@ TEST_P(RateLimitQuotaIntegrationTest, BasicFlowPeriodicalReport) { EXPECT_TRUE(response_->complete()); EXPECT_EQ(response_->headers().getStatusValue(), "200"); - // TODO(tyxia) Make interval configurable in the test. It is currently 60s in - // ValidMatcherConfig. - int report_interval_sec = 60; // Trigger the report periodically, 10 times. for (int i = 0; i < 10; ++i) { // Advance the time by report_interval. @@ -757,8 +787,6 @@ TEST_P(RateLimitQuotaIntegrationTest, BasicFlowPeriodicalReportWithStreamClosed) EXPECT_TRUE(response_->complete()); EXPECT_EQ(response_->headers().getStatusValue(), "200"); - // ValidMatcherConfig. - int report_interval_sec = 60; // Trigger the report periodically. for (int i = 0; i < 6; ++i) { if (i == 2) { @@ -934,6 +962,153 @@ TEST_P(RateLimitQuotaIntegrationTest, MultiRequestWithTokenBucket) { } } +TEST_P(RateLimitQuotaIntegrationTest, MultiRequestWithUnsupportedStrategy) { + initializeConfig(); + HttpIntegrationTest::initialize(); + absl::flat_hash_map custom_headers = {{"environment", "staging"}, + {"group", "envoy"}}; + + for (int i = 0; i < 2; ++i) { + // Send downstream client request to upstream. + sendClientRequest(&custom_headers); + + // Handle the request received by upstream. All requests will be allowed + // since the strategy is not supported. + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_->encodeData(100, true); + + // Verify the response to downstream. + ASSERT_TRUE(response_->waitForEndStream()); + EXPECT_TRUE(response_->complete()); + EXPECT_EQ(response_->headers().getStatusValue(), "200"); + + // Only first downstream client request will trigger the reports to RLQS + // server as the subsequent requests will find the entry in the cache. + if (i == 0) { + // Start the gRPC stream to RLQS server. + ASSERT_TRUE(grpc_upstreams_[0]->waitForHttpConnection(*dispatcher_, rlqs_connection_)); + ASSERT_TRUE(rlqs_connection_->waitForNewStream(*dispatcher_, rlqs_stream_)); + + envoy::service::rate_limit_quota::v3::RateLimitQuotaUsageReports reports; + ASSERT_TRUE(rlqs_stream_->waitForGrpcMessage(*dispatcher_, reports)); + rlqs_stream_->startGrpcStream(); + + // Build the response. 
+ envoy::service::rate_limit_quota::v3::RateLimitQuotaResponse rlqs_response; + absl::flat_hash_map custom_headers_cpy = custom_headers; + custom_headers_cpy.insert({"name", "prod"}); + auto* bucket_action = rlqs_response.add_bucket_action(); + for (const auto& [key, value] : custom_headers_cpy) { + (*bucket_action->mutable_bucket_id()->mutable_bucket()).insert({key, value}); + auto* quota_assignment = bucket_action->mutable_quota_assignment_action(); + quota_assignment->mutable_assignment_time_to_live()->set_seconds(120); + auto* strategy = quota_assignment->mutable_rate_limit_strategy(); + auto* unsupported_strategy = strategy->mutable_requests_per_time_unit(); + unsupported_strategy->set_requests_per_time_unit(10); + unsupported_strategy->set_time_unit(envoy::type::v3::RateLimitUnit::SECOND); + } + + // Send the response from RLQS server. + rlqs_stream_->sendGrpcMessage(rlqs_response); + absl::SleepFor(absl::Seconds(1)); + } + + cleanUp(); + } +} + +TEST_P(RateLimitQuotaIntegrationTest, MultiRequestWithUnsetStrategy) { + initializeConfig(); + HttpIntegrationTest::initialize(); + absl::flat_hash_map custom_headers = {{"environment", "staging"}, + {"group", "envoy"}}; + + for (int i = 0; i < 2; ++i) { + // Send downstream client request to upstream. + sendClientRequest(&custom_headers); + + // Handle the request received by upstream. All requests will be allowed + // since the strategy is not set. + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_->encodeData(100, true); + + // Verify the response to downstream. + ASSERT_TRUE(response_->waitForEndStream()); + EXPECT_TRUE(response_->complete()); + EXPECT_EQ(response_->headers().getStatusValue(), "200"); + + // Only first downstream client request will trigger the reports to RLQS + // server as the subsequent requests will find the entry in the cache. + if (i == 0) { + // Start the gRPC stream to RLQS server. + ASSERT_TRUE(grpc_upstreams_[0]->waitForHttpConnection(*dispatcher_, rlqs_connection_)); + ASSERT_TRUE(rlqs_connection_->waitForNewStream(*dispatcher_, rlqs_stream_)); + + envoy::service::rate_limit_quota::v3::RateLimitQuotaUsageReports reports; + ASSERT_TRUE(rlqs_stream_->waitForGrpcMessage(*dispatcher_, reports)); + rlqs_stream_->startGrpcStream(); + + // Build the response. + envoy::service::rate_limit_quota::v3::RateLimitQuotaResponse rlqs_response; + absl::flat_hash_map custom_headers_cpy = custom_headers; + custom_headers_cpy.insert({"name", "prod"}); + auto* bucket_action = rlqs_response.add_bucket_action(); + for (const auto& [key, value] : custom_headers_cpy) { + (*bucket_action->mutable_bucket_id()->mutable_bucket()).insert({key, value}); + auto* quota_assignment = bucket_action->mutable_quota_assignment_action(); + quota_assignment->mutable_assignment_time_to_live()->set_seconds(120); + } + + // Send the response from RLQS server. 
+ rlqs_stream_->sendGrpcMessage(rlqs_response); + absl::SleepFor(absl::Seconds(1)); + } + + cleanUp(); + } +} + +TEST_P(RateLimitQuotaIntegrationTest, MultiRequestWithUnsupportedDefaultAction) { + ConfigOption option; + option.unsupported_no_assignment_strategy = true; + initializeConfig(option); + HttpIntegrationTest::initialize(); + absl::flat_hash_map custom_headers = {{"environment", "staging"}, + {"group", "envoy"}}; + + // Send downstream client request to upstream. + sendClientRequest(&custom_headers); + + // Handle the request received by upstream. All requests will be allowed + // since the strategy is not set. + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_->encodeData(100, true); + + // Verify the response to downstream. + ASSERT_TRUE(response_->waitForEndStream()); + EXPECT_TRUE(response_->complete()); + EXPECT_EQ(response_->headers().getStatusValue(), "200"); + + // Start the gRPC stream to RLQS server. + ASSERT_TRUE(grpc_upstreams_[0]->waitForHttpConnection(*dispatcher_, rlqs_connection_)); + ASSERT_TRUE(rlqs_connection_->waitForNewStream(*dispatcher_, rlqs_stream_)); + + envoy::service::rate_limit_quota::v3::RateLimitQuotaUsageReports reports; + ASSERT_TRUE(rlqs_stream_->waitForGrpcMessage(*dispatcher_, reports)); + rlqs_stream_->startGrpcStream(); + + cleanUp(); +} + TEST_P(RateLimitQuotaIntegrationTest, MultiSameRequestWithExpiredAssignmentDeny) { ConfigOption option; option.expired_assignment_blanket_rule = BlanketRule::DENY_ALL; @@ -974,8 +1149,10 @@ TEST_P(RateLimitQuotaIntegrationTest, MultiSameRequestWithExpiredAssignmentDeny) strategy->set_blanket_rule(envoy::type::v3::RateLimitStrategy::ALLOW_ALL); } - // Send the response from RLQS server. - rlqs_stream_->sendGrpcMessage(rlqs_response); + // Send the response from RLQS server and wait for response processing to + // finish for test consistency. + WAIT_FOR_LOG_CONTAINS("debug", "Assignment cached for bucket id", + { rlqs_stream_->sendGrpcMessage(rlqs_response); }); } // 2nd request is throttled because the assignment has expired and @@ -1018,8 +1195,9 @@ TEST_P(RateLimitQuotaIntegrationTest, MultiSameRequestWithExpiredAssignmentAllow // Send downstream client request to upstream. sendClientRequest(&custom_headers); - // 2nd downstream client request will not trigger the reports to RLQS server since it is - // same as first request, which will find the entry in the cache. + // 2nd downstream client request will not trigger the reports to RLQS server + // since it is same as first request, which will find the entry in the + // cache. if (i != 1) { // 1st request will start the gRPC stream. if (i == 0) { @@ -1031,9 +1209,10 @@ TEST_P(RateLimitQuotaIntegrationTest, MultiSameRequestWithExpiredAssignmentAllow ASSERT_TRUE(rlqs_stream_->waitForGrpcMessage(*dispatcher_, reports)); rlqs_stream_->startGrpcStream(); } else { - // 3rd request won't start gRPC stream again since it is kept open. + // 3rd request won't start gRPC stream again since it is kept open and + // the usage will be aggregated instead of spawning an immediate report. 
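A note on what MultiRequestWithUnsupportedStrategy, MultiRequestWithUnsetStrategy, and MultiRequestWithUnsupportedDefaultAction above pin down: an assignment whose rate_limit_strategy is unset, or uses the not-yet-supported requests_per_time_unit form, fails open and the request is allowed. The schematic below only summarizes that asserted behavior with illustrative names; it is not the filter's code.

#include <iostream>

// Illustrative enumeration of the strategy shapes the tests send.
enum class StrategyCase { Unset, BlanketAllowAll, BlanketDenyAll, RequestsPerTimeUnit };

// Decision the tests assert: blanket rules are honored, everything else passes through.
bool shouldAllow(StrategyCase strategy) {
  switch (strategy) {
  case StrategyCase::BlanketDenyAll:
    return false;
  case StrategyCase::BlanketAllowAll:
  case StrategyCase::Unset:
  case StrategyCase::RequestsPerTimeUnit: // unsupported today, so requests are allowed
    return true;
  }
  return true; // unreachable; keeps non-exhaustive-switch compilers quiet
}

int main() {
  std::cout << std::boolalpha;
  std::cout << "requests_per_time_unit -> " << shouldAllow(StrategyCase::RequestsPerTimeUnit) << "\n";
  std::cout << "unset strategy         -> " << shouldAllow(StrategyCase::Unset) << "\n";
  std::cout << "DENY_ALL               -> " << shouldAllow(StrategyCase::BlanketDenyAll) << "\n";
}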
envoy::service::rate_limit_quota::v3::RateLimitQuotaUsageReports reports; - ASSERT_TRUE(rlqs_stream_->waitForGrpcMessage(*dispatcher_, reports)); + ASSERT_FALSE(rlqs_stream_->waitForGrpcMessage(*dispatcher_, reports)); } // Build the response. @@ -1050,12 +1229,14 @@ TEST_P(RateLimitQuotaIntegrationTest, MultiSameRequestWithExpiredAssignmentAllow strategy->set_blanket_rule(envoy::type::v3::RateLimitStrategy::ALLOW_ALL); } - // Send the response from RLQS server. - rlqs_stream_->sendGrpcMessage(rlqs_response); + // Send the response from RLQS server and wait for response processing to + // finish for test consistency. + WAIT_FOR_LOG_CONTAINS("debug", "Assignment cached for bucket id", + { rlqs_stream_->sendGrpcMessage(rlqs_response); }); } - // Even though assignment was expired on 2nd request, the request is still allowed because the - // expired assignment behavior is ALLOW_ALL. + // Even though assignment was expired on 2nd request, the request is still + // allowed because the expired assignment behavior is ALLOW_ALL. ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); @@ -1067,32 +1248,47 @@ TEST_P(RateLimitQuotaIntegrationTest, MultiSameRequestWithExpiredAssignmentAllow EXPECT_TRUE(response_->complete()); EXPECT_EQ(response_->headers().getStatusValue(), "200"); - // Clean up the upstream and downstream resource but keep the gRPC connection to RLQS server - // open. + // Clean up the upstream and downstream resource but keep the gRPC + // connection to RLQS server open. cleanupUpstreamAndDownstream(); } } -TEST_P(RateLimitQuotaIntegrationTest, MultiSameRequestWithAbandonAction) { - initializeConfig(); +TEST_P(RateLimitQuotaIntegrationTest, MultiSameRequestWithExpirationToDefaultDeny) { + ConfigOption option; + option.expired_assignment_blanket_rule = BlanketRule::DENY_ALL; + option.no_assignment_blanket_rule = BlanketRule::ALLOW_ALL; + initializeConfig(option); HttpIntegrationTest::initialize(); absl::flat_hash_map custom_headers = {{"environment", "staging"}, {"group", "envoy"}}; - absl::flat_hash_map custom_headers_cpy = custom_headers; - custom_headers_cpy.insert({"name", "prod"}); - for (int i = 0; i < 3; ++i) { + for (int i = 0; i < 4; ++i) { + // Advance the time to make cached assignment expired. + if (i > 0) { + simTime().advanceTimeWait(std::chrono::seconds(15)); + } // Send downstream client request to upstream. sendClientRequest(&custom_headers); - // 3rd downstream request will not trigger the reports to RLQS server since it is - // same as 2nd request, which will find the entry in the cache. - if (i != 2) { - envoy::service::rate_limit_quota::v3::RateLimitQuotaResponse rlqs_response; + // Query 1: ALLOW_ALL by default. Query 2: DENY_ALL by assignment. + // Query 3: DENY_ALL by assignment expiration. Query 4: ALLOW_ALL by default. + if (i == 0 || i == 3) { + // Handle the request received by upstream. + ASSERT_TRUE( + fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_->encodeData(100, true); + + // Verify the response to downstream. 
+ ASSERT_TRUE(response_->waitForEndStream()); + EXPECT_TRUE(response_->complete()); + EXPECT_EQ(response_->headers().getStatusValue(), "200"); - // 1st request will start the gRPC stream. if (i == 0) { - // Start the gRPC stream to RLQS server on the first request. + // Start the gRPC stream to RLQS server & send the initial report. ASSERT_TRUE(grpc_upstreams_[0]->waitForHttpConnection(*dispatcher_, rlqs_connection_)); ASSERT_TRUE(rlqs_connection_->waitForNewStream(*dispatcher_, rlqs_stream_)); @@ -1101,48 +1297,157 @@ TEST_P(RateLimitQuotaIntegrationTest, MultiSameRequestWithAbandonAction) { rlqs_stream_->startGrpcStream(); // Build the response. + envoy::service::rate_limit_quota::v3::RateLimitQuotaResponse rlqs_response; + absl::flat_hash_map custom_headers_cpy = custom_headers; + custom_headers_cpy.insert({"name", "prod"}); auto* bucket_action = rlqs_response.add_bucket_action(); for (const auto& [key, value] : custom_headers_cpy) { (*bucket_action->mutable_bucket_id()->mutable_bucket()).insert({key, value}); + auto* quota_assignment = bucket_action->mutable_quota_assignment_action(); + quota_assignment->mutable_assignment_time_to_live()->set_seconds(15); + auto* strategy = quota_assignment->mutable_rate_limit_strategy(); + strategy->set_blanket_rule(envoy::type::v3::RateLimitStrategy::DENY_ALL); } - // Set up the abandon action. - bucket_action->mutable_abandon_action(); - } else { - // 2nd request will still send report to RLQS server as the previous abandon - // action has removed the cache entry. but it won't start gRPC stream - // again since it is kept open. + // Send the response from RLQS server. + rlqs_stream_->sendGrpcMessage(rlqs_response); + absl::SleepFor(absl::Seconds(1)); + } + } else { + ASSERT_TRUE(response_->waitForEndStream(std::chrono::seconds(5))); + EXPECT_TRUE(response_->complete()); + EXPECT_EQ(response_->headers().getStatusValue(), "429"); + } + + cleanUp(); + } +} + +TEST_P(RateLimitQuotaIntegrationTest, MultiSameRequestWithExpirationWithoutFallback) { + ConfigOption option; + option.no_assignment_blanket_rule = BlanketRule::ALLOW_ALL; + initializeConfig(option); + HttpIntegrationTest::initialize(); + absl::flat_hash_map custom_headers = {{"environment", "staging"}, + {"group", "envoy"}}; + + for (int i = 0; i < 3; ++i) { + // Advance the time to make cached assignment expired. + if (i > 0) { + simTime().advanceTimeWait(std::chrono::seconds(15)); + } + // Send downstream client request to upstream. + sendClientRequest(&custom_headers); + + // Query 1: ALLOW_ALL by the no-assignment default. Query 2: DENY_ALL by assignment. + // Query 3: ALLOW_ALL again (assignment expired, no expiration fallback configured). + if (i == 0 || i == 2) { + // Handle the request received by upstream. + ASSERT_TRUE( + fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_->encodeData(100, true); + + // Verify the response to downstream. + ASSERT_TRUE(response_->waitForEndStream()); + EXPECT_TRUE(response_->complete()); + EXPECT_EQ(response_->headers().getStatusValue(), "200"); + + if (i == 0) { + // Start the gRPC stream to RLQS server & send the initial report.
+ ASSERT_TRUE(grpc_upstreams_[0]->waitForHttpConnection(*dispatcher_, rlqs_connection_)); + ASSERT_TRUE(rlqs_connection_->waitForNewStream(*dispatcher_, rlqs_stream_)); + envoy::service::rate_limit_quota::v3::RateLimitQuotaUsageReports reports; ASSERT_TRUE(rlqs_stream_->waitForGrpcMessage(*dispatcher_, reports)); + rlqs_stream_->startGrpcStream(); - // Build the rlqs server response. + // Build the response. + envoy::service::rate_limit_quota::v3::RateLimitQuotaResponse rlqs_response; + absl::flat_hash_map custom_headers_cpy = custom_headers; + custom_headers_cpy.insert({"name", "prod"}); auto* bucket_action = rlqs_response.add_bucket_action(); for (const auto& [key, value] : custom_headers_cpy) { (*bucket_action->mutable_bucket_id()->mutable_bucket()).insert({key, value}); + auto* quota_assignment = bucket_action->mutable_quota_assignment_action(); + quota_assignment->mutable_assignment_time_to_live()->set_seconds(15); + auto* strategy = quota_assignment->mutable_rate_limit_strategy(); + strategy->set_blanket_rule(envoy::type::v3::RateLimitStrategy::DENY_ALL); } - } - // Send the response from RLQS server. - rlqs_stream_->sendGrpcMessage(rlqs_response); + // Send the response from RLQS server. + rlqs_stream_->sendGrpcMessage(rlqs_response); + absl::SleepFor(absl::Seconds(1)); + } + } else { + ASSERT_TRUE(response_->waitForEndStream(std::chrono::seconds(5))); + EXPECT_TRUE(response_->complete()); + EXPECT_EQ(response_->headers().getStatusValue(), "429"); } - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); - upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); - upstream_request_->encodeData(100, true); + cleanUp(); + } +} - // Verify the response to downstream. - ASSERT_TRUE(response_->waitForEndStream()); - EXPECT_TRUE(response_->complete()); - EXPECT_EQ(response_->headers().getStatusValue(), "200"); +TEST_P(RateLimitQuotaIntegrationTest, MultiSameRequestWithAbandonAction) { + initializeConfig(); + HttpIntegrationTest::initialize(); + absl::flat_hash_map custom_headers = {{"environment", "staging"}, + {"group", "envoy"}}; - // Clean up the upstream and downstream resource but keep the gRPC connection to RLQS server - // open. - cleanupUpstreamAndDownstream(); + absl::flat_hash_map custom_headers_cpy = custom_headers; + custom_headers_cpy.insert({"name", "prod"}); + + // Send first request & expect a new RLQS stream. + sendClientRequest(&custom_headers); + ASSERT_TRUE(grpc_upstreams_[0]->waitForHttpConnection(*dispatcher_, rlqs_connection_)); + ASSERT_TRUE(rlqs_connection_->waitForNewStream(*dispatcher_, rlqs_stream_)); + + // Expect an initial report. + envoy::service::rate_limit_quota::v3::RateLimitQuotaUsageReports reports; + ASSERT_TRUE(rlqs_stream_->waitForGrpcMessage(*dispatcher_, reports)); + rlqs_stream_->startGrpcStream(); + EXPECT_EQ(reports.bucket_quota_usages_size(), 1); + EXPECT_EQ(reports.bucket_quota_usages(0).num_requests_allowed(), 1); + EXPECT_EQ(reports.bucket_quota_usages(0).num_requests_denied(), 0); + + // Expect the first request to be allowed. + ASSERT_TRUE(expectAllowedRequest()); + + // Build an abandon-action response. 
+ envoy::service::rate_limit_quota::v3::RateLimitQuotaResponse rlqs_response; + auto* bucket_action = rlqs_response.add_bucket_action(); + for (const auto& [key, value] : custom_headers_cpy) { + (*bucket_action->mutable_bucket_id()->mutable_bucket()).insert({key, value}); } + bucket_action->mutable_abandon_action(); + // Send the response from RLQS server. + rlqs_stream_->sendGrpcMessage(rlqs_response); + + // Expect the next report to be empty, since the cache entry was removed, but + // allow for retries in case the response hasn't processed yet. + bool empty_report = false; + for (int i = 0; i < 5 && !empty_report; ++i) { + simTime().advanceTimeWait(std::chrono::seconds(report_interval_sec)); + ASSERT_TRUE(rlqs_stream_->waitForGrpcMessage(*dispatcher_, reports)); + empty_report = reports.bucket_quota_usages().empty(); + } + ASSERT_TRUE(empty_report); + + // Send a second request & expect an immediate report for the new bucket, but + // no new stream. + sendClientRequest(&custom_headers); + ASSERT_TRUE(rlqs_stream_->waitForGrpcMessage(*dispatcher_, reports)); + EXPECT_EQ(reports.bucket_quota_usages_size(), 1); + EXPECT_EQ(reports.bucket_quota_usages(0).num_requests_allowed(), 1); + EXPECT_EQ(reports.bucket_quota_usages(0).num_requests_denied(), 0); + + // Expect the second request to be allowed. + ASSERT_TRUE(expectAllowedRequest()); } } // namespace diff --git a/test/extensions/http/header_validators/envoy_default/base_header_validator_test.cc b/test/extensions/http/header_validators/envoy_default/base_header_validator_test.cc index 77591cbe1f74..898abb35ec40 100644 --- a/test/extensions/http/header_validators/envoy_default/base_header_validator_test.cc +++ b/test/extensions/http/header_validators/envoy_default/base_header_validator_test.cc @@ -189,16 +189,33 @@ TEST_F(BaseHeaderValidatorTest, ValidateHostHeaderInvalidRegName) { TEST_F(BaseHeaderValidatorTest, ValidateHostHeaderValidIPv6) { HeaderString valid{"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:443"}; HeaderString valid_no_port{"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]"}; + HeaderString valid_double_colon_all_0{"[::]"}; + HeaderString valid_double_colon{"[2001::7334]"}; + HeaderString valid_double_colon_at_beginning{"[::2001:7334]"}; + HeaderString valid_double_colon_at_end{"[2001:7334::]"}; auto uhv = createBase(empty_config); EXPECT_ACCEPT(uhv->validateHostHeader(valid)); EXPECT_ACCEPT(uhv->validateHostHeader(valid_no_port)); + EXPECT_ACCEPT(uhv->validateHostHeader(valid_double_colon_all_0)); + EXPECT_ACCEPT(uhv->validateHostHeader(valid_double_colon)); + EXPECT_ACCEPT(uhv->validateHostHeader(valid_double_colon_at_beginning)); + EXPECT_ACCEPT(uhv->validateHostHeader(valid_double_colon_at_end)); } TEST_F(BaseHeaderValidatorTest, ValidateHostHeaderInvalidIPv6) { HeaderString invalid_missing_closing_bracket{"[2001:0db8:85a3:0000:0000:8a2e:0370:7334"}; HeaderString invalid_chars{"[200z:0db8:85a3:0000:0000:8a2e:0370:7334]"}; HeaderString invalid_no_brackets{"200z:0db8:85a3:0000:0000:8a2e:0370:7334"}; + HeaderString invalid_more_than_8_parts{"[2001:0db8:85a3:0000:0000:8a2e:0370:7334:1]:443"}; + HeaderString invalid_not_16_bits{"[1:1:20012:1:1:1:1:1]:443"}; + HeaderString invalid_2_double_colons{"[2::1::1]:443"}; + HeaderString invalid_2_double_colons_at_beginning{"[::1::1]:443"}; + HeaderString invalid_2_double_colons_at_end{"[1::1::]:443"}; + HeaderString invalid_2_double_colons_at_beginning_and_end{"[::1:1::]:443"}; + HeaderString invalid_3_colons{"[:::]:443"}; + HeaderString invalid_single_colon_at_end{"[1::1:]:443"}; + 
HeaderString invalid_single_colon_at_beginning{"[:1::1:2]:443"}; auto uhv = createBase(empty_config); EXPECT_REJECT_WITH_DETAILS(uhv->validateHostHeader(invalid_missing_closing_bracket), @@ -207,6 +224,24 @@ TEST_F(BaseHeaderValidatorTest, ValidateHostHeaderInvalidIPv6) { UhvResponseCodeDetail::get().InvalidHost); EXPECT_REJECT_WITH_DETAILS(uhv->validateHostHeader(invalid_no_brackets), UhvResponseCodeDetail::get().InvalidHost); + EXPECT_REJECT_WITH_DETAILS(uhv->validateHostHeader(invalid_more_than_8_parts), + UhvResponseCodeDetail::get().InvalidHost); + EXPECT_REJECT_WITH_DETAILS(uhv->validateHostHeader(invalid_not_16_bits), + UhvResponseCodeDetail::get().InvalidHost); + EXPECT_REJECT_WITH_DETAILS(uhv->validateHostHeader(invalid_2_double_colons), + UhvResponseCodeDetail::get().InvalidHost); + EXPECT_REJECT_WITH_DETAILS(uhv->validateHostHeader(invalid_2_double_colons_at_beginning), + UhvResponseCodeDetail::get().InvalidHost); + EXPECT_REJECT_WITH_DETAILS(uhv->validateHostHeader(invalid_2_double_colons_at_end), + UhvResponseCodeDetail::get().InvalidHost); + EXPECT_REJECT_WITH_DETAILS(uhv->validateHostHeader(invalid_2_double_colons_at_beginning_and_end), + UhvResponseCodeDetail::get().InvalidHost); + EXPECT_REJECT_WITH_DETAILS(uhv->validateHostHeader(invalid_3_colons), + UhvResponseCodeDetail::get().InvalidHost); + EXPECT_REJECT_WITH_DETAILS(uhv->validateHostHeader(invalid_single_colon_at_end), + UhvResponseCodeDetail::get().InvalidHost); + EXPECT_REJECT_WITH_DETAILS(uhv->validateHostHeader(invalid_single_colon_at_beginning), + UhvResponseCodeDetail::get().InvalidHost); } TEST_F(BaseHeaderValidatorTest, ValidateHostHeaderInvalidEmpty) { diff --git a/test/extensions/http/header_validators/envoy_default/http2_header_validator_test.cc b/test/extensions/http/header_validators/envoy_default/http2_header_validator_test.cc index 831f7c9ca529..27da09761507 100644 --- a/test/extensions/http/header_validators/envoy_default/http2_header_validator_test.cc +++ b/test/extensions/http/header_validators/envoy_default/http2_header_validator_test.cc @@ -438,15 +438,6 @@ TEST_F(Http2HeaderValidatorTest, ValidateResponseHeaderMapEmptyGenericName) { UhvResponseCodeDetail::get().EmptyHeaderName); } -TEST_F(Http2HeaderValidatorTest, ValidateRequestTrailersAllowUnderscoreHeadersByDefault) { - TestRequestTrailerMapImpl trailers{{"trailer1", "value1"}, {"x_foo", "bar"}}; - auto uhv = createH2ServerUhv(empty_config); - - EXPECT_ACCEPT(uhv->validateRequestTrailers(trailers)); - EXPECT_ACCEPT(uhv->transformRequestTrailers(trailers)); - EXPECT_EQ(trailers, TestRequestTrailerMapImpl({{"trailer1", "value1"}, {"x_foo", "bar"}})); -} - TEST_F(Http2HeaderValidatorTest, ValidateGenericHeaderNameRejectConnectionHeaders) { std::string connection_headers[] = {"transfer-encoding", "connection", "keep-alive", "upgrade", "proxy-connection"}; diff --git a/test/integration/BUILD b/test/integration/BUILD index fd0687cab3a2..fdfa59a6cf62 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -817,6 +817,7 @@ envoy_cc_test_library( "//test/integration/filters:metadata_control_filter_lib", "//test/integration/filters:random_pause_filter_lib", "//test/integration/filters:remove_response_headers_lib", + "//test/integration/filters:remove_response_trailers_lib", "//test/integration/filters:stop_iteration_headers_inject_body", "//test/test_common:logging_lib", "//test/test_common:threadsafe_singleton_injector_lib", diff --git a/test/integration/filters/BUILD b/test/integration/filters/BUILD index 83459f3c5970..dfebb8f118cf 100644 
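The added IPv6 host-header cases boil down to a small set of syntactic rules for the literal between the brackets: at most eight hex groups of one to four digits, at most one "::", and no stray single colons at either end. The following is a standalone sketch of those rules only, not Envoy's validator; bracket and port stripping are assumed to have happened already, and IPv4-mapped tails and zone IDs are out of scope.

#include <cctype>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

namespace {

bool isHexGroup(const std::string& group) {
  if (group.empty() || group.size() > 4) {
    return false;
  }
  for (char c : group) {
    if (std::isxdigit(static_cast<unsigned char>(c)) == 0) {
      return false;
    }
  }
  return true;
}

// Splits a possibly empty colon-separated list into groups; rejects empty or
// malformed groups (which is how stray leading/trailing single colons surface).
bool splitGroups(const std::string& part, std::vector<std::string>& groups) {
  if (part.empty()) {
    return true;
  }
  std::size_t start = 0;
  while (true) {
    const std::size_t colon = part.find(':', start);
    const std::string group =
        colon == std::string::npos ? part.substr(start) : part.substr(start, colon - start);
    if (!isHexGroup(group)) {
      return false;
    }
    groups.push_back(group);
    if (colon == std::string::npos) {
      return true;
    }
    start = colon + 1;
  }
}

bool isPlausibleIpv6Literal(const std::string& address) {
  const std::size_t compress = address.find("::");
  if (compress != std::string::npos && address.find("::", compress + 1) != std::string::npos) {
    return false; // more than one "::" (this also rejects ":::")
  }
  std::vector<std::string> groups;
  if (compress == std::string::npos) {
    // No compression: exactly eight well-formed groups.
    return splitGroups(address, groups) && groups.size() == 8;
  }
  std::vector<std::string> left, right;
  if (!splitGroups(address.substr(0, compress), left) ||
      !splitGroups(address.substr(compress + 2), right)) {
    return false;
  }
  // "::" stands in for at least one zero group, so at most seven explicit groups remain.
  return left.size() + right.size() <= 7;
}

} // namespace

int main() {
  const std::vector<std::string> cases = {"::",          "2001::7334", "::2001:7334",
                                          "2001:7334::", "2::1::1",    ":::",
                                          "1::1:",       ":1::1:2",    "1:1:20012:1:1:1:1:1"};
  for (const std::string& address : cases) {
    std::cout << address << " -> " << (isPlausibleIpv6Literal(address) ? "accept" : "reject")
              << "\n";
  }
}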
--- a/test/integration/filters/BUILD +++ b/test/integration/filters/BUILD @@ -871,6 +871,21 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "remove_response_trailers_lib", + srcs = [ + "remove_response_trailers.cc", + ], + deps = [ + ":common_lib", + "//envoy/http:filter_interface", + "//envoy/registry", + "//envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "//test/extensions/filters/http/common:empty_http_filter_config_lib", + ], +) + envoy_cc_test_library( name = "repick_cluster_filter_lib", srcs = [ diff --git a/test/integration/filters/remove_response_trailers.cc b/test/integration/filters/remove_response_trailers.cc new file mode 100644 index 000000000000..780d016b4fb4 --- /dev/null +++ b/test/integration/filters/remove_response_trailers.cc @@ -0,0 +1,38 @@ +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "source/extensions/filters/http/common/pass_through_filter.h" + +#include "test/extensions/filters/http/common/empty_http_filter_config.h" +#include "test/integration/filters/common.h" + +namespace Envoy { + +// Registers a filter which removes all trailers, leaving an empty trailers block. +// This resembles the behavior of grpc_web_filter, so we can verify that the codecs +// do the right thing when that happens. +class RemoveResponseTrailersFilter : public Http::PassThroughFilter { +public: + constexpr static char name[] = "remove-response-trailers-filter"; + Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override { + std::vector keys; + trailers.iterate([&keys](const Http::HeaderEntry& trailer) -> Http::HeaderMap::Iterate { + keys.push_back(std::string(trailer.key().getStringView())); + return Http::HeaderMap::Iterate::Continue; + }); + for (auto& k : keys) { + const Http::LowerCaseString lower_key{k}; + trailers.remove(lower_key); + } + return Http::FilterTrailersStatus::Continue; + } +}; + +static Registry::RegisterFactory, + Server::Configuration::NamedHttpFilterConfigFactory> + register_; +static Registry::RegisterFactory, + Server::Configuration::UpstreamHttpFilterConfigFactory> + register_upstream_; + +} // namespace Envoy diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 1c063b008c70..3625b0b58d51 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -1718,6 +1718,35 @@ TEST_P(ProtocolIntegrationTest, MaxStreamDurationWithRetryPolicyWhenRetryUpstrea EXPECT_EQ("408", response->headers().getStatusValue()); } +// Verify that empty trailers are not sent as trailers. 
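In HTTP/1.1 terms, "empty trailers are not sent as trailers" means the chunked body should end with just the bare terminating chunk rather than a trailer section once a filter has stripped every trailer. A rough standalone illustration of that wire-level distinction follows (see RFC 9112 on chunked transfer coding); it is not Envoy's codec, and endChunkedBody is an invented helper.

#include <iostream>
#include <map>
#include <string>

// Serializes the end of an HTTP/1.1 chunked body. With no trailers left, only the
// terminating "0\r\n\r\n" goes on the wire; otherwise trailer fields sit between the
// last-chunk marker and the final CRLF.
std::string endChunkedBody(const std::map<std::string, std::string>& trailers) {
  std::string out = "0\r\n";
  for (const auto& [name, value] : trailers) {
    out += name + ": " + value + "\r\n";
  }
  out += "\r\n";
  return out;
}

int main() {
  std::cout << "no trailers: " << endChunkedBody({}).size() << " bytes\n"; // 5 bytes: 0 CRLF CRLF
  std::cout << "one trailer: " << endChunkedBody({{"some-trailer", "value"}}).size() << " bytes\n";
}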
+TEST_P(DownstreamProtocolIntegrationTest, EmptyTrailersAreNotEncoded) { + config_helper_.addConfigModifier(setEnableDownstreamTrailersHttp1()); + config_helper_.addConfigModifier(setEnableUpstreamTrailersHttp1()); + config_helper_.prependFilter(R"EOF( +name: remove-response-trailers-filter +)EOF"); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + auto encoder_decoder = + codec_client_->startRequest(Http::TestRequestHeaderMapImpl{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "sni.lyft.com"}}); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + codec_client_->sendData(*request_encoder_, 1, true); + waitForNextUpstreamRequest(); + + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_->encodeData("b", false); + Http::TestResponseTrailerMapImpl removed_trailers{{"some-trailer", "removed-by-filter"}}; + upstream_request_->encodeTrailers(removed_trailers); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); + EXPECT_THAT(response->trailers(), testing::IsNull()); +} + // Verify that headers with underscores in their names are dropped from client requests // but remain in upstream responses. TEST_P(ProtocolIntegrationTest, HeadersWithUnderscoresDropped) { @@ -5096,12 +5125,13 @@ TEST_P(ProtocolIntegrationTest, ServerHalfCloseBeforeClientWithErrorAndBufferedR } else if (downstreamProtocol() == Http::CodecType::HTTP2) { ASSERT_TRUE(response->waitForReset()); } else if (downstreamProtocol() == Http::CodecType::HTTP3) { - // Unlike H/2 codec H/3 codec attempts to send pending data before the reset - // So it needs window to push the data to the client and then the reset - // which just gets discarded since end_stream has been received just before - // reset. + // Unlike H/2, H/3 client codec only stops sending request upon STOP_SENDING frame but still + // attempts to finish receiving response. So resume reading in order to fully close the + // stream after receiving both STOP_SENDING and end stream. request_encoder_->getStream().readDisable(false); ASSERT_TRUE(response->waitForEndStream()); + // Following STOP_SENDING will be propagated via reset callback. + ASSERT_TRUE(response->waitForReset()); } } else if (fake_upstreams_[0]->httpType() == Http::CodecType::HTTP2 || fake_upstreams_[0]->httpType() == Http::CodecType::HTTP3) { diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index c28b810f5cb2..0478224d73a8 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -982,10 +982,19 @@ TEST_P(TcpTunnelingIntegrationTest, UpstreamHttpFiltersPauseAndResume) { // Send upgrade headers downstream, fully establishing the connection. upstream_request_->encodeHeaders(default_response_headers_, false); + bool verify_no_remote_close = true; + if (upstreamProtocol() == Http::CodecType::HTTP1) { + // in HTTP1 case, the connection is closed on stream reset and therefore, it + // is possible to detect a remote close if remote FIN event gets processed before local close + // socket event. By sending verify_no_remote_close as false to the write function, we are + // allowing the test to pass even if remote close is detected. 
+ verify_no_remote_close = false; + } // send some data to pause the filter - ASSERT_TRUE(tcp_client_->write("hello", false)); + ASSERT_TRUE(tcp_client_->write("hello", false, verify_no_remote_close)); // send end stream to resume the filter - ASSERT_TRUE(tcp_client_->write("hello", true)); + ASSERT_TRUE(tcp_client_->write("hello", true, verify_no_remote_close)); + ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 10)); // Finally close and clean up. diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt index 4852b6099686..97e88b127446 100644 --- a/tools/base/requirements.txt +++ b/tools/base/requirements.txt @@ -36,9 +36,9 @@ aio-api-nist==0.0.4 \ --hash=sha256:1f2909d60ed4fdb3a3ffc37ad6012666f34078b71648394be91f5e67bbf8b6ca \ --hash=sha256:c948ee597b9e7cda7982e17bc4aca509b8aa68510899b42e2d382c10fb0d6f89 # via envoy-dependency-check -aio-core==0.10.2 \ - --hash=sha256:2274534e5a06acb4c4a238269eb79f4e9b70f54e20c0aa75c357244786ec90c4 \ - --hash=sha256:8bdc8795dccb52003045f38f718c7a9be52413c2923c2ce96821b9272b7e3646 +aio-core==0.10.3 \ + --hash=sha256:9f5e87347e396a00829e83bbee3769fccdc3ead24522c429e7dfe53d67a50e07 \ + --hash=sha256:b51d32426247f3c265015c5c4b776f9feef15e01c28ca4e868c2ba7b67c556cc # via # -r requirements.in # aio-api-bazel @@ -258,74 +258,74 @@ certifi==2024.7.4 \ # via # aioquic # requests -cffi==1.17.0 \ - --hash=sha256:011aff3524d578a9412c8b3cfaa50f2c0bd78e03eb7af7aa5e0df59b158efb2f \ - --hash=sha256:0a048d4f6630113e54bb4b77e315e1ba32a5a31512c31a273807d0027a7e69ab \ - --hash=sha256:0bb15e7acf8ab35ca8b24b90af52c8b391690ef5c4aec3d31f38f0d37d2cc499 \ - --hash=sha256:0d46ee4764b88b91f16661a8befc6bfb24806d885e27436fdc292ed7e6f6d058 \ - --hash=sha256:0e60821d312f99d3e1569202518dddf10ae547e799d75aef3bca3a2d9e8ee693 \ - --hash=sha256:0fdacad9e0d9fc23e519efd5ea24a70348305e8d7d85ecbb1a5fa66dc834e7fb \ - --hash=sha256:14b9cbc8f7ac98a739558eb86fabc283d4d564dafed50216e7f7ee62d0d25377 \ - --hash=sha256:17c6d6d3260c7f2d94f657e6872591fe8733872a86ed1345bda872cfc8c74885 \ - --hash=sha256:1a2ddbac59dc3716bc79f27906c010406155031a1c801410f1bafff17ea304d2 \ - --hash=sha256:2404f3de742f47cb62d023f0ba7c5a916c9c653d5b368cc966382ae4e57da401 \ - --hash=sha256:24658baf6224d8f280e827f0a50c46ad819ec8ba380a42448e24459daf809cf4 \ - --hash=sha256:24aa705a5f5bd3a8bcfa4d123f03413de5d86e497435693b638cbffb7d5d8a1b \ - --hash=sha256:2770bb0d5e3cc0e31e7318db06efcbcdb7b31bcb1a70086d3177692a02256f59 \ - --hash=sha256:331ad15c39c9fe9186ceaf87203a9ecf5ae0ba2538c9e898e3a6967e8ad3db6f \ - --hash=sha256:3aa9d43b02a0c681f0bfbc12d476d47b2b2b6a3f9287f11ee42989a268a1833c \ - --hash=sha256:41f4915e09218744d8bae14759f983e466ab69b178de38066f7579892ff2a555 \ - --hash=sha256:4304d4416ff032ed50ad6bb87416d802e67139e31c0bde4628f36a47a3164bfa \ - --hash=sha256:435a22d00ec7d7ea533db494da8581b05977f9c37338c80bc86314bec2619424 \ - --hash=sha256:45f7cd36186db767d803b1473b3c659d57a23b5fa491ad83c6d40f2af58e4dbb \ - --hash=sha256:48b389b1fd5144603d61d752afd7167dfd205973a43151ae5045b35793232aa2 \ - --hash=sha256:4e67d26532bfd8b7f7c05d5a766d6f437b362c1bf203a3a5ce3593a645e870b8 \ - --hash=sha256:516a405f174fd3b88829eabfe4bb296ac602d6a0f68e0d64d5ac9456194a5b7e \ - --hash=sha256:5ba5c243f4004c750836f81606a9fcb7841f8874ad8f3bf204ff5e56332b72b9 \ - --hash=sha256:5bdc0f1f610d067c70aa3737ed06e2726fd9d6f7bfee4a351f4c40b6831f4e82 \ - --hash=sha256:6107e445faf057c118d5050560695e46d272e5301feffda3c41849641222a828 \ - --hash=sha256:6327b572f5770293fc062a7ec04160e89741e8552bf1c358d1a23eba68166759 \ - 
--hash=sha256:669b29a9eca6146465cc574659058ed949748f0809a2582d1f1a324eb91054dc \ - --hash=sha256:6ce01337d23884b21c03869d2f68c5523d43174d4fc405490eb0091057943118 \ - --hash=sha256:6d872186c1617d143969defeadac5a904e6e374183e07977eedef9c07c8953bf \ - --hash=sha256:6f76a90c345796c01d85e6332e81cab6d70de83b829cf1d9762d0a3da59c7932 \ - --hash=sha256:70d2aa9fb00cf52034feac4b913181a6e10356019b18ef89bc7c12a283bf5f5a \ - --hash=sha256:7cbc78dc018596315d4e7841c8c3a7ae31cc4d638c9b627f87d52e8abaaf2d29 \ - --hash=sha256:856bf0924d24e7f93b8aee12a3a1095c34085600aa805693fb7f5d1962393206 \ - --hash=sha256:8a98748ed1a1df4ee1d6f927e151ed6c1a09d5ec21684de879c7ea6aa96f58f2 \ - --hash=sha256:93a7350f6706b31f457c1457d3a3259ff9071a66f312ae64dc024f049055f72c \ - --hash=sha256:964823b2fc77b55355999ade496c54dde161c621cb1f6eac61dc30ed1b63cd4c \ - --hash=sha256:a003ac9edc22d99ae1286b0875c460351f4e101f8c9d9d2576e78d7e048f64e0 \ - --hash=sha256:a0ce71725cacc9ebf839630772b07eeec220cbb5f03be1399e0457a1464f8e1a \ - --hash=sha256:a47eef975d2b8b721775a0fa286f50eab535b9d56c70a6e62842134cf7841195 \ - --hash=sha256:a8b5b9712783415695663bd463990e2f00c6750562e6ad1d28e072a611c5f2a6 \ - --hash=sha256:a9015f5b8af1bb6837a3fcb0cdf3b874fe3385ff6274e8b7925d81ccaec3c5c9 \ - --hash=sha256:aec510255ce690d240f7cb23d7114f6b351c733a74c279a84def763660a2c3bc \ - --hash=sha256:b00e7bcd71caa0282cbe3c90966f738e2db91e64092a877c3ff7f19a1628fdcb \ - --hash=sha256:b50aaac7d05c2c26dfd50c3321199f019ba76bb650e346a6ef3616306eed67b0 \ - --hash=sha256:b7b6ea9e36d32582cda3465f54c4b454f62f23cb083ebc7a94e2ca6ef011c3a7 \ - --hash=sha256:bb9333f58fc3a2296fb1d54576138d4cf5d496a2cc118422bd77835e6ae0b9cb \ - --hash=sha256:c1c13185b90bbd3f8b5963cd8ce7ad4ff441924c31e23c975cb150e27c2bf67a \ - --hash=sha256:c3b8bd3133cd50f6b637bb4322822c94c5ce4bf0d724ed5ae70afce62187c492 \ - --hash=sha256:c5d97162c196ce54af6700949ddf9409e9833ef1003b4741c2b39ef46f1d9720 \ - --hash=sha256:c815270206f983309915a6844fe994b2fa47e5d05c4c4cef267c3b30e34dbe42 \ - --hash=sha256:cab2eba3830bf4f6d91e2d6718e0e1c14a2f5ad1af68a89d24ace0c6b17cced7 \ - --hash=sha256:d1df34588123fcc88c872f5acb6f74ae59e9d182a2707097f9e28275ec26a12d \ - --hash=sha256:d6bdcd415ba87846fd317bee0774e412e8792832e7805938987e4ede1d13046d \ - --hash=sha256:db9a30ec064129d605d0f1aedc93e00894b9334ec74ba9c6bdd08147434b33eb \ - --hash=sha256:dbc183e7bef690c9abe5ea67b7b60fdbca81aa8da43468287dae7b5c046107d4 \ - --hash=sha256:dca802c8db0720ce1c49cce1149ff7b06e91ba15fa84b1d59144fef1a1bc7ac2 \ - --hash=sha256:dec6b307ce928e8e112a6bb9921a1cb00a0e14979bf28b98e084a4b8a742bd9b \ - --hash=sha256:df8bb0010fdd0a743b7542589223a2816bdde4d94bb5ad67884348fa2c1c67e8 \ - --hash=sha256:e4094c7b464cf0a858e75cd14b03509e84789abf7b79f8537e6a72152109c76e \ - --hash=sha256:e4760a68cab57bfaa628938e9c2971137e05ce48e762a9cb53b76c9b569f1204 \ - --hash=sha256:eb09b82377233b902d4c3fbeeb7ad731cdab579c6c6fda1f763cd779139e47c3 \ - --hash=sha256:eb862356ee9391dc5a0b3cbc00f416b48c1b9a52d252d898e5b7696a5f9fe150 \ - --hash=sha256:ef9528915df81b8f4c7612b19b8628214c65c9b7f74db2e34a646a0a2a0da2d4 \ - --hash=sha256:f3157624b7558b914cb039fd1af735e5e8049a87c817cc215109ad1c8779df76 \ - --hash=sha256:f3e0992f23bbb0be00a921eae5363329253c3b86287db27092461c887b791e5e \ - --hash=sha256:f9338cc05451f1942d0d8203ec2c346c830f8e86469903d5126c1f0a13a2bcbb \ - --hash=sha256:ffef8fd58a36fb5f1196919638f73dd3ae0db1a878982b27a9a5a176ede4ba91 +cffi==1.17.1 \ + --hash=sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8 \ + 
--hash=sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2 \ + --hash=sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1 \ + --hash=sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15 \ + --hash=sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36 \ + --hash=sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824 \ + --hash=sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8 \ + --hash=sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36 \ + --hash=sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17 \ + --hash=sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf \ + --hash=sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc \ + --hash=sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3 \ + --hash=sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed \ + --hash=sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702 \ + --hash=sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1 \ + --hash=sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8 \ + --hash=sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903 \ + --hash=sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6 \ + --hash=sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d \ + --hash=sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b \ + --hash=sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e \ + --hash=sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be \ + --hash=sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c \ + --hash=sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683 \ + --hash=sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9 \ + --hash=sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c \ + --hash=sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8 \ + --hash=sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1 \ + --hash=sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4 \ + --hash=sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655 \ + --hash=sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67 \ + --hash=sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595 \ + --hash=sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0 \ + --hash=sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65 \ + --hash=sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41 \ + --hash=sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6 \ + --hash=sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401 \ + --hash=sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6 \ + --hash=sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3 \ + --hash=sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16 \ + --hash=sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93 \ + --hash=sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e \ + --hash=sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4 \ + 
--hash=sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964 \ + --hash=sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c \ + --hash=sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576 \ + --hash=sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0 \ + --hash=sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3 \ + --hash=sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662 \ + --hash=sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3 \ + --hash=sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff \ + --hash=sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5 \ + --hash=sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd \ + --hash=sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f \ + --hash=sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5 \ + --hash=sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14 \ + --hash=sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d \ + --hash=sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9 \ + --hash=sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7 \ + --hash=sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382 \ + --hash=sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a \ + --hash=sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e \ + --hash=sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a \ + --hash=sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4 \ + --hash=sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99 \ + --hash=sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87 \ + --hash=sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b # via # -r requirements.in # cryptography @@ -1516,99 +1516,99 @@ yapf==0.40.2 \ # via # -r requirements.in # envoy-code-check -yarl==1.9.7 \ - --hash=sha256:03e917cc44a01e1be60a83ee1a17550b929490aaa5df2a109adc02137bddf06b \ - --hash=sha256:050f3e4d886be55728fef268587d061c5ce6f79a82baba71840801b63441c301 \ - --hash=sha256:0a1b8fd849567be56342e988e72c9d28bd3c77b9296c38b9b42d2fe4813c9d3f \ - --hash=sha256:0d8cf3d0b67996edc11957aece3fbce4c224d0451c7c3d6154ec3a35d0e55f6b \ - --hash=sha256:0fdb156a06208fc9645ae7cc0fca45c40dd40d7a8c4db626e542525489ca81a9 \ - --hash=sha256:10452727843bc847596b75e30a7fe92d91829f60747301d1bd60363366776b0b \ - --hash=sha256:1787dcfdbe730207acb454548a6e19f80ae75e6d2d1f531c5a777bc1ab6f7952 \ - --hash=sha256:1cd450e10cb53d63962757c3f6f7870be49a3e448c46621d6bd46f8088d532de \ - --hash=sha256:1d5594512541e63188fea640b7f066c218d2176203d6e6f82abf702ae3dca3b2 \ - --hash=sha256:1fc728857df4087da6544fc68f62d7017fa68d74201d5b878e18ed4822c31fb3 \ - --hash=sha256:23404842228e6fa8ace235024519df37f3f8e173620407644d40ddca571ff0f4 \ - --hash=sha256:25508739e9b44d251172145f54c084b71747b09e4d237dc2abb045f46c36a66e \ - --hash=sha256:29c80890e0a64fb0e5f71350d48da330995073881f8b8e623154aef631febfb0 \ - --hash=sha256:2d71a5d818d82586ac46265ae01466e0bda0638760f18b21f1174e0dd58a9d2f \ - --hash=sha256:2ead2f87a1174963cc406d18ac93d731fbb190633d3995fa052d10cefae69ed8 \ - --hash=sha256:316c82b499b6df41444db5dea26ee23ece9356e38cea43a8b2af9e6d8a3558e4 \ - --hash=sha256:34736fcc9d6d7080ebbeb0998ecb91e4f14ad8f18648cf0b3099e2420a225d86 \ - 
--hash=sha256:36b16884336c15adf79a4bf1d592e0c1ffdb036a760e36a1361565b66785ec6c \ - --hash=sha256:395ab0d8ce6d104a988da429bcbfd445e03fb4c911148dfd523f69d13f772e47 \ - --hash=sha256:3a7748cd66fef49c877e59503e0cc76179caf1158d1080228e67e1db14554f08 \ - --hash=sha256:3dba2ebac677184d56374fa3e452b461f5d6a03aa132745e648ae8859361eb6b \ - --hash=sha256:3f53df493ec80b76969d6e1ae6e4411a55ab1360e02b80c84bd4b33d61a567ba \ - --hash=sha256:4052dbd0c900bece330e3071c636f99dff06e4628461a29b38c6e222a427cf98 \ - --hash=sha256:48ce93947554c2c85fe97fc4866646ec90840bc1162e4db349b37d692a811755 \ - --hash=sha256:48f7a158f3ca67509d21cb02a96964e4798b6f133691cc0c86cf36e26e26ec8f \ - --hash=sha256:49827dfccbd59c4499605c13805e947349295466e490860a855b7c7e82ec9c75 \ - --hash=sha256:49935cc51d272264358962d050d726c3e5603a616f53e52ea88e9df1728aa2ee \ - --hash=sha256:4a6fa3aeca8efabb0fbbb3b15e0956b0cb77f7d9db67c107503c30af07cd9e00 \ - --hash=sha256:4db97210433366dfba55590e48285b89ad0146c52bf248dd0da492dd9f0f72cf \ - --hash=sha256:522fa3d300d898402ae4e0fa7c2c21311248ca43827dc362a667de87fdb4f1be \ - --hash=sha256:58e3f01673873b8573da3abe138debc63e4e68541b2104a55df4c10c129513a4 \ - --hash=sha256:596069ddeaf72b5eb36cd714dcd2b5751d0090d05a8d65113b582ed9e1c801fb \ - --hash=sha256:5d585c7d834c13f24c7e3e0efaf1a4b7678866940802e11bd6c4d1f99c935e6b \ - --hash=sha256:5e338b6febbae6c9fe86924bac3ea9c1944e33255c249543cd82a4af6df6047b \ - --hash=sha256:60c04415b31a1611ef5989a6084dd6f6b95652c6a18378b58985667b65b2ecb6 \ - --hash=sha256:60f3b5aec3146b6992640592856414870f5b20eb688c1f1d5f7ac010a7f86561 \ - --hash=sha256:62440431741d0b7d410e5cbad800885e3289048140a43390ecab4f0b96dde3bb \ - --hash=sha256:628619008680a11d07243391271b46f07f13b75deb9fe92ef342305058c70722 \ - --hash=sha256:62e110772330d7116f91e79cd83fef92545cb2f36414c95881477aa01971f75f \ - --hash=sha256:653597b615809f2e5f4dba6cd805608b6fd3597128361a22cc612cf7c7a4d1bf \ - --hash=sha256:65e3098969baf221bb45e3b2f60735fc2b154fc95902131ebc604bae4c629ea6 \ - --hash=sha256:6639444d161c693cdabb073baaed1945c717d3982ecedf23a219bc55a242e728 \ - --hash=sha256:71bb1435a84688ed831220c5305d96161beb65cac4a966374475348aa3de4575 \ - --hash=sha256:71d33fd1c219b5b28ee98cd76da0c9398a4ed4792fd75c94135237db05ba5ca8 \ - --hash=sha256:74d3ef5e81f81507cea04bf5ae22f18ef538607a7c754aac2b6e3029956a2842 \ - --hash=sha256:78250f635f221dde97d02c57aade3313310469bc291888dfe32acd1012594441 \ - --hash=sha256:78805148e780a9ca66f3123e04741e344b66cf06b4fb13223e3a209f39a6da55 \ - --hash=sha256:7ab906a956d2109c6ea11e24c66592b06336e2743509290117f0f7f47d2c1dd3 \ - --hash=sha256:7fc441408ed0d9c6d2d627a02e281c21f5de43eb5209c16636a17fc704f7d0f8 \ - --hash=sha256:808eddabcb6f7b2cdb6929b3e021ac824a2c07dc7bc83f7618e18438b1b65781 \ - --hash=sha256:8525f955a2dcc281573b6aadeb8ab9c37e2d3428b64ca6a2feec2a794a69c1da \ - --hash=sha256:867b13c1b361f9ba5d2f84dc5408082f5d744c83f66de45edc2b96793a9c5e48 \ - --hash=sha256:87aa5308482f248f8c3bd9311cd6c7dfd98ea1a8e57e35fb11e4adcac3066003 \ - --hash=sha256:8af0bbd4d84f8abdd9b11be9488e32c76b1501889b73c9e2292a15fb925b378b \ - --hash=sha256:8e8916b1ff7680b1f2b1608c82dc15c569b9f2cb2da100c747c291f1acf18a14 \ - --hash=sha256:91567ff4fce73d2e7ac67ed5983ad26ba2343bc28cb22e1e1184a9677df98d7c \ - --hash=sha256:9163d21aa40ff8528db2aee2b0b6752efe098055b41ab8e5422b2098457199fe \ - --hash=sha256:9c2743e43183e4afbb07d5605693299b8756baff0b086c25236c761feb0e3c56 \ - --hash=sha256:9d319ac113ca47352319cbea92d1925a37cb7bd61a8c2f3e3cd2e96eb33cccae \ - 
--hash=sha256:a48d2b9f0ae29a456fb766ae461691378ecc6cf159dd9f938507d925607591c3 \ - --hash=sha256:a564155cc2194ecd9c0d8f8dc57059b822a507de5f08120063675eb9540576aa \ - --hash=sha256:a95167ae34667c5cc7d9206c024f793e8ffbadfb307d5c059de470345de58a21 \ - --hash=sha256:a9552367dc440870556da47bb289a806f08ad06fbc4054072d193d9e5dd619ba \ - --hash=sha256:a99cecfb51c84d00132db909e83ae388793ca86e48df7ae57f1be0beab0dcce5 \ - --hash=sha256:b1557456afce5db3d655b5f8a31cdcaae1f47e57958760525c44b76e812b4987 \ - --hash=sha256:bc23d870864971c8455cfba17498ccefa53a5719ea9f5fce5e7e9c1606b5755f \ - --hash=sha256:bc9233638b07c2e4a3a14bef70f53983389bffa9e8cb90a2da3f67ac9c5e1842 \ - --hash=sha256:c81c28221a85add23a0922a6aeb2cdda7f9723e03e2dfae06fee5c57fe684262 \ - --hash=sha256:ca5e86be84492fa403c4dcd4dcaf8e1b1c4ffc747b5176f7c3d09878c45719b0 \ - --hash=sha256:cb870907e8b86b2f32541403da9455afc1e535ce483e579bea0e6e79a0cc751c \ - --hash=sha256:cddebd096effe4be90fd378e4224cd575ac99e1c521598a6900e94959006e02e \ - --hash=sha256:cf37dd0008e5ac5c3880198976063c491b6a15b288d150d12833248cf2003acb \ - --hash=sha256:cf85599c9336b89b92c313519bcaa223d92fa5d98feb4935a47cce2e8722b4b8 \ - --hash=sha256:d06d6a8f98dd87646d98f0c468be14b201e47ec6092ad569adf835810ad0dffb \ - --hash=sha256:d0aabe557446aa615693a82b4d3803c102fd0e7a6a503bf93d744d182a510184 \ - --hash=sha256:d35f9cdab0ec5e20cf6d2bd46456cf599052cf49a1698ef06b9592238d1cf1b1 \ - --hash=sha256:d8ad761493d5aaa7ab2a09736e62b8a220cb0b10ff8ccf6968c861cd8718b915 \ - --hash=sha256:daa69a3a2204355af39f4cfe7f3870d87c53d77a597b5100b97e3faa9460428b \ - --hash=sha256:dd08da4f2d171e19bd02083c921f1bef89f8f5f87000d0ffc49aa257bc5a9802 \ - --hash=sha256:df47612129e66f7ce7c9994d4cd4e6852f6e3bf97699375d86991481796eeec8 \ - --hash=sha256:e649d37d04665dddb90994bbf0034331b6c14144cc6f3fbce400dc5f28dc05b7 \ - --hash=sha256:e7f9cabfb8b980791b97a3ae3eab2e38b2ba5eab1af9b7495bdc44e1ce7c89e3 \ - --hash=sha256:e8362c941e07fbcde851597672a5e41b21dc292b7d5a1dc439b7a93c9a1af5d9 \ - --hash=sha256:eefda67ba0ba44ab781e34843c266a76f718772b348f7c5d798d8ea55b95517f \ - --hash=sha256:f28e602edeeec01fc96daf7728e8052bc2e12a672e2a138561a1ebaf30fd9df7 \ - --hash=sha256:f3aaf9fa960d55bd7876d55d7ea3cc046f3660df1ff73fc1b8c520a741ed1f21 \ - --hash=sha256:f5ddad20363f9f1bbedc95789c897da62f939e6bc855793c3060ef8b9f9407bf \ - --hash=sha256:f6b8bbdd425d0978311520ea99fb6c0e9e04e64aee84fac05f3157ace9f81b05 \ - --hash=sha256:f87d8645a7a806ec8f66aac5e3b1dcb5014849ff53ffe2a1f0b86ca813f534c7 \ - --hash=sha256:f9d715b2175dff9a49c6dafdc2ab3f04850ba2f3d4a77f69a5a1786b057a9d45 \ - --hash=sha256:fcd3d94b848cba132f39a5b40d80b0847d001a91a6f35a2204505cdd46afe1b2 \ - --hash=sha256:ff03f1c1ac474c66d474929ae7e4dd195592c1c7cc8c36418528ed81b1ca0a79 +yarl==1.9.11 \ + --hash=sha256:0324506afab4f2e176a93cb08b8abcb8b009e1f324e6cbced999a8f5dd9ddb76 \ + --hash=sha256:0a205ec6349879f5e75dddfb63e069a24f726df5330b92ce76c4752a436aac01 \ + --hash=sha256:0b0c70c451d2a86f8408abced5b7498423e2487543acf6fcf618b03f6e669b0a \ + --hash=sha256:0b2a8e5eb18181060197e3d5db7e78f818432725c0759bc1e5a9d603d9246389 \ + --hash=sha256:0cbcc2c54084b2bda4109415631db017cf2960f74f9e8fd1698e1400e4f8aae2 \ + --hash=sha256:17107b4b8c43e66befdcbe543fff2f9c93f7a3a9f8e3a9c9ac42bffeba0e8828 \ + --hash=sha256:1c82126817492bb2ebc946e74af1ffa10aacaca81bee360858477f96124be39a \ + --hash=sha256:1cdb8f5bb0534986776a43df84031da7ff04ac0cf87cb22ae8a6368231949c40 \ + --hash=sha256:21e56c30e39a1833e4e3fd0112dde98c2abcbc4c39b077e6105c76bb63d2aa04 \ + 
--hash=sha256:224f8186c220ff00079e64bf193909829144d4e5174bb58665ef0da8bf6955c4 \ + --hash=sha256:2d1c81c3b92bef0c1c180048e43a5a85754a61b4f69d6f84df8e4bd615bef25d \ + --hash=sha256:30f201bc65941a4aa59c1236783efe89049ec5549dafc8cd2b63cc179d3767b0 \ + --hash=sha256:3a26a24bbd19241283d601173cea1e5b93dec361a223394e18a1e8e5b0ef20bd \ + --hash=sha256:3fcd056cb7dff3aea5b1ee1b425b0fbaa2fbf6a1c6003e88caf524f01de5f395 \ + --hash=sha256:441049d3a449fb8756b0535be72c6a1a532938a33e1cf03523076700a5f87a01 \ + --hash=sha256:4567cc08f479ad80fb07ed0c9e1bcb363a4f6e3483a490a39d57d1419bf1c4c7 \ + --hash=sha256:475e09a67f8b09720192a170ad9021b7abf7827ffd4f3a83826317a705be06b7 \ + --hash=sha256:47c0a3dc8076a8dd159de10628dea04215bc7ddaa46c5775bf96066a0a18f82b \ + --hash=sha256:4915818ac850c3b0413e953af34398775b7a337babe1e4d15f68c8f5c4872553 \ + --hash=sha256:498439af143b43a2b2314451ffd0295410aa0dcbdac5ee18fc8633da4670b605 \ + --hash=sha256:4ae079573efeaa54e5978ce86b77f4175cd32f42afcaf9bfb8a0677e91f84e4e \ + --hash=sha256:4d368e3b9ecd50fa22017a20c49e356471af6ae91c4d788c6e9297e25ddf5a62 \ + --hash=sha256:4e4f820fde9437bb47297194f43d29086433e6467fa28fe9876366ad357bd7bb \ + --hash=sha256:504d19320c92532cabc3495fb7ed6bb599f3c2bfb45fed432049bf4693dbd6d0 \ + --hash=sha256:51a6f770ac86477cd5c553f88a77a06fe1f6f3b643b053fcc7902ab55d6cbe14 \ + --hash=sha256:545f2fbfa0c723b446e9298b5beba0999ff82ce2c126110759e8dac29b5deaf4 \ + --hash=sha256:54cc24be98d7f4ff355ca2e725a577e19909788c0db6beead67a0dda70bd3f82 \ + --hash=sha256:55a67dd29367ce7c08a0541bb602ec0a2c10d46c86b94830a1a665f7fd093dfa \ + --hash=sha256:569309a3efb8369ff5d32edb2a0520ebaf810c3059f11d34477418c90aa878fd \ + --hash=sha256:58081cea14b8feda57c7ce447520e9d0a96c4d010cce54373d789c13242d7083 \ + --hash=sha256:5b593acd45cdd4cf6664d342ceacedf25cd95263b83b964fddd6c78930ea5211 \ + --hash=sha256:5c23f6dc3d7126b4c64b80aa186ac2bb65ab104a8372c4454e462fb074197bc6 \ + --hash=sha256:614fa50fd0db41b79f426939a413d216cdc7bab8d8c8a25844798d286a999c5a \ + --hash=sha256:61ec0e80970b21a8f3c4b97fa6c6d181c6c6a135dbc7b4a601a78add3feeb209 \ + --hash=sha256:63a5dc2866791236779d99d7a422611d22bb3a3d50935bafa4e017ea13e51469 \ + --hash=sha256:675004040f847c0284827f44a1fa92d8baf425632cc93e7e0aa38408774b07c1 \ + --hash=sha256:67abcb7df27952864440c9c85f1c549a4ad94afe44e2655f77d74b0d25895454 \ + --hash=sha256:6de3fa29e76fd1518a80e6af4902c44f3b1b4d7fed28eb06913bba4727443de3 \ + --hash=sha256:6ff184002ee72e4b247240e35d5dce4c2d9a0e81fdbef715dde79ab4718aa541 \ + --hash=sha256:70194da6e99713250aa3f335a7fa246b36adf53672a2bcd0ddaa375d04e53dc0 \ + --hash=sha256:7230007ab67d43cf19200ec15bc6b654e6b85c402f545a6fc565d254d34ff754 \ + --hash=sha256:735b285ea46ca7e86ad261a462a071d0968aade44e1a3ea2b7d4f3d63b5aab12 \ + --hash=sha256:752c0d33b4aacdb147871d0754b88f53922c6dc2aff033096516b3d5f0c02a0f \ + --hash=sha256:752f4b5cf93268dc73c2ae994cc6d684b0dad5118bc87fbd965fd5d6dca20f45 \ + --hash=sha256:755ae9cff06c429632d750aa8206f08df2e3d422ca67be79567aadbe74ae64cc \ + --hash=sha256:79e08c691deae6fcac2fdde2e0515ac561dd3630d7c8adf7b1e786e22f1e193b \ + --hash=sha256:7d2dee7d6485807c0f64dd5eab9262b7c0b34f760e502243dd83ec09d647d5e1 \ + --hash=sha256:8503989860d7ac10c85cb5b607fec003a45049cf7a5b4b72451e87893c6bb990 \ + --hash=sha256:85333d38a4fa5997fa2ff6fd169be66626d814b34fa35ec669e8c914ca50a097 \ + --hash=sha256:8c2cf0c7ad745e1c6530fe6521dfb19ca43338239dfcc7da165d0ef2332c0882 \ + --hash=sha256:8d6e1c1562b53bd26efd38e886fc13863b8d904d559426777990171020c478a9 \ + 
--hash=sha256:8d7b717f77846a9631046899c6cc730ea469c0e2fb252ccff1cc119950dbc296 \ + --hash=sha256:8e8ed183c7a8f75e40068333fc185566472a8f6c77a750cf7541e11810576ea5 \ + --hash=sha256:9137975a4ccc163ad5d7a75aad966e6e4e95dedee08d7995eab896a639a0bce2 \ + --hash=sha256:91c478741d7563a12162f7a2db96c0d23d93b0521563f1f1f0ece46ea1702d33 \ + --hash=sha256:922ba3b74f0958a0b5b9c14ff1ef12714a381760c08018f2b9827632783a590c \ + --hash=sha256:94f71d54c5faf715e92c8434b4a0b968c4d1043469954d228fc031d51086f143 \ + --hash=sha256:95adc179a02949c4560ef40f8f650a008380766eb253d74232eb9c024747c111 \ + --hash=sha256:9636e4519f6c7558fdccf8f91e6e3b98df2340dc505c4cc3286986d33f2096c2 \ + --hash=sha256:9e290de5db4fd4859b4ed57cddfe793fcb218504e65781854a8ac283ab8d5518 \ + --hash=sha256:9fae7ec5c9a4fe22abb995804e6ce87067dfaf7e940272b79328ce37c8f22097 \ + --hash=sha256:a5706821e1cf3c70dfea223e4e0958ea354f4e2af9420a1bd45c6b547297fb97 \ + --hash=sha256:a744bdeda6c86cf3025c94eb0e01ccabe949cf385cd75b6576a3ac9669404b68 \ + --hash=sha256:aaeffcb84faceb2923a94a8a9aaa972745d3c728ab54dd011530cc30a3d5d0c1 \ + --hash=sha256:aeba4aaa59cb709edb824fa88a27cbbff4e0095aaf77212b652989276c493c00 \ + --hash=sha256:afcac5bda602b74ff701e1f683feccd8cce0d5a21dbc68db81bf9bd8fd93ba56 \ + --hash=sha256:b30703a7ade2b53f02e09a30685b70cd54f65ed314a8d9af08670c9a5391af1b \ + --hash=sha256:b3dfe17b4aed832c627319da22a33f27f282bd32633d6b145c726d519c89fbaf \ + --hash=sha256:b4a0e724a28d7447e4d549c8f40779f90e20147e94bf949d490402eee09845c6 \ + --hash=sha256:b8f847cc092c2b85d22e527f91ea83a6cf51533e727e2461557a47a859f96734 \ + --hash=sha256:c189bf01af155ac9882e128d9f3b3ad68a1f2c2f51404afad7201305df4e12b1 \ + --hash=sha256:c1db9a4384694b5d20bdd9cb53f033b0831ac816416ab176c8d0997835015d22 \ + --hash=sha256:c305c1bdf10869b5e51facf50bd5b15892884aeae81962ae4ba061fc11217103 \ + --hash=sha256:c335342d482e66254ae94b1231b1532790afb754f89e2e0c646f7f19d09740aa \ + --hash=sha256:c59b23886234abeba62087fd97d10fb6b905d9e36e2f3465d1886ce5c0ca30df \ + --hash=sha256:c5b7b307140231ea4f7aad5b69355aba2a67f2d7bc34271cffa3c9c324d35b27 \ + --hash=sha256:c6f6c87665a9e18a635f0545ea541d9640617832af2317d4f5ad389686b4ed3d \ + --hash=sha256:c7548a90cb72b67652e2cd6ae80e2683ee08fde663104528ac7df12d8ef271d2 \ + --hash=sha256:ca35996e0a4bed28fa0640d9512d37952f6b50dea583bcc167d4f0b1e112ac7f \ + --hash=sha256:cc295969f8c2172b5d013c0871dccfec7a0e1186cf961e7ea575d47b4d5cbd32 \ + --hash=sha256:ce2bd986b1e44528677c237b74d59f215c8bfcdf2d69442aa10f62fd6ab2951c \ + --hash=sha256:d65ad67f981e93ea11f87815f67d086c4f33da4800cf2106d650dd8a0b79dda4 \ + --hash=sha256:d93c612b2024ac25a3dc01341fd98fdd19c8c5e2011f3dcd084b3743cba8d756 \ + --hash=sha256:ddad5cfcda729e22422bb1c85520bdf2770ce6d975600573ac9017fe882f4b7e \ + --hash=sha256:dfa9b9d5c9c0dbe69670f5695264452f5e40947590ec3a38cfddc9640ae8ff89 \ + --hash=sha256:e4a8c3dedd081cca134a21179aebe58b6e426e8d1e0202da9d1cafa56e01af3c \ + --hash=sha256:e5f50a2e26cc2b89186f04c97e0ec0ba107ae41f1262ad16832d46849864f914 \ + --hash=sha256:e700eb26635ce665c018c8cfea058baff9b843ed0cc77aa61849d807bb82a64c \ + --hash=sha256:ef9610b2f5a73707d4d8bac040f0115ca848e510e3b1f45ca53e97f609b54130 \ + --hash=sha256:f568d70b7187f4002b6b500c0996c37674a25ce44b20716faebe5fdb8bd356e7 \ + --hash=sha256:fee45b3bd4d8d5786472e056aa1359cc4dc9da68aded95a10cd7929a0ec661fe \ + --hash=sha256:ff64f575d71eacb5a4d6f0696bfe991993d979423ea2241f23ab19ff63f0f9d1 # via # -r requirements.in # aiohttp @@ -1717,7 +1717,7 @@ zstandard==0.23.0 \ # via envoy-base-utils # The following packages are considered to be unsafe 
in a requirements file: -setuptools==74.0.0 \ - --hash=sha256:0274581a0037b638b9fc1c6883cc71c0210865aaa76073f7882376b641b84e8f \ - --hash=sha256:a85e96b8be2b906f3e3e789adec6a9323abf79758ecfa3065bd740d81158b11e +setuptools==74.1.2 \ + --hash=sha256:5f4c08aa4d3ebcb57a50c33b1b07e94315d7fc7230f7115e47fc99776c8ce308 \ + --hash=sha256:95b40ed940a1c67eb70fc099094bd6e99c6ee7c23aa2306f4d2697ba7916f9c6 # via -r requirements.in diff --git a/tools/code_format/check_format_test.sh b/tools/code_format/check_format_test.sh deleted file mode 100755 index 2d843c3c9214..000000000000 --- a/tools/code_format/check_format_test.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash - -tools="$(dirname "$(dirname "$(realpath "$0")")")" -root=$(realpath "$tools/..") -ci="${root}/ci" -export ci -cd "$root" || exit 1 -exec ./ci/run_envoy_docker.sh ./tools/code_format/check_format_test_helper.sh "$@" diff --git a/tools/code_format/check_format_test_helper.py b/tools/code_format/check_format_test_helper.py deleted file mode 100755 index aeb6b70f25aa..000000000000 --- a/tools/code_format/check_format_test_helper.py +++ /dev/null @@ -1,335 +0,0 @@ -#!/usr/bin/env python3 - -# Tests check_format.py. This must be run in a context where the clang -# version and settings are compatible with the one in the Envoy -# docker. Normally this is run via check_format_test.sh, which -# executes it in under docker. - -from __future__ import print_function - -from run_command import run_command -import argparse -import logging -import os -import shutil -import sys -import tempfile - -curr_dir = os.path.dirname(os.path.realpath(__file__)) -tools = os.path.dirname(curr_dir) -src = os.path.join(tools, 'testdata', 'check_format') -check_format = f"{sys.executable} {os.path.join(curr_dir, 'check_format.py')}" -check_format_config = f"{os.path.join(curr_dir, 'config.yaml')}" -errors = 0 - - -# Runs the 'check_format' operation, on the specified file, printing -# the comamnd run and the status code as well as the stdout, and returning -# all of that to the caller. -def run_check_format(operation, filename): - command = f"{check_format} {operation} {filename} --config_path={check_format_config}" - status, stdout, stderr = run_command(command) - return (command, status, stdout + stderr) - - -def get_input_file(filename, extra_input_files=None): - files_to_copy = [filename] - if extra_input_files is not None: - files_to_copy.extend(extra_input_files) - for f in files_to_copy: - infile = os.path.join(src, f) - directory = os.path.dirname(f) - if not directory == '' and not os.path.isdir(directory): - os.makedirs(directory) - shutil.copyfile(infile, f) - return filename - - -# Attempts to fix file, returning a 4-tuple: the command, input file name, -# output filename, captured stdout as an array of lines, and the error status -# code. -def fix_file_helper(filename, extra_input_files=None): - command, status, stdout = run_check_format( - "fix", get_input_file(filename, extra_input_files=extra_input_files)) - infile = os.path.join(src, filename) - return command, infile, filename, status, stdout - - -# Attempts to fix a file, returning the status code and the generated output. -# If the fix was successful, the diff is returned as a string-array. If the file -# was not fixable, the error-messages are returned as a string-array. 
-def fix_file_expecting_success(file, extra_input_files=None): - command, infile, outfile, status, stdout = fix_file_helper( - file, extra_input_files=extra_input_files) - if status != 0: - print("FAILED: " + infile) - emit_stdout_as_error(stdout) - return 1 - status, stdout, stderr = run_command('diff ' + outfile + ' ' + infile + '.gold') - if status != 0: - print("FAILED: " + infile) - emit_stdout_as_error(stdout + stderr) - return 1 - return 0 - - -def fix_file_expecting_no_change(file): - command, infile, outfile, status, stdout = fix_file_helper(file) - if status != 0: - return 1 - status, stdout, stderr = run_command('diff ' + outfile + ' ' + infile) - if status != 0: - logging.error(file + ': expected file to remain unchanged') - return 1 - return 0 - - -def emit_stdout_as_error(stdout): - logging.error("\n".join(stdout)) - - -def expect_error(filename, status, stdout, expected_substring): - if status == 0: - logging.error("%s: Expected failure `%s`, but succeeded" % (filename, expected_substring)) - return 1 - for line in stdout: - if expected_substring in line: - return 0 - logging.error("%s: Could not find '%s' in:\n" % (filename, expected_substring)) - emit_stdout_as_error(stdout) - return 1 - - -def fix_file_expecting_failure(filename, expected_substring): - command, infile, outfile, status, stdout = fix_file_helper(filename) - return expect_error(filename, status, stdout, expected_substring) - - -def check_file_expecting_error(filename, expected_substring, extra_input_files=None): - command, status, stdout = run_check_format( - "check", get_input_file(filename, extra_input_files=extra_input_files)) - return expect_error(filename, status, stdout, expected_substring) - - -def check_and_fix_error(filename, expected_substring, extra_input_files=None): - errors = check_file_expecting_error( - filename, expected_substring, extra_input_files=extra_input_files) - errors += fix_file_expecting_success(filename, extra_input_files=extra_input_files) - return errors - - -def check_tool_not_found_error(): - # Temporarily change PATH to test the error about lack of external tools. - oldPath = os.environ["PATH"] - os.environ["PATH"] = "/sbin:/usr/sbin" - clang_format = os.getenv("CLANG_FORMAT", "clang-format") - # If CLANG_FORMAT points directly to the binary, skip this test. - if os.path.isfile(clang_format) and os.access(clang_format, os.X_OK): - os.environ["PATH"] = oldPath - return 0 - errors = check_file_expecting_error( - "no_namespace_envoy.cc", "Command %s not found." % clang_format) - os.environ["PATH"] = oldPath - return errors - - -def check_unfixable_error(filename, expected_substring): - errors = check_file_expecting_error(filename, expected_substring) - errors += fix_file_expecting_failure(filename, expected_substring) - return errors - - -def check_file_expecting_ok(filename): - command, status, stdout = run_check_format("check", get_input_file(filename)) - if status != 0: - logging.error("Expected %s to have no errors; status=%d, output:\n" % (filename, status)) - emit_stdout_as_error(stdout) - return status + fix_file_expecting_no_change(filename) - - -def run_checks(): - errors = 0 - - # The following error is the error about unavailability of external tools. - errors += check_tool_not_found_error() - - # The following errors can be detected but not fixed automatically. 
- errors += check_unfixable_error( - "no_namespace_envoy.cc", "Unable to find Envoy namespace or NOLINT(namespace-envoy)") - errors += check_unfixable_error("mutex.cc", "Don't use <mutex> or <condition_variable*>") - errors += check_unfixable_error( - "condition_variable.cc", "Don't use <mutex> or <condition_variable*>") - errors += check_unfixable_error( - "condition_variable_any.cc", "Don't use <mutex> or <condition_variable*>") - errors += check_unfixable_error("shared_mutex.cc", "shared_mutex") - errors += check_unfixable_error("shared_mutex.cc", "shared_mutex") - real_time_inject_error = ( - "Don't reference real-world time sources; use TimeSystem::advanceTime(Wait|Async)") - errors += check_unfixable_error("real_time_source.cc", real_time_inject_error) - errors += check_unfixable_error("real_time_system.cc", real_time_inject_error) - errors += check_unfixable_error( - "duration_value.cc", - "Don't use ambiguous duration(value), use an explicit duration type, e.g. Event::TimeSystem::Milliseconds(value)" - ) - errors += check_unfixable_error("system_clock.cc", real_time_inject_error) - errors += check_unfixable_error("steady_clock.cc", real_time_inject_error) - errors += check_unfixable_error( - "unpack_to.cc", "Don't use UnpackTo() directly, use MessageUtil::unpackToNoThrow() instead") - errors += check_unfixable_error( - "condvar_wait_for.cc", "Don't use CondVar::waitFor(); use TimeSystem::waitFor() instead.") - errors += check_unfixable_error("sleep.cc", real_time_inject_error) - errors += check_unfixable_error("std_atomic_free_functions.cc", "std::atomic_*") - errors += check_unfixable_error("std_get_time.cc", "std::get_time") - errors += check_unfixable_error( - "no_namespace_envoy.cc", "Unable to find Envoy namespace or NOLINT(namespace-envoy)") - errors += check_unfixable_error("bazel_tools.BUILD", "unexpected @bazel_tools reference") - errors += check_unfixable_error( - "proto.BUILD", "unexpected direct external dependency on protobuf") - errors += check_unfixable_error( - "proto_deps.cc", "unexpected direct dependency on google.protobuf") - errors += check_unfixable_error("attribute_packed.cc", "Don't use __attribute__((packed))") - errors += check_unfixable_error( - "designated_initializers.cc", "Don't use designated initializers") - errors += check_unfixable_error("elvis_operator.cc", "Don't use the '?:' operator") - errors += check_unfixable_error( - "testing_test.cc", "Don't use 'using testing::Test;, elaborate the type instead") - errors += check_unfixable_error( - "serialize_as_string.cc", - "Don't use MessageLite::SerializeAsString for generating deterministic serialization") - errors += check_unfixable_error( - "counter_from_string.cc", - "Don't lookup stats by name at runtime; use StatName saved during construction") - errors += check_unfixable_error( - "gauge_from_string.cc", - "Don't lookup stats by name at runtime; use StatName saved during construction") - errors += check_unfixable_error( - "histogram_from_string.cc", - "Don't lookup stats by name at runtime; use StatName saved during construction") - errors += check_unfixable_error( - "regex.cc", "Don't use std::regex in code that handles untrusted input. Use RegexMatcher") - errors += check_unfixable_error( - "grpc_init.cc", - "Don't call grpc_init() or grpc_shutdown() directly, instantiate Grpc::GoogleGrpcContext. " + "See #8282") - errors += check_unfixable_error( - "grpc_shutdown.cc", - "Don't call grpc_init() or grpc_shutdown() directly, instantiate Grpc::GoogleGrpcContext. 
" - + "See #8282") - errors += check_unfixable_error( - "source/raw_try.cc", - "Don't use raw try, use TRY_ASSERT_MAIN_THREAD if on the main thread otherwise don't use exceptions." - ) - errors += check_unfixable_error("clang_format_double_off.cc", "clang-format nested off") - errors += check_unfixable_error("clang_format_trailing_off.cc", "clang-format remains off") - errors += check_unfixable_error("clang_format_double_on.cc", "clang-format nested on") - errors += check_unfixable_error( - "proto_enum_mangling.cc", "Don't use mangled Protobuf names for enum constants") - errors += check_unfixable_error( - "test_naming.cc", "Test names should be CamelCase, starting with a capital letter") - errors += check_unfixable_error("mock_method_n.cc", "use MOCK_METHOD() instead") - errors += check_unfixable_error("for_each_n.cc", "use an alternative for loop instead") - errors += check_unfixable_error( - "test/register_factory.cc", - "Don't use Registry::RegisterFactory or REGISTER_FACTORY in tests, use " - "Registry::InjectFactory instead.") - errors += check_unfixable_error( - "strerror.cc", "Don't use strerror; use Envoy::errorDetails instead") - errors += check_unfixable_error( - "std_unordered_map.cc", "Don't use std::unordered_map; use absl::flat_hash_map instead " - + "or absl::node_hash_map if pointer stability of keys/values is required") - errors += check_unfixable_error( - "std_unordered_set.cc", "Don't use std::unordered_set; use absl::flat_hash_set instead " - + "or absl::node_hash_set if pointer stability of keys/values is required") - errors += check_unfixable_error("std_any.cc", "Don't use std::any; use absl::any instead") - errors += check_unfixable_error( - "std_get_if.cc", "Don't use std::get_if; use absl::get_if instead") - errors += check_unfixable_error( - "std_holds_alternative.cc", - "Don't use std::holds_alternative; use absl::holds_alternative instead") - errors += check_unfixable_error( - "std_make_optional.cc", "Don't use std::make_optional; use absl::make_optional instead") - errors += check_unfixable_error( - "std_monostate.cc", "Don't use std::monostate; use absl::monostate instead") - errors += check_unfixable_error( - "std_optional.cc", "Don't use std::optional; use absl::optional instead") - errors += check_unfixable_error( - "std_string_view.cc", - "Don't use std::string_view or toStdStringView; use absl::string_view instead") - errors += check_unfixable_error( - "std_variant.cc", "Don't use std::variant; use absl::variant instead") - errors += check_unfixable_error("std_visit.cc", "Don't use std::visit; use absl::visit instead") - errors += check_unfixable_error( - "throw.cc", "Don't introduce throws into exception-free files, use error statuses instead.") - errors += check_file_expecting_ok("commented_throw.cc") - errors += check_unfixable_error( - "repository_url.bzl", "Only repository_locations.bzl may contains URL references") - errors += check_unfixable_error( - "repository_urls.bzl", "Only repository_locations.bzl may contains URL references") - - # The following files have errors that can be automatically fixed. 
- errors += check_and_fix_error( - "over_enthusiastic_spaces.cc", "./over_enthusiastic_spaces.cc:3: over-enthusiastic spaces") - errors += check_and_fix_error( - "extra_enthusiastic_spaces.cc", - "./extra_enthusiastic_spaces.cc:3: over-enthusiastic spaces") - errors += check_and_fix_error( - "angle_bracket_include.cc", "envoy includes should not have angle brackets") - errors += check_and_fix_error("proto_style.cc", "incorrect protobuf type reference") - errors += check_and_fix_error("long_line.cc", "clang-format check failed") - errors += check_and_fix_error("header_order.cc", "header_order.py check failed") - errors += check_and_fix_error( - "clang_format_on.cc", "./clang_format_on.cc:7: over-enthusiastic spaces") - # Validate that a missing license is added. - errors += check_and_fix_error("license.BUILD", "envoy_build_fixer check failed") - # Validate that an incorrect license is replaced and reordered. - errors += check_and_fix_error("update_license.BUILD", "envoy_build_fixer check failed") - # Validate that envoy_package() is added where there is an envoy_* rule occurring. - errors += check_and_fix_error("add_envoy_package.BUILD", "envoy_build_fixer check failed") - # Validate that we don't add envoy_package() when no envoy_* rule. - errors += check_file_expecting_ok("skip_envoy_package.BUILD") - # Validate that we clean up gratuitous blank lines. - errors += check_and_fix_error("canonical_spacing.BUILD", "envoy_build_fixer check failed") - # Validate that unused loads are removed. - errors += check_and_fix_error("remove_unused_loads.BUILD", "envoy_build_fixer check failed") - # Validate that API proto package deps are computed automagically. - errors += check_and_fix_error( - "canonical_api_deps.BUILD", - "envoy_build_fixer check failed", - extra_input_files=[ - "canonical_api_deps.cc", "canonical_api_deps.h", "canonical_api_deps.other.cc" - ]) - errors += check_and_fix_error("bad_envoy_build_sys_ref.BUILD", "Superfluous '@envoy//' prefix") - errors += check_and_fix_error("proto_format.proto", "clang-format check failed") - errors += check_and_fix_error( - "cpp_std.cc", - "term absl::make_unique< should be replaced with standard library term std::make_unique<") - errors += check_and_fix_error( - "code_conventions.cc", "term .Times(1); should be replaced with preferred term ;") - errors += check_and_fix_error( - "code_conventions.cc", - "term Stats::ScopePtr should be replaced with preferred term Stats::ScopeSharedPtr") - - errors += check_file_expecting_ok("real_time_source_override.cc") - errors += check_file_expecting_ok("duration_value_zero.cc") - errors += check_file_expecting_ok("time_system_wait_for.cc") - errors += check_file_expecting_ok("clang_format_off.cc") - return errors - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='tester for check_format.py.') - parser.add_argument('--log', choices=['INFO', 'WARN', 'ERROR'], default='INFO') - args = parser.parse_args() - logging.basicConfig(format='%(message)s', level=args.log) - - # Now create a temp directory to copy the input files, so we can fix them - # without actually fixing our testdata. This requires chdiring to the temp - # directory, so it's annoying to comingle check-tests and fix-tests. 
- with tempfile.TemporaryDirectory() as tmp: - os.chdir(tmp) - errors = run_checks() - - if errors != 0: - logging.error("%d FAILURES" % errors) - exit(1) - logging.warning("PASS") diff --git a/tools/code_format/check_format_test_helper.sh b/tools/code_format/check_format_test_helper.sh deleted file mode 100755 index 7868efbf99cd..000000000000 --- a/tools/code_format/check_format_test_helper.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash - -tools="$(dirname "$(dirname "$(realpath "$0")")")" -root=$(realpath "$tools/..") - -cd "$root" || exit 1 -# to satisfy dependency on run_command -export PYTHONPATH="$tools" -./tools/code_format/check_format_test_helper.py "$@"