diff --git a/.circleci/config.yml b/.circleci/config.yml
index de09fac702..5afbb5d6f3 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -704,9 +704,20 @@ jobs:
             echo ${GITHUB_OCI_TOKEN} | docker login ghcr.io -u apollo-bot2 --password-stdin
             # TODO: Can't figure out how to build multi-arch image from ARTIFACT_URL right now. Figure out later...
             # Build and push debug image
-            docker buildx build --platform linux/amd64 --push --build-arg ARTIFACT_URL="${ARTIFACT_URL}" --build-arg DEBUG_IMAGE="true" --build-arg ROUTER_RELEASE=${VERSION} -f dockerfiles/Dockerfile.router -t ${ROUTER_TAG}:${VERSION}-debug .
+            docker buildx build --load --platform linux/amd64 --build-arg ARTIFACT_URL="${ARTIFACT_URL}" --build-arg DEBUG_IMAGE="true" --build-arg ROUTER_RELEASE=${VERSION} -f dockerfiles/Dockerfile.router -t ${ROUTER_TAG}:${VERSION}-debug .
+            docker push ${ROUTER_TAG}:${VERSION}-debug
             # Build and push release image
-            docker buildx build --platform linux/amd64 --push --build-arg ARTIFACT_URL="${ARTIFACT_URL}" --build-arg ROUTER_RELEASE=${VERSION} -f dockerfiles/Dockerfile.router -t ${ROUTER_TAG}:${VERSION} .
+            docker buildx build --load --platform linux/amd64 --build-arg ARTIFACT_URL="${ARTIFACT_URL}" --build-arg ROUTER_RELEASE=${VERSION} -f dockerfiles/Dockerfile.router -t ${ROUTER_TAG}:${VERSION} .
+            docker push ${ROUTER_TAG}:${VERSION}
+            # save containers for analysis
+            mkdir built-containers
+            docker save -o built-containers/router_${VERSION}-debug.tar ${ROUTER_TAG}:${VERSION}-debug
+            docker save -o built-containers/router_${VERSION}.tar ${ROUTER_TAG}:${VERSION}
+
+      - persist_to_workspace:
+          root: .
+          paths:
+            - "built-containers/*.tar"
       - run:
           name: Helm build
           command: |
@@ -946,6 +957,25 @@ workflows:
           parameters:
             platform: [ macos_build, windows_build, amd_linux_build, arm_linux_build ]
+      - secops/wiz-docker:
+          context:
+            - platform-docker-ro
+            - wiz
+            - github-orb
+          requires:
+            - build_release
+          container-dir: /tmp/workspace/built-containers
+          # Disables all PR comments from this job
+          do-pr-comments: false
+          # Scan job will return 1 if findings violating the Wiz policy are found.
+          # Toggle off to prevent any CI failures OR
+          # contact Apollo's Security team to adjust what violates the
+          # Wiz policy used in this scan.
+          fail-on-findings: true
+          # Configure scan job to use a policy specific to apollo-router.
+          # This allows us to tailor the policy applied during the scans to router.
+          wiz-policies: Apollo-Router-Vulnerabilities-Policy
 
   release:
     when:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1e7d0e9640..a79f2bd29f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,161 @@ All notable changes to Router will be documented in this file.
 
 This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html).
 
+# [1.40.0] - 2024-02-14
+
+## 🚀 Features
+
+### GraphOS entity caching ([Issue #4478](https://github.com/apollographql/router/issues/4478))
+
+> ⚠️ This is a preview for an [Enterprise feature](https://www.apollographql.com/blog/platform/evaluating-apollo-router-understanding-free-and-open-vs-commercial-features/) of the Apollo Router. It requires an organization with a [GraphOS Enterprise plan](https://www.apollographql.com/pricing/).
+>
+> If your organization doesn't currently have an Enterprise plan, you can test out this functionality by signing up for a free Enterprise trial.
+
+The Apollo Router can now cache fine-grained subgraph responses at the entity level, which are reusable between requests.
+
+Caching federated GraphQL responses can be done at the HTTP level, but it's inefficient because a lot of data can be shared between different requests. The Apollo Router now contains an entity cache that works at the subgraph level: it caches subgraph responses, splits them by entities, and reuses entities across subgraph requests.
+Along with reducing the cache size, the router's entity cache brings more flexibility in how and what to cache. It allows the router to store different parts of a response with different expiration dates, and to link the cache with the authorization context to avoid serving stale, unauthorized data.
+
+As a preview feature, it's subject to our [Preview launch stage](https://www.apollographql.com/docs/resources/product-launch-stages/#preview) expectations. It doesn't support cache invalidation. We're making it available to test and gather feedback.
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4195
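For illustration only, a minimal `router.yaml` sketch of enabling the preview. The key names follow the `preview_entity_cache` schema added in this pull request; the Redis URL, TTL values, and the `products` subgraph name are placeholders:

```yaml
preview_entity_cache:
  enabled: true
  redis:
    urls: ["redis://localhost:6379"] # placeholder address
    ttl: 24h                         # default expiration for cached entries
  subgraphs:
    products:                        # hypothetical subgraph name
      enabled: true
      ttl: 60s                       # per-subgraph override
```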
+### Graduate distributed query plan caching from experimental ([Issue #4575](https://github.com/apollographql/router/issues/4575))
+
+[Distributed query plan caching](https://www.apollographql.com/docs/router/configuration/distributed-caching#distributed-query-plan-caching) has been validated in production deployments and is now a fully supported, non-experimental Enterprise feature of the Apollo Router.
+
+To migrate your router configuration, replace `supergraph.query_planning.experimental_cache` with `supergraph.query_planning.cache`.
+
+This release also adds improvements to the distributed cache:
+ 1. The `.` separator is replaced with `:` in the Redis cache key to align with conventions.
+ 2. The cache key length is reduced.
+ 3. A federation version is added to the cache key to prevent confusion when routers with different federation versions (and potentially different ways to generate a query plan) target the same cache.
+ 4. Cache insertion is moved to a parallel task. Once the query plan is created, this allows a request to be processed immediately instead of waiting for cache insertion to finish. This improvement has also been applied to the APQ cache.
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4583
+
+### Replace selector to extract body elements from subgraph responses via JSONPath ([Issue #4443](https://github.com/apollographql/router/issues/4443))
+
+The `subgraph_response_body` [selector](https://www.apollographql.com/docs/router/configuration/telemetry/instrumentation/selectors/) has been deprecated and replaced with selectors for a response body's constituent elements: `subgraph_response_data` and `subgraph_response_errors`.
+
+Both `subgraph_response_data` and `subgraph_response_errors` take a JSONPath expression that is used to fetch data or errors from a subgraph response.
+
+An example configuration:
+
+```yaml
+telemetry:
+  instrumentation:
+    spans:
+      subgraph:
+        attributes:
+          "my_attribute":
+            subgraph_response_data: "$.productName"
+            subgraph_response_errors: "$.[0].message"
+```
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/4579
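For comparison, a configuration that still uses the deprecated selector (the migration added in this pull request logs a warning for it at startup) looks like this; replace it with the two selectors above:

```yaml
telemetry:
  instrumentation:
    spans:
      subgraph:
        attributes:
          "my_attribute":
            # Deprecated: logs a startup warning; use subgraph_response_data
            # and subgraph_response_errors instead.
            subgraph_response_body: "$.productName"
```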
+### Add a `.remove` method for headers in Rhai
+
+The router supports a new `.remove` method that enables users to remove headers in a Rhai script.
+
+For example:
+
+```rust
+fn supergraph_service(service) {
+    print("registering callbacks for operation timing");
+
+    const request_callback = Fn("process_request");
+    service.map_request(request_callback);
+
+    const response_callback = Fn("process_response");
+    service.map_response(response_callback);
+}
+
+fn process_request(request) {
+    request.context["request_start"] = Router.APOLLO_START.elapsed;
+}
+
+fn process_response(response) {
+    response.headers.remove("x-custom-header")
+}
+```
+
+By [@lrlna](https://github.com/lrlna) in https://github.com/apollographql/router/pull/4632
+
+### Helm update to allow a list of gateways to `VirtualService` ([Issue #4464](https://github.com/apollographql/router/issues/4464))
+
+Configuration of the router's Helm chart has been updated to allow multiple gateways. This enables configuration of multiple gateways in an Istio `VirtualService`.
+
+The previous configuration for a single `virtualservice.gatewayName` has been deprecated in favor of a configuration for an array of `virtualservice.gatewayNames`.
+
+By [@marcantoine-bibeau](https://github.com/marcantoine-bibeau) in https://github.com/apollographql/router/pull/4520
+
+### Configure logging format automatically based on terminal ([Issue #4369](https://github.com/apollographql/router/issues/4369))
+
+You can configure the logging output format when running with an interactive shell.
+
+If both `format` and `tty_format` are configured, then the format used depends on how the router is run:
+
+* If running with an interactive shell, then `tty_format` takes precedence.
+* If running with a non-interactive shell, then `format` takes precedence.
+
+You can explicitly set the format in `router.yaml` with `telemetry.exporters.logging.stdout.tty_format`:
+
+```yaml title="router.yaml"
+telemetry:
+  exporters:
+    logging:
+      stdout:
+        enabled: true
+        format: json
+        tty_format: text
+```
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/4567
+
+### Add configurable histogram buckets per metric ([Issue #4543](https://github.com/apollographql/router/issues/4543))
+
+The router supports overriding instrument settings for metrics with [OpenTelemetry views](https://opentelemetry.io/docs/concepts/signals/metrics/#views). You can use views to override default histogram buckets.
+
+Configure views with the `views` option. For example:
+
+```yaml
+telemetry:
+  exporters:
+    metrics:
+      common:
+        service_name: apollo-router
+        views:
+          - name: apollo_router_http_request_duration_seconds # Instrument name you want to edit. You can use wildcards in names; to target all instruments, use '*'.
+            unit: "ms" # (Optional) override the unit
+            description: "my new description of this metric" # (Optional) override the description
+            aggregation: # (Optional)
+              histogram:
+                buckets: # Override the default buckets configured for this histogram
+                  - 1
+                  - 2
+                  - 3
+                  - 4
+                  - 5
+            allowed_attribute_keys: # (Optional) Keep only the listed attributes on the metric
+              - status
+```
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/4572
+
+## 🐛 Fixes
+
+### Fix `active_session_count` when future is dropped ([Issue #4601](https://github.com/apollographql/router/issues/4601))
+
+Fixes [an issue](https://github.com/apollographql/router/issues/4601) where `apollo_router_session_count_active` would increase indefinitely due to the request future getting dropped before a counter could be decremented.
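The fix replaces the manual increment/decrement calls with an RAII guard, so the counter is decremented whenever the request ends, including when its future is cancelled. Below is a condensed, self-contained sketch of the `SessionCountGuard` pattern introduced in this pull request; the full version, which also reports the gauge via `tracing`, appears in the `axum_factory/axum_http_server_factory.rs` hunk later in this diff:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

static ACTIVE_SESSION_COUNT: AtomicU64 = AtomicU64::new(0);

struct SessionCountGuard;

impl SessionCountGuard {
    // Increment the counter when a request starts.
    fn start() -> Self {
        ACTIVE_SESSION_COUNT.fetch_add(1, Ordering::Acquire);
        Self
    }
}

impl Drop for SessionCountGuard {
    // Runs on every exit path, including when the request future is dropped.
    fn drop(&mut self) {
        ACTIVE_SESSION_COUNT.fetch_sub(1, Ordering::Acquire);
    }
}

fn main() {
    {
        let _guard = SessionCountGuard::start();
        assert_eq!(ACTIVE_SESSION_COUNT.load(Ordering::Relaxed), 1);
    } // guard dropped here, decrementing the count
    assert_eq!(ACTIVE_SESSION_COUNT.load(Ordering::Relaxed), 0);
}
```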
+ +By [@xuorig](https://github.com/xuorig) in https://github.com/apollographql/router/pull/4619 + + + # [1.39.1] - 2024-02-08 ## 🐛 Fixes diff --git a/Cargo.lock b/Cargo.lock index c7e7a1e2bf..a7db32334e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -198,7 +198,7 @@ checksum = "e2f12a04dc5164646affc9ef40f519741321b59d3503d0ca06719ce750741594" dependencies = [ "apollo-parser", "ariadne", - "indexmap 2.1.0", + "indexmap 2.2.3", "rowan", "salsa", "serde", @@ -226,7 +226,7 @@ checksum = "1e1f1a447ab0e78b150b076fa53fd0ac0a58563a493c9e588f447db842c18486" dependencies = [ "apollo-compiler", "derive_more", - "indexmap 2.1.0", + "indexmap 2.2.3", "lazy_static", "petgraph", "salsa", @@ -249,7 +249,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.39.1" +version = "1.40.0" dependencies = [ "access-json", "anyhow", @@ -266,6 +266,7 @@ dependencies = [ "aws-types", "axum", "base64 0.21.7", + "basic-toml", "bloomfilter", "brotli", "buildstructor", @@ -297,9 +298,9 @@ dependencies = [ "humantime-serde", "hyper", "hyper-rustls", - "indexmap 2.1.0", + "indexmap 2.2.3", "insta", - "itertools 0.12.0", + "itertools 0.12.1", "jsonpath-rust", "jsonpath_lib", "jsonschema", @@ -375,7 +376,6 @@ dependencies = [ "tokio-stream", "tokio-tungstenite", "tokio-util", - "toml", "tonic 0.9.2", "tonic-build", "tower", @@ -404,7 +404,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.39.1" +version = "1.40.0" dependencies = [ "apollo-parser", "apollo-router", @@ -420,7 +420,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.39.1" +version = "1.40.0" dependencies = [ "anyhow", "cargo-scaffold", @@ -585,7 +585,7 @@ dependencies = [ "async-lock 3.1.2", "async-task", "concurrent-queue", - "fastrand 2.0.0", + "fastrand 2.0.1", "futures-lite 2.0.0", "slab", ] @@ -638,7 +638,7 @@ dependencies = [ "futures-lite 2.0.0", "parking", "polling 3.3.1", - "rustix 0.38.30", + "rustix 0.38.31", "slab", "tracing", "windows-sys 0.52.0", @@ -677,7 +677,7 @@ dependencies = [ "cfg-if", "event-listener 3.1.0", "futures-lite 1.13.0", - "rustix 0.38.30", + "rustix 0.38.31", "windows-sys 0.48.0", ] @@ -693,7 +693,7 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix 0.38.30", + "rustix 0.38.31", "signal-hook-registry", "slab", "windows-sys 0.48.0", @@ -790,9 +790,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "aws-config" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b30c39ebe61f75d1b3785362b1586b41991873c9ab3e317a9181c246fb71d82" +checksum = "7af266887e24cd5f6d2ea7433cacd25dcd4773b7f70e488701968a7cdf51df57" dependencies = [ "aws-credential-types", "aws-runtime", @@ -807,7 +807,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand 2.0.0", + "fastrand 2.0.1", "hex", "http 0.2.11", "hyper", @@ -820,9 +820,9 @@ dependencies = [ [[package]] name = "aws-credential-types" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33cc49dcdd31c8b6e79850a179af4c367669150c7ac0135f176c61bec81a70f7" +checksum = "2d56f287a9e65e4914bfedb5b22c056b65e4c232fca512d5509a9df36386759f" dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", @@ -832,9 +832,9 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eb031bff99877c26c28895766f7bb8484a05e24547e370768d6cc9db514662aa" +checksum = "2d6a29eca8ea8982028a4df81883e7001e250a21d323b86418884b5345950a4b" dependencies = [ "aws-credential-types", "aws-sigv4", @@ -844,7 +844,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand 2.0.0", + "fastrand 2.0.1", "http 0.2.11", "http-body", "percent-encoding", @@ -855,9 +855,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f486420a66caad72635bc2ce0ff6581646e0d32df02aa39dc983bfe794955a5b" +checksum = "e2d7f527c7b28af1a641f7d89f9e6a4863e8ec00f39d2b731b056fc5ec5ce829" dependencies = [ "aws-credential-types", "aws-runtime", @@ -877,9 +877,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39ddccf01d82fce9b4a15c8ae8608211ee7db8ed13a70b514bbfe41df3d24841" +checksum = "0d0be3224cd574ee8ab5fd7c32087876f25c134c27ac603fcb38669ed8d346b0" dependencies = [ "aws-credential-types", "aws-runtime", @@ -899,9 +899,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a591f8c7e6a621a501b2b5d2e88e1697fcb6274264523a6ad4d5959889a41ce" +checksum = "5b3167c60d82a13bbaef569da06041644ff41e85c6377e5dad53fa2526ccfe9d" dependencies = [ "aws-credential-types", "aws-runtime", @@ -922,9 +922,9 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c371c6b0ac54d4605eb6f016624fb5c7c2925d315fdf600ac1bf21b19d5f1742" +checksum = "54b1cbe0eee57a213039088dbdeca7be9352f24e0d72332d961e8a1cb388f82d" dependencies = [ "aws-credential-types", "aws-smithy-http", @@ -945,9 +945,9 @@ dependencies = [ [[package]] name = "aws-smithy-async" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ee2d09cce0ef3ae526679b522835d63e75fb427aca5413cd371e490d52dcc6" +checksum = "426a5bc369ca7c8d3686439e46edc727f397a47ab3696b13f3ae8c81b3b36132" dependencies = [ "futures-util", "pin-project-lite", @@ -956,9 +956,9 @@ dependencies = [ [[package]] name = "aws-smithy-http" -version = "0.60.4" +version = "0.60.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dab56aea3cd9e1101a0a999447fb346afb680ab1406cebc44b32346e25b4117d" +checksum = "85d6a0619f7b67183067fa3b558f94f90753da2df8c04aeb7336d673f804b0b8" dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", @@ -976,18 +976,18 @@ dependencies = [ [[package]] name = "aws-smithy-json" -version = "0.60.4" +version = "0.60.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3898ca6518f9215f62678870064398f00031912390efd03f1f6ef56d83aa8e" +checksum = "a1c1b5186b6f5c579bf0de1bcca9dd3d946d6d51361ea1d18131f6a0b64e13ae" dependencies = [ "aws-smithy-types", ] [[package]] name = "aws-smithy-query" -version = "0.60.4" +version = "0.60.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda4b1dfc9810e35fba8a620e900522cd1bd4f9578c446e82f49d1ce41d2e9f9" +checksum = "1c0a2ce65882e788d2cf83ff28b9b16918de0460c47bf66c5da4f6c17b4c9694" dependencies = [ "aws-smithy-types", "urlencoding", @@ -995,16 +995,16 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.1.4" +version 
= "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fafdab38f40ad7816e7da5dec279400dd505160780083759f01441af1bbb10ea" +checksum = "b4cb6b3afa5fc9825a75675975dcc3e21764b5476bc91dbc63df4ea3d30a576e" dependencies = [ "aws-smithy-async", "aws-smithy-http", "aws-smithy-runtime-api", "aws-smithy-types", "bytes", - "fastrand 2.0.0", + "fastrand 2.0.1", "h2", "http 0.2.11", "http-body", @@ -1020,9 +1020,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime-api" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c18276dd28852f34b3bf501f4f3719781f4999a51c7bff1a5c6dc8c4529adc29" +checksum = "23165433e80c04e8c09cee66d171292ae7234bae05fa9d5636e33095eae416b2" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -1036,9 +1036,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb3e134004170d3303718baa2a4eb4ca64ee0a1c0a7041dca31b38be0fb414f3" +checksum = "c94a5bec34850b92c9a054dad57b95c1d47f25125f55973e19f6ad788f0381ff" dependencies = [ "base64-simd", "bytes", @@ -1057,18 +1057,18 @@ dependencies = [ [[package]] name = "aws-smithy-xml" -version = "0.60.4" +version = "0.60.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8604a11b25e9ecaf32f9aa56b9fe253c5e2f606a3477f0071e96d3155a5ed218" +checksum = "d16f94c9673412b7a72e3c3efec8de89081c320bf59ea12eed34c417a62ad600" dependencies = [ "xmlparser", ] [[package]] name = "aws-types" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "789bbe008e65636fe1b6dbbb374c40c8960d1232b96af5ff4aec349f9c4accf4" +checksum = "0ff7e122ee50ca962e9de91f5850cc37e2184b1219611eef6d44aa85929b54f6" dependencies = [ "aws-credential-types", "aws-smithy-async", @@ -1250,7 +1250,7 @@ dependencies = [ "async-channel 2.1.1", "async-lock 3.1.2", "async-task", - "fastrand 2.0.0", + "fastrand 2.0.1", "futures-io", "futures-lite 2.0.0", "piper", @@ -1463,9 +1463,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.18" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c" +checksum = "80c21025abd42669a92efc996ef13cfb2c5c627858421ea58d5c3b331a6c134f" dependencies = [ "clap_builder", "clap_derive", @@ -1473,9 +1473,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.18" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7" +checksum = "458bf1f341769dfcf849846f65dffdf9146daa56bcd2a47cb4e1de9915567c99" dependencies = [ "anstream", "anstyle", @@ -1485,9 +1485,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.7" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" +checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" dependencies = [ "heck 0.4.1", "proc-macro2 1.0.76", @@ -1497,9 +1497,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" +checksum = 
"98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "cmake" @@ -2188,9 +2188,9 @@ dependencies = [ [[package]] name = "dhat" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f2aaf837aaf456f6706cb46386ba8dffd4013a757e36f4ea05c20dd46b209a3" +checksum = "98cd11d84628e233de0ce467de10b8633f4ddaecafadefc86e13b84b8739b827" dependencies = [ "backtrace", "lazy_static", @@ -2555,9 +2555,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "ff" @@ -2826,7 +2826,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c1155db57329dca6d018b61e76b1488ce9a2e5e44028cac420a5898f4fcef63" dependencies = [ - "fastrand 2.0.0", + "fastrand 2.0.1", "futures-core", "futures-io", "memchr", @@ -3102,7 +3102,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.11", - "indexmap 2.1.0", + "indexmap 2.2.3", "slab", "tokio", "tokio-util", @@ -3463,9 +3463,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.1.0" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" dependencies = [ "equivalent", "hashbrown 0.14.1", @@ -3598,7 +3598,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.30", + "rustix 0.38.31", "windows-sys 0.48.0", ] @@ -3631,9 +3631,9 @@ dependencies = [ [[package]] name = "itertools" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" dependencies = [ "either", ] @@ -3716,13 +3716,14 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "8.3.0" +version = "9.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" +checksum = "5c7ea04a7c5c055c175f189b6dc6ba036fd62306b58c66c9f6389036c503a3f4" dependencies = [ "base64 0.21.7", + "js-sys", "pem", - "ring 0.16.20", + "ring 0.17.5", "serde", "serde_json", "simple_asn1", @@ -3803,9 +3804,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.152" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libfuzzer-sys" @@ -3936,9 +3937,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7" +checksum = "db2c024b41519440580066ba82aab04092b333e09066a5eb86c7c4890df31f22" dependencies = [ "hashbrown 0.14.1", ] @@ -3987,9 +3988,9 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" 
[[package]] name = "mediatype" -version = "0.19.17" +version = "0.19.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83a018c36a54f4e12c30464bbc59311f85d3f6f4d6c1b4fa4ea9db2b174ddefc" +checksum = "8878cd8d1b3c8c8ae4b2ba0a36652b7cf192f618a599a7fbdfa25cffd4ea72dd" [[package]] name = "memchr" @@ -4248,6 +4249,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-integer" version = "0.1.45" @@ -4283,9 +4290,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", "libm", @@ -4707,11 +4714,12 @@ checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "pem" -version = "1.1.1" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" +checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" dependencies = [ - "base64 0.13.1", + "base64 0.21.7", + "serde", ] [[package]] @@ -4789,7 +4797,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.1.0", + "indexmap 2.2.3", "serde", "serde_derive", ] @@ -4833,7 +4841,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" dependencies = [ "atomic-waker", - "fastrand 2.0.0", + "fastrand 2.0.1", "futures-io", ] @@ -4945,7 +4953,7 @@ dependencies = [ "cfg-if", "concurrent-queue", "pin-project-lite", - "rustix 0.38.30", + "rustix 0.38.31", "tracing", "windows-sys 0.52.0", ] @@ -5348,15 +5356,6 @@ dependencies = [ "bitflags 1.3.2", ] -[[package]] -name = "redox_syscall" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "redox_users" version = "0.4.3" @@ -5420,9 +5419,9 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.23" +version = "0.11.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" +checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" dependencies = [ "base64 0.21.7", "bytes", @@ -5447,6 +5446,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", + "sync_wrapper", "system-configuration", "tokio", "tokio-rustls", @@ -5500,9 +5500,9 @@ dependencies = [ [[package]] name = "rhai" -version = "1.16.3" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3625f343d89990133d013e39c46e350915178cf94f1bec9f49b0cbef98a3e3c" +checksum = "f6273372244d04a8a4b0bec080ea1e710403e88c5d9d83f9808b2bfa64f0982a" dependencies = [ "ahash", "bitflags 2.4.0", @@ -5513,6 +5513,7 @@ dependencies = [ "serde", "smallvec", "smartstring", + "thin-vec", ] 
[[package]] @@ -5577,13 +5578,13 @@ dependencies = [ [[package]] name = "rhai_codegen" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db74e3fdd29d969a0ec1f8e79171a6f0f71d0429293656901db382d248c4c021" +checksum = "9db7f8dc4c9d48183a17ce550574c42995252b82d267eaca3fcd1b979159856c" dependencies = [ "proc-macro2 1.0.76", "quote 1.0.35", - "syn 1.0.109", + "syn 2.0.48", ] [[package]] @@ -5791,9 +5792,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.30" +version = "0.38.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" dependencies = [ "bitflags 2.4.0", "errno", @@ -6019,9 +6020,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.195" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" +checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" dependencies = [ "serde_derive", ] @@ -6037,9 +6038,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.195" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" +checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2 1.0.76", "quote 1.0.35", @@ -6071,11 +6072,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.111" +version = "1.0.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" +checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.3", "itoa", "ryu", "serde", @@ -6088,7 +6089,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "feb260b2939374fad6f939f803662d4971d03395fcd03752b674bdba06565779" dependencies = [ "bytes", - "indexmap 2.1.0", + "indexmap 2.2.3", "serde", "serde_json", ] @@ -6165,9 +6166,9 @@ dependencies = [ [[package]] name = "serial_test" -version = "2.0.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e56dd856803e253c8f298af3f4d7eb0ae5e23a737252cd90bb4f3b435033b2d" +checksum = "953ad9342b3aaca7cb43c45c097dd008d4907070394bd0751a0aa8817e5a018d" dependencies = [ "dashmap", "futures", @@ -6179,9 +6180,9 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "2.0.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" +checksum = "b93fb4adc70021ac1b47f7d45e8cc4169baaa7ea58483bc5b721d19a26202212" dependencies = [ "proc-macro2 1.0.76", "quote 1.0.35", @@ -6415,9 +6416,9 @@ dependencies = [ [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" [[package]] name = "strum" @@ -6531,14 +6532,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.9.0" +version = "3.10.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" +checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" dependencies = [ "cfg-if", - "fastrand 2.0.0", - "redox_syscall 0.4.1", - "rustix 0.38.30", + "fastrand 2.0.1", + "rustix 0.38.31", "windows-sys 0.52.0", ] @@ -6626,20 +6626,29 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f18aa187839b2bdb1ad2fa35ead8c4c2976b64e4363c386d45ac0f7ee85c9233" +[[package]] +name = "thin-vec" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" +dependencies = [ + "serde", +] + [[package]] name = "thiserror" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2 1.0.76", "quote 1.0.35", @@ -6719,12 +6728,13 @@ dependencies = [ [[package]] name = "time" -version = "0.3.31" +version = "0.3.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" +checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" dependencies = [ "deranged", "itoa", + "num-conv", "powerfmt", "serde", "time-core", @@ -6739,10 +6749,11 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" +checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" dependencies = [ + "num-conv", "time-core", ] @@ -6782,9 +6793,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.35.1" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes", @@ -6913,7 +6924,7 @@ version = "0.19.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.3", "toml_datetime", "winnow", ] @@ -6924,7 +6935,7 @@ version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c9ffdf896f8daaabf9b66ba8e77ea1ed5ed0f72821b398aba62352e95062951" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.3", "serde", "serde_spanned", "toml_datetime", @@ -7702,9 +7713,9 @@ checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "wasm-streams" -version = "0.3.0" +version = "0.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4609d447824375f43e1ffbc051b50ad8f4b3ae8219680c94452ea05eb240ac7" +checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" dependencies = [ "futures-util", "js-sys", @@ -7738,7 +7749,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.30", + "rustix 0.38.31", ] [[package]] diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index 0def550d61..bce7baaefe 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.39.1" +version = "1.40.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index 661466ca60..abe7fab1d1 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.39.1" +version = "1.40.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" @@ -8,11 +8,11 @@ publish = false [dependencies] anyhow = "1.0.79" -clap = { version = "4.4.18", features = ["derive"] } +clap = { version = "4.5.0", features = ["derive"] } cargo-scaffold = { version = "0.11.0", default-features = false } regex = "1" str_inflector = "0.12.0" toml = "0.8.10" [dev-dependencies] -tempfile = "3.9.0" +tempfile = "3.10.0" copy_dir = "0.1.3" diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index 4cf069a3a6..1bc2bf97ef 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.39.1" +apollo-router = "1.40.0" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index b4b8c140e3..3a7f7389f9 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.39.1" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.40.0" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 903b4ca93c..f306fd03d8 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.39.1" +version = "1.40.0" authors = ["Apollo Graph, Inc. 
"] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" @@ -79,7 +79,7 @@ base64 = "0.21.7" bloomfilter = "1.0.13" buildstructor = "0.5.4" bytes = "1.5.0" -clap = { version = "4.4.18", default-features = false, features = [ +clap = { version = "4.5.0", default-features = false, features = [ "env", "derive", "std", @@ -93,7 +93,7 @@ derive_more = { version = "0.99.17", default-features = false, features = [ "from", "display", ] } -dhat = { version = "0.3.2", optional = true } +dhat = { version = "0.3.3", optional = true } diff = "0.1.13" directories = "5.0.1" displaydoc = "0.2" @@ -109,18 +109,18 @@ humantime = "2.1.0" humantime-serde = "1.1.1" hyper = { version = "0.14.28", features = ["server", "client"] } hyper-rustls = { version = "0.24.2", features = ["http1", "http2"] } -indexmap = { version = "2.1.0", features = ["serde"] } -itertools = "0.12.0" +indexmap = { version = "2.2.3", features = ["serde"] } +itertools = "0.12.1" jsonpath_lib = "0.3.0" jsonpath-rust = "0.3.5" jsonschema = { version = "0.17.1", default-features = false } -jsonwebtoken = "8.3.0" +jsonwebtoken = "9.2.0" lazy_static = "1.4.0" -libc = "0.2.152" +libc = "0.2.153" linkme = "0.3.22" -lru = "0.12.1" +lru = "0.12.2" maplit = "1.0.2" -mediatype = "0.19.17" +mediatype = "0.19.18" mockall = "0.11.4" mime = "0.3.17" multer = "2.1.0" @@ -175,9 +175,9 @@ prost = "0.12.3" prost-types = "0.12.3" proteus = "0.5.0" rand = "0.8.5" -rhai = { version = "1.16.3", features = ["sync", "serde", "internals"] } +rhai = { version = "1.17.1", features = ["sync", "serde", "internals"] } regex = "1.10.3" -reqwest = { version = "0.11.23", default-features = false, features = [ +reqwest = { version = "0.11.24", default-features = false, features = [ "rustls-tls", "rustls-native-certs", "json", @@ -192,10 +192,10 @@ schemars = { version = "0.8.16", features = ["url"] } shellexpand = "3.1.0" sha2 = "0.10.8" semver = "1.0.21" -serde = { version = "1.0.195", features = ["derive", "rc"] } +serde = { version = "1.0.196", features = ["derive", "rc"] } serde_derive_default = "0.1" serde_json_bytes = { version = "0.2.2", features = ["preserve_order"] } -serde_json = { version = "1.0.111", features = [ +serde_json = { version = "1.0.113", features = [ "preserve_order", "float_roundtrip", ] } @@ -204,8 +204,8 @@ serde_yaml = "0.8.26" static_assertions = "1.1.0" strum_macros = "0.25.3" sys-info = "0.9.1" -thiserror = "1.0.56" -tokio = { version = "1.35.1", features = ["full"] } +thiserror = "1.0.57" +tokio = { version = "1.36.0", features = ["full"] } tokio-stream = { version = "0.1.14", features = ["sync", "net"] } tokio-util = { version = "0.7.10", features = ["net", "codec", "time"] } tonic = { version = "0.9.2", features = [ @@ -252,14 +252,14 @@ brotli = "3.4.0" zstd = "0.13.0" zstd-safe = "7.0.0" # note: AWS dependencies should always use the same version -aws-sigv4 = "1.1.4" -aws-credential-types = "1.1.4" -aws-config = "1.1.4" -aws-types = "1.1.4" -aws-smithy-runtime-api = { version = "1.1.4", features = ["client"] } +aws-sigv4 = "1.1.5" +aws-credential-types = "1.1.5" +aws-config = "1.1.5" +aws-types = "1.1.5" +aws-smithy-runtime-api = { version = "1.1.5", features = ["client"] } sha1 = "0.10.6" tracing-serde = "0.1.3" -time = { version = "0.3.31", features = ["serde"] } +time = { version = "0.3.34", features = ["serde"] } similar = { version = "2.4.0", features = ["inline"] } console = "0.15.8" @@ -280,35 +280,35 @@ axum = { version = "0.6.20", features = [ "ws", ] } ecdsa = { version = 
"0.16.9", features = ["signing", "pem", "pkcs8"] } -fred = { version = "7.1.0", features = ["enable-rustls", "mocks"] } +fred = { version = "7.1.2", features = ["enable-rustls", "mocks"] } futures-test = "0.3.30" insta = { version = "1.34.0", features = ["json", "redactions", "yaml"] } maplit = "1.0.2" memchr = { version = "2.7.1", default-features = false } mockall = "0.11.4" -num-traits = "0.2.17" +num-traits = "0.2.18" once_cell = "1.19.0" opentelemetry-stdout = { version = "0.1.0", features = ["trace"] } opentelemetry = { version = "0.20.0", features = ["testing"] } p256 = "0.13.2" rand_core = "0.6.4" -reqwest = { version = "0.11.23", default-features = false, features = [ +reqwest = { version = "0.11.24", default-features = false, features = [ "json", "stream", ] } -rhai = { version = "1.16.3", features = [ +rhai = { version = "1.17.1", features = [ "sync", "serde", "internals", "testing-environ", ] } -serial_test = { version = "2.0.0" } -tempfile = "3.9.0" +serial_test = { version = "3.0.0" } +tempfile = "3.10.0" test-log = { version = "0.2.14", default-features = false, features = [ "trace", ] } test-span = "0.7" -toml = "0.8.8" +basic-toml = "0.1" tower-test = "0.4.0" # See note above in this file about `^tracing` packages which also applies to @@ -327,7 +327,8 @@ rstack = { version = "0.3.3", features = ["dw"], default-features = false } [build-dependencies] tonic-build = "0.9.2" - +basic-toml = "0.1" +serde_json = "1.0.113" [[test]] name = "integration_tests" diff --git a/apollo-router/build/main.rs b/apollo-router/build/main.rs index b323c668bb..763d894df0 100644 --- a/apollo-router/build/main.rs +++ b/apollo-router/build/main.rs @@ -1,5 +1,37 @@ +use std::fs; +use std::path::PathBuf; + mod studio; fn main() -> Result<(), Box> { + let cargo_manifest: serde_json::Value = basic_toml::from_str( + &fs::read_to_string(PathBuf::from(&env!("CARGO_MANIFEST_DIR")).join("Cargo.toml")) + .expect("could not read Cargo.toml"), + ) + .expect("could not parse Cargo.toml"); + + let router_bridge = cargo_manifest + .get("dependencies") + .expect("Cargo.toml does not contain dependencies") + .as_object() + .expect("Cargo.toml dependencies key is not an object") + .get("router-bridge") + .expect("Cargo.toml dependencies does not have an entry for router-bridge"); + let router_bridge_version = router_bridge + .as_str() + .or_else(|| { + router_bridge + .as_object() + .and_then(|o| o.get("version")) + .and_then(|version| version.as_str()) + }) + .expect("router-bridge does not have a version"); + + let mut it = router_bridge_version.split('+'); + let _ = it.next(); + let fed_version = it.next().expect("invalid router-bridge version format"); + + println!("cargo:rustc-env=FEDERATION_VERSION={fed_version}"); + studio::main() } diff --git a/apollo-router/feature_discussions.json b/apollo-router/feature_discussions.json index 1b6f7b5717..59f5a84608 100644 --- a/apollo-router/feature_discussions.json +++ b/apollo-router/feature_discussions.json @@ -5,5 +5,7 @@ "experimental_when_header": "https://github.com/apollographql/router/discussions/1961", "experimental_batching": "https://github.com/apollographql/router/discussions/3840" }, - "preview": {} + "preview": { + "preview_entity_cache": "https://github.com/apollographql/router/discussions/4592" + } } \ No newline at end of file diff --git a/apollo-router/src/axum_factory/axum_http_server_factory.rs b/apollo-router/src/axum_factory/axum_http_server_factory.rs index 1ba7d70669..d0632fe963 100644 --- 
a/apollo-router/src/axum_factory/axum_http_server_factory.rs +++ b/apollo-router/src/axum_factory/axum_http_server_factory.rs @@ -64,6 +64,23 @@ use crate::uplink::license_enforcement::LICENSE_EXPIRED_SHORT_MESSAGE; static ACTIVE_SESSION_COUNT: AtomicU64 = AtomicU64::new(0); +struct SessionCountGuard; + +impl SessionCountGuard { + fn start() -> Self { + let session_count = ACTIVE_SESSION_COUNT.fetch_add(1, Ordering::Acquire) + 1; + tracing::info!(value.apollo_router_session_count_active = session_count,); + Self + } +} + +impl Drop for SessionCountGuard { + fn drop(&mut self) { + let session_count = ACTIVE_SESSION_COUNT.fetch_sub(1, Ordering::Acquire) - 1; + tracing::info!(value.apollo_router_session_count_active = session_count,); + } +} + /// A basic http server using Axum. /// Uses streaming as primary method of response. #[derive(Debug, Default)] @@ -554,8 +571,7 @@ async fn handle_graphql( service: router::BoxService, http_request: Request, ) -> impl IntoResponse { - let session_count = ACTIVE_SESSION_COUNT.fetch_add(1, Ordering::Acquire) + 1; - tracing::info!(value.apollo_router_session_count_active = session_count,); + let _guard = SessionCountGuard::start(); let request: router::Request = http_request.into(); let context = request.context.clone(); @@ -573,9 +589,6 @@ async fn handle_graphql( match res { Err(e) => { - let session_count = ACTIVE_SESSION_COUNT.fetch_sub(1, Ordering::Acquire) - 1; - tracing::info!(value.apollo_router_session_count_active = session_count,); - if let Some(source_err) = e.source() { if source_err.is::() { return RateLimited::new().into_response(); @@ -615,10 +628,6 @@ async fn handle_graphql( } }; - // FIXME: we should instead reduce it after the response has been entirely written - let session_count = ACTIVE_SESSION_COUNT.fetch_sub(1, Ordering::Acquire) - 1; - tracing::info!(value.apollo_router_session_count_active = session_count,); - http::Response::from_parts(parts, body).into_response() } } diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs index 9e2275d60b..537d9f5ca9 100644 --- a/apollo-router/src/configuration/metrics.rs +++ b/apollo-router/src/configuration/metrics.rs @@ -280,7 +280,7 @@ impl InstrumentData { populate_config_instrument!( apollo.router.config.entity_cache, - "$.experimental_entity_cache", + "$.preview_entity_cache", opt.enabled, "$[?(@.enabled)]", opt.subgraph.enabled, diff --git a/apollo-router/src/configuration/migrations/0022-query-planner-cache.yaml b/apollo-router/src/configuration/migrations/0022-query-planner-cache.yaml new file mode 100644 index 0000000000..0d8ea6c614 --- /dev/null +++ b/apollo-router/src/configuration/migrations/0022-query-planner-cache.yaml @@ -0,0 +1,5 @@ +description: The query plan cache is no longer experimental +actions: + - type: move + from: supergraph.query_planning.experimental_cache + to: supergraph.query_planning.cache diff --git a/apollo-router/src/configuration/migrations/0022-spans_subgraph_response_body.yaml b/apollo-router/src/configuration/migrations/0022-spans_subgraph_response_body.yaml new file mode 100644 index 0000000000..31a83dd328 --- /dev/null +++ b/apollo-router/src/configuration/migrations/0022-spans_subgraph_response_body.yaml @@ -0,0 +1,6 @@ +description: log warning because span selector `subgraph_response_body` is deprecated +actions: + - type: log + level: warn + path: telemetry.instrumentation.spans.subgraph.attributes.*.subgraph_response_body + log: "'subgraph_response_body' span selector is deprecated, please use 
'subgraph_response_data' or subgraph_response_error' instead.\n\n List of available selectors https://www.apollographql.com/docs/router/configuration/telemetry/instrumentation/selectors" diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index ec2dbbf23c..15e5bc0e2a 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -848,7 +848,7 @@ impl Default for Apq { #[serde(deny_unknown_fields, default)] pub(crate) struct QueryPlanning { /// Cache configuration - pub(crate) experimental_cache: Cache, + pub(crate) cache: Cache, /// Warms up the cache on reloads by running the query plan over /// a list of the most used queries (from the in memory cache) /// Configures the number of queries warmed up. Defaults to 1/3 of diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index e3d79b384f..8b1fb6c7ca 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -1,6 +1,5 @@ --- source: apollo-router/src/configuration/tests.rs -assertion_line: 31 expression: "&schema" --- { @@ -1310,149 +1309,6 @@ expression: "&schema" }, "additionalProperties": false }, - "experimental_entity_cache": { - "description": "Configuration for entity caching", - "type": "object", - "required": [ - "redis" - ], - "properties": { - "enabled": { - "description": "activates caching for all subgraphs, unless overriden in subgraph specific configuration", - "default": null, - "type": "boolean", - "nullable": true - }, - "metrics": { - "description": "Entity caching evaluation metrics", - "type": "object", - "properties": { - "enabled": { - "description": "enables metrics evaluating the benefits of entity caching", - "default": false, - "type": "boolean" - }, - "separate_per_type": { - "description": "Adds the entity type name to attributes. This can greatly increase the cardinality", - "default": false, - "type": "boolean" - }, - "ttl": { - "description": "Metrics counter TTL", - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - "redis": { - "description": "Redis cache configuration", - "type": "object", - "required": [ - "urls" - ], - "properties": { - "namespace": { - "description": "namespace used to prefix Redis keys", - "type": "string", - "nullable": true - }, - "password": { - "description": "Redis password if not provided in the URLs. 
This field takes precedence over the password in the URL", - "type": "string", - "nullable": true - }, - "timeout": { - "description": "Redis request timeout (default: 2ms)", - "default": null, - "type": "string", - "nullable": true - }, - "tls": { - "description": "TLS client configuration", - "default": null, - "type": "object", - "properties": { - "certificate_authorities": { - "description": "list of certificate authorities in PEM format", - "default": null, - "type": "string", - "nullable": true - }, - "client_authentication": { - "description": "client certificate authentication", - "default": null, - "type": "object", - "required": [ - "certificate_chain", - "key" - ], - "properties": { - "certificate_chain": { - "description": "list of certificates in PEM format", - "writeOnly": true, - "type": "string" - }, - "key": { - "description": "key in PEM format", - "writeOnly": true, - "type": "string" - } - }, - "additionalProperties": false, - "nullable": true - } - }, - "additionalProperties": false, - "nullable": true - }, - "ttl": { - "description": "TTL for entries", - "default": null, - "type": "string", - "nullable": true - }, - "urls": { - "description": "List of URLs to the Redis cluster", - "type": "array", - "items": { - "type": "string", - "format": "uri" - } - }, - "username": { - "description": "Redis username if not provided in the URLs. This field takes precedence over the username in the URL", - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - "subgraphs": { - "description": "Per subgraph configuration", - "type": "object", - "additionalProperties": { - "description": "Per subgraph configuration for entity caching", - "type": "object", - "properties": { - "enabled": { - "description": "activates caching for this subgraph, overrides the global configuration", - "default": null, - "type": "boolean", - "nullable": true - }, - "ttl": { - "description": "expiration for all keys", - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - } - } - }, - "additionalProperties": false - }, "experimental_graphql_validation_mode": { "description": "Set the GraphQL validation implementation to use.", "default": "both", @@ -2165,6 +2021,149 @@ expression: "&schema" }, "additionalProperties": false }, + "preview_entity_cache": { + "description": "Configuration for entity caching", + "type": "object", + "required": [ + "redis" + ], + "properties": { + "enabled": { + "description": "activates caching for all subgraphs, unless overriden in subgraph specific configuration", + "default": null, + "type": "boolean", + "nullable": true + }, + "metrics": { + "description": "Entity caching evaluation metrics", + "type": "object", + "properties": { + "enabled": { + "description": "enables metrics evaluating the benefits of entity caching", + "default": false, + "type": "boolean" + }, + "separate_per_type": { + "description": "Adds the entity type name to attributes. This can greatly increase the cardinality", + "default": false, + "type": "boolean" + }, + "ttl": { + "description": "Metrics counter TTL", + "type": "string", + "nullable": true + } + }, + "additionalProperties": false + }, + "redis": { + "description": "Redis cache configuration", + "type": "object", + "required": [ + "urls" + ], + "properties": { + "namespace": { + "description": "namespace used to prefix Redis keys", + "type": "string", + "nullable": true + }, + "password": { + "description": "Redis password if not provided in the URLs. 
This field takes precedence over the password in the URL", + "type": "string", + "nullable": true + }, + "timeout": { + "description": "Redis request timeout (default: 2ms)", + "default": null, + "type": "string", + "nullable": true + }, + "tls": { + "description": "TLS client configuration", + "default": null, + "type": "object", + "properties": { + "certificate_authorities": { + "description": "list of certificate authorities in PEM format", + "default": null, + "type": "string", + "nullable": true + }, + "client_authentication": { + "description": "client certificate authentication", + "default": null, + "type": "object", + "required": [ + "certificate_chain", + "key" + ], + "properties": { + "certificate_chain": { + "description": "list of certificates in PEM format", + "writeOnly": true, + "type": "string" + }, + "key": { + "description": "key in PEM format", + "writeOnly": true, + "type": "string" + } + }, + "additionalProperties": false, + "nullable": true + } + }, + "additionalProperties": false, + "nullable": true + }, + "ttl": { + "description": "TTL for entries", + "default": null, + "type": "string", + "nullable": true + }, + "urls": { + "description": "List of URLs to the Redis cluster", + "type": "array", + "items": { + "type": "string", + "format": "uri" + } + }, + "username": { + "description": "Redis username if not provided in the URLs. This field takes precedence over the username in the URL", + "type": "string", + "nullable": true + } + }, + "additionalProperties": false + }, + "subgraphs": { + "description": "Per subgraph configuration", + "type": "object", + "additionalProperties": { + "description": "Per subgraph configuration for entity caching", + "type": "object", + "properties": { + "enabled": { + "description": "activates caching for this subgraph, overrides the global configuration", + "default": null, + "type": "boolean", + "nullable": true + }, + "ttl": { + "description": "expiration for all keys", + "type": "string", + "nullable": true + } + }, + "additionalProperties": false + } + } + }, + "additionalProperties": false + }, "progressive_override": { "description": "Configuration for the progressive override plugin", "type": "object" @@ -2373,7 +2372,7 @@ expression: "&schema" "experimental_reuse_query_fragments": null, "defer_support": true, "query_planning": { - "experimental_cache": { + "cache": { "in_memory": { "limit": 512 }, @@ -2424,7 +2423,7 @@ expression: "&schema" "query_planning": { "description": "Query planning options", "default": { - "experimental_cache": { + "cache": { "in_memory": { "limit": 512 }, @@ -2436,7 +2435,7 @@ expression: "&schema" }, "type": "object", "properties": { - "experimental_cache": { + "cache": { "description": "Cache configuration", "default": { "in_memory": { @@ -3001,22 +3000,215 @@ expression: "&schema" "type": "string" } }, - "additionalProperties": false - } - ] - } - }, - "stdout": { - "description": "Settings for logging to stdout.", - "type": "object", - "properties": { - "enabled": { - "description": "Set to true to log to stdout.", - "default": true, - "type": "boolean" + "additionalProperties": false + } + ] + } + }, + "stdout": { + "description": "Settings for logging to stdout.", + "type": "object", + "properties": { + "enabled": { + "description": "Set to true to log to stdout.", + "default": true, + "type": "boolean" + }, + "format": { + "description": "The format to log to stdout.", + "oneOf": [ + { + "description": "Tracing subscriber 
https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/format/struct.Json.html", + "type": "object", + "required": [ + "json" + ], + "properties": { + "json": { + "type": "object", + "properties": { + "display_current_span": { + "description": "Include the current span in this log event.", + "default": false, + "type": "boolean" + }, + "display_filename": { + "description": "Include the filename with the log event.", + "default": false, + "type": "boolean" + }, + "display_level": { + "description": "Include the level with the log event. (default: true)", + "default": true, + "type": "boolean" + }, + "display_line_number": { + "description": "Include the line number with the log event.", + "default": false, + "type": "boolean" + }, + "display_resource": { + "description": "Include the resource with the log event. (default: true)", + "default": true, + "type": "boolean" + }, + "display_span_list": { + "description": "Include all of the containing span information with the log event. (default: true)", + "default": true, + "type": "boolean" + }, + "display_target": { + "description": "Include the target with the log event. (default: true)", + "default": true, + "type": "boolean" + }, + "display_thread_id": { + "description": "Include the thread_id with the log event.", + "default": false, + "type": "boolean" + }, + "display_thread_name": { + "description": "Include the thread_name with the log event.", + "default": false, + "type": "boolean" + }, + "display_timestamp": { + "description": "Include the timestamp with the log event. (default: true)", + "default": true, + "type": "boolean" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Tracing subscriber https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/format/struct.Json.html", + "type": "string", + "enum": [ + "json" + ] + }, + { + "description": "Tracing subscriber https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/format/struct.Full.html", + "type": "object", + "required": [ + "text" + ], + "properties": { + "text": { + "type": "object", + "properties": { + "ansi_escape_codes": { + "description": "Process ansi escapes (default: true)", + "default": true, + "type": "boolean" + }, + "display_current_span": { + "description": "Include the current span in this log event. (default: true)", + "default": true, + "type": "boolean" + }, + "display_filename": { + "description": "Include the filename with the log event.", + "default": false, + "type": "boolean" + }, + "display_level": { + "description": "Include the level with the log event. (default: true)", + "default": true, + "type": "boolean" + }, + "display_line_number": { + "description": "Include the line number with the log event.", + "default": false, + "type": "boolean" + }, + "display_resource": { + "description": "Include the resource with the log event.", + "default": false, + "type": "boolean" + }, + "display_service_name": { + "description": "Include the service name with the log event.", + "default": false, + "type": "boolean" + }, + "display_service_namespace": { + "description": "Include the service namespace with the log event.", + "default": false, + "type": "boolean" + }, + "display_span_list": { + "description": "Include all of the containing span information with the log event. 
(default: true)", + "default": true, + "type": "boolean" + }, + "display_target": { + "description": "Include the target with the log event.", + "default": false, + "type": "boolean" + }, + "display_thread_id": { + "description": "Include the thread_id with the log event.", + "default": false, + "type": "boolean" + }, + "display_thread_name": { + "description": "Include the thread_name with the log event.", + "default": false, + "type": "boolean" + }, + "display_timestamp": { + "description": "Include the timestamp with the log event. (default: true)", + "default": true, + "type": "boolean" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Tracing subscriber https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/format/struct.Full.html", + "type": "string", + "enum": [ + "text" + ] + } + ] + }, + "rate_limit": { + "description": "Log rate limiting. The limit is set per type of log message", + "type": "object", + "properties": { + "capacity": { + "description": "Number of log lines allowed in interval per message", + "default": 1, + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "enabled": { + "description": "Set to true to limit the rate of log messages", + "default": false, + "type": "boolean" + }, + "interval": { + "description": "Interval for rate limiting", + "default": { + "secs": 1, + "nanos": 0 + }, + "type": "string" + } + }, + "additionalProperties": false }, - "format": { - "description": "The format to log to stdout.", + "tty_format": { + "description": "The format to log to stdout when you're running on an interactive terminal. When configured it will automatically use this `tty_format`` instead of the original `format` when an interactive terminal is detected", "oneOf": [ { "description": "Tracing subscriber https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/format/struct.Json.html", @@ -3179,34 +3371,8 @@ expression: "&schema" "text" ] } - ] - }, - "rate_limit": { - "description": "Log rate limiting. 
The limit is set per type of log message", - "type": "object", - "properties": { - "capacity": { - "description": "Number of log lines allowed in interval per message", - "default": 1, - "type": "integer", - "format": "uint32", - "minimum": 0.0 - }, - "enabled": { - "description": "Set to true to limit the rate of log messages", - "default": false, - "type": "boolean" - }, - "interval": { - "description": "Interval for rate limiting", - "default": { - "secs": 1, - "nanos": 0 - }, - "type": "string" - } - }, - "additionalProperties": false + ], + "nullable": true } }, "additionalProperties": false @@ -5164,7 +5330,7 @@ expression: "&schema" "additionalProperties": false }, "buckets": { - "description": "Custom buckets for histograms", + "description": "Custom buckets for all histograms", "default": [ 0.001, 0.005, @@ -5258,6 +5424,74 @@ expression: "&schema" "default": null, "type": "string", "nullable": true + }, + "views": { + "description": "Views applied on metrics", + "type": "array", + "items": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "aggregation": { + "description": "New aggregation settings to set", + "oneOf": [ + { + "description": "An aggregation that summarizes a set of measurements as an histogram with explicitly defined buckets.", + "type": "object", + "required": [ + "histogram" + ], + "properties": { + "histogram": { + "type": "object", + "required": [ + "buckets" + ], + "properties": { + "buckets": { + "type": "array", + "items": { + "type": "number", + "format": "double" + } + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ], + "nullable": true + }, + "allowed_attribute_keys": { + "description": "An allow-list of attribute keys that will be preserved for the instrument.\n\nAny attribute recorded for the instrument with a key not in this set will be dropped. 
If the set is empty, all attributes will be dropped, if `None` all attributes will be kept.", + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true, + "nullable": true + }, + "description": { + "description": "New description to set to the instrument", + "type": "string", + "nullable": true + }, + "name": { + "description": "The instrument name you're targeting", + "type": "string" + }, + "unit": { + "description": "New unit to set to the instrument", + "type": "string", + "nullable": true + } + }, + "additionalProperties": false + } } }, "additionalProperties": false @@ -6892,6 +7126,7 @@ expression: "&schema" "additionalProperties": false }, { + "description": "Deprecated, use SubgraphResponseData and SubgraphResponseError instead", "type": "object", "required": [ "subgraph_response_body" @@ -6963,6 +7198,150 @@ expression: "&schema" }, "additionalProperties": false }, + { + "type": "object", + "required": [ + "subgraph_response_data" + ], + "properties": { + "default": { + "description": "Optional default value.", + "anyOf": [ + { + "description": "bool values", + "type": "boolean" + }, + { + "description": "i64 values", + "type": "integer", + "format": "int64" + }, + { + "description": "f64 values", + "type": "number", + "format": "double" + }, + { + "description": "String values", + "type": "string" + }, + { + "description": "Array of homogeneous values", + "anyOf": [ + { + "description": "Array of bools", + "type": "array", + "items": { + "type": "boolean" + } + }, + { + "description": "Array of integers", + "type": "array", + "items": { + "type": "integer", + "format": "int64" + } + }, + { + "description": "Array of floats", + "type": "array", + "items": { + "type": "number", + "format": "double" + } + }, + { + "description": "Array of strings", + "type": "array", + "items": { + "type": "string" + } + } + ] + } + ], + "nullable": true + }, + "subgraph_response_data": { + "description": "The subgraph response body json path.", + "type": "string" + } + }, + "additionalProperties": false + }, + { + "type": "object", + "required": [ + "subgraph_response_errors" + ], + "properties": { + "default": { + "description": "Optional default value.", + "anyOf": [ + { + "description": "bool values", + "type": "boolean" + }, + { + "description": "i64 values", + "type": "integer", + "format": "int64" + }, + { + "description": "f64 values", + "type": "number", + "format": "double" + }, + { + "description": "String values", + "type": "string" + }, + { + "description": "Array of homogeneous values", + "anyOf": [ + { + "description": "Array of bools", + "type": "array", + "items": { + "type": "boolean" + } + }, + { + "description": "Array of integers", + "type": "array", + "items": { + "type": "integer", + "format": "int64" + } + }, + { + "description": "Array of floats", + "type": "array", + "items": { + "type": "number", + "format": "double" + } + }, + { + "description": "Array of strings", + "type": "array", + "items": { + "type": "string" + } + } + ] + } + ], + "nullable": true + }, + "subgraph_response_errors": { + "description": "The subgraph response body json path.", + "type": "string" + } + }, + "additionalProperties": false + }, { "type": "object", "required": [ diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@query_plan_cache.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@query_plan_cache.yaml.snap new file mode 100644 index 
0000000000..f24d2ed026 --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@query_plan_cache.yaml.snap @@ -0,0 +1,11 @@ +--- +source: apollo-router/src/configuration/tests.rs +expression: new_config +--- +--- +supergraph: + query_planning: + cache: + redis: + urls: [] + diff --git a/apollo-router/src/configuration/testdata/metrics/entities.router.yaml b/apollo-router/src/configuration/testdata/metrics/entities.router.yaml index 0d1675ed87..2539a571ce 100644 --- a/apollo-router/src/configuration/testdata/metrics/entities.router.yaml +++ b/apollo-router/src/configuration/testdata/metrics/entities.router.yaml @@ -1,4 +1,4 @@ -experimental_entity_cache: +preview_entity_cache: redis: urls: [ "redis://localhost:6379" ] timeout: 5ms diff --git a/apollo-router/src/configuration/testdata/migrations/query_plan_cache.yaml b/apollo-router/src/configuration/testdata/migrations/query_plan_cache.yaml new file mode 100644 index 0000000000..80400e7371 --- /dev/null +++ b/apollo-router/src/configuration/testdata/migrations/query_plan_cache.yaml @@ -0,0 +1,5 @@ +supergraph: + query_planning: + experimental_cache: + redis: + urls: [] \ No newline at end of file diff --git a/apollo-router/src/plugin/mod.rs b/apollo-router/src/plugin/mod.rs index a362f2a50f..0807a1856c 100644 --- a/apollo-router/src/plugin/mod.rs +++ b/apollo-router/src/plugin/mod.rs @@ -529,6 +529,15 @@ pub(crate) trait PluginPrivate: Send + Sync + 'static { service } + /// This service handles HTTP communication + fn http_client_service( + &self, + _subgraph_name: &str, + service: crate::services::http::BoxService, + ) -> crate::services::http::BoxService { + service + } + /// Return the name of the plugin. fn name(&self) -> &'static str where @@ -627,6 +636,13 @@ pub(crate) trait DynPlugin: Send + Sync + 'static { service: subgraph::BoxService, ) -> subgraph::BoxService; + /// This service handles HTTP communication + fn http_client_service( + &self, + _subgraph_name: &str, + service: crate::services::http::BoxService, + ) -> crate::services::http::BoxService; + /// Return the name of the plugin. 
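// Unlike the `PluginPrivate` default above, which returns the service
// untouched, `DynPlugin` declares `http_client_service` as a required
// method; the blanket impl further down simply forwards to it.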
fn name(&self) -> &'static str; @@ -662,6 +678,15 @@ where self.subgraph_service(name, service) } + /// This service handles HTTP communication + fn http_client_service( + &self, + name: &str, + service: crate::services::http::BoxService, + ) -> crate::services::http::BoxService { + self.http_client_service(name, service) + } + fn name(&self) -> &'static str { self.name() } diff --git a/apollo-router/src/plugin/serde.rs b/apollo-router/src/plugin/serde.rs index 63ce64b311..5c627de5d6 100644 --- a/apollo-router/src/plugin/serde.rs +++ b/apollo-router/src/plugin/serde.rs @@ -6,6 +6,7 @@ use std::str::FromStr; use access_json::JSONQuery; use http::header::HeaderName; use http::HeaderValue; +use jsonpath_rust::JsonPathInst; use regex::Regex; use serde::de; use serde::de::Error; @@ -209,3 +210,27 @@ where } deserializer.deserialize_str(RegexVisitor) } + +pub(crate) fn deserialize_jsonpath<'de, D>(deserializer: D) -> Result +where + D: serde::Deserializer<'de>, +{ + deserializer.deserialize_str(JSONPathVisitor) +} + +struct JSONPathVisitor; + +impl<'de> serde::de::Visitor<'de> for JSONPathVisitor { + type Value = JsonPathInst; + + fn expecting(&self, formatter: &mut Formatter) -> std::fmt::Result { + write!(formatter, "a JSON path") + } + + fn visit_str(self, s: &str) -> Result + where + E: serde::de::Error, + { + JsonPathInst::from_str(s).map_err(serde::de::Error::custom) + } +} diff --git a/apollo-router/src/plugins/authentication/mod.rs b/apollo-router/src/plugins/authentication/mod.rs index 3d9b81fa7c..03693a1106 100644 --- a/apollo-router/src/plugins/authentication/mod.rs +++ b/apollo-router/src/plugins/authentication/mod.rs @@ -15,6 +15,7 @@ use jsonwebtoken::errors::Error as JWTError; use jsonwebtoken::jwk::AlgorithmParameters; use jsonwebtoken::jwk::EllipticCurve; use jsonwebtoken::jwk::Jwk; +use jsonwebtoken::jwk::KeyAlgorithm; use jsonwebtoken::jwk::KeyOperations; use jsonwebtoken::jwk::PublicKeyUse; use jsonwebtoken::Algorithm; @@ -57,7 +58,7 @@ pub(crate) const APOLLO_AUTHENTICATION_JWT_CLAIMS: &str = "apollo_authentication const HEADER_TOKEN_TRUNCATED: &str = "(truncated)"; #[derive(Debug, Display, Error)] -enum AuthenticationError<'a> { +pub(crate) enum AuthenticationError<'a> { /// Configured header is not convertible to a string CannotConvertToString, @@ -90,6 +91,9 @@ enum AuthenticationError<'a> { /// Invalid issuer: the token's `iss` was '{token}', but signed with a key from '{expected}' InvalidIssuer { expected: String, token: String }, + + /// Unsupported key algorithm: {0} + UnsupportedKeyAlgorithm(KeyAlgorithm), } const DEFAULT_AUTHENTICATION_NETWORK_TIMEOUT: Duration = Duration::from_secs(15); @@ -235,9 +239,9 @@ fn search_jwks( // Furthermore, we would like our algorithms to match, or at least the kty // If we have an algorithm that matches, boost the score - match key.common.algorithm { + match key.common.key_algorithm { Some(algorithm) => { - if algorithm != criteria.alg { + if convert_key_algorithm(algorithm) != Some(criteria.alg) { continue; } key_score += 1; @@ -256,7 +260,7 @@ fn search_jwks( Algorithm::HS256 | Algorithm::HS384 | Algorithm::HS512, AlgorithmParameters::OctetKey(_), ) => { - key.common.algorithm = Some(criteria.alg); + key.common.key_algorithm = Some(convert_algorithm(criteria.alg)); } ( Algorithm::RS256 @@ -267,21 +271,21 @@ fn search_jwks( | Algorithm::PS512, AlgorithmParameters::RSA(_), ) => { - key.common.algorithm = Some(criteria.alg); + key.common.key_algorithm = Some(convert_algorithm(criteria.alg)); } (Algorithm::ES256, 
AlgorithmParameters::EllipticCurve(params)) => { if params.curve == EllipticCurve::P256 { - key.common.algorithm = Some(criteria.alg); + key.common.key_algorithm = Some(convert_algorithm(criteria.alg)); } } (Algorithm::ES384, AlgorithmParameters::EllipticCurve(params)) => { if params.curve == EllipticCurve::P384 { - key.common.algorithm = Some(criteria.alg); + key.common.key_algorithm = Some(convert_algorithm(criteria.alg)); } } (Algorithm::EdDSA, AlgorithmParameters::EllipticCurve(params)) => { if params.curve == EllipticCurve::Ed25519 { - key.common.algorithm = Some(criteria.alg); + key.common.key_algorithm = Some(convert_algorithm(criteria.alg)); } } _ => { @@ -317,9 +321,9 @@ fn search_jwks( .map(|(score, (_, candidate))| ( score, &candidate.common.key_id, - candidate.common.algorithm + candidate.common.key_algorithm )) - .collect::, Option)>>() + .collect::, Option)>>() ); if candidates.is_empty() { @@ -438,7 +442,7 @@ impl Plugin for AuthenticationPlugin { ServiceBuilder::new() .instrument(authentication_service_span()) .checkpoint(move |request: router::Request| { - authenticate(&configuration, &jwks_manager, request) + Ok(authenticate(&configuration, &jwks_manager, request)) }) .service(service) .boxed() @@ -464,7 +468,7 @@ fn authenticate( config: &JWTConf, jwks_manager: &JwksManager, request: router::Request, -) -> Result, BoxError> { +) -> ControlFlow { const AUTHENTICATION_KIND: &str = "JWT"; // We are going to do a lot of similar checking so let's define a local function @@ -473,7 +477,7 @@ fn authenticate( context: Context, error: AuthenticationError, status: StatusCode, - ) -> Result, BoxError> { + ) -> ControlFlow { // This is a metric and will not appear in the logs tracing::info!( monotonic_counter.apollo_authentication_failure_count = 1u64, @@ -489,7 +493,7 @@ fn authenticate( authentication.jwt.failed = true ); tracing::info!(message = %error, "jwt authentication failure"); - let response = router::Response::error_builder() + let response = router::Response::infallible_builder() .error( graphql::Error::builder() .message(error.to_string()) @@ -498,8 +502,8 @@ fn authenticate( ) .status_code(status) .context(context) - .build()?; - Ok(ControlFlow::Break(response)) + .build(); + ControlFlow::Break(response) } // The http_request is stored in a `Router::Request` context. @@ -507,7 +511,7 @@ fn authenticate( let jwt_value_result = match request.router_request.headers().get(&config.header_name) { Some(value) => value.to_str(), None => { - return Ok(ControlFlow::Continue(request)); + return ControlFlow::Continue(request); } }; @@ -638,7 +642,7 @@ fn authenticate( kind = %AUTHENTICATION_KIND ); tracing::info!(monotonic_counter.apollo.router.operations.jwt = 1u64); - return Ok(ControlFlow::Continue(request)); + return ControlFlow::Continue(request); } // We can't find a key to process this JWT. 
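Worth noting in the hunks above: `authenticate` now returns `ControlFlow` directly instead of `Result<ControlFlow<_, _>, BoxError>`, since the failure path is gone; the `checkpoint` call site compensates by wrapping the value in `Ok(...)`. A minimal sketch of that pattern with stand-in types (`Request`, `Response`, and `guard` are illustrative, not the router's actual types):

```rust
use std::ops::ControlFlow;

struct Request { authorized: bool }
struct Response { status: u16 }

// Infallible guard: either let the request continue or break with a response.
fn guard(req: Request) -> ControlFlow<Response, Request> {
    if req.authorized {
        ControlFlow::Continue(req)
    } else {
        ControlFlow::Break(Response { status: 401 })
    }
}

fn main() {
    // A checkpoint-style caller that still expects a Result simply wraps the
    // infallible value, mirroring `Ok(authenticate(...))` in the diff above.
    let outcome: Result<ControlFlow<Response, Request>, String> =
        Ok(guard(Request { authorized: false }));
    match outcome {
        Ok(ControlFlow::Break(res)) => println!("short-circuit with status {}", res.status),
        Ok(ControlFlow::Continue(_)) => println!("request continues"),
        Err(e) => println!("layer error: {e}"),
    }
}
```

Making the guard infallible pushes error handling into the response itself (hence `infallible_builder`), so a failed JWT check can no longer surface as a layer error.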
@@ -675,7 +679,7 @@ fn decode_jwt( } }; - let algorithm = match jwk.common.algorithm { + let key_algorithm = match jwk.common.key_algorithm { Some(a) => a, None => { error = Some(( @@ -686,8 +690,22 @@ fn decode_jwt( } }; + let algorithm = match convert_key_algorithm(key_algorithm) { + Some(a) => a, + None => { + error = Some(( + AuthenticationError::UnsupportedKeyAlgorithm(key_algorithm), + StatusCode::INTERNAL_SERVER_ERROR, + )); + continue; + } + }; + let mut validation = Validation::new(algorithm); validation.validate_nbf = true; + // if set to true, it will reject tokens containing an `aud` claim if the validation does not specify an audience + // we don't validate audience yet, so this is deactivated + validation.validate_aud = false; match decode::(jwt, &decoding_key, &validation) { Ok(v) => return Ok((issuer, v)), @@ -754,6 +772,45 @@ pub(crate) fn jwt_expires_in(context: &Context) -> Duration { } } +//Apparently the jsonwebtoken crate now has 2 different enums for algorithms +pub(crate) fn convert_key_algorithm(algorithm: KeyAlgorithm) -> Option { + Some(match algorithm { + jsonwebtoken::jwk::KeyAlgorithm::HS256 => jsonwebtoken::Algorithm::HS256, + jsonwebtoken::jwk::KeyAlgorithm::HS384 => jsonwebtoken::Algorithm::HS384, + jsonwebtoken::jwk::KeyAlgorithm::HS512 => jsonwebtoken::Algorithm::HS512, + jsonwebtoken::jwk::KeyAlgorithm::ES256 => jsonwebtoken::Algorithm::ES256, + jsonwebtoken::jwk::KeyAlgorithm::ES384 => jsonwebtoken::Algorithm::ES384, + jsonwebtoken::jwk::KeyAlgorithm::RS256 => jsonwebtoken::Algorithm::RS256, + jsonwebtoken::jwk::KeyAlgorithm::RS384 => jsonwebtoken::Algorithm::RS384, + jsonwebtoken::jwk::KeyAlgorithm::RS512 => jsonwebtoken::Algorithm::RS512, + jsonwebtoken::jwk::KeyAlgorithm::PS256 => jsonwebtoken::Algorithm::PS256, + jsonwebtoken::jwk::KeyAlgorithm::PS384 => jsonwebtoken::Algorithm::PS384, + jsonwebtoken::jwk::KeyAlgorithm::PS512 => jsonwebtoken::Algorithm::PS512, + jsonwebtoken::jwk::KeyAlgorithm::EdDSA => jsonwebtoken::Algorithm::EdDSA, + // we do not use the encryption algorithms + jsonwebtoken::jwk::KeyAlgorithm::RSA1_5 + | jsonwebtoken::jwk::KeyAlgorithm::RSA_OAEP + | jsonwebtoken::jwk::KeyAlgorithm::RSA_OAEP_256 => return None, + }) +} + +pub(crate) fn convert_algorithm(algorithm: Algorithm) -> KeyAlgorithm { + match algorithm { + jsonwebtoken::Algorithm::HS256 => jsonwebtoken::jwk::KeyAlgorithm::HS256, + jsonwebtoken::Algorithm::HS384 => jsonwebtoken::jwk::KeyAlgorithm::HS384, + jsonwebtoken::Algorithm::HS512 => jsonwebtoken::jwk::KeyAlgorithm::HS512, + jsonwebtoken::Algorithm::ES256 => jsonwebtoken::jwk::KeyAlgorithm::ES256, + jsonwebtoken::Algorithm::ES384 => jsonwebtoken::jwk::KeyAlgorithm::ES384, + jsonwebtoken::Algorithm::RS256 => jsonwebtoken::jwk::KeyAlgorithm::RS256, + jsonwebtoken::Algorithm::RS384 => jsonwebtoken::jwk::KeyAlgorithm::RS384, + jsonwebtoken::Algorithm::RS512 => jsonwebtoken::jwk::KeyAlgorithm::RS512, + jsonwebtoken::Algorithm::PS256 => jsonwebtoken::jwk::KeyAlgorithm::PS256, + jsonwebtoken::Algorithm::PS384 => jsonwebtoken::jwk::KeyAlgorithm::PS384, + jsonwebtoken::Algorithm::PS512 => jsonwebtoken::jwk::KeyAlgorithm::PS512, + jsonwebtoken::Algorithm::EdDSA => jsonwebtoken::jwk::KeyAlgorithm::EdDSA, + } +} + // This macro allows us to use it in our plugin registry! // register_plugin takes a group name, and a plugin name. 
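// For plugins registered under the "apollo" group, the plugin name doubles as
// the YAML configuration key, which is why the rename to "preview_entity_cache"
// in entity.rs below is paired with the entities.router.yaml change earlier in
// this diff.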
// diff --git a/apollo-router/src/plugins/authentication/snapshots/apollo_router__plugins__authentication__tests__parse_failure_logs.snap b/apollo-router/src/plugins/authentication/snapshots/apollo_router__plugins__authentication__tests__parse_failure_logs.snap index 5ac1a11e4b..125add8b16 100644 --- a/apollo-router/src/plugins/authentication/snapshots/apollo_router__plugins__authentication__tests__parse_failure_logs.snap +++ b/apollo-router/src/plugins/authentication/snapshots/apollo_router__plugins__authentication__tests__parse_failure_logs.snap @@ -7,7 +7,7 @@ keys: alg: HS256 kid: key1 kty: oct - k: c2VjcmV0Cg== + k: c2VjcmV0Cg - use: sig key_ops: - verify diff --git a/apollo-router/src/plugins/authentication/snapshots/apollo_router__plugins__authentication__tests__parse_failure_logs@logs.snap b/apollo-router/src/plugins/authentication/snapshots/apollo_router__plugins__authentication__tests__parse_failure_logs@logs.snap index 7fdc48fc30..f46ba8763c 100644 --- a/apollo-router/src/plugins/authentication/snapshots/apollo_router__plugins__authentication__tests__parse_failure_logs@logs.snap +++ b/apollo-router/src/plugins/authentication/snapshots/apollo_router__plugins__authentication__tests__parse_failure_logs@logs.snap @@ -4,7 +4,7 @@ expression: yaml --- - fields: alg: UnknownAlg - err: "unknown variant `UnknownAlg`, expected one of `HS256`, `HS384`, `HS512`, `ES256`, `ES384`, `RS256`, `RS384`, `RS512`, `PS256`, `PS384`, `PS512`, `EdDSA`" + err: "unknown variant `UnknownAlg`, expected one of `HS256`, `HS384`, `HS512`, `ES256`, `ES384`, `RS256`, `RS384`, `RS512`, `PS256`, `PS384`, `PS512`, `EdDSA`, `RSA1_5`, `RSA-OAEP`, `RSA-OAEP-256`" index: 2 level: WARN message: "ignoring a key since it is not valid, enable debug logs to full content" diff --git a/apollo-router/src/plugins/authentication/testdata/jwks.json b/apollo-router/src/plugins/authentication/testdata/jwks.json index b4f238efc4..71ef0a9a55 100644 --- a/apollo-router/src/plugins/authentication/testdata/jwks.json +++ b/apollo-router/src/plugins/authentication/testdata/jwks.json @@ -4,7 +4,7 @@ "kty": "oct", "kid": "key1", "alg": "HS256", - "k": "c2VjcmV0Cg==", + "k": "c2VjcmV0Cg", "use": "sig" }, { diff --git a/apollo-router/src/plugins/authentication/tests.rs b/apollo-router/src/plugins/authentication/tests.rs index 3aba921745..af3cd14590 100644 --- a/apollo-router/src/plugins/authentication/tests.rs +++ b/apollo-router/src/plugins/authentication/tests.rs @@ -626,7 +626,7 @@ async fn it_finds_key_with_criteria_kid_and_algorithm() { .expect("found a key") .pop() .expect("list isn't empty"); - assert_eq!(Algorithm::HS256, key.common.algorithm.unwrap()); + assert_eq!(KeyAlgorithm::HS256, key.common.key_algorithm.unwrap()); assert_eq!("key2", key.common.key_id.unwrap()); } @@ -643,7 +643,7 @@ async fn it_finds_best_matching_key_with_criteria_algorithm() { .expect("found a key") .pop() .expect("list isn't empty"); - assert_eq!(Algorithm::HS256, key.common.algorithm.unwrap()); + assert_eq!(KeyAlgorithm::HS256, key.common.key_algorithm.unwrap()); assert_eq!("key1", key.common.key_id.unwrap()); } @@ -672,7 +672,7 @@ async fn it_finds_key_with_criteria_algorithm_ec() { .expect("found a key") .pop() .expect("list isn't empty"); - assert_eq!(Algorithm::ES256, key.common.algorithm.unwrap()); + assert_eq!(KeyAlgorithm::ES256, key.common.key_algorithm.unwrap()); assert_eq!( "afda85e09a320cf748177874592de64d", key.common.key_id.unwrap() @@ -692,7 +692,7 @@ async fn it_finds_key_with_criteria_algorithm_rsa() { .expect("found a key") .pop() 
.expect("list isn't empty"); - assert_eq!(Algorithm::RS256, key.common.algorithm.unwrap()); + assert_eq!(KeyAlgorithm::RS256, key.common.key_algorithm.unwrap()); assert_eq!( "022516583d56b68faf40260fda72978a", key.common.key_id.unwrap() @@ -735,7 +735,7 @@ async fn issuer_check() { common: CommonParameters { public_key_use: Some(PublicKeyUse::Signature), key_operations: Some(vec![KeyOperations::Verify]), - algorithm: Some(Algorithm::ES256), + key_algorithm: Some(KeyAlgorithm::ES256), key_id: Some("hello".to_string()), ..Default::default() }, @@ -767,7 +767,7 @@ async fn issuer_check() { .build() .unwrap(); - match authenticate(&JWTConf::default(), &manager, request.try_into().unwrap()).unwrap() { + match authenticate(&JWTConf::default(), &manager, request.try_into().unwrap()) { ControlFlow::Break(res) => { panic!("unexpected response: {res:?}"); } @@ -800,7 +800,7 @@ async fn issuer_check() { .build() .unwrap(); - match authenticate(&JWTConf::default(), &manager, request.try_into().unwrap()).unwrap() { + match authenticate(&JWTConf::default(), &manager, request.try_into().unwrap()) { ControlFlow::Break(res) => { let response: graphql::Response = serde_json::from_slice( &hyper::body::to_bytes(res.response.into_body()) @@ -840,7 +840,7 @@ async fn issuer_check() { .build() .unwrap(); - match authenticate(&JWTConf::default(), &manager, request.try_into().unwrap()).unwrap() { + match authenticate(&JWTConf::default(), &manager, request.try_into().unwrap()) { ControlFlow::Break(res) => { let response: graphql::Response = serde_json::from_slice( &hyper::body::to_bytes(res.response.into_body()) @@ -875,7 +875,7 @@ async fn issuer_check() { .build() .unwrap(); - match authenticate(&JWTConf::default(), &manager, request.try_into().unwrap()).unwrap() { + match authenticate(&JWTConf::default(), &manager, request.try_into().unwrap()) { ControlFlow::Break(res) => { let response: graphql::Response = serde_json::from_slice( &hyper::body::to_bytes(res.response.into_body()) diff --git a/apollo-router/src/plugins/cache/entity.rs b/apollo-router/src/plugins/cache/entity.rs index 2d5219a776..e54077c9d6 100644 --- a/apollo-router/src/plugins/cache/entity.rs +++ b/apollo-router/src/plugins/cache/entity.rs @@ -43,7 +43,7 @@ pub(crate) const ENTITIES: &str = "_entities"; pub(crate) const REPRESENTATIONS: &str = "representations"; pub(crate) const CONTEXT_CACHE_KEY: &str = "apollo_entity_cache::key"; -register_plugin!("apollo", "experimental_entity_cache", EntityCache); +register_plugin!("apollo", "preview_entity_cache", EntityCache); pub(crate) struct EntityCache { storage: RedisCacheStorage, diff --git a/apollo-router/src/plugins/csrf.rs b/apollo-router/src/plugins/csrf.rs index 98a2fe1181..0362f44a6e 100644 --- a/apollo-router/src/plugins/csrf.rs +++ b/apollo-router/src/plugins/csrf.rs @@ -119,11 +119,11 @@ impl Plugin for Csrf { )) .extension_code("CSRF_ERROR") .build(); - let res = SupergraphResponse::builder() + let res = SupergraphResponse::infallible_builder() .error(error) .status_code(StatusCode::BAD_REQUEST) .context(req.context) - .build()?; + .build(); Ok(ControlFlow::Break(res)) } }) diff --git a/apollo-router/src/plugins/rhai/engine.rs b/apollo-router/src/plugins/rhai/engine.rs index 90d360ebcb..65a73326c8 100644 --- a/apollo-router/src/plugins/rhai/engine.rs +++ b/apollo-router/src/plugins/rhai/engine.rs @@ -234,6 +234,19 @@ mod router_header_map { } } + // Register a remove function for HeaderMap + #[rhai_fn(name = "remove", pure, return_raw)] + pub(crate) fn header_map_remove( + x: &mut HeaderMap, 
+ key: &str, + ) -> Result> { + Ok(x.remove(key) + .ok_or("")? + .to_str() + .map_err(|e| e.to_string())? + .to_string()) + } + // Register a HeaderMap indexer so we can get/set headers #[rhai_fn(index_get, pure, return_raw)] pub(crate) fn header_map_get( diff --git a/apollo-router/src/plugins/rhai/tests.rs b/apollo-router/src/plugins/rhai/tests.rs index 6f1924dda1..a03192563f 100644 --- a/apollo-router/src/plugins/rhai/tests.rs +++ b/apollo-router/src/plugins/rhai/tests.rs @@ -812,3 +812,44 @@ fn it_can_compare_method_strings() { .expect("can compare properly"); assert!(method); } + +#[tokio::test] +async fn test_router_service_adds_timestamp_header() -> Result<(), BoxError> { + let mut mock_service = MockSupergraphService::new(); + mock_service + .expect_call() + .times(1) + .returning(move |req: SupergraphRequest| { + Ok(SupergraphResponse::fake_builder() + .header("x-custom-header", "CUSTOM_VALUE") + .context(req.context) + .build() + .unwrap()) + }); + + let dyn_plugin: Box = crate::plugin::plugins() + .find(|factory| factory.name == "apollo.rhai") + .expect("Plugin not found") + .create_instance_without_schema( + &Value::from_str(r#"{"scripts":"tests/fixtures", "main":"remove_header.rhai"}"#) + .unwrap(), + ) + .await + .unwrap(); + + let mut router_service = dyn_plugin.supergraph_service(BoxService::new(mock_service)); + let context = Context::new(); + context.insert("test", 5i64).unwrap(); + let supergraph_req = SupergraphRequest::fake_builder() + .header("x-custom-header", "CUSTOM_VALUE") + .context(context) + .build()?; + + let service_response = router_service.ready().await?.call(supergraph_req).await?; + assert_eq!(StatusCode::OK, service_response.response.status()); + + let headers = service_response.response.headers().clone(); + assert!(headers.get("x-custom-header").is_none()); + + Ok(()) +} diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs index 0dc8f8d228..7de9717745 100644 --- a/apollo-router/src/plugins/telemetry/config.rs +++ b/apollo-router/src/plugins/telemetry/config.rs @@ -1,10 +1,18 @@ //! Configuration for the telemetry plugin. 
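An aside before the telemetry changes continue: the Rhai test above loads a `tests/fixtures/remove_header.rhai` fixture that this diff doesn't include. A plausible sketch of such a script, using the newly registered `remove` method on `HeaderMap` (illustrative only; not the actual fixture contents):

```rhai
// Illustrative script: drop a header from the supergraph response using the
// new HeaderMap `remove` method registered in engine.rs above.
fn supergraph_service(service) {
    const response_callback = Fn("process_response");
    service.map_response(response_callback);
}

fn process_response(response) {
    response.headers.remove("x-custom-header");
}
```

Note that as registered in `engine.rs` above, `remove` returns the removed value and raises an error when the header is absent, so scripts should only call it on headers they expect to exist.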
use std::collections::BTreeMap; +use std::collections::HashSet; use axum::headers::HeaderName; +use opentelemetry::sdk::metrics::new_view; +use opentelemetry::sdk::metrics::Aggregation; +use opentelemetry::sdk::metrics::Instrument; +use opentelemetry::sdk::metrics::Stream; +use opentelemetry::sdk::metrics::View; use opentelemetry::sdk::trace::SpanLimits; use opentelemetry::Array; use opentelemetry::Value; +use opentelemetry_api::metrics::MetricsError; +use opentelemetry_api::metrics::Unit; use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; @@ -108,8 +116,10 @@ pub(crate) struct MetricsCommon { pub(crate) service_namespace: Option, /// The Open Telemetry resource pub(crate) resource: BTreeMap, - /// Custom buckets for histograms + /// Custom buckets for all histograms pub(crate) buckets: Vec, + /// Views applied on metrics + pub(crate) views: Vec, } impl Default for MetricsCommon { @@ -119,6 +129,7 @@ impl Default for MetricsCommon { service_name: None, service_namespace: None, resource: BTreeMap::new(), + views: Vec::with_capacity(0), buckets: vec![ 0.001, 0.005, 0.015, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 1.0, 5.0, 10.0, ], @@ -126,6 +137,64 @@ impl Default for MetricsCommon { } } +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq)] +#[serde(deny_unknown_fields)] +pub(crate) struct MetricView { + /// The instrument name you're targeting + pub(crate) name: String, + /// New description to set to the instrument + pub(crate) description: Option, + /// New unit to set to the instrument + pub(crate) unit: Option, + /// New aggregation settings to set + pub(crate) aggregation: Option, + /// An allow-list of attribute keys that will be preserved for the instrument. + /// + /// Any attribute recorded for the instrument with a key not in this set will be + /// dropped. If the set is empty, all attributes will be dropped, if `None` all + /// attributes will be kept. + pub(crate) allowed_attribute_keys: Option>, +} + +impl TryInto> for MetricView { + type Error = MetricsError; + + fn try_into(self) -> Result, Self::Error> { + let aggregation = self + .aggregation + .map( + |MetricAggregation::Histogram { buckets }| Aggregation::ExplicitBucketHistogram { + boundaries: buckets, + record_min_max: true, + }, + ); + let mut instrument = Instrument::new().name(self.name); + if let Some(desc) = self.description { + instrument = instrument.description(desc); + } + if let Some(unit) = self.unit { + instrument = instrument.unit(Unit::new(unit)); + } + let mut mask = Stream::new(); + if let Some(aggregation) = aggregation { + mask = mask.aggregation(aggregation); + } + if let Some(allowed_attribute_keys) = self.allowed_attribute_keys { + mask = mask.allowed_attribute_keys(allowed_attribute_keys.into_iter().map(Key::new)); + } + + new_view(instrument, mask) + } +} + +#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq)] +#[serde(deny_unknown_fields, rename_all = "snake_case")] +pub(crate) enum MetricAggregation { + /// An aggregation that summarizes a set of measurements as an histogram with + /// explicitly defined buckets. 
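// Views refine the global `buckets` list above: `buckets` applies to every
// histogram, while a view can override the aggregation, description, unit,
// and allowed attribute keys of one named instrument.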
+ Histogram { buckets: Vec }, +} + /// Tracing configuration #[derive(Clone, Default, Debug, Deserialize, JsonSchema)] #[serde(deny_unknown_fields, default)] diff --git a/apollo-router/src/plugins/telemetry/config_new/logging.rs b/apollo-router/src/plugins/telemetry/config_new/logging.rs index 71de7c8fcc..ecfd7b0a59 100644 --- a/apollo-router/src/plugins/telemetry/config_new/logging.rs +++ b/apollo-router/src/plugins/telemetry/config_new/logging.rs @@ -105,6 +105,8 @@ pub(crate) struct StdOut { pub(crate) enabled: bool, /// The format to log to stdout. pub(crate) format: Format, + /// The format to log to stdout when you're running on an interactive terminal. When configured it will automatically use this `tty_format`` instead of the original `format` when an interactive terminal is detected + pub(crate) tty_format: Option, /// Log rate limiting. The limit is set per type of log message pub(crate) rate_limit: RateLimit, } @@ -114,6 +116,7 @@ impl Default for StdOut { StdOut { enabled: true, format: Format::default(), + tty_format: None, rate_limit: RateLimit::default(), } } diff --git a/apollo-router/src/plugins/telemetry/config_new/selectors.rs b/apollo-router/src/plugins/telemetry/config_new/selectors.rs index 3791cf74b5..87845f977c 100644 --- a/apollo-router/src/plugins/telemetry/config_new/selectors.rs +++ b/apollo-router/src/plugins/telemetry/config_new/selectors.rs @@ -1,4 +1,7 @@ use access_json::JSONQuery; +use derivative::Derivative; +use jsonpath_rust::JsonPathFinder; +use jsonpath_rust::JsonPathInst; use schemars::JsonSchema; use serde::Deserialize; #[cfg(test)] @@ -9,6 +12,7 @@ use sha2::Digest; use crate::context::OPERATION_KIND; use crate::context::OPERATION_NAME; use crate::plugin::serde::deserialize_json_query; +use crate::plugin::serde::deserialize_jsonpath; use crate::plugins::telemetry::config::AttributeValue; use crate::plugins::telemetry::config_new::get_baggage; use crate::plugins::telemetry::config_new::trace_id; @@ -240,8 +244,9 @@ pub(crate) enum SupergraphSelector { Static(String), } -#[derive(Deserialize, JsonSchema, Clone, Debug)] +#[derive(Deserialize, JsonSchema, Clone, Derivative)] #[serde(deny_unknown_fields, rename_all = "snake_case", untagged)] +#[derivative(Debug)] pub(crate) enum SubgraphSelector { SubgraphOperationName { /// The operation name from the subgraph query. @@ -281,6 +286,7 @@ pub(crate) enum SubgraphSelector { /// Optional default value. default: Option, }, + /// Deprecated, use SubgraphResponseData and SubgraphResponseError instead SubgraphResponseBody { /// The subgraph response body json path. #[schemars(with = "String")] @@ -293,6 +299,32 @@ pub(crate) enum SubgraphSelector { /// Optional default value. default: Option, }, + SubgraphResponseData { + /// The subgraph response body json path. + #[schemars(with = "String")] + #[derivative(Debug = "ignore")] + #[serde(deserialize_with = "deserialize_jsonpath")] + subgraph_response_data: JsonPathInst, + #[serde(skip)] + #[allow(dead_code)] + /// Optional redaction pattern. + redact: Option, + /// Optional default value. + default: Option, + }, + SubgraphResponseErrors { + /// The subgraph response body json path. + #[schemars(with = "String")] + #[derivative(Debug = "ignore")] + #[serde(deserialize_with = "deserialize_jsonpath")] + subgraph_response_errors: JsonPathInst, + #[serde(skip)] + #[allow(dead_code)] + /// Optional redaction pattern. + redact: Option, + /// Optional default value. + default: Option, + }, SubgraphRequestHeader { /// The name of a subgraph request header. 
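// The new `subgraph_response_data` / `subgraph_response_errors` selectors
// above parse their paths with `jsonpath_rust` (`deserialize_jsonpath`
// yielding a `JsonPathInst`), while the deprecated `subgraph_response_body`
// variant keeps the older `access_json::JSONQuery` deserializer.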
subgraph_request_header: String, @@ -781,6 +813,49 @@ impl Selector for SubgraphSelector { .as_ref() .and_then(|v| v.maybe_to_otel_value()) .or_else(|| default.maybe_to_otel_value()), + SubgraphSelector::SubgraphResponseData { + subgraph_response_data, + default, + .. + } => if let Some(data) = &response.response.body().data { + let data: serde_json::Value = serde_json::to_value(data.clone()).ok()?; + let mut val = + JsonPathFinder::new(Box::new(data), Box::new(subgraph_response_data.clone())) + .find(); + if let serde_json::Value::Array(array) = &mut val { + if array.len() == 1 { + val = array + .pop() + .expect("already checked the array had a length of 1; qed"); + } + } + + val.maybe_to_otel_value() + } else { + None + } + .or_else(|| default.maybe_to_otel_value()), + SubgraphSelector::SubgraphResponseErrors { + subgraph_response_errors: subgraph_response_error, + default, + .. + } => { + let errors = response.response.body().errors.clone(); + let data: serde_json::Value = serde_json::to_value(errors).ok()?; + let mut val = + JsonPathFinder::new(Box::new(data), Box::new(subgraph_response_error.clone())) + .find(); + if let serde_json::Value::Array(array) = &mut val { + if array.len() == 1 { + val = array + .pop() + .expect("already checked the array had a length of 1; qed"); + } + } + + val.maybe_to_otel_value() + } + .or_else(|| default.maybe_to_otel_value()), SubgraphSelector::ResponseContext { response_context, default, @@ -801,9 +876,11 @@ impl Selector for SubgraphSelector { #[cfg(test)] mod test { + use std::str::FromStr; use std::sync::Arc; use http::StatusCode; + use jsonpath_rust::JsonPathInst; use opentelemetry::baggage::BaggageExt; use opentelemetry::trace::SpanContext; use opentelemetry::trace::SpanId; @@ -813,6 +890,7 @@ mod test { use opentelemetry::trace::TraceState; use opentelemetry::Context; use opentelemetry::KeyValue; + use opentelemetry_api::StringValue; use serde_json::json; use tracing::span; use tracing::subscriber; @@ -1956,6 +2034,92 @@ mod test { ); } + #[test] + fn subgraph_subgraph_response_data() { + let selector = SubgraphSelector::SubgraphResponseData { + subgraph_response_data: JsonPathInst::from_str("$.hello").unwrap(), + redact: None, + default: None, + }; + assert_eq!( + selector + .on_response( + &crate::services::SubgraphResponse::fake_builder() + .data(serde_json_bytes::json!({ + "hello": "bonjour" + })) + .build() + ) + .unwrap(), + opentelemetry::Value::String("bonjour".into()) + ); + + assert_eq!( + selector + .on_response( + &crate::services::SubgraphResponse::fake_builder() + .data(serde_json_bytes::json!({ + "hello": ["bonjour", "hello", "ciao"] + })) + .build() + ) + .unwrap(), + opentelemetry::Value::Array( + vec![ + StringValue::from("bonjour"), + StringValue::from("hello"), + StringValue::from("ciao") + ] + .into() + ) + ); + + assert!(selector + .on_response( + &crate::services::SubgraphResponse::fake_builder() + .data(serde_json_bytes::json!({ + "hi": ["bonjour", "hello", "ciao"] + })) + .build() + ) + .is_none()); + + let selector = SubgraphSelector::SubgraphResponseData { + subgraph_response_data: JsonPathInst::from_str("$.hello.*.greeting").unwrap(), + redact: None, + default: None, + }; + assert_eq!( + selector + .on_response( + &crate::services::SubgraphResponse::fake_builder() + .data(serde_json_bytes::json!({ + "hello": { + "french": { + "greeting": "bonjour" + }, + "english": { + "greeting": "hello" + }, + "italian": { + "greeting": "ciao" + } + } + })) + .build() + ) + .unwrap(), + opentelemetry::Value::Array( + vec![ + 
StringValue::from("bonjour"), + StringValue::from("hello"), + StringValue::from("ciao") + ] + .into() + ) + ); + } + #[test] fn router_response_status_reason() { let selector = RouterSelector::ResponseStatus { diff --git a/apollo-router/src/plugins/telemetry/fmt_layer.rs b/apollo-router/src/plugins/telemetry/fmt_layer.rs index e5c6035c8b..ff28719fdb 100644 --- a/apollo-router/src/plugins/telemetry/fmt_layer.rs +++ b/apollo-router/src/plugins/telemetry/fmt_layer.rs @@ -1,6 +1,7 @@ use std::cell::RefCell; use std::collections::HashMap; use std::collections::HashSet; +use std::io::IsTerminal; use std::marker::PhantomData; use opentelemetry::Key; @@ -36,32 +37,42 @@ pub(crate) fn create_fmt_layer( StdOut { enabled, format, + tty_format, rate_limit, - } if *enabled => match format { - Format::Json(format_config) => { - let format = Json::new( - config.exporters.logging.common.to_resource(), - format_config.clone(), - ); - FmtLayer::new( - FilteringFormatter::new(format, filter_metric_events, rate_limit), - std::io::stdout, - ) - .boxed() - } + } if *enabled => { + let format = if std::io::stdout().is_terminal() && tty_format.is_some() { + tty_format + .as_ref() + .expect("checked previously in the if; qed") + } else { + format + }; + match format { + Format::Json(format_config) => { + let format = Json::new( + config.exporters.logging.common.to_resource(), + format_config.clone(), + ); + FmtLayer::new( + FilteringFormatter::new(format, filter_metric_events, rate_limit), + std::io::stdout, + ) + .boxed() + } - Format::Text(format_config) => { - let format = Text::new( - config.exporters.logging.common.to_resource(), - format_config.clone(), - ); - FmtLayer::new( - FilteringFormatter::new(format, filter_metric_events, rate_limit), - std::io::stdout, - ) - .boxed() + Format::Text(format_config) => { + let format = Text::new( + config.exporters.logging.common.to_resource(), + format_config.clone(), + ); + FmtLayer::new( + FilteringFormatter::new(format, filter_metric_events, rate_limit), + std::io::stdout, + ) + .boxed() + } } - }, + } _ => NoOpLayer.boxed(), } } diff --git a/apollo-router/src/plugins/telemetry/metrics/otlp.rs b/apollo-router/src/plugins/telemetry/metrics/otlp.rs index 12ef76e8c1..b83b7d7b01 100644 --- a/apollo-router/src/plugins/telemetry/metrics/otlp.rs +++ b/apollo-router/src/plugins/telemetry/metrics/otlp.rs @@ -1,5 +1,6 @@ use opentelemetry::runtime; use opentelemetry::sdk::metrics::PeriodicReader; +use opentelemetry::sdk::metrics::View; use opentelemetry_otlp::HttpExporterBuilder; use opentelemetry_otlp::MetricsExporterBuilder; use opentelemetry_otlp::TonicExporterBuilder; @@ -63,6 +64,11 @@ impl MetricsConfigurator for super::super::otlp::Config { .with_timeout(self.batch_processor.max_export_timeout) .build(), ); + for metric_view in metrics_config.views.clone() { + let view: Box = metric_view.try_into()?; + builder.public_meter_provider_builder = + builder.public_meter_provider_builder.with_view(view); + } Ok(builder) } None => Err("otlp metric export does not support http yet".into()), diff --git a/apollo-router/src/plugins/telemetry/metrics/prometheus.rs b/apollo-router/src/plugins/telemetry/metrics/prometheus.rs index 4825934271..cabcc83658 100644 --- a/apollo-router/src/plugins/telemetry/metrics/prometheus.rs +++ b/apollo-router/src/plugins/telemetry/metrics/prometheus.rs @@ -6,6 +6,7 @@ use futures::future::BoxFuture; use http::StatusCode; use once_cell::sync::Lazy; use opentelemetry::sdk::metrics::MeterProvider; +use opentelemetry::sdk::metrics::View; use 
opentelemetry::sdk::Resource; use prometheus::Encoder; use prometheus::Registry; @@ -16,6 +17,7 @@ use tower::BoxError; use tower::ServiceExt; use tower_service::Service; +use crate::plugins::telemetry::config::MetricView; use crate::plugins::telemetry::config::MetricsCommon; use crate::plugins::telemetry::metrics::CustomAggregationSelector; use crate::plugins::telemetry::metrics::MetricsBuilder; @@ -58,6 +60,7 @@ static NEW_PROMETHEUS: Lazy>> = struct PrometheusConfig { resource: Resource, buckets: Vec, + views: Vec, } pub(crate) fn commit_prometheus() { @@ -86,6 +89,7 @@ impl MetricsConfigurator for Config { let prometheus_config = PrometheusConfig { resource: builder.resource.clone(), buckets: metrics_config.buckets.clone(), + views: metrics_config.views.clone(), }; // Check the last registry to see if the resources are the same, if they are we can use it as is. @@ -131,10 +135,14 @@ impl MetricsConfigurator for Config { .with_registry(registry.clone()) .build()?; - let meter_provider = MeterProvider::builder() + let mut meter_provider_builder = MeterProvider::builder() .with_reader(exporter) - .with_resource(builder.resource.clone()) - .build(); + .with_resource(builder.resource.clone()); + for metric_view in metrics_config.views.clone() { + let view: Box = metric_view.try_into()?; + meter_provider_builder = meter_provider_builder.with_view(view); + } + let meter_provider = meter_provider_builder.build(); builder.custom_endpoints.insert( self.listen.clone(), Endpoint::from_router_service( diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index d52c65bb11..f35c75b8b5 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -2259,6 +2259,22 @@ mod tests { .await; } + #[tokio::test(flavor = "multi_thread")] + async fn it_test_prometheus_metrics_custom_buckets_for_specific_metrics() { + async { + let plugin = create_plugin_with_config(include_str!( + "testdata/prometheus_custom_buckets_specific_metrics.router.yaml" + )) + .await; + make_supergraph_request(plugin.as_ref()).await; + let prometheus_metrics = get_prometheus_metrics(plugin.as_ref()).await; + + assert_snapshot!(prometheus_metrics); + } + .with_metrics() + .await; + } + #[test] fn it_test_send_headers_to_studio() { let fw_headers = ForwardHeaders::Only(vec![ diff --git a/apollo-router/src/plugins/telemetry/snapshots/apollo_router__plugins__telemetry__tests__it_test_prometheus_metrics_custom_buckets_for_specific_metrics.snap b/apollo-router/src/plugins/telemetry/snapshots/apollo_router__plugins__telemetry__tests__it_test_prometheus_metrics_custom_buckets_for_specific_metrics.snap new file mode 100644 index 0000000000..49dfbd2b76 --- /dev/null +++ b/apollo-router/src/plugins/telemetry/snapshots/apollo_router__plugins__telemetry__tests__it_test_prometheus_metrics_custom_buckets_for_specific_metrics.snap @@ -0,0 +1,10 @@ +--- +source: apollo-router/src/plugins/telemetry/mod.rs +expression: prometheus_metrics +--- +apollo_router_http_request_duration_seconds_bucket{otel_scope_name="apollo/router",le="+Inf"} 1 +apollo_router_http_request_duration_seconds_bucket{otel_scope_name="apollo/router",le="1"} 1 +apollo_router_http_request_duration_seconds_bucket{otel_scope_name="apollo/router",le="2"} 1 +apollo_router_http_request_duration_seconds_bucket{otel_scope_name="apollo/router",le="3"} 1 +apollo_router_http_request_duration_seconds_bucket{otel_scope_name="apollo/router",le="4"} 1 
+apollo_router_http_request_duration_seconds_bucket{otel_scope_name="apollo/router",le="5"} 1 diff --git a/apollo-router/src/plugins/telemetry/testdata/prometheus_custom_buckets_specific_metrics.router.yaml b/apollo-router/src/plugins/telemetry/testdata/prometheus_custom_buckets_specific_metrics.router.yaml new file mode 100644 index 0000000000..06a868748d --- /dev/null +++ b/apollo-router/src/plugins/telemetry/testdata/prometheus_custom_buckets_specific_metrics.router.yaml @@ -0,0 +1,22 @@ +telemetry: + apollo: + client_name_header: name_header + client_version_header: version_header + exporters: + metrics: + common: + service_name: apollo-router + views: + - name: apollo_router_http_request_duration_seconds + aggregation: + histogram: + buckets: + - 1 + - 2 + - 3 + - 4 + - 5 + allowed_attribute_keys: + - otel_scope_name + prometheus: + enabled: true diff --git a/apollo-router/src/plugins/traffic_shaping/mod.rs b/apollo-router/src/plugins/traffic_shaping/mod.rs index 817de0979b..840fe57223 100644 --- a/apollo-router/src/plugins/traffic_shaping/mod.rs +++ b/apollo-router/src/plugins/traffic_shaping/mod.rs @@ -39,8 +39,8 @@ use crate::error::ConfigurationError; use crate::plugin::Plugin; use crate::plugin::PluginInit; use crate::register_plugin; +use crate::services::http::service::Compression; use crate::services::subgraph; -use crate::services::subgraph_service::Compression; use crate::services::supergraph; use crate::services::SubgraphRequest; diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index 19190331b7..da0d493981 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -1247,16 +1247,16 @@ mod tests { #[test] fn router_bridge_dependency_is_pinned() { - let cargo_manifest: toml::Value = - fs::read_to_string(PathBuf::from(&env!("CARGO_MANIFEST_DIR")).join("Cargo.toml")) - .expect("could not read Cargo.toml") - .parse() - .expect("could not parse Cargo.toml"); + let cargo_manifest: serde_json::Value = basic_toml::from_str( + &fs::read_to_string(PathBuf::from(&env!("CARGO_MANIFEST_DIR")).join("Cargo.toml")) + .expect("could not read Cargo.toml"), + ) + .expect("could not parse Cargo.toml"); let router_bridge_version = cargo_manifest .get("dependencies") .expect("Cargo.toml does not contain dependencies") - .as_table() - .expect("Cargo.toml dependencies key is not a table") + .as_object() + .expect("Cargo.toml dependencies key is not an object") .get("router-bridge") .expect("Cargo.toml dependencies does not have an entry for router-bridge") .as_str() diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs index d2e7da4655..ba2f92dbbc 100644 --- a/apollo-router/src/query_planner/caching_query_planner.rs +++ b/apollo-router/src/query_planner/caching_query_planner.rs @@ -76,7 +76,7 @@ where ) -> CachingQueryPlanner { let cache = Arc::new( DeduplicatingCache::from_configuration( - &configuration.supergraph.query_planning.experimental_cache, + &configuration.supergraph.query_planning.cache, "query planner", ) .await, @@ -182,7 +182,9 @@ where let err_res = Query::check_errors(&doc); if let Err(error) = err_res { let e = Arc::new(QueryPlannerError::SpecError(error)); - entry.insert(Err(e)).await; + tokio::spawn(async move { + entry.insert(Err(e)).await; + }); continue; } @@ -208,15 +210,19 @@ where match res { Ok(QueryPlannerResponse { content, .. 
}) => { - if let Some(content) = &content { + if let Some(content) = content.clone() { count += 1; - entry.insert(Ok(content.clone())).await; + tokio::spawn(async move { + entry.insert(Ok(content.clone())).await; + }); } } Err(error) => { count += 1; let e = Arc::new(error); - entry.insert(Err(e.clone())).await; + tokio::spawn(async move { + entry.insert(Err(e)).await; + }); } } } @@ -355,7 +361,10 @@ where referenced_fields_by_type: HashMap::new(), }); let e = Arc::new(QueryPlannerError::SpecError(error)); - entry.insert(Err(e.clone())).await; + let err = e.clone(); + tokio::spawn(async move { + entry.insert(Err(err)).await; + }); return Err(CacheResolverError::RetrievalError(e)); } @@ -367,8 +376,10 @@ where context, errors, }) => { - if let Some(content) = &content { - entry.insert(Ok(content.clone())).await; + if let Some(content) = content.clone() { + tokio::spawn(async move { + entry.insert(Ok(content)).await; + }); } if let Some(QueryPlannerContent::Plan { plan, .. }) = &content { @@ -385,7 +396,10 @@ where } Err(error) => { let e = Arc::new(error); - entry.insert(Err(e.clone())).await; + let err = e.clone(); + tokio::spawn(async move { + entry.insert(Err(err)).await; + }); Err(CacheResolverError::RetrievalError(e)) } } @@ -459,6 +473,8 @@ pub(crate) struct CachingQueryKey { pub(crate) plan_options: PlanOptions, } +const FEDERATION_VERSION: &str = std::env!("FEDERATION_VERSION"); + impl std::fmt::Display for CachingQueryKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut hasher = Sha256::new(); @@ -471,22 +487,19 @@ impl std::fmt::Display for CachingQueryKey { let mut hasher = Sha256::new(); hasher.update(&serde_json::to_vec(&self.metadata).expect("serialization should not fail")); - let metadata = hex::encode(hasher.finalize()); - - let mut hasher = Sha256::new(); hasher.update( &serde_json::to_vec(&self.plan_options).expect("serialization should not fail"), ); - let plan_options = hex::encode(hasher.finalize()); + let metadata = hex::encode(hasher.finalize()); write!( f, - "plan.{}.{}.{}.{}.{}", + "plan:{}:{}:{}:{}:{}", + FEDERATION_VERSION, self.schema_id.as_deref().unwrap_or("-"), query, operation, metadata, - plan_options, ) } } diff --git a/apollo-router/src/query_planner/execution.rs b/apollo-router/src/query_planner/execution.rs index 3ce7f6ffe4..d1793dafd7 100644 --- a/apollo-router/src/query_planner/execution.rs +++ b/apollo-router/src/query_planner/execution.rs @@ -218,7 +218,7 @@ impl PlanNode { PlanNode::Fetch(fetch_node) => { let fetch_time_offset = parameters.context.created_at.elapsed().as_nanos() as i64; - match fetch_node + let (v, e) = fetch_node .fetch_node(parameters, parent_value, current_dir) .instrument(tracing::info_span!( FETCH_SPAN_NAME, @@ -226,18 +226,9 @@ impl PlanNode { "apollo.subgraph.name" = fetch_node.service_name.as_str(), "apollo_private.sent_time_offset" = fetch_time_offset )) - .await - { - Ok((v, e)) => { - value = v; - errors = e; - } - Err(err) => { - failfast_error!("Fetch error: {}", err); - errors = vec![err.to_graphql_error(Some(current_dir.to_owned()))]; - value = Value::default(); - } - } + .await; + value = v; + errors = e; } PlanNode::Defer { primary: diff --git a/apollo-router/src/query_planner/fetch.rs b/apollo-router/src/query_planner/fetch.rs index 9754a91187..a4c88c546d 100644 --- a/apollo-router/src/query_planner/fetch.rs +++ b/apollo-router/src/query_planner/fetch.rs @@ -236,7 +236,7 @@ impl FetchNode { parameters: &'a ExecutionParameters<'a>, data: &'a Value, current_dir: &'a Path, - ) -> 
Result<(Value, Vec), FetchError> { + ) -> (Value, Vec) { let FetchNode { operation, operation_kind, @@ -260,7 +260,7 @@ impl FetchNode { ) { Some(variables) => variables, None => { - return Ok((Value::Object(Object::default()), Vec::new())); + return (Value::Object(Object::default()), Vec::new()); } }; @@ -302,8 +302,7 @@ impl FetchNode { .create(service_name) .expect("we already checked that the service exists during planning; qed"); - // TODO not sure if we need a RouterReponse here as we don't do anything with it - let (_parts, response) = service + let (_parts, response) = match service .oneshot(subgraph_request) .instrument(tracing::trace_span!("subfetch_stream")) .await @@ -325,16 +324,26 @@ impl FetchNode { service: service_name.to_string(), reason: e.to_string(), }, - })? - .response - .into_parts(); + }) { + Err(e) => { + return ( + Value::default(), + vec![e.to_graphql_error(Some(current_dir.to_owned()))], + ); + } + Ok(res) => res.response.into_parts(), + }; super::log::trace_subfetch(service_name, operation, &variables, &response); if !response.is_primary() { - return Err(FetchError::SubrequestUnexpectedPatchResponse { - service: service_name.to_owned(), - }); + return ( + Value::default(), + vec![FetchError::SubrequestUnexpectedPatchResponse { + service: service_name.to_owned(), + } + .to_graphql_error(Some(current_dir.to_owned()))], + ); } let (value, errors) = @@ -347,7 +356,7 @@ impl FetchNode { } } } - Ok((value, errors)) + (value, errors) } #[instrument(skip_all, level = "debug", name = "response_insert")] diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index 654817ae54..95182a34a7 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -31,6 +31,7 @@ use crate::plugins::traffic_shaping::APOLLO_TRAFFIC_SHAPING; use crate::query_planner::BridgeQueryPlanner; use crate::services::apollo_graph_reference; use crate::services::apollo_key; +use crate::services::http::HttpClientServiceFactory; use crate::services::layers::persisted_queries::PersistedQueryLayer; use crate::services::layers::query_analysis::QueryAnalysisLayer; use crate::services::new_service::ServiceFactory; @@ -356,14 +357,23 @@ pub(crate) async fn create_subgraph_services( let mut subgraph_services = IndexMap::new(); for (name, _) in schema.subgraphs() { + let http_service = crate::services::http::HttpClientService::from_config( + name, + configuration, + &tls_root_store, + shaping.enable_subgraph_http2(name), + )?; + + let http_service_factory = + HttpClientServiceFactory::new(Arc::new(http_service), Arc::new(IndexMap::new())); + let subgraph_service = shaping.subgraph_service_internal( name, SubgraphService::from_config( name, configuration, - &tls_root_store, - shaping.enable_subgraph_http2(name), subscription_plugin_conf.clone(), + http_service_factory, )?, ); subgraph_services.insert(name.clone(), subgraph_service); @@ -615,7 +625,7 @@ pub(crate) async fn create_plugins( add_optional_apollo_plugin!("override_subgraph_url"); add_optional_apollo_plugin!("authorization"); add_optional_apollo_plugin!("authentication"); - add_optional_apollo_plugin!("experimental_entity_cache"); + add_optional_apollo_plugin!("preview_entity_cache"); add_mandatory_apollo_plugin!("progressive_override"); // This relative ordering is documented in `docs/source/customizations/native.mdx`: diff --git a/apollo-router/src/services/execution/service.rs b/apollo-router/src/services/execution/service.rs index 0f7603479e..4dcdb94d94 100644 --- 
a/apollo-router/src/services/execution/service.rs
+++ b/apollo-router/src/services/execution/service.rs
@@ -104,13 +104,13 @@ impl Service<ExecutionRequest> for ExecutionService {
         let clone = self.clone();
         let mut this = std::mem::replace(self, clone);
-        let fut = async move { this.call_inner(req).await }.in_current_span();
+        let fut = async move { Ok(this.call_inner(req).await) }.in_current_span();
         Box::pin(fut)
     }
 }

 impl ExecutionService {
-    async fn call_inner(&mut self, req: ExecutionRequest) -> Result<ExecutionResponse, BoxError> {
+    async fn call_inner(&mut self, req: ExecutionRequest) -> ExecutionResponse {
         let context = req.context;
         let ctx = context.clone();
         let variables = req.supergraph_request.body().variables.clone();
@@ -123,7 +123,7 @@ impl ExecutionService {
         let is_subscription = req.query_plan.is_subscription(operation_name.as_deref());
         let mut claims = None;
         if is_deferred {
-            claims = context.get(APOLLO_AUTHENTICATION_JWT_CLAIMS)?
+            claims = context.get(APOLLO_AUTHENTICATION_JWT_CLAIMS).ok().flatten()
         }
         let (tx_close_signal, subscription_handle) = if is_subscription {
             let (tx_close_signal, rx_close_signal) = broadcast::channel(1);
@@ -173,10 +173,7 @@ impl ExecutionService {
         };

         if has_initial_data {
-            return Ok(ExecutionResponse::new_from_response(
-                http::Response::new(stream as _),
-                ctx,
-            ));
+            return ExecutionResponse::new_from_response(http::Response::new(stream as _), ctx);
         }

         let schema = self.schema.clone();
@@ -239,10 +236,7 @@ impl ExecutionService {
             })
             .boxed();

-        Ok(ExecutionResponse::new_from_response(
-            http::Response::new(stream as _),
-            ctx,
-        ))
+        ExecutionResponse::new_from_response(http::Response::new(stream as _), ctx)
     }

     fn process_graphql_response(
diff --git a/apollo-router/src/services/http.rs b/apollo-router/src/services/http.rs
new file mode 100644
index 0000000000..6f6f399ca6
--- /dev/null
+++ b/apollo-router/src/services/http.rs
@@ -0,0 +1,86 @@
+#![allow(dead_code)]
+use std::sync::Arc;
+
+use hyper::Body;
+use tower::BoxError;
+use tower::ServiceExt;
+use tower_service::Service;
+
+use super::Plugins;
+use crate::Context;
+
+pub(crate) mod service;
+#[cfg(test)]
+mod tests;
+
+pub(crate) use service::HttpClientService;
+
+pub(crate) type BoxService = tower::util::BoxService<HttpRequest, HttpResponse, BoxError>;
+pub(crate) type BoxCloneService = tower::util::BoxCloneService<HttpRequest, HttpResponse, BoxError>;
+pub(crate) type ServiceResult = Result<HttpResponse, BoxError>;
+
+#[non_exhaustive]
+pub(crate) struct HttpRequest {
+    pub(crate) http_request: http::Request<Body>,
+    pub(crate) context: Context,
+}
+
+#[non_exhaustive]
+pub(crate) struct HttpResponse {
+    pub(crate) http_response: http::Response<Body>,
+    pub(crate) context: Context,
+}
+
+#[derive(Clone)]
+pub(crate) struct HttpClientServiceFactory {
+    pub(crate) service: Arc<dyn MakeHttpService>,
+    pub(crate) plugins: Arc<Plugins>,
+}
+
+impl HttpClientServiceFactory {
+    pub(crate) fn new(service: Arc<dyn MakeHttpService>, plugins: Arc<Plugins>) -> Self {
+        HttpClientServiceFactory { service, plugins }
+    }
+
+    #[cfg(test)]
+    pub(crate) fn from_config(
+        service: impl Into<String>,
+        configuration: &crate::Configuration,
+        http2: crate::plugins::traffic_shaping::Http2Config,
+    ) -> Self {
+        use indexmap::IndexMap;
+
+        let service = HttpClientService::from_config(service, configuration, &None, http2).unwrap();
+
+        HttpClientServiceFactory {
+            service: Arc::new(service),
+            plugins: Arc::new(IndexMap::new()),
+        }
+    }
+
+    pub(crate) fn create(&self, name: &str) -> BoxService {
+        let service = self.service.make();
+        self.plugins
+            .iter()
+            .rev()
+            .fold(service, |acc, (_, e)| e.http_client_service(name, acc))
+    }
+}
+
+pub(crate) trait MakeHttpService: Send + Sync + 'static {
+    fn make(&self) -> BoxService;
+}
+
+impl<S> MakeHttpService for S
+where
+    S: Service<HttpRequest, Response = HttpResponse, Error = BoxError>
+        + Clone
+        + Send
+        + Sync
+        + 'static,
+    <S as Service<HttpRequest>>::Future: Send,
+{
+    fn make(&self) -> BoxService {
+        self.clone().boxed()
+    }
+}
diff --git a/apollo-router/src/services/http/service.rs b/apollo-router/src/services/http/service.rs
new file mode 100644
index 0000000000..e2f3628dea
--- /dev/null
+++ b/apollo-router/src/services/http/service.rs
@@ -0,0 +1,388 @@
+use std::fmt::Display;
+use std::sync::Arc;
+use std::task::Poll;
+use std::time::Duration;
+
+use ::serde::Deserialize;
+use async_compression::tokio::write::BrotliEncoder;
+use async_compression::tokio::write::GzipEncoder;
+use async_compression::tokio::write::ZlibEncoder;
+use bytes::Bytes;
+use futures::future::BoxFuture;
+use futures::Stream;
+use futures::TryFutureExt;
+use global::get_text_map_propagator;
+use http::header::ACCEPT_ENCODING;
+use http::header::CONTENT_ENCODING;
+use http::HeaderMap;
+use http::HeaderValue;
+use http::Request;
+use hyper::client::HttpConnector;
+use hyper::Body;
+use hyper_rustls::ConfigBuilderExt;
+use hyper_rustls::HttpsConnector;
+use opentelemetry::global;
+use pin_project_lite::pin_project;
+use rustls::ClientConfig;
+use rustls::RootCertStore;
+use schemars::JsonSchema;
+use tokio::io::AsyncWriteExt;
+use tower::BoxError;
+use tower::Service;
+use tower::ServiceBuilder;
+use tower_http::decompression::Decompression;
+use tower_http::decompression::DecompressionBody;
+use tower_http::decompression::DecompressionLayer;
+use tracing::Instrument;
+use tracing_opentelemetry::OpenTelemetrySpanExt;
+
+use super::HttpRequest;
+use super::HttpResponse;
+use crate::configuration::TlsClientAuth;
+use crate::error::FetchError;
+use crate::plugins::authentication::subgraph::SigningParamsConfig;
+use crate::plugins::telemetry::LOGGING_DISPLAY_BODY;
+use crate::plugins::telemetry::LOGGING_DISPLAY_HEADERS;
+use crate::plugins::traffic_shaping::Http2Config;
+use crate::services::trust_dns_connector::new_async_http_connector;
+use crate::services::trust_dns_connector::AsyncHyperResolver;
+use crate::Configuration;
+use crate::Context;
+
+type HTTPClient =
+    Decompression<hyper::Client<HttpsConnector<HttpConnector<AsyncHyperResolver>>, Body>>;
+
+// interior mutability is not a concern here, the value is never modified
+#[allow(clippy::declare_interior_mutable_const)]
+static ACCEPTED_ENCODINGS: HeaderValue = HeaderValue::from_static("gzip, br, deflate");
+const POOL_IDLE_TIMEOUT_DURATION: Option<Duration> = Some(Duration::from_secs(5));
+
+#[derive(PartialEq, Debug, Clone, Deserialize, JsonSchema, Copy)]
+#[serde(rename_all = "lowercase")]
+pub(crate) enum Compression {
+    /// gzip
+    Gzip,
+    /// deflate
+    Deflate,
+    /// brotli
+    Br,
+    /// identity
+    Identity,
+}
+
+impl Display for Compression {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Compression::Gzip => write!(f, "gzip"),
+            Compression::Deflate => write!(f, "deflate"),
+            Compression::Br => write!(f, "br"),
+            Compression::Identity => write!(f, "identity"),
+        }
+    }
+}
+
+#[derive(Clone)]
+pub(crate) struct HttpClientService {
+    // Note: We use hyper::Client here in preference to reqwest to avoid expensive URL translation
+    // in the hot path. We use reqwest elsewhere because it's convenient and some of the
+    // opentelemetry crates require reqwest clients to work correctly (at time of writing).
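The `HTTPClient` alias above is a hyper `Client` wrapped in tower-http's `DecompressionLayer`, so subgraph responses are transparently decompressed before the rest of the pipeline sees them. A minimal sketch of that layering, with a bare `HttpConnector` standing in for the router's TLS + trust-dns connector stack (the `SketchClient` and `build_client` names are illustrative, not router API):

```rust
use std::time::Duration;

use hyper::client::HttpConnector;
use hyper::Body;
use tower::ServiceBuilder;
use tower_http::decompression::{Decompression, DecompressionLayer};

// Same shape as the HTTPClient alias above, minus TLS and custom DNS resolution.
type SketchClient = Decompression<hyper::Client<HttpConnector, Body>>;

fn build_client() -> SketchClient {
    // Plain HTTP client; the router builds this over an HTTPS connector instead.
    let client = hyper::Client::builder()
        .pool_idle_timeout(Some(Duration::from_secs(5)))
        .build_http();
    // DecompressionLayer wraps the client so gzip/br/deflate bodies are decoded.
    ServiceBuilder::new()
        .layer(DecompressionLayer::new())
        .service(client)
}
```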
+ client: HTTPClient, + service: Arc, +} + +impl HttpClientService { + pub(crate) fn from_config( + service: impl Into, + configuration: &Configuration, + tls_root_store: &Option, + http2: Http2Config, + ) -> Result { + let name: String = service.into(); + let tls_cert_store = configuration + .tls + .subgraph + .subgraphs + .get(&name) + .as_ref() + .and_then(|subgraph| subgraph.create_certificate_store()) + .transpose()? + .or_else(|| tls_root_store.clone()); + let client_cert_config = configuration + .tls + .subgraph + .subgraphs + .get(&name) + .as_ref() + .and_then(|tls| tls.client_authentication.as_ref()) + .or(configuration + .tls + .subgraph + .all + .client_authentication + .as_ref()); + + let tls_client_config = generate_tls_client_config(tls_cert_store, client_cert_config)?; + + HttpClientService::new(name, http2, tls_client_config) + } + + pub(crate) fn new( + service: impl Into, + http2: Http2Config, + tls_config: ClientConfig, + ) -> Result { + let mut http_connector = new_async_http_connector()?; + http_connector.set_nodelay(true); + http_connector.set_keepalive(Some(std::time::Duration::from_secs(60))); + http_connector.enforce_http(false); + + let builder = hyper_rustls::HttpsConnectorBuilder::new() + .with_tls_config(tls_config) + .https_or_http() + .enable_http1(); + + let connector = if http2 != Http2Config::Disable { + builder.enable_http2().wrap_connector(http_connector) + } else { + builder.wrap_connector(http_connector) + }; + + let http_client = hyper::Client::builder() + .pool_idle_timeout(POOL_IDLE_TIMEOUT_DURATION) + .http2_only(http2 == Http2Config::Http2Only) + .build(connector); + Ok(Self { + client: ServiceBuilder::new() + .layer(DecompressionLayer::new()) + .service(http_client), + service: Arc::new(service.into()), + }) + } +} + +pub(crate) fn generate_tls_client_config( + tls_cert_store: Option, + client_cert_config: Option<&TlsClientAuth>, +) -> Result { + let tls_builder = rustls::ClientConfig::builder().with_safe_defaults(); + Ok(match (tls_cert_store, client_cert_config) { + (None, None) => tls_builder.with_native_roots().with_no_client_auth(), + (Some(store), None) => tls_builder + .with_root_certificates(store) + .with_no_client_auth(), + (None, Some(client_auth_config)) => tls_builder.with_native_roots().with_client_auth_cert( + client_auth_config.certificate_chain.clone(), + client_auth_config.key.clone(), + )?, + (Some(store), Some(client_auth_config)) => tls_builder + .with_root_certificates(store) + .with_client_auth_cert( + client_auth_config.certificate_chain.clone(), + client_auth_config.key.clone(), + )?, + }) +} + +impl tower::Service for HttpClientService { + type Response = HttpResponse; + type Error = BoxError; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll> { + self.client + .poll_ready(cx) + .map(|res| res.map_err(|e| Box::new(e) as BoxError)) + } + + fn call(&mut self, request: HttpRequest) -> Self::Future { + let HttpRequest { + mut http_request, + context, + } = request; + + let schema_uri = http_request.uri(); + let host = schema_uri.host().unwrap_or_default(); + let port = schema_uri.port_u16().unwrap_or_else(|| { + let scheme = schema_uri.scheme_str(); + if scheme == Some("https") { + 443 + } else if scheme == Some("http") { + 80 + } else { + 0 + } + }); + + let path = schema_uri.path(); + + let http_req_span = tracing::info_span!("http_request", + "otel.kind" = "CLIENT", + "net.peer.name" = %host, + "net.peer.port" = %port, + "http.route" = %path, + "http.url" = 
%schema_uri, + "net.transport" = "ip_tcp", + //"apollo.subgraph.name" = %service_name, + //"graphql.operation.name" = %operation_name, + ); + get_text_map_propagator(|propagator| { + propagator.inject_context( + &http_req_span.context(), + &mut opentelemetry_http::HeaderInjector(http_request.headers_mut()), + ); + }); + + let client = self.client.clone(); + let service_name = self.service.clone(); + Box::pin(async move { + let (parts, body) = http_request.into_parts(); + let body = hyper::body::to_bytes(body).await.map_err(|err| { + tracing::error!(compress_error = format!("{err:?}").as_str()); + + FetchError::CompressionError { + service: service_name.to_string(), + reason: err.to_string(), + } + })?; + let compressed_body = compress(body, &parts.headers) + .instrument(tracing::debug_span!("body_compression")) + .await + .map_err(|err| { + tracing::error!(compress_error = format!("{err:?}").as_str()); + + FetchError::CompressionError { + service: service_name.to_string(), + reason: err.to_string(), + } + })?; + let mut http_request = http::Request::from_parts(parts, Body::from(compressed_body)); + + http_request + .headers_mut() + .insert(ACCEPT_ENCODING, ACCEPTED_ENCODINGS.clone()); + + let signing_params = context + .extensions() + .lock() + .get::() + .cloned(); + + let http_request = if let Some(signing_params) = signing_params { + signing_params.sign(http_request, &service_name).await? + } else { + http_request + }; + + let display_headers = context.contains_key(LOGGING_DISPLAY_HEADERS); + let display_body = context.contains_key(LOGGING_DISPLAY_BODY); + + // Print out the debug for the request + if display_headers { + tracing::info!(http.request.headers = ?http_request.headers(), apollo.subgraph.name = %service_name, "Request headers to subgraph {service_name:?}"); + } + if display_body { + tracing::info!(http.request.body = ?http_request.body(), apollo.subgraph.name = %service_name, "Request body to subgraph {service_name:?}"); + } + + let http_response = do_fetch(client, &context, &service_name, http_request) + .instrument(http_req_span) + .await?; + + // Print out the debug for the response + if display_headers { + tracing::info!(response.headers = ?http_response.headers(), apollo.subgraph.name = %service_name, "Response headers from subgraph {service_name:?}"); + } + + Ok(HttpResponse { + http_response, + context, + }) + }) + } +} + +async fn do_fetch( + mut client: HTTPClient, + context: &Context, + service_name: &str, + request: Request, +) -> Result, FetchError> { + let _active_request_guard = context.enter_active_request(); + let (parts, body) = client + .call(request) + .map_err(|err| { + tracing::error!(fetch_error = ?err); + FetchError::SubrequestHttpError { + status_code: None, + service: service_name.to_string(), + reason: err.to_string(), + } + }) + .await? + .into_parts(); + Ok(http::Response::from_parts( + parts, + Body::wrap_stream(BodyStream { inner: body }), + )) +} + +pub(crate) async fn compress(body: Bytes, headers: &HeaderMap) -> Result { + let content_encoding = headers.get(&CONTENT_ENCODING); + match content_encoding { + Some(content_encoding) => match content_encoding.to_str()? 
{ + "br" => { + let mut br_encoder = BrotliEncoder::new(Vec::new()); + br_encoder.write_all(&body).await?; + br_encoder.shutdown().await?; + + Ok(br_encoder.into_inner().into()) + } + "gzip" => { + let mut gzip_encoder = GzipEncoder::new(Vec::new()); + gzip_encoder.write_all(&body).await?; + gzip_encoder.shutdown().await?; + + Ok(gzip_encoder.into_inner().into()) + } + "deflate" => { + let mut df_encoder = ZlibEncoder::new(Vec::new()); + df_encoder.write_all(&body).await?; + df_encoder.shutdown().await?; + + Ok(df_encoder.into_inner().into()) + } + "identity" => Ok(body), + unknown => { + tracing::error!("unknown content-encoding value '{:?}'", unknown); + Err(BoxError::from(format!( + "unknown content-encoding value '{unknown:?}'", + ))) + } + }, + None => Ok(body), + } +} + +pin_project! { + pub(crate) struct BodyStream { + #[pin] + inner: DecompressionBody + } +} + +impl Stream for BodyStream +where + B: hyper::body::HttpBody, + B::Error: Into, +{ + type Item = Result; + + fn poll_next( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> Poll> { + use hyper::body::HttpBody; + + self.project().inner.poll_data(cx) + } +} diff --git a/apollo-router/src/services/testdata/CA/ca.crt b/apollo-router/src/services/http/testdata/CA/ca.crt similarity index 100% rename from apollo-router/src/services/testdata/CA/ca.crt rename to apollo-router/src/services/http/testdata/CA/ca.crt diff --git a/apollo-router/src/services/testdata/CA/ca.key b/apollo-router/src/services/http/testdata/CA/ca.key similarity index 100% rename from apollo-router/src/services/testdata/CA/ca.key rename to apollo-router/src/services/http/testdata/CA/ca.key diff --git a/apollo-router/src/services/testdata/client.crt b/apollo-router/src/services/http/testdata/client.crt similarity index 100% rename from apollo-router/src/services/testdata/client.crt rename to apollo-router/src/services/http/testdata/client.crt diff --git a/apollo-router/src/services/testdata/client.csr b/apollo-router/src/services/http/testdata/client.csr similarity index 100% rename from apollo-router/src/services/testdata/client.csr rename to apollo-router/src/services/http/testdata/client.csr diff --git a/apollo-router/src/services/testdata/client.ext b/apollo-router/src/services/http/testdata/client.ext similarity index 100% rename from apollo-router/src/services/testdata/client.ext rename to apollo-router/src/services/http/testdata/client.ext diff --git a/apollo-router/src/services/testdata/client.key b/apollo-router/src/services/http/testdata/client.key similarity index 100% rename from apollo-router/src/services/testdata/client.key rename to apollo-router/src/services/http/testdata/client.key diff --git a/apollo-router/src/services/testdata/server.crt b/apollo-router/src/services/http/testdata/server.crt similarity index 100% rename from apollo-router/src/services/testdata/server.crt rename to apollo-router/src/services/http/testdata/server.crt diff --git a/apollo-router/src/services/testdata/server.csr b/apollo-router/src/services/http/testdata/server.csr similarity index 100% rename from apollo-router/src/services/testdata/server.csr rename to apollo-router/src/services/http/testdata/server.csr diff --git a/apollo-router/src/services/testdata/server.ext b/apollo-router/src/services/http/testdata/server.ext similarity index 100% rename from apollo-router/src/services/testdata/server.ext rename to apollo-router/src/services/http/testdata/server.ext diff --git a/apollo-router/src/services/testdata/server.key 
b/apollo-router/src/services/http/testdata/server.key similarity index 100% rename from apollo-router/src/services/testdata/server.key rename to apollo-router/src/services/http/testdata/server.key diff --git a/apollo-router/src/services/testdata/server_self_signed.crt b/apollo-router/src/services/http/testdata/server_self_signed.crt similarity index 100% rename from apollo-router/src/services/testdata/server_self_signed.crt rename to apollo-router/src/services/http/testdata/server_self_signed.crt diff --git a/apollo-router/src/services/testdata/server_self_signed.csr b/apollo-router/src/services/http/testdata/server_self_signed.csr similarity index 100% rename from apollo-router/src/services/testdata/server_self_signed.csr rename to apollo-router/src/services/http/testdata/server_self_signed.csr diff --git a/apollo-router/src/services/testdata/tls.md b/apollo-router/src/services/http/testdata/tls.md similarity index 100% rename from apollo-router/src/services/testdata/tls.md rename to apollo-router/src/services/http/testdata/tls.md diff --git a/apollo-router/src/services/http/tests.rs b/apollo-router/src/services/http/tests.rs new file mode 100644 index 0000000000..1acdadb2f9 --- /dev/null +++ b/apollo-router/src/services/http/tests.rs @@ -0,0 +1,432 @@ +use std::convert::Infallible; +use std::io; +use std::net::TcpListener; +use std::str::FromStr; + +use async_compression::tokio::write::GzipEncoder; +use axum::Server; +use http::header::CONTENT_ENCODING; +use http::header::CONTENT_TYPE; +use http::StatusCode; +use http::Uri; +use http::Version; +use hyper::server::conn::AddrIncoming; +use hyper::service::make_service_fn; +use hyper::Body; +use hyper_rustls::ConfigBuilderExt; +use hyper_rustls::TlsAcceptor; +use mime::APPLICATION_JSON; +use rustls::server::AllowAnyAuthenticatedClient; +use rustls::Certificate; +use rustls::PrivateKey; +use rustls::RootCertStore; +use rustls::ServerConfig; +use serde_json_bytes::ByteString; +use serde_json_bytes::Value; +use tokio::io::AsyncWriteExt; +use tower::service_fn; +use tower::ServiceExt; + +use crate::configuration::load_certs; +use crate::configuration::load_key; +use crate::configuration::TlsClient; +use crate::configuration::TlsClientAuth; +use crate::graphql::Response; +use crate::plugins::traffic_shaping::Http2Config; +use crate::services::http::HttpClientService; +use crate::services::http::HttpRequest; +use crate::Configuration; +use crate::Context; + +async fn tls_server( + listener: tokio::net::TcpListener, + certificates: Vec, + key: PrivateKey, + body: &'static str, +) { + let acceptor = TlsAcceptor::builder() + .with_single_cert(certificates, key) + .unwrap() + .with_all_versions_alpn() + .with_incoming(AddrIncoming::from_listener(listener).unwrap()); + let service = make_service_fn(|_| async { + Ok::<_, io::Error>(service_fn(|_req| async { + Ok::<_, io::Error>( + http::Response::builder() + .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) + .status(StatusCode::OK) + .version(Version::HTTP_11) + .body::(body.into()) + .unwrap(), + ) + })) + }); + let server = Server::builder(acceptor).serve(service); + server.await.unwrap() +} + +// Note: This test relies on a checked in certificate with the following validity +// characteristics: +// Validity +// Not Before: Oct 10 07:32:39 2023 GMT +// Not After : Oct 7 07:32:39 2033 GMT +// If this test fails and it is October 7th 2033, you will need to generate a +// new self signed cert. Currently, we use openssl to do this, in the future I +// hope we have something better... 
+// In the testdata directory run: +// openssl x509 -req -in server_self_signed.csr -signkey server.key -out server_self_signed.crt -extfile server.ext -days 3650 +// That will give you another 10 years, assuming nothing else in the signing +// framework has expired. +#[tokio::test(flavor = "multi_thread")] +async fn tls_self_signed() { + let certificate_pem = include_str!("./testdata/server_self_signed.crt"); + let key_pem = include_str!("./testdata/server.key"); + + let certificates = load_certs(certificate_pem).unwrap(); + let key = load_key(key_pem).unwrap(); + + let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let socket_addr = listener.local_addr().unwrap(); + tokio::task::spawn(tls_server(listener, certificates, key, r#"{"data": null}"#)); + + // we cannot parse a configuration from text, because certificates are generally + // added by file expansion and we don't have access to that here, and inserting + // the PEM data directly generates parsing issues due to end of line characters + let mut config = Configuration::default(); + config.tls.subgraph.subgraphs.insert( + "test".to_string(), + TlsClient { + certificate_authorities: Some(certificate_pem.into()), + client_authentication: None, + }, + ); + let subgraph_service = + HttpClientService::from_config("test", &config, &None, Http2Config::Enable).unwrap(); + + let url = Uri::from_str(&format!("https://localhost:{}", socket_addr.port())).unwrap(); + let response = subgraph_service + .oneshot(HttpRequest { + http_request: http::Request::builder() + .uri(url) + .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) + .body(r#"{"query":"{ me { name username } }"#.into()) + .unwrap(), + context: Context::new(), + }) + .await + .unwrap(); + + assert_eq!( + std::str::from_utf8( + &hyper::body::to_bytes(response.http_response.into_parts().1) + .await + .unwrap() + ) + .unwrap(), + r#"{"data": null}"# + ); +} + +#[tokio::test(flavor = "multi_thread")] +async fn tls_custom_root() { + let certificate_pem = include_str!("./testdata/server.crt"); + let ca_pem = include_str!("./testdata/CA/ca.crt"); + let key_pem = include_str!("./testdata/server.key"); + + let mut certificates = load_certs(certificate_pem).unwrap(); + certificates.extend(load_certs(ca_pem).unwrap()); + let key = load_key(key_pem).unwrap(); + + let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let socket_addr = listener.local_addr().unwrap(); + tokio::task::spawn(tls_server(listener, certificates, key, r#"{"data": null}"#)); + + // we cannot parse a configuration from text, because certificates are generally + // added by file expansion and we don't have access to that here, and inserting + // the PEM data directly generates parsing issues due to end of line characters + let mut config = Configuration::default(); + config.tls.subgraph.subgraphs.insert( + "test".to_string(), + TlsClient { + certificate_authorities: Some(ca_pem.into()), + client_authentication: None, + }, + ); + let subgraph_service = + HttpClientService::from_config("test", &config, &None, Http2Config::Enable).unwrap(); + + let url = Uri::from_str(&format!("https://localhost:{}", socket_addr.port())).unwrap(); + let response = subgraph_service + .oneshot(HttpRequest { + http_request: http::Request::builder() + .uri(url) + .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) + .body(r#"{"query":"{ me { name username } }"#.into()) + .unwrap(), + context: Context::new(), + }) + .await + .unwrap(); + assert_eq!( + std::str::from_utf8( + 
&hyper::body::to_bytes(response.http_response.into_parts().1) + .await + .unwrap() + ) + .unwrap(), + r#"{"data": null}"# + ); +} + +async fn tls_server_with_client_auth( + listener: tokio::net::TcpListener, + certificates: Vec, + key: PrivateKey, + client_root: Certificate, + body: &'static str, +) { + let mut client_auth_roots = RootCertStore::empty(); + client_auth_roots.add(&client_root).unwrap(); + + let client_auth = AllowAnyAuthenticatedClient::new(client_auth_roots).boxed(); + + let acceptor = TlsAcceptor::builder() + .with_tls_config( + ServerConfig::builder() + .with_safe_defaults() + .with_client_cert_verifier(client_auth) + .with_single_cert(certificates, key) + .unwrap(), + ) + .with_all_versions_alpn() + .with_incoming(AddrIncoming::from_listener(listener).unwrap()); + let service = make_service_fn(|_| async { + Ok::<_, io::Error>(service_fn(|_req| async { + Ok::<_, io::Error>( + http::Response::builder() + .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) + .status(StatusCode::OK) + .version(Version::HTTP_11) + .body::(body.into()) + .unwrap(), + ) + })) + }); + let server = Server::builder(acceptor).serve(service); + server.await.unwrap() +} + +#[tokio::test(flavor = "multi_thread")] +async fn tls_client_auth() { + let server_certificate_pem = include_str!("./testdata/server.crt"); + let ca_pem = include_str!("./testdata/CA/ca.crt"); + let server_key_pem = include_str!("./testdata/server.key"); + + let mut server_certificates = load_certs(server_certificate_pem).unwrap(); + let ca_certificate = load_certs(ca_pem).unwrap().remove(0); + server_certificates.push(ca_certificate.clone()); + let key = load_key(server_key_pem).unwrap(); + + let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let socket_addr = listener.local_addr().unwrap(); + tokio::task::spawn(tls_server_with_client_auth( + listener, + server_certificates, + key, + ca_certificate, + r#"{"data": null}"#, + )); + + let client_certificate_pem = include_str!("./testdata/client.crt"); + let client_key_pem = include_str!("./testdata/client.key"); + + let client_certificates = load_certs(client_certificate_pem).unwrap(); + let client_key = load_key(client_key_pem).unwrap(); + + // we cannot parse a configuration from text, because certificates are generally + // added by file expansion and we don't have access to that here, and inserting + // the PEM data directly generates parsing issues due to end of line characters + let mut config = Configuration::default(); + config.tls.subgraph.subgraphs.insert( + "test".to_string(), + TlsClient { + certificate_authorities: Some(ca_pem.into()), + client_authentication: Some(TlsClientAuth { + certificate_chain: client_certificates, + key: client_key, + }), + }, + ); + let subgraph_service = + HttpClientService::from_config("test", &config, &None, Http2Config::Enable).unwrap(); + + let url = Uri::from_str(&format!("https://localhost:{}", socket_addr.port())).unwrap(); + let response = subgraph_service + .oneshot(HttpRequest { + http_request: http::Request::builder() + .uri(url) + .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) + .body(r#"{"query":"{ me { name username } }"#.into()) + .unwrap(), + context: Context::new(), + }) + .await + .unwrap(); + assert_eq!( + std::str::from_utf8( + &hyper::body::to_bytes(response.http_response.into_parts().1) + .await + .unwrap() + ) + .unwrap(), + r#"{"data": null}"# + ); +} + +// starts a local server emulating a subgraph returning status code 401 +async fn emulate_h2c_server(listener: TcpListener) { + async 
fn handle(_request: http::Request) -> Result, Infallible> { + println!("h2C server got req: {_request:?}"); + Ok(http::Response::builder() + .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) + .status(StatusCode::OK) + .body( + serde_json::to_string(&Response { + data: Some(Value::default()), + ..Response::default() + }) + .expect("always valid") + .into(), + ) + .unwrap()) + } + + let make_svc = make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) }); + let server = Server::from_tcp(listener) + .unwrap() + .http2_only(true) + .serve(make_svc); + server.await.unwrap(); +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_subgraph_h2c() { + let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + let socket_addr = listener.local_addr().unwrap(); + tokio::task::spawn(emulate_h2c_server(listener)); + let subgraph_service = HttpClientService::new( + "test", + Http2Config::Http2Only, + rustls::ClientConfig::builder() + .with_safe_defaults() + .with_native_roots() + .with_no_client_auth(), + ) + .expect("can create a HttpService"); + + let url = Uri::from_str(&format!("http://{socket_addr}")).unwrap(); + let response = subgraph_service + .oneshot(HttpRequest { + http_request: http::Request::builder() + .uri(url) + .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) + .body(r#"{"query":"{ me { name username } }"#.into()) + .unwrap(), + context: Context::new(), + }) + .await + .unwrap(); + assert_eq!( + std::str::from_utf8( + &hyper::body::to_bytes(response.http_response.into_parts().1) + .await + .unwrap() + ) + .unwrap(), + r#"{"data":null}"# + ); +} + +// starts a local server emulating a subgraph returning compressed response +async fn emulate_subgraph_compressed_response(listener: TcpListener) { + async fn handle(request: http::Request) -> Result, Infallible> { + // Check the compression of the body + let mut encoder = GzipEncoder::new(Vec::new()); + encoder + .write_all(r#"{"query":"{ me { name username } }"#.as_bytes()) + .await + .unwrap(); + encoder.shutdown().await.unwrap(); + let compressed_body = encoder.into_inner(); + assert_eq!( + compressed_body, + hyper::body::to_bytes(request.into_body()) + .await + .unwrap() + .to_vec() + ); + + let original_body = Response { + data: Some(Value::String(ByteString::from("test"))), + ..Response::default() + }; + let mut encoder = GzipEncoder::new(Vec::new()); + encoder + .write_all(&serde_json::to_vec(&original_body).unwrap()) + .await + .unwrap(); + encoder.shutdown().await.unwrap(); + let compressed_body = encoder.into_inner(); + + Ok(http::Response::builder() + .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) + .header(CONTENT_ENCODING, "gzip") + .status(StatusCode::OK) + .body(compressed_body.into()) + .unwrap()) + } + + let make_svc = make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) }); + let server = Server::from_tcp(listener).unwrap().serve(make_svc); + server.await.unwrap(); +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_compressed_request_response_body() { + let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + let socket_addr = listener.local_addr().unwrap(); + tokio::task::spawn(emulate_subgraph_compressed_response(listener)); + let subgraph_service = HttpClientService::new( + "test", + Http2Config::Http2Only, + rustls::ClientConfig::builder() + .with_safe_defaults() + .with_native_roots() + .with_no_client_auth(), + ) + .expect("can create a HttpService"); + + let url = Uri::from_str(&format!("http://{socket_addr}")).unwrap(); + let 
response = subgraph_service + .oneshot(HttpRequest { + http_request: http::Request::builder() + .uri(url) + .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) + .header(CONTENT_ENCODING, "gzip") + .body(r#"{"query":"{ me { name username } }"#.into()) + .unwrap(), + context: Context::new(), + }) + .await + .unwrap(); + + assert_eq!( + std::str::from_utf8( + &hyper::body::to_bytes(response.http_response.into_parts().1) + .await + .unwrap() + ) + .unwrap(), + r#"{"data":"test"}"# + ); +} diff --git a/apollo-router/src/services/layers/allow_only_http_post_mutations.rs b/apollo-router/src/services/layers/allow_only_http_post_mutations.rs index e77536a36c..edc8aba5c7 100644 --- a/apollo-router/src/services/layers/allow_only_http_post_mutations.rs +++ b/apollo-router/src/services/layers/allow_only_http_post_mutations.rs @@ -54,12 +54,12 @@ where .message("Cannot find executable document".to_string()) .extension_code("MISSING_EXECUTABLE_DOCUMENT") .build()]; - let res = SupergraphResponse::builder() + let res = SupergraphResponse::infallible_builder() .errors(errors) .extensions(Object::default()) .status_code(StatusCode::INTERNAL_SERVER_ERROR) .context(req.context.clone()) - .build()?; + .build(); return Ok(ControlFlow::Break(res)); } @@ -76,12 +76,12 @@ where .message("Cannot find operation".to_string()) .extension_code("MISSING_OPERATION") .build()]; - let res = SupergraphResponse::builder() + let res = SupergraphResponse::infallible_builder() .errors(errors) .extensions(Object::default()) .status_code(StatusCode::METHOD_NOT_ALLOWED) .context(req.context) - .build()?; + .build(); Ok(ControlFlow::Break(res)) } diff --git a/apollo-router/src/services/layers/apq.rs b/apollo-router/src/services/layers/apq.rs index 376f7d85e6..d730b2cc95 100644 --- a/apollo-router/src/services/layers/apq.rs +++ b/apollo-router/src/services/layers/apq.rs @@ -88,7 +88,11 @@ async fn apq_request( if query_matches_hash(query.as_str(), query_hash_bytes.as_slice()) { tracing::trace!("apq: cache insert"); let _ = request.context.insert("persisted_query_register", true); - cache.insert(redis_key(&query_hash), query).await; + let query = query.to_owned(); + let cache = cache.clone(); + tokio::spawn(async move { + cache.insert(redis_key(&query_hash), query).await; + }); Ok(request) } else { tracing::debug!("apq: graphql request doesn't match provided sha256Hash"); diff --git a/apollo-router/src/services/layers/query_analysis.rs b/apollo-router/src/services/layers/query_analysis.rs index 288b5bbfa3..927fe59980 100644 --- a/apollo-router/src/services/layers/query_analysis.rs +++ b/apollo-router/src/services/layers/query_analysis.rs @@ -47,7 +47,7 @@ impl QueryAnalysisLayer { configuration .supergraph .query_planning - .experimental_cache + .cache .in_memory .limit, ))), diff --git a/apollo-router/src/services/mod.rs b/apollo-router/src/services/mod.rs index d73bddd120..802510d45d 100644 --- a/apollo-router/src/services/mod.rs +++ b/apollo-router/src/services/mod.rs @@ -25,6 +25,7 @@ pub(crate) use crate::services::supergraph::Response as SupergraphResponse; pub mod execution; pub(crate) mod external; +pub(crate) mod http; pub(crate) mod layers; pub(crate) mod new_service; pub(crate) mod query_planner; diff --git a/apollo-router/src/services/router.rs b/apollo-router/src/services/router.rs index aa741d0d93..e7459cafa6 100644 --- a/apollo-router/src/services/router.rs +++ b/apollo-router/src/services/router.rs @@ -272,6 +272,50 @@ impl Response { ) } + /// This is the constructor (or builder) to use when constructing a real 
Response.. + /// + /// Required parameters are required in non-testing code to create a Response.. + #[allow(clippy::too_many_arguments)] + #[builder(visibility = "pub(crate)")] + fn infallible_new( + label: Option, + data: Option, + path: Option, + errors: Vec, + // Skip the `Object` type alias in order to use buildstructor’s map special-casing + extensions: JsonMap, + status_code: Option, + headers: MultiMap, + context: Context, + ) -> Self { + // Build a response + let b = graphql::Response::builder() + .and_label(label) + .and_path(path) + .errors(errors) + .extensions(extensions); + let res = match data { + Some(data) => b.data(data).build(), + None => b.build(), + }; + + // Build an http Response + let mut builder = http::Response::builder().status(status_code.unwrap_or(StatusCode::OK)); + for (header_name, values) in headers { + for header_value in values { + builder = builder.header(header_name.clone(), header_value); + } + } + + let response = builder + .body(hyper::Body::from( + serde_json::to_vec(&res).expect("can't fail"), + )) + .expect("can't fail"); + + Self { response, context } + } + /// EXPERIMENTAL: this is function is experimental and subject to potentially change. pub async fn into_graphql_response_stream( self, diff --git a/apollo-router/src/services/subgraph_service.rs b/apollo-router/src/services/subgraph_service.rs index 98b5955c73..527c201ec7 100644 --- a/apollo-router/src/services/subgraph_service.rs +++ b/apollo-router/src/services/subgraph_service.rs @@ -1,60 +1,42 @@ //! Tower fetcher for subgraphs. use std::collections::HashMap; -use std::fmt::Display; use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering::Relaxed; use std::sync::Arc; use std::task::Poll; -use std::time::Duration; -use ::serde::Deserialize; -use async_compression::tokio::write::BrotliEncoder; -use async_compression::tokio::write::GzipEncoder; -use async_compression::tokio::write::ZlibEncoder; use bytes::Bytes; use futures::future::BoxFuture; use futures::SinkExt; use futures::StreamExt; use futures::TryFutureExt; -use global::get_text_map_propagator; use http::header::ACCEPT; -use http::header::ACCEPT_ENCODING; -use http::header::CONTENT_ENCODING; use http::header::CONTENT_TYPE; use http::header::{self}; use http::response::Parts; -use http::HeaderMap; use http::HeaderValue; use http::Request; -use hyper::client::HttpConnector; use hyper::Body; use hyper_rustls::ConfigBuilderExt; -use hyper_rustls::HttpsConnector; use mediatype::names::APPLICATION; use mediatype::names::JSON; use mediatype::MediaType; use mime::APPLICATION_JSON; -use opentelemetry::global; -use rustls::ClientConfig; use rustls::RootCertStore; -use schemars::JsonSchema; use serde::Serialize; -use tokio::io::AsyncWriteExt; use tokio_tungstenite::connect_async; use tokio_tungstenite::connect_async_tls_with_config; use tokio_tungstenite::tungstenite::client::IntoClientRequest; use tower::util::BoxService; use tower::BoxError; use tower::Service; -use tower::ServiceBuilder; use tower::ServiceExt; -use tower_http::decompression::Decompression; -use tower_http::decompression::DecompressionLayer; use tracing::Instrument; -use tracing_opentelemetry::OpenTelemetrySpanExt; use uuid::Uuid; +use super::http::HttpClientServiceFactory; +use super::http::HttpRequest; use super::layers::content_negotiation::GRAPHQL_JSON_RESPONSE_HEADER_VALUE; use super::Plugins; use crate::configuration::TlsClientAuth; @@ -71,22 +53,16 @@ use crate::plugins::subscription::WebSocketConfiguration; use 
crate::plugins::subscription::SUBSCRIPTION_WS_CUSTOM_CONNECTION_PARAMS; use crate::plugins::telemetry::LOGGING_DISPLAY_BODY; use crate::plugins::telemetry::LOGGING_DISPLAY_HEADERS; -use crate::plugins::traffic_shaping::Http2Config; use crate::protocols::websocket::convert_websocket_stream; use crate::protocols::websocket::GraphqlWebSocket; use crate::query_planner::OperationKind; use crate::services::layers::apq; -use crate::services::trust_dns_connector::new_async_http_connector; -use crate::services::trust_dns_connector::AsyncHyperResolver; use crate::services::SubgraphRequest; use crate::services::SubgraphResponse; use crate::Configuration; use crate::Context; use crate::Notify; -type HTTPClientService = - Decompression>, Body>>; - const PERSISTED_QUERY_NOT_FOUND_EXTENSION_CODE: &str = "PERSISTED_QUERY_NOT_FOUND"; const PERSISTED_QUERY_NOT_SUPPORTED_EXTENSION_CODE: &str = "PERSISTED_QUERY_NOT_SUPPORTED"; const PERSISTED_QUERY_NOT_FOUND_MESSAGE: &str = "PersistedQueryNotFound"; @@ -97,11 +73,7 @@ const HASH_VERSION_KEY: &str = "version"; const HASH_VERSION_VALUE: i32 = 1; const HASH_KEY: &str = "sha256Hash"; const GRAPHQL_RESPONSE: mediatype::Name = mediatype::Name::new_unchecked("graphql-response"); -const POOL_IDLE_TIMEOUT_DURATION: Option = Some(Duration::from_secs(5)); -// interior mutability is not a concern here, the value is never modified -#[allow(clippy::declare_interior_mutable_const)] -static ACCEPTED_ENCODINGS: HeaderValue = HeaderValue::from_static("gzip, br, deflate"); #[allow(clippy::declare_interior_mutable_const)] static CALLBACK_PROTOCOL_ACCEPT: HeaderValue = HeaderValue::from_static("application/json;callbackSpec=1.0"); @@ -116,31 +88,7 @@ enum APQError { Other, } -#[derive(PartialEq, Debug, Clone, Deserialize, JsonSchema, Copy)] -#[serde(rename_all = "lowercase")] -pub(crate) enum Compression { - /// gzip - Gzip, - /// deflate - Deflate, - /// brotli - Br, - /// identity - Identity, -} - -impl Display for Compression { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Compression::Gzip => write!(f, "gzip"), - Compression::Deflate => write!(f, "deflate"), - Compression::Br => write!(f, "br"), - Compression::Identity => write!(f, "identity"), - } - } -} - -#[cfg_attr(test, derive(Deserialize))] +#[cfg_attr(test, derive(serde::Deserialize))] #[derive(Serialize, Clone, Debug)] #[serde(rename_all = "camelCase")] struct SubscriptionExtension { @@ -153,10 +101,9 @@ struct SubscriptionExtension { /// Client for interacting with subgraphs. #[derive(Clone)] pub(crate) struct SubgraphService { - // Note: We use hyper::Client here in preference to reqwest to avoid expensive URL translation - // in the hot path. We use reqwest elsewhere because it's convenient and some of the - // opentelemetry crate require reqwest clients to work correctly (at time of writing). 
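The comment added just below ("a service with plugins applied cannot be cloned") is the motivation for the `HttpClientServiceFactory` introduced earlier in this diff: a middleware-wrapped tower stack is generally not `Clone`, so the subgraph service stores a factory and stamps out a fresh boxed client per request. A self-contained sketch of that pattern (the `ClientFactory` type and echo service are illustrative, not router API):

```rust
use tower::{service_fn, util::BoxService, BoxError, ServiceExt};

struct ClientFactory;

impl ClientFactory {
    // Build a fresh boxed service on every call; real code would also fold
    // per-plugin layers around the base service here.
    fn create(&self) -> BoxService<String, String, BoxError> {
        service_fn(|req: String| async move { Ok::<_, BoxError>(format!("echo: {req}")) })
            .boxed()
    }
}

#[tokio::main]
async fn main() -> Result<(), BoxError> {
    let factory = ClientFactory;
    let svc = factory.create(); // one service per call site, no Clone needed
    let out = svc.oneshot("hi".to_string()).await?;
    assert_eq!(out, "echo: hi");
    Ok(())
}
```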
- client: HTTPClientService, + // we hold a HTTP client service factory here because a service with plugins applied + // cannot be cloned + pub(crate) client_factory: HttpClientServiceFactory, service: Arc, /// Whether apq is enabled in the router for subgraph calls @@ -175,20 +122,11 @@ impl SubgraphService { pub(crate) fn from_config( service: impl Into, configuration: &Configuration, - tls_root_store: &Option, - http2: Http2Config, subscription_config: Option, + client_factory: crate::services::http::HttpClientServiceFactory, ) -> Result { let name: String = service.into(); - let tls_cert_store = configuration - .tls - .subgraph - .subgraphs - .get(&name) - .as_ref() - .and_then(|subgraph| subgraph.create_certificate_store()) - .transpose()? - .or_else(|| tls_root_store.clone()); + let enable_apq = configuration .apq .subgraph @@ -196,64 +134,25 @@ impl SubgraphService { .get(&name) .map(|apq| apq.enabled) .unwrap_or(configuration.apq.subgraph.all.enabled); - let client_cert_config = configuration - .tls - .subgraph - .subgraphs - .get(&name) - .as_ref() - .and_then(|tls| tls.client_authentication.as_ref()) - .or(configuration - .tls - .subgraph - .all - .client_authentication - .as_ref()); - - let tls_client_config = generate_tls_client_config(tls_cert_store, client_cert_config)?; SubgraphService::new( name, enable_apq, - http2, subscription_config, - tls_client_config, configuration.notify.clone(), + client_factory, ) } pub(crate) fn new( service: impl Into, enable_apq: bool, - http2: Http2Config, subscription_config: Option, - tls_config: ClientConfig, notify: Notify, + client_factory: crate::services::http::HttpClientServiceFactory, ) -> Result { - let mut http_connector = new_async_http_connector()?; - http_connector.set_nodelay(true); - http_connector.set_keepalive(Some(std::time::Duration::from_secs(60))); - http_connector.enforce_http(false); - - let builder = hyper_rustls::HttpsConnectorBuilder::new() - .with_tls_config(tls_config) - .https_or_http() - .enable_http1(); - - let connector = if http2 != Http2Config::Disable { - builder.enable_http2().wrap_connector(http_connector) - } else { - builder.wrap_connector(http_connector) - }; - - let http_client = hyper::Client::builder() - .pool_idle_timeout(POOL_IDLE_TIMEOUT_DURATION) - .http2_only(http2 == Http2Config::Http2Only) - .build(connector); Ok(Self { - client: ServiceBuilder::new() - .layer(DecompressionLayer::new()) - .service(http_client), + client_factory, service: Arc::new(service.into()), apq: Arc::new(::new(enable_apq)), subscription_config, @@ -290,10 +189,8 @@ impl tower::Service for SubgraphService { type Error = BoxError; type Future = BoxFuture<'static, Result>; - fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll> { - self.client - .poll_ready(cx) - .map(|res| res.map_err(|e| Box::new(e) as BoxError)) + fn poll_ready(&mut self, _cx: &mut std::task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) } fn call(&mut self, mut request: SubgraphRequest) -> Self::Future { @@ -332,8 +229,7 @@ impl tower::Service for SubgraphService { let (_, mut body) = subgraph_request.into_parts(); - let clone = self.client.clone(); - let client = std::mem::replace(&mut self.client, clone); + let client_factory = self.client_factory.clone(); let arc_apq_enabled = self.apq.clone(); @@ -462,6 +358,8 @@ impl tower::Service for SubgraphService { } } + let client = client_factory.create(&service_name); + // If APQ is not enabled, simply make the graphql call // with the same request body. 
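For context on the APQ branch that follows: when APQ is enabled, the service first sends a query-less request carrying only a hash of the query, and retries with the full body if the subgraph reports a cache miss. A sketch of the extension payload, assuming the standard APQ v1 wire format (consistent with the `HASH_VERSION_VALUE` and `sha256Hash` constants above; `apq_extension` is an illustrative helper, not a router function):

```rust
use serde_json::json;
use sha2::{Digest, Sha256};

// Build the persistedQuery extension sent on the first, query-less attempt.
fn apq_extension(query: &str) -> serde_json::Value {
    let sha256_hash = hex::encode(Sha256::digest(query.as_bytes()));
    json!({
        "persistedQuery": {
            "version": 1,
            "sha256Hash": sha256_hash,
        }
    })
}
```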
let apq_enabled = arc_apq_enabled.as_ref(); @@ -500,7 +398,7 @@ impl tower::Service for SubgraphService { request.clone(), apq_body.clone(), context.clone(), - client.clone(), + client_factory.create(&service_name), &service_name, ) .await?; @@ -721,7 +619,7 @@ async fn call_http( request: SubgraphRequest, body: graphql::Request, context: Context, - client: HTTPClientService, + client: crate::services::http::BoxService, service_name: &str, ) -> Result { let SubgraphRequest { @@ -733,22 +631,10 @@ async fn call_http( .operation_name .clone() .unwrap_or_default(); - let (parts, _) = subgraph_request.into_parts(); + let (parts, _) = subgraph_request.into_parts(); let body = serde_json::to_string(&body).expect("JSON serialization should not fail"); - let compressed_body = compress(body, &parts.headers) - .instrument(tracing::debug_span!("body_compression")) - .await - .map_err(|err| { - tracing::error!(compress_error = format!("{err:?}").as_str()); - - FetchError::CompressionError { - service: service_name.to_string(), - reason: err.to_string(), - } - })?; - - let mut request = http::request::Request::from_parts(parts, compressed_body.into()); + let mut request = http::Request::from_parts(parts, Body::from(body)); request .headers_mut() @@ -756,9 +642,6 @@ async fn call_http( request .headers_mut() .append(ACCEPT, ACCEPT_GRAPHQL_JSON.clone()); - request - .headers_mut() - .insert(ACCEPT_ENCODING, ACCEPTED_ENCODINGS.clone()); let schema_uri = request.uri(); let host = schema_uri.host().unwrap_or_default(); @@ -785,12 +668,6 @@ async fn call_http( "apollo.subgraph.name" = %service_name, "graphql.operation.name" = %operation_name, ); - get_text_map_propagator(|propagator| { - propagator.inject_context( - &subgraph_req_span.context(), - &mut opentelemetry_http::HeaderInjector(request.headers_mut()), - ); - }); // The graphql spec is lax about what strategy to use for processing responses: https://github.com/graphql/graphql-over-http/blob/main/spec/GraphQLOverHTTP.md#processing-the-response // @@ -806,45 +683,14 @@ async fn call_http( // 2. If an HTTP status is not 2xx it will always be attached as a graphql error. // 3. If the response type is `application/json` and status is not 2xx and the body the entire body will be output if the response is not valid graphql. - let display_headers = context.contains_key(LOGGING_DISPLAY_HEADERS); let display_body = context.contains_key(LOGGING_DISPLAY_BODY); - let signing_params = context - .extensions() - .lock() - .get::() - .cloned(); - - let request = if let Some(signing_params) = signing_params { - signing_params.sign(request, service_name).await? - } else { - request - }; - - // Print out the debug for the request - if display_headers { - tracing::info!(http.request.headers = ?request.headers(), apollo.subgraph.name = %service_name, "Request headers to subgraph {service_name:?}"); - } - if display_body { - tracing::info!(http.request.body = ?request.body(), apollo.subgraph.name = %service_name, "Request body to subgraph {service_name:?}"); - } - // Perform the actual fetch. If this fails then we didn't manage to make the call at all, so we can't do anything with it. 
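This pairs with the `fetch.rs` change earlier in the diff: a failed subgraph fetch is no longer a `Result::Err` that aborts plan execution, it is folded into a `(value, errors)` pair so sibling fetches can still contribute their data. An illustrative sketch of that shape (`ErrorSketch` and `fold_fetch_failure` are stand-ins, not the router's types; the message format is an assumption):

```rust
use serde_json_bytes::Value;

// Stand-in for the router's graphql::Error; illustrative only.
#[derive(Debug)]
struct ErrorSketch {
    message: String,
}

// On failure, return default data plus the failure as a GraphQL-style error,
// instead of propagating an Err up through plan execution.
fn fold_fetch_failure(service: &str, reason: &str) -> (Value, Vec<ErrorSketch>) {
    (
        Value::default(), // Value::Null, matching Value::default() in the diff
        vec![ErrorSketch {
            message: format!("HTTP fetch failed from '{service}': {reason}"),
        }],
    )
}
```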
- let (parts, content_type, body) = do_fetch( - client, - &context, - service_name, - request, - display_headers, - display_body, - ) - .instrument(subgraph_req_span) - .await?; + let (parts, content_type, body) = + do_fetch(client, &context, service_name, request, display_body) + .instrument(subgraph_req_span) + .await?; - // Print out the debug for the response - if display_headers { - tracing::info!(response.headers = ?parts.headers, apollo.subgraph.name = %service_name, "Response headers from subgraph {service_name:?}"); - } if display_body { if let Some(Ok(b)) = &body { tracing::info!( @@ -964,11 +810,11 @@ fn get_graphql_content_type(service_name: &str, parts: &Parts) -> Result, - display_headers: bool, display_body: bool, ) -> Result< ( @@ -980,7 +826,10 @@ async fn do_fetch( > { let _active_request_guard = context.enter_active_request(); let response = client - .call(request) + .call(HttpRequest { + http_request: request, + context: context.clone(), + }) .map_err(|err| { tracing::error!(fetch_error = ?err); FetchError::SubrequestHttpError { @@ -991,13 +840,8 @@ async fn do_fetch( }) .await?; - let (parts, body) = response.into_parts(); - // Print out debug for the response - if display_headers { - tracing::info!( - http.response.headers = ?parts.headers, apollo.subgraph.name = %service_name, "Response headers from subgraph {service_name:?}" - ); - } + let (parts, body) = response.http_response.into_parts(); + let content_type = get_graphql_content_type(service_name, &parts); let body = if content_type.is_ok() { @@ -1121,43 +965,6 @@ fn get_apq_error(gql_response: &graphql::Response) -> APQError { APQError::Other } -pub(crate) async fn compress(body: String, headers: &HeaderMap) -> Result, BoxError> { - let content_encoding = headers.get(&CONTENT_ENCODING); - match content_encoding { - Some(content_encoding) => match content_encoding.to_str()? 
{ - "br" => { - let mut br_encoder = BrotliEncoder::new(Vec::new()); - br_encoder.write_all(body.as_bytes()).await?; - br_encoder.shutdown().await?; - - Ok(br_encoder.into_inner()) - } - "gzip" => { - let mut gzip_encoder = GzipEncoder::new(Vec::new()); - gzip_encoder.write_all(body.as_bytes()).await?; - gzip_encoder.shutdown().await?; - - Ok(gzip_encoder.into_inner()) - } - "deflate" => { - let mut df_encoder = ZlibEncoder::new(Vec::new()); - df_encoder.write_all(body.as_bytes()).await?; - df_encoder.shutdown().await?; - - Ok(df_encoder.into_inner()) - } - "identity" => Ok(body.into_bytes()), - unknown => { - tracing::error!("unknown content-encoding value '{:?}'", unknown); - Err(BoxError::from(format!( - "unknown content-encoding value '{unknown:?}'", - ))) - } - }, - None => Ok(body.into_bytes()), - } -} - #[derive(Clone)] pub(crate) struct SubgraphServiceFactory { pub(crate) services: Arc>>, @@ -1213,7 +1020,6 @@ where #[cfg(test)] mod tests { use std::convert::Infallible; - use std::io; use std::net::SocketAddr; use std::net::TcpListener; use std::str::FromStr; @@ -1230,15 +1036,8 @@ mod tests { use http::header::HOST; use http::StatusCode; use http::Uri; - use http::Version; - use hyper::server::conn::AddrIncoming; use hyper::service::make_service_fn; use hyper::Body; - use hyper_rustls::TlsAcceptor; - use rustls::server::AllowAnyAuthenticatedClient; - use rustls::Certificate; - use rustls::PrivateKey; - use rustls::ServerConfig; use serde_json_bytes::ByteString; use serde_json_bytes::Value; use tokio::sync::mpsc; @@ -1249,10 +1048,6 @@ mod tests { use SubgraphRequest; use super::*; - use crate::configuration::load_certs; - use crate::configuration::load_key; - use crate::configuration::TlsClient; - use crate::configuration::TlsClientAuth; use crate::graphql::Error; use crate::graphql::Request; use crate::graphql::Response; @@ -1260,6 +1055,7 @@ mod tests { use crate::plugins::subscription::SubgraphPassthroughMode; use crate::plugins::subscription::SubscriptionModeConfig; use crate::plugins::subscription::SUBSCRIPTION_CALLBACK_HMAC_KEY; + use crate::plugins::traffic_shaping::Http2Config; use crate::protocols::websocket::ClientMessage; use crate::protocols::websocket::ServerMessage; use crate::protocols::websocket::WebSocketProtocol; @@ -1400,53 +1196,6 @@ mod tests { server.await.unwrap(); } - // starts a local server emulating a subgraph returning compressed response - async fn emulate_subgraph_compressed_response(listener: TcpListener) { - async fn handle(request: http::Request) -> Result, Infallible> { - // Check the compression of the body - let mut encoder = GzipEncoder::new(Vec::new()); - encoder - .write_all( - &serde_json::to_vec(&Request::builder().query("query".to_string()).build()) - .unwrap(), - ) - .await - .unwrap(); - encoder.shutdown().await.unwrap(); - let compressed_body = encoder.into_inner(); - assert_eq!( - compressed_body, - hyper::body::to_bytes(request.into_body()) - .await - .unwrap() - .to_vec() - ); - - let original_body = Response { - data: Some(Value::String(ByteString::from("test"))), - ..Response::default() - }; - let mut encoder = GzipEncoder::new(Vec::new()); - encoder - .write_all(&serde_json::to_vec(&original_body).unwrap()) - .await - .unwrap(); - encoder.shutdown().await.unwrap(); - let compressed_body = encoder.into_inner(); - - Ok(http::Response::builder() - .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) - .header(CONTENT_ENCODING, "gzip") - .status(StatusCode::OK) - .body(compressed_body.into()) - .unwrap()) - } - - let make_svc = 
make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) }); - let server = Server::from_tcp(listener).unwrap().serve(make_svc); - server.await.unwrap(); - } - // starts a local server emulating a subgraph returning response with // "errors" : {["message": "PersistedQueryNotSupported",...],...} async fn emulate_persisted_query_not_supported_message(listener: TcpListener) { @@ -1935,13 +1684,13 @@ mod tests { let subgraph_service = SubgraphService::new( "testbis", true, - Http2Config::Disable, subscription_config().into(), - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::builder().build(), + HttpClientServiceFactory::from_config( + "testbis", + &Configuration::default(), + Http2Config::Disable, + ), ) .expect("can create a SubgraphService"); let (tx, _rx) = mpsc::channel(2); @@ -1979,13 +1728,13 @@ mod tests { let subgraph_service = SubgraphService::new( "test", true, - Http2Config::Enable, None, - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); @@ -2013,13 +1762,13 @@ mod tests { let subgraph_service = SubgraphService::new( "test", true, - Http2Config::Enable, None, - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); @@ -2047,13 +1796,13 @@ mod tests { let subgraph_service = SubgraphService::new( "test", true, - Http2Config::Enable, None, - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); @@ -2086,13 +1835,13 @@ mod tests { let subgraph_service = SubgraphService::new( "test", true, - Http2Config::Enable, None, - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); @@ -2129,13 +1878,13 @@ mod tests { let subgraph_service = SubgraphService::new( "test", true, - Http2Config::Enable, None, - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); @@ -2170,13 +1919,13 @@ mod tests { let subgraph_service = SubgraphService::new( "test", true, - Http2Config::Disable, subscription_config().into(), - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::builder().build(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); let (tx, rx) = mpsc::channel(2); @@ -2223,13 +1972,13 @@ mod tests { let subgraph_service = SubgraphService::new( "test", true, - Http2Config::Disable, subscription_config().into(), - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), 
Notify::builder().build(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); let (tx, _rx) = mpsc::channel(2); @@ -2267,13 +2016,13 @@ mod tests { let subgraph_service = SubgraphService::new( "test", true, - Http2Config::Enable, None, - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); @@ -2309,13 +2058,13 @@ mod tests { let subgraph_service = SubgraphService::new( "test", true, - Http2Config::Enable, None, - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); @@ -2338,54 +2087,6 @@ mod tests { ); } - #[tokio::test(flavor = "multi_thread")] - async fn test_compressed_request_response_body() { - let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); - let socket_addr = listener.local_addr().unwrap(); - tokio::task::spawn(emulate_subgraph_compressed_response(listener)); - let subgraph_service = SubgraphService::new( - "test", - false, - Http2Config::Enable, - None, - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), - Notify::default(), - ) - .expect("can create a SubgraphService"); - - let url = Uri::from_str(&format!("http://{socket_addr}")).unwrap(); - let resp = subgraph_service - .oneshot(SubgraphRequest { - supergraph_request: supergraph_request("query"), - subgraph_request: http::Request::builder() - .header(HOST, "rhost") - .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) - .header(CONTENT_ENCODING, "gzip") - .uri(url) - .body(Request::builder().query("query".to_string()).build()) - .expect("expecting valid request"), - operation_kind: OperationKind::Query, - context: Context::new(), - subgraph_name: String::from("test").into(), - subscription_stream: None, - connection_closed_signal: None, - query_hash: Default::default(), - authorization: Default::default(), - }) - .await - .unwrap(); - // Test the right decompression of the body - let resp_from_subgraph = Response { - data: Some(Value::String(ByteString::from("test"))), - ..Response::default() - }; - - assert_eq!(resp.response.body(), &resp_from_subgraph); - } - #[tokio::test(flavor = "multi_thread")] async fn test_unauthorized() { let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); @@ -2394,13 +2095,13 @@ mod tests { let subgraph_service = SubgraphService::new( "test", true, - Http2Config::Enable, None, - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); @@ -2431,13 +2132,13 @@ mod tests { let subgraph_service = SubgraphService::new( "test", true, - Http2Config::Enable, None, - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); @@ -2477,13 +2178,13 @@ mod tests { let subgraph_service = 
SubgraphService::new( "test", true, - Http2Config::Enable, None, - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); @@ -2521,13 +2222,13 @@ mod tests { let subgraph_service = SubgraphService::new( "test", true, - Http2Config::Enable, None, - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); @@ -2562,13 +2263,13 @@ mod tests { let subgraph_service = SubgraphService::new( "test", true, - Http2Config::Enable, None, - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); @@ -2603,13 +2304,13 @@ mod tests { let subgraph_service = SubgraphService::new( "test", true, - Http2Config::Enable, None, - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); @@ -2644,13 +2345,13 @@ mod tests { let subgraph_service = SubgraphService::new( "test", false, - Http2Config::Enable, None, - ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), ) .expect("can create a SubgraphService"); @@ -2676,291 +2377,4 @@ mod tests { assert_eq!(resp.response.body(), &expected_resp); } - - async fn tls_server( - listener: tokio::net::TcpListener, - certificates: Vec, - key: PrivateKey, - body: &'static str, - ) { - let acceptor = TlsAcceptor::builder() - .with_single_cert(certificates, key) - .unwrap() - .with_all_versions_alpn() - .with_incoming(AddrIncoming::from_listener(listener).unwrap()); - let service = make_service_fn(|_| async { - Ok::<_, io::Error>(service_fn(|_req| async { - Ok::<_, io::Error>( - http::Response::builder() - .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) - .status(StatusCode::OK) - .version(Version::HTTP_11) - .body::(body.into()) - .unwrap(), - ) - })) - }); - let server = Server::builder(acceptor).serve(service); - server.await.unwrap() - } - - // Note: This test relies on a checked in certificate with the following validity - // characteristics: - // Validity - // Not Before: Oct 10 07:32:39 2023 GMT - // Not After : Oct 7 07:32:39 2033 GMT - // If this test fails and it is October 7th 2033, you will need to generate a - // new self signed cert. Currently, we use openssl to do this, in the future I - // hope we have something better... - // In the testdata directory run: - // openssl x509 -req -in server_self_signed.csr -signkey server.key -out server_self_signed.crt -extfile server.ext -days 3650 - // That will give you another 10 years, assuming nothing else in the signing - // framework has expired. 
- #[tokio::test(flavor = "multi_thread")] - async fn tls_self_signed() { - let certificate_pem = include_str!("./testdata/server_self_signed.crt"); - let key_pem = include_str!("./testdata/server.key"); - - let certificates = load_certs(certificate_pem).unwrap(); - let key = load_key(key_pem).unwrap(); - - let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); - let socket_addr = listener.local_addr().unwrap(); - tokio::task::spawn(tls_server(listener, certificates, key, r#"{"data": null}"#)); - - // we cannot parse a configuration from text, because certificates are generally - // added by file expansion and we don't have access to that here, and inserting - // the PEM data directly generates parsing issues due to end of line characters - let mut config = Configuration::default(); - config.tls.subgraph.subgraphs.insert( - "test".to_string(), - TlsClient { - certificate_authorities: Some(certificate_pem.into()), - client_authentication: None, - }, - ); - let subgraph_service = - SubgraphService::from_config("test", &config, &None, Http2Config::Enable, None) - .unwrap(); - - let url = Uri::from_str(&format!("https://localhost:{}", socket_addr.port())).unwrap(); - let response = subgraph_service - .oneshot( - SubgraphRequest::builder() - .supergraph_request(supergraph_request("query")) - .subgraph_request(subgraph_http_request(url, "query")) - .operation_kind(OperationKind::Query) - .subgraph_name(String::from("test")) - .context(Context::new()) - .build(), - ) - .await - .unwrap(); - - assert_eq!(response.response.body().data, Some(Value::Null)); - } - - #[tokio::test(flavor = "multi_thread")] - async fn tls_custom_root() { - let certificate_pem = include_str!("./testdata/server.crt"); - let ca_pem = include_str!("./testdata/CA/ca.crt"); - let key_pem = include_str!("./testdata/server.key"); - - let mut certificates = load_certs(certificate_pem).unwrap(); - certificates.extend(load_certs(ca_pem).unwrap()); - let key = load_key(key_pem).unwrap(); - - let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); - let socket_addr = listener.local_addr().unwrap(); - tokio::task::spawn(tls_server(listener, certificates, key, r#"{"data": null}"#)); - - // we cannot parse a configuration from text, because certificates are generally - // added by file expansion and we don't have access to that here, and inserting - // the PEM data directly generates parsing issues due to end of line characters - let mut config = Configuration::default(); - config.tls.subgraph.subgraphs.insert( - "test".to_string(), - TlsClient { - certificate_authorities: Some(ca_pem.into()), - client_authentication: None, - }, - ); - let subgraph_service = - SubgraphService::from_config("test", &config, &None, Http2Config::Enable, None) - .unwrap(); - - let url = Uri::from_str(&format!("https://localhost:{}", socket_addr.port())).unwrap(); - let response = subgraph_service - .oneshot( - SubgraphRequest::builder() - .supergraph_request(supergraph_request("query")) - .subgraph_request(subgraph_http_request(url, "query")) - .operation_kind(OperationKind::Query) - .subgraph_name(String::from("test")) - .context(Context::new()) - .build(), - ) - .await - .unwrap(); - assert_eq!(response.response.body().data, Some(Value::Null)); - } - - async fn tls_server_with_client_auth( - listener: tokio::net::TcpListener, - certificates: Vec, - key: PrivateKey, - client_root: Certificate, - body: &'static str, - ) { - let mut client_auth_roots = RootCertStore::empty(); - 
client_auth_roots.add(&client_root).unwrap(); - - let client_auth = AllowAnyAuthenticatedClient::new(client_auth_roots).boxed(); - - let acceptor = TlsAcceptor::builder() - .with_tls_config( - ServerConfig::builder() - .with_safe_defaults() - .with_client_cert_verifier(client_auth) - .with_single_cert(certificates, key) - .unwrap(), - ) - .with_all_versions_alpn() - .with_incoming(AddrIncoming::from_listener(listener).unwrap()); - let service = make_service_fn(|_| async { - Ok::<_, io::Error>(service_fn(|_req| async { - Ok::<_, io::Error>( - http::Response::builder() - .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) - .status(StatusCode::OK) - .version(Version::HTTP_11) - .body::(body.into()) - .unwrap(), - ) - })) - }); - let server = Server::builder(acceptor).serve(service); - server.await.unwrap() - } - - #[tokio::test(flavor = "multi_thread")] - async fn tls_client_auth() { - let server_certificate_pem = include_str!("./testdata/server.crt"); - let ca_pem = include_str!("./testdata/CA/ca.crt"); - let server_key_pem = include_str!("./testdata/server.key"); - - let mut server_certificates = load_certs(server_certificate_pem).unwrap(); - let ca_certificate = load_certs(ca_pem).unwrap().remove(0); - server_certificates.push(ca_certificate.clone()); - let key = load_key(server_key_pem).unwrap(); - - let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); - let socket_addr = listener.local_addr().unwrap(); - tokio::task::spawn(tls_server_with_client_auth( - listener, - server_certificates, - key, - ca_certificate, - r#"{"data": null}"#, - )); - - let client_certificate_pem = include_str!("./testdata/client.crt"); - let client_key_pem = include_str!("./testdata/client.key"); - - let client_certificates = load_certs(client_certificate_pem).unwrap(); - let client_key = load_key(client_key_pem).unwrap(); - - // we cannot parse a configuration from text, because certificates are generally - // added by file expansion and we don't have access to that here, and inserting - // the PEM data directly generates parsing issues due to end of line characters - let mut config = Configuration::default(); - config.tls.subgraph.subgraphs.insert( - "test".to_string(), - TlsClient { - certificate_authorities: Some(ca_pem.into()), - client_authentication: Some(TlsClientAuth { - certificate_chain: client_certificates, - key: client_key, - }), - }, - ); - let subgraph_service = - SubgraphService::from_config("test", &config, &None, Http2Config::Enable, None) - .unwrap(); - - let url = Uri::from_str(&format!("https://localhost:{}", socket_addr.port())).unwrap(); - let response = subgraph_service - .oneshot( - SubgraphRequest::builder() - .supergraph_request(supergraph_request("query")) - .subgraph_request(subgraph_http_request(url, "query")) - .operation_kind(OperationKind::Query) - .subgraph_name(String::from("test")) - .context(Context::new()) - .build(), - ) - .await - .unwrap(); - assert_eq!(response.response.body().data, Some(Value::Null)); - } - - // starts a local server emulating a subgraph returning status code 401 - async fn emulate_h2c_server(listener: TcpListener) { - async fn handle(_request: http::Request) -> Result, Infallible> { - println!("h2C server got req: {_request:?}"); - Ok(http::Response::builder() - .header(CONTENT_TYPE, APPLICATION_JSON.essence_str()) - .status(StatusCode::OK) - .body( - serde_json::to_string(&Response { - data: Some(Value::default()), - ..Response::default() - }) - .expect("always valid") - .into(), - ) - .unwrap()) - } - - let make_svc = 
make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) }); - let server = Server::from_tcp(listener) - .unwrap() - .http2_only(true) - .serve(make_svc); - server.await.unwrap(); - } - - #[tokio::test(flavor = "multi_thread")] - async fn test_subgraph_h2c() { - let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); - let socket_addr = listener.local_addr().unwrap(); - tokio::task::spawn(emulate_h2c_server(listener)); - let subgraph_service = SubgraphService::new( - "test", - true, - Http2Config::Http2Only, - None, - rustls::ClientConfig::builder() - .with_safe_defaults() - .with_native_roots() - .with_no_client_auth(), - Notify::default(), - ) - .expect("can create a SubgraphService"); - - let url = Uri::from_str(&format!("http://{socket_addr}")).unwrap(); - let response = subgraph_service - .oneshot( - SubgraphRequest::builder() - .supergraph_request(supergraph_request("query")) - .subgraph_request(subgraph_http_request(url, "query")) - .operation_kind(OperationKind::Query) - .subgraph_name(String::from("test")) - .context(Context::new()) - .build(), - ) - .await - .unwrap(); - assert!(response.response.body().errors.is_empty()); - } } diff --git a/apollo-router/src/services/supergraph.rs index 04cd955f76..3807cf859d 100644 --- a/apollo-router/src/services/supergraph.rs +++ b/apollo-router/src/services/supergraph.rs @@ -305,6 +305,46 @@ impl Response { ) } + /// This is the constructor (or builder) to use when constructing a real Response. + /// + /// Required parameters are required in non-testing code to create a Response. + #[allow(clippy::too_many_arguments)] + #[builder(visibility = "pub(crate)")] + fn infallible_new( + label: Option, + data: Option, + path: Option, + errors: Vec, + // Skip the `Object` type alias in order to use buildstructor’s map special-casing + extensions: JsonMap, + status_code: Option, + headers: MultiMap, + context: Context, + ) -> Self { + // Build a response + let b = graphql::Response::builder() + .and_label(label) + .and_path(path) + .errors(errors) + .extensions(extensions); + let res = match data { + Some(data) => b.data(data).build(), + None => b.build(), + }; + + // Build an http Response + let mut builder = http::Response::builder().status(status_code.unwrap_or(StatusCode::OK)); + for (header_name, values) in headers { + for header_value in values { + builder = builder.header(header_name.clone(), header_value); + } + } + + let response = builder.body(once(ready(res)).boxed()).expect("can't fail"); + + Self { response, context } + } + pub(crate) fn new_from_graphql_response(response: graphql::Response, context: Context) -> Self { Self { response: http::Response::new(once(ready(response)).boxed()), diff --git a/apollo-router/src/services/supergraph/service.rs index 69b4f54512..dff0594e40 100644 --- a/apollo-router/src/services/supergraph/service.rs +++ b/apollo-router/src/services/supergraph/service.rs @@ -64,6 +64,7 @@ use crate::services::ExecutionResponse; use crate::services::ExecutionServiceFactory; use crate::services::QueryPlannerContent; use crate::services::QueryPlannerResponse; +use crate::services::SubgraphService; use crate::services::SupergraphRequest; use crate::services::SupergraphResponse; use crate::spec::Query; @@ -143,12 +144,11 @@ impl Service for SupergraphService { ..Default::default() }]; - Ok(SupergraphResponse::builder() + Ok(SupergraphResponse::infallible_builder() .errors(errors) 
.status_code(StatusCode::INTERNAL_SERVER_ERROR) .context(context_cloned) - .build() - .expect("building a response like this should not fail")) + .build()) }); Box::pin(fut) @@ -186,24 +186,22 @@ async fn service_call( Ok(resp) => resp, Err(err) => match err.into_graphql_errors() { Ok(gql_errors) => { - return Ok(SupergraphResponse::builder() + return Ok(SupergraphResponse::infallible_builder() .context(context) .errors(gql_errors) .status_code(StatusCode::BAD_REQUEST) // If it's a graphql error we return a status code 400 - .build() - .expect("this response build must not fail")); + .build()); } Err(err) => return Err(err.into()), }, }; if !errors.is_empty() { - return Ok(SupergraphResponse::builder() + return Ok(SupergraphResponse::infallible_builder() .context(context) .errors(errors) .status_code(StatusCode::BAD_REQUEST) // If it's a graphql error we return a status code 400 - .build() - .expect("this response build must not fail")); + .build()); } match content { @@ -293,7 +291,7 @@ async fn service_call( let query_plan = plan.clone(); let execution_service_factory_cloned = execution_service_factory.clone(); let cloned_supergraph_req = - clone_supergraph_request(&req.supergraph_request, context.clone())?; + clone_supergraph_request(&req.supergraph_request, context.clone()); // Spawn task for subscription tokio::spawn(async move { subscription_task( @@ -487,8 +485,14 @@ async fn subscription_task( break; }, }; + let plugins = Arc::new(IndexMap::from_iter(plugins)); - execution_service_factory = ExecutionServiceFactory { schema: execution_service_factory.schema.clone(), plugins: plugins.clone(), subgraph_service_factory: Arc::new(SubgraphServiceFactory::new(subgraph_services.into_iter().map(|(k, v)| (k, Arc::new(v) as Arc)).collect(), plugins.clone())) }; + execution_service_factory = ExecutionServiceFactory { + schema: execution_service_factory.schema.clone(), + plugins: plugins.clone(), + subgraph_service_factory: Arc::new(SubgraphServiceFactory::new(subgraph_services.into_iter().map(|(k, v)| (k, Arc::new(v) as Arc)).collect(), plugins.clone())), + + }; } } Some(new_schema) = schema_updated_rx.next() => { @@ -529,8 +533,7 @@ async fn dispatch_event( let cloned_supergraph_req = clone_supergraph_request( &supergraph_req.supergraph_request, supergraph_req.context.clone(), - ) - .expect("it's a clone of the original one; qed"); + ); let execution_request = ExecutionRequest::internal_builder() .supergraph_request(cloned_supergraph_req.supergraph_request) .query_plan(query_plan.clone()) @@ -621,7 +624,7 @@ async fn plan_query( fn clone_supergraph_request( req: &http::Request, context: Context, -) -> Result { +) -> SupergraphRequest { let mut cloned_supergraph_req = SupergraphRequest::builder() .extensions(req.body().extensions.clone()) .and_query(req.body().query.clone()) @@ -637,7 +640,9 @@ fn clone_supergraph_request( } } - cloned_supergraph_req.build() + cloned_supergraph_req + .build() + .expect("cloning an existing supergraph request should not fail") } /// Builder which generates a plugin pipeline. @@ -648,7 +653,7 @@ fn clone_supergraph_request( /// through the entire stack to return a response. 
pub(crate) struct PluggableSupergraphServiceBuilder { plugins: Plugins, - subgraph_services: Vec<(String, Arc)>, + subgraph_services: Vec<(String, Box)>, configuration: Option>, planner: BridgeQueryPlanner, } @@ -681,7 +686,7 @@ impl PluggableSupergraphServiceBuilder { S: MakeSubgraphService, { self.subgraph_services - .push((name.to_string(), Arc::new(service_maker))); + .push((name.to_string(), Box::new(service_maker))); self } @@ -693,7 +698,9 @@ impl PluggableSupergraphServiceBuilder { self } - pub(crate) async fn build(self) -> Result { + pub(crate) async fn build( + mut self, + ) -> Result { let configuration = self.configuration.unwrap_or_default(); let schema = self.planner.schema(); @@ -715,9 +722,19 @@ impl PluggableSupergraphServiceBuilder { } let plugins = Arc::new(plugins); + for (_, service) in self.subgraph_services.iter_mut() { + if let Some(subgraph) = + (service as &mut dyn std::any::Any).downcast_mut::() + { + subgraph.client_factory.plugins = plugins.clone(); + } + } let subgraph_service_factory = Arc::new(SubgraphServiceFactory::new( - self.subgraph_services, + self.subgraph_services + .into_iter() + .map(|(name, service)| (name, service.into())) + .collect(), plugins.clone(), )); diff --git a/apollo-router/src/uplink/license_enforcement.rs b/apollo-router/src/uplink/license_enforcement.rs index 2c5ff92f99..c81777a4b0 100644 --- a/apollo-router/src/uplink/license_enforcement.rs +++ b/apollo-router/src/uplink/license_enforcement.rs @@ -30,6 +30,7 @@ use serde_json::Value; use thiserror::Error; use url::Url; +use crate::plugins::authentication::convert_key_algorithm; use crate::spec::LINK_AS_ARGUMENT; use crate::spec::LINK_DIRECTIVE_NAME; use crate::spec::LINK_URL_ARGUMENT; @@ -297,7 +298,7 @@ impl LicenseEnforcementReport { .name("Coprocessor plugin") .build(), ConfigurationRestriction::builder() - .path("$.supergraph.query_planning.experimental_cache.redis") + .path("$.supergraph.query_planning.cache.redis") .name("Query plan caching") .build(), ConfigurationRestriction::builder() @@ -305,7 +306,7 @@ impl LicenseEnforcementReport { .name("APQ caching") .build(), ConfigurationRestriction::builder() - .path("$.experimental_entity_cache.enabled") + .path("$.preview_entity_cache.enabled") .value(true) .name("Subgraph entity caching") .build(), @@ -483,9 +484,12 @@ impl FromStr for License { // Set up the validation for the JWT. 
// We don't require exp as we are only interested in haltAt and warnAt let mut validation = Validation::new( - jwk.common - .algorithm - .expect("alg is required on all keys in router.jwks.json"), + convert_key_algorithm( + jwk.common + .key_algorithm + .expect("alg is required on all keys in router.jwks.json"), + ) + .expect("only signing algorithms are used"), ); validation.validate_exp = false; validation.set_required_spec_claims(&["iss", "sub", "aud", "warnAt", "haltAt"]); diff --git a/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap b/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap index 4580520627..49dd8427d6 100644 --- a/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap +++ b/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap @@ -13,13 +13,13 @@ Configuration yaml: .coprocessor * Query plan caching - .supergraph.query_planning.experimental_cache.redis + .supergraph.query_planning.cache.redis * APQ caching .apq.router.cache.redis * Subgraph entity caching - .experimental_entity_cache.enabled + .preview_entity_cache.enabled * Federated subscriptions .subscription.enabled diff --git a/apollo-router/src/uplink/testdata/restricted.router.yaml b/apollo-router/src/uplink/testdata/restricted.router.yaml index 67b0d5b585..9947011e5a 100644 --- a/apollo-router/src/uplink/testdata/restricted.router.yaml +++ b/apollo-router/src/uplink/testdata/restricted.router.yaml @@ -14,7 +14,7 @@ coprocessor: supergraph: query_planning: - experimental_cache: + cache: redis: urls: - https://example.com @@ -52,7 +52,7 @@ plugins: experimental.restricted: enabled: true -experimental_entity_cache: +preview_entity_cache: redis: urls: - https://example.com diff --git a/apollo-router/tests/fixtures/jwks-unknown-alg.json b/apollo-router/tests/fixtures/jwks-unknown-alg.json index 3278b32dcb..74f96df115 100644 --- a/apollo-router/tests/fixtures/jwks-unknown-alg.json +++ b/apollo-router/tests/fixtures/jwks-unknown-alg.json @@ -4,13 +4,13 @@ "kty": "oct", "kid": "key1", "alg": "HS256", - "k": "c2VjcmV0Cg==", + "k": "c2VjcmV0Cg", "use": "sig" }, { "kty": "oct", "kid": "key2", - "k": "c2VjcmV0Cg==", + "k": "c2VjcmV0Cg", "use": "sig" }, { diff --git a/apollo-router/tests/fixtures/jwks.json b/apollo-router/tests/fixtures/jwks.json index a7bb2c14a1..1fc7e475dd 100644 --- a/apollo-router/tests/fixtures/jwks.json +++ b/apollo-router/tests/fixtures/jwks.json @@ -4,13 +4,13 @@ "kty": "oct", "kid": "key1", "alg": "HS256", - "k": "c2VjcmV0Cg==", + "k": "c2VjcmV0Cg", "use": "sig" }, { "kty": "oct", "kid": "key2", - "k": "c2VjcmV0Cg==", + "k": "c2VjcmV0Cg", "use": "sig" }, { diff --git a/apollo-router/tests/fixtures/prometheus.router.yaml b/apollo-router/tests/fixtures/prometheus.router.yaml index 21358af4fa..53c258bb41 100644 --- a/apollo-router/tests/fixtures/prometheus.router.yaml +++ b/apollo-router/tests/fixtures/prometheus.router.yaml @@ -8,6 +8,19 @@ telemetry: enabled: true path: /metrics common: + views: + - name: apollo_router_http_request_duration_seconds + aggregation: + histogram: + buckets: + - 0.1 + - 0.5 + - 1 + - 2 + - 3 + - 4 + - 5 + - 100 attributes: subgraph: all: diff --git a/apollo-router/tests/fixtures/remove_header.rhai b/apollo-router/tests/fixtures/remove_header.rhai new file mode 100644 index 0000000000..14e06680f1 --- 
/dev/null +++ b/apollo-router/tests/fixtures/remove_header.rhai @@ -0,0 +1,18 @@ +fn supergraph_service(service) { + print("registering callbacks for operation timing"); + + const request_callback = Fn("process_request"); + service.map_request(request_callback); + + const response_callback = Fn("process_response"); + service.map_response(response_callback); +} + +fn process_request(request) { + request.context["request_start"] = Router.APOLLO_START.elapsed; +} + +fn process_response(response) { + response.headers.remove("x-custom-header") +} + diff --git a/apollo-router/tests/fixtures/subgraph_auth.router.yaml b/apollo-router/tests/fixtures/subgraph_auth.router.yaml index 4a32fdf48c..48c9964bbf 100644 --- a/apollo-router/tests/fixtures/subgraph_auth.router.yaml +++ b/apollo-router/tests/fixtures/subgraph_auth.router.yaml @@ -6,6 +6,19 @@ telemetry: enabled: true path: /metrics common: + views: + - name: apollo_router_http_request_duration_seconds + aggregation: + histogram: + buckets: + - 0.1 + - 0.5 + - 1 + - 2 + - 3 + - 4 + - 5 + - 100 attributes: subgraph: all: diff --git a/apollo-router/tests/integration/redis.rs b/apollo-router/tests/integration/redis.rs index 2614fe39f8..14c6f808bc 100644 --- a/apollo-router/tests/integration/redis.rs +++ b/apollo-router/tests/integration/redis.rs @@ -23,7 +23,7 @@ mod test { // 2. run `docker compose up -d` and connect to the redis container by running `docker exec -ti /bin/bash`. // 3. Run the `redis-cli` command from the shell and start the redis `monitor` command. // 4. Run this test and yank the updated cache key from the redis logs. - let known_cache_key = "plan.5abb5fecf7df056396fb90fdf38d430b8c1fec55ec132fde878161608af18b76.4c45433039407593557f8a982dafd316a66ec03f0e1ed5fa1b7ef8060d76e8ec.3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112.4f918cb09d5956bea87fe8addb4db3bd16de2cdf935e899cf252cac5528090e4.f68a33e37534ac1e1f19e929f285ebacd55a8807eb076d955935bcc3aad58320"; + let known_cache_key = "plan:v2.7.1:5abb5fecf7df056396fb90fdf38d430b8c1fec55ec132fde878161608af18b76:4c45433039407593557f8a982dafd316a66ec03f0e1ed5fa1b7ef8060d76e8ec:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:2bf7810d3a47b31d8a77ebb09cdc784a3f77306827dc55b06770030a858167c7"; let config = RedisConfig::from_url("redis://127.0.0.1:6379")?; let client = RedisClient::new(config, None, None, None); @@ -37,7 +37,7 @@ mod test { .configuration_json(json!({ "supergraph": { "query_planning": { - "experimental_cache": { + "cache": { "in_memory": { "limit": 2 }, @@ -272,7 +272,7 @@ mod test { let supergraph = apollo_router::TestHarness::builder() .with_subgraph_network_requests() .configuration_json(json!({ - "experimental_entity_cache": { + "preview_entity_cache": { "redis": { "urls": ["redis://127.0.0.1:6379"], "ttl": "2s" @@ -376,7 +376,7 @@ mod test { let supergraph = apollo_router::TestHarness::builder() .with_subgraph_network_requests() .configuration_json(json!({ - "experimental_entity_cache": { + "preview_entity_cache": { "redis": { "urls": ["redis://127.0.0.1:6379"], "ttl": "2s" @@ -570,7 +570,7 @@ mod test { let supergraph = apollo_router::TestHarness::builder() .with_subgraph_network_requests() .configuration_json(json!({ - "experimental_entity_cache": { + "preview_entity_cache": { "redis": { "urls": ["redis://127.0.0.1:6379"], "ttl": "2s" diff --git a/apollo-router/tests/snapshots/lifecycle_tests__cli_config_preview.snap b/apollo-router/tests/snapshots/lifecycle_tests__cli_config_preview.snap index c77db0d03f..d84f144654 100644 --- 
a/apollo-router/tests/snapshots/lifecycle_tests__cli_config_preview.snap +++ b/apollo-router/tests/snapshots/lifecycle_tests__cli_config_preview.snap @@ -7,5 +7,7 @@ Exit code: Some(0) stderr: stdout: -This Router version has no preview configuration +List of all preview configurations with related GitHub discussions: + + - preview_entity_cache: https://github.com/apollographql/router/discussions/4592 diff --git a/apollo-router/tests/telemetry/metrics.rs index 1929366cb8..5d859a1e40 100644 --- a/apollo-router/tests/telemetry/metrics.rs +++ b/apollo-router/tests/telemetry/metrics.rs @@ -42,6 +42,7 @@ async fn test_metrics_reloading() { router.assert_metrics_contains(r#"apollo_router_cache_hit_count_total{kind="query planner",storage="memory",otel_scope_name="apollo/router"} 4"#, None).await; router.assert_metrics_contains(r#"apollo_router_cache_miss_count_total{kind="query planner",storage="memory",otel_scope_name="apollo/router"} 2"#, None).await; + router.assert_metrics_contains(r#"apollo_router_http_request_duration_seconds_bucket{status="200",otel_scope_name="apollo/router",le="100"}"#, None).await; router .assert_metrics_contains(r#"apollo_router_cache_hit_time"#, None) .await; diff --git a/dev-docs/logging.md new file mode 100644 index 0000000000..abf7ef32c1 --- /dev/null +++ b/dev-docs/logging.md @@ -0,0 +1,137 @@ +# Logging + +The Router uses tokio tracing for logging. When writing code, make sure to include log statements that will help users debug their own issues. +To ensure a consistent experience for our users, follow the guidelines below. + +## Guidelines + +### Don't use variable interpolation +Log statements should be fixed and should not use variable interpolation. This allows users to filter logs by message. + +#### Good + +```rust +debug!(request, "received request"); +``` + +#### Bad + +```rust +debug!("received request: {}", request); +``` + +### Make the error message short and concise + +If actions can be taken to resolve the error, include them in an `actions` attribute. + +#### Good +```rust +error!(actions = ["check that the request is valid on the client, and modify the router config to allow the request"], "bad request"); +``` + +#### Bad +```rust +error!(request, "bad request, check that the request is valid on the client, and modify the router config to allow the request"); +``` + +### Use otel attributes +When adding fields to an error message, check whether an attribute is already defined in [OpenTelemetry semantic conventions](https://opentelemetry.io/docs/specs/semconv/). +By using these well-defined attributes, APM providers have a better chance of understanding the error. + +#### Good +```rust +error!(url.full = url, "bad request"); +``` + +#### Bad +```rust +error!(url, "bad request"); +``` + +### Include caught error as `exception.message` field +`exception.message` is used to capture the error message of a caught error. + +See [OpenTelemetry semantic conventions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-logs/) for more information. + +#### Good +```rust +error!(exception.message = err, "bad request"); +``` + +#### Bad +```rust +error!("bad request {}", err); +``` + +### Include error type as `exception.type` field +`exception.type` is used to capture the class of an error message; in our case this translates to the error code. 
+ +See [OpenTelemetry semantic conventions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-logs/) for more information. + +#### Good +```rust +error!(exception.type = err.code(), "bad request"); +``` + +#### Bad +```rust +error!(exception.type = "MyError", "bad request"); +``` + + +## Testing +Log statements can be captured during a test by attaching a subscriber with `assert_snapshot_subscriber!()`. + +Do add tests for logging: it's very low overhead, and the act of seeing log statements in a test can help you think about what you are logging and how to help the user. + +Under the hood, `insta` is used to assert that a YAML version of the log statements is identical. For example, here is the output of a test with three log statements: + +```yaml +--- +source: apollo-router/src/plugins/authentication/tests.rs +expression: yaml +--- +- fields: + alg: UnknownAlg + reason: "unknown variant `UnknownAlg`, expected one of `HS256`, `HS384`, `HS512`, `ES256`, `ES384`, `RS256`, `RS384`, `RS512`, `PS256`, `PS384`, `PS512`, `EdDSA`" + index: 2 + level: WARN + message: "ignoring a key since it is not valid, enable debug logs to full content" +- fields: + alg: "" + reason: "invalid value: map, expected map with a single key" + index: 3 + level: WARN + message: "ignoring a key since it is not valid, enable debug logs to full content" +- fields: + alg: ES256 + reason: "invalid type: string \"Hmm\", expected a sequence" + index: 5 + level: WARN + message: "ignoring a key since it is not valid, enable debug logs to full content" +``` + + + +#### Testing Sync +Use `subscriber::with_default` to attach a subscriber for the duration of a block. + +```rust + #[test] + fn test_sync() { + subscriber::with_default(assert_snapshot_subscriber!(), || { ... }) + } +``` + +#### Testing Async + +Use `with_subscriber` to attach a subscriber to an async block. 
+ +```rust + #[tokio::test] + async fn test_async() { + async{...}.with_subscriber(assert_snapshot_subscriber!()) + } +``` + + diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index ed7a2fdc85..8280a4d960 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.39.1 + image: ghcr.io/apollographql/router:v1.40.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 04185392e1..912f9b5c55 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.39.1 + image: ghcr.io/apollographql/router:v1.40.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index 0cfc1577e0..ba2a7e5848 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.39.1 + image: ghcr.io/apollographql/router:v1.40.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/docs/source/config.json b/docs/source/config.json index 85820080da..1498552029 100644 --- a/docs/source/config.json +++ b/docs/source/config.json @@ -1,5 +1,5 @@ { - "title": "Router (self-hosted)", + "title": "Self-Hosted Router", "algoliaFilters": [ "docset:router" ], @@ -23,6 +23,13 @@ [ "enterprise" ] + ], + "Entity caching": [ + "/configuration/entity-caching", + [ + "enterprise", + "preview" + ] ] }, "Debugging": { diff --git a/docs/source/configuration/distributed-caching.mdx b/docs/source/configuration/distributed-caching.mdx index 32f10f0870..c5e8a69d70 100644 --- a/docs/source/configuration/distributed-caching.mdx +++ b/docs/source/configuration/distributed-caching.mdx @@ -92,7 +92,7 @@ To enable distributed caching of query plans, add the following to your router's ```yaml title="router.yaml" supergraph: query_planning: - experimental_cache: + cache: redis: #highlight-line urls: ["redis://..."] #highlight-line username: admin/123 # Optional, can be part of the urls directly, mainly useful if you have special character like '/' in your password that doesn't work in url. This field takes precedence over the username in the URL diff --git a/docs/source/configuration/entity-caching.mdx b/docs/source/configuration/entity-caching.mdx new file mode 100644 index 0000000000..aac410d80d --- /dev/null +++ b/docs/source/configuration/entity-caching.mdx @@ -0,0 +1,140 @@ +--- +title: Subgraph entity caching for the Apollo Router +subtitle: Redis-backed caching for entities +description: Subgraph entity caching for Apollo Router with GraphOS Enterprise. Cache and reuse individual entities across queries. 
+minVersion: 1.40.0 +--- + + + + + +Learn how the Apollo Router can cache subgraph query responses using Redis to improve your query latency for entities in the supergraph. + +## Overview + +An entity gets its fields from one or more subgraphs. To respond to a client request for an entity, the Apollo Router must make multiple subgraph requests. Different clients requesting the same entity can make redundant, identical subgraph requests. + +Entity caching enables the router to respond to identical subgraph queries with cached subgraph responses. The router uses Redis to cache data from subgraph query responses. Because cached data is keyed per subgraph and entity, different clients making the same client query—with the same or different query arguments—hit the same cache entries of subgraph response data. + +### Benefits of entity caching + +Compared to caching entire client responses, entity caching supports finer control over: +- the time to live (TTL) of cached data +- the amount of data being cached + +When caching an entire client response, the router must store it with a shorter TTL because application data can change often. Real-time data needs more frequent updates. + +A client-response cache might not be shareable between users, because the application data might contain personal and private information. A client-response cache might also duplicate a lot of data between client responses. + +For example, consider the `Products` and `Inventory` subgraphs from the [Entities guide](/federation/entities): + + + +```graphql title="Products subgraph" disableCopy=true +type Product @key(fields: "id") { + id: ID! + name: String! + price: Int +} +``` + +```graphql title="Inventory subgraph" disableCopy=true +type Product @key(fields: "id") { + id: ID! + inStock: Boolean! +} +``` + + + +Assume the client for a shopping cart application requests the following for each product in the cart: +- The product's name and price from the `Products` subgraph. +- The product's availability in inventory from the `Inventory` subgraph. + +If caching the entire client response, it would require a short TTL because the cart data can change often and the real-time inventory has to be up to date. A client-response cache couldn't be shared between users, because each cart is personal. A client-response cache might also duplicate data because the same products might appear in multiple carts. + +With entity caching enabled for this example, the router can: +- Store each product's description and price separately with a long TTL. +- Minimize the number of subgraph requests made for each client request, with some client requests fetching all product data from the cache and requiring no subgraph requests. +- Share the product cache between all users. +- Cache the cart per user, with a small amount of data. +- Cache inventory data with a short TTL or not cache it at all. + +## Use entity caching + +Follow this guide to enable and configure entity caching in the Apollo Router. + +### Prerequisites + +To use entity caching in the Apollo Router, you must set up: + +- A Redis instance or cluster that your router instances can communicate with +- A [GraphOS Enterprise plan](https://www.apollographql.com/pricing/) that [connects your router to GraphOS](./overview/#environment-variables). + +### Configure router for entity caching + +In `router.yaml`, configure `preview_entity_cache`: +- Enable entity caching globally. 
+- Configure Redis using the same conventions described in [distributed caching](./distributed-caching#redis-url-configuration). +- Configure entity caching per subgraph, with overrides per subgraph for disabling entity caching and TTL. + +For example: + +```yaml title="router.yaml" +# Enable entity caching globally +preview_entity_cache: + enabled: true + + # Configure Redis + redis: + urls: ["redis://..."] + timeout: 5ms # Optional, by default: 2ms + ttl: 24h # Optional, by default no expiration + + # Configure entity caching per subgraph + subgraphs: + products: + ttl: 120s # overrides the global TTL + inventory: + enabled: false # disable for a specific subgraph +``` + +### Configure time to live (TTL) + +Besides configuring a global TTL for all the entries in Redis, the Apollo Router also honors the [`Cache-Control` header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control) returned with the subgraph response. It generates a `Cache-Control` header for the client response by aggregating the TTL information from all response parts. + +### Customize Redis cache key + +If you need to store data for a particular request in different cache entries, you can configure the cache key through the `apollo_entity_cache::key` context entry. + +This entry contains an object with the `all` field to affect all subgraph requests under one client request, and fields named after subgraph operation names to affect individual subgraph queries. The field's value can be any valid JSON value (object, string, etc). + +```json +{ + "all": 1, + "subgraph_operation1": "key1", + "subgraph_operation2": { + "data": "key2" + } +} + +``` + +## Implementation notes + +### Responses with errors not cached + +To prevent transient errors from affecting the cache for a long duration, subgraph responses with errors are not cached. + +### Authorization and entity caching + +When used alongside the router's [authorization directives](./authorization), cache entries are separated by authorization context. If a query contains fields that need a specific scope, the requests providing that scope have different cache entries from those not providing the scope. This means that data requiring authorization can still be safely cached and even shared across users, without needing invalidation when a user's roles change because their requests are automatically directed to a different part of the cache. + +### Schema updates and entity caching + +On schema updates, the router ensures that queries unaffected by the changes keep their cache entries. Queries with affected fields need to be cached again to ensure the router doesn't serve invalid data from before the update. + +### Entity cache invalidation not supported + +Cache invalidation is not yet supported and is planned for a future release. diff --git a/docs/source/configuration/in-memory-caching.mdx b/docs/source/configuration/in-memory-caching.mdx index c17349e5dc..6b7741a1b8 100644 --- a/docs/source/configuration/in-memory-caching.mdx +++ b/docs/source/configuration/in-memory-caching.mdx @@ -27,7 +27,7 @@ The Apollo Router enables query plan caching by default. In your router's [YAML ```yaml title="router.yaml" supergraph: query_planning: - experimental_cache: + cache: in_memory: limit: 512 # This is the default value. ``` @@ -39,7 +39,7 @@ supergraph: query_planning: # Pre-plan the 100 most used operations when the supergraph changes. (Default is "0", disabled.) 
warmed_up_queries: 100 - experimental_cache: + cache: in_memory: limit: 512 ``` diff --git a/docs/source/configuration/overview.mdx index caab672bc5..68a80f1425 100644 --- a/docs/source/configuration/overview.mdx +++ b/docs/source/configuration/overview.mdx @@ -446,7 +446,7 @@ Path parameters and wildcards are supported. For example: - `/:my_dynamic_prefix/graphql` matches both `/my_project_a/graphql` and `/my_project_b/graphql`. - `/graphql/*` matches `/graphql/my_project_a` and `/graphql/my_project_b`. -- `/g*` matches `/graphql` and `/gateway`. +- `/g*` matches `/graphql`, `/gateway` and `/graphql/api`. @@ -466,7 +466,7 @@ supergraph: ### Debugging -- To configure logging, see [Logging in the Apollo Router](./logging). +- To configure logging, see [Logging in the Apollo Router](./telemetry/exporters/logging/overview). - To configure the inclusion of subgraph errors, see [Subgraph error inclusion](./subgraph-error-inclusion). @@ -540,7 +540,9 @@ By default, the Apollo Router stores the following data in its in-memory cache t You can configure certain caching behaviors for generated query plans and APQ (but not introspection responses). For details, see [In-memory caching in the Apollo Router](./in-memory-caching/). -**If you have a GraphOS Enterprise plan,** you can also configure a Redis-backed _distributed_ cache that enables multiple router instances to share cached values. For details, see [Distributed caching in the Apollo Router](./distributed-caching/). +**If you have a GraphOS Enterprise plan:** +- You can configure a Redis-backed _distributed_ cache that enables multiple router instances to share cached values. For details, see [Distributed caching in the Apollo Router](./distributed-caching/). +- You can configure a Redis-backed _entity_ cache that enables a client query to retrieve cached entity data split between subgraph responses. For details, see [Subgraph entity caching in the Apollo Router](./entity-caching/). ### Safelisting with persisted queries diff --git a/docs/source/configuration/telemetry/exporters/logging/stdout.mdx index 022afd96e4..29432c4a56 100644 --- a/docs/source/configuration/telemetry/exporters/logging/stdout.mdx +++ b/docs/source/configuration/telemetry/exporters/logging/stdout.mdx @@ -42,6 +42,27 @@ telemetry: format: text #highlight-line ``` +### `tty_format` + +You can configure the log format when you're running in an interactive shell. This is useful during development. + +If both `format` and `tty_format` are configured, then the output depends on the environment where the Router is run: + +* In an interactive shell, `tty_format` will take precedence. +* In a non-interactive shell, `format` will take precedence. + +You can explicitly set the format in [`router.yaml`](../../../overview#yaml-config-file) with `telemetry.exporters.logging.stdout.tty_format`: + +```yaml title="router.yaml" +telemetry: + exporters: + logging: + stdout: + enabled: true + format: json + tty_format: text #highlight-line +``` + ### `rate_limit` The rate at which log messages are produced can become too high, especially for request processing errors. To prevent the router from filling its logs with redundant messages, you can use the `rate_limit` option to set the logging rate limit. 
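As a concrete illustration, here is a minimal sketch of a rate-limited stdout logger. Treat the nested `capacity` and `interval` fields as assumptions about the option's shape, included for illustration rather than as authoritative reference:

```yaml title="router.yaml"
telemetry:
  exporters:
    logging:
      stdout:
        enabled: true
        rate_limit:
          # Assumed shape: allow at most `capacity` instances of a given
          # message per `interval`; messages over the limit are dropped.
          capacity: 10
          interval: 5s
```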
@@ -69,6 +90,7 @@ For configuration options specific to each output format, see the [`text`](#text |-----------------------|----------------------------|-----------------|------------------------------------------------------| | `enabled` | `true`\|`false` | `false` | Enable or disable stdout logging. | | `format` | | `text`\|`json` | See the [format documentation](#format) for details. | +| `tty_format` | | `text`\|`json` | See the [format documentation](#format) for details. | ## Logging output format diff --git a/docs/source/configuration/telemetry/exporters/metrics/overview.mdx index 45d80b0ba7..943f45f0fa 100644 --- a/docs/source/configuration/telemetry/exporters/metrics/overview.mdx +++ b/docs/source/configuration/telemetry/exporters/metrics/overview.mdx @@ -22,9 +22,10 @@ In [`router.yaml`](../../../overview/#yaml-config-file), you configure router me Common metrics configuration contains global settings for all exporters: * [Service name](#service_name) -* [Resource attributes](#resource-attribute) -* [Custom historgram buckets](#custom-histogram-buckets) +* [Resource attributes](#resource) +* [Custom default histogram buckets](#buckets) * [`apollo_router_http_requests` attributes](#attributes) +* [OpenTelemetry views](#views) ### `service_name` @@ -183,6 +184,33 @@ OpenTelemetry includes many [standard attributes](https://opentelemetry.io/docs/ +### `views` + +Views let you override the default attributes and default buckets of specific metrics. + +```yaml title="router.yaml" +telemetry: + exporters: + metrics: + common: + service_name: apollo-router + views: + - name: apollo_router_http_request_duration_seconds # Instrument name you want to edit. You can use wildcards in names; to target all instruments, use '*' + unit: "ms" # (Optional) override the unit + description: "my new description of this metric" # (Optional) override the description + aggregation: # (Optional) + histogram: + buckets: # Override default buckets configured for this histogram + - 1 + - 2 + - 3 + - 4 + - 5 + allowed_attribute_keys: # (Optional) Keep only listed attributes on the metric + - status + +``` + ## Metrics common reference | Attribute | Default | Description | diff --git a/docs/source/configuration/telemetry/instrumentation/selectors.mdx index 2df3485b28..d2749e59a2 100644 --- a/docs/source/configuration/telemetry/instrumentation/selectors.mdx +++ b/docs/source/configuration/telemetry/instrumentation/selectors.mdx @@ -63,21 +63,22 @@ The supergraph service is executed after query parsing but before query execution The subgraph service executes multiple times during query execution, with each execution representing a call to a single subgraph. It is GraphQL-centric and deals with GraphQL queries and responses. 
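As a concrete illustration of the subgraph selectors listed in the table below, here is a hedged sketch of custom span attributes. The attribute keys (`subgraph.operation.name`, `request.id`) and the `x-request-id` header are illustrative choices, not required names:

```yaml title="router.yaml"
telemetry:
  instrumentation:
    spans:
      subgraph:
        attributes:
          # Record the subgraph operation name as a string value
          "subgraph.operation.name":
            subgraph_operation_name: string
          # Copy a subgraph request header onto the span
          "request.id":
            subgraph_request_header: "x-request-id"
```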
-| Selector | Defaultable | Values | Description | |-----------------------------|-------------|-------------------------------------|----------------------------------------------| -| `subgraph_operation_name` | Yes | | The operation name from the subgraph query | -| `subgraph_operation_kind` | No | `query`\|`mutation`\|`subscription` | The operation kind from the subgraph query | -| `subgraph_query` | Yes | | The graphql query to the subgraph | -| `subgraph_query_variable` | Yes | | The name of a subgraph query variable | -| `subgraph_response_body` | Yes | | Json Path into the subgraph response body | -| `subgraph_request_header` | Yes | | The name of a subgraph request header | -| `subgraph_response_header` | Yes | | The name of a subgraph response header | -| `subgraph_response_status` | Yes | | The name of a subgraph response header | -| `supergraph_operation_name` | Yes | | The operation name from the supergraph query | -| `supergraph_operation_kind` | Yes | `query`\|`mutation`\|`subscription` | The operation kind from the supergraph query | -| `supergraph_query` | Yes | | The graphql query to the supergraph | -| `supergraph_query_variable` | Yes | | The name of a supergraph query variable | -| `request_context` | Yes | | The name of a request context key | -| `response_context` | Yes | | The name of a response context key | -| `baggage` | Yes | | The name of a baggage item | -| `env` | Yes | | The name of an environment variable | +| Selector | Defaultable | Values | Description | +|-----------------------------|-------------|-------------------------------------|---------------------------------------------------------------------------------| +| `subgraph_operation_name` | Yes | | The operation name from the subgraph query | +| `subgraph_operation_kind` | No | `query`\|`mutation`\|`subscription` | The operation kind from the subgraph query | +| `subgraph_query` | Yes | | The graphql query to the subgraph | +| `subgraph_query_variable` | Yes | | The name of a subgraph query variable | +| `subgraph_response_data` | Yes | | JSON Path into the subgraph response body data (it might impact performance) | +| `subgraph_response_errors` | Yes | | JSON Path into the subgraph response body errors (it might impact performance) | +| `subgraph_request_header` | Yes | | The name of a subgraph request header | +| `subgraph_response_header` | Yes | | The name of a subgraph response header | +| `subgraph_response_status` | Yes | | The status of a subgraph response | +| `supergraph_operation_name` | Yes | | The operation name from the supergraph query | +| `supergraph_operation_kind` | Yes | `query`\|`mutation`\|`subscription` | The operation kind from the supergraph query | +| `supergraph_query` | Yes | | The graphql query to the supergraph | +| `supergraph_query_variable` | Yes | | The name of a supergraph query variable | +| `request_context` | Yes | | The name of a request context key | +| `response_context` | Yes | | The name of a response context key | +| `baggage` | Yes | | The name of a baggage item | +| `env` | Yes | | The name of an environment variable | diff --git a/docs/source/configuration/telemetry/instrumentation/spans.mdx index 74d94e7588..417d9c3382 100644 --- a/docs/source/configuration/telemetry/instrumentation/spans.mdx +++ b/docs/source/configuration/telemetry/instrumentation/spans.mdx @@ -39,7 +39,7 @@ Attributes may be drawn from [standard attributes](./standard-attributes) or [se -Custom attributes for 
spans via selector is an Enterprise Feature that requires a [GraphOS Enterprise plan](/graphos/enterprise/). +Granular customization of attributes on spans is an Enterprise Feature that requires a [GraphOS Enterprise plan](/graphos/enterprise/). diff --git a/docs/source/containerization/kubernetes.mdx index a57bfea2dc..881a7e7292 100644 --- a/docs/source/containerization/kubernetes.mdx +++ b/docs/source/containerization/kubernetes.mdx @@ -54,7 +54,7 @@ Follow this guide to deploy the Apollo Router using Helm to install the basic ch Each router chart has a `values.yaml` file with router and deployment settings. The released, unedited file has a few explicit settings, including: -* Default container ports for the router's [HTTP server](../configuration/overview/#listen-address), [health check endpoint](../configuration/health-checks), and [metrics endpoint](../configuration/metrics). +* Default container ports for the router's [HTTP server](../configuration/overview/#listen-address), [health check endpoint](../configuration/health-checks), and [metrics endpoint](../configuration/telemetry/exporters/metrics/overview). * A command-line argument to enable [hot reloading of the router](../configuration/overview/#--hr----hot-reload). * A single replica. @@ -146,7 +146,7 @@ helm list --namespace ## Deploy with metrics endpoints -The router supports [metrics endpoints for Prometheus and OpenTelemetry protocol (OTLP)](../configuration/metrics). A [basic deployment](#basic-deployment) doesn't enable metrics endpoints, because the router chart disables both Prometheus (explicitly) and OTLP (by omission). +The router supports [metrics endpoints for Prometheus and OpenTelemetry protocol (OTLP)](../configuration/telemetry/exporters/metrics/overview). A [basic deployment](#basic-deployment) doesn't enable metrics endpoints, because the router chart disables both Prometheus (explicitly) and OTLP (by omission). To enable metrics endpoints in your deployed router through a YAML configuration file: diff --git a/docs/source/customizations/coprocessor.mdx index c0d6142314..12c4c1dd49 100644 --- a/docs/source/customizations/coprocessor.mdx +++ b/docs/source/customizations/coprocessor.mdx @@ -272,7 +272,8 @@ Properties of the JSON body are divided into two high-level categories: }, "body": { "query": "query Long {\n me {\n name\n}\n}", - "operationName": "MyQuery" + "operationName": "MyQuery", + "variables": {} }, "context": { "entries": { diff --git a/docs/source/customizations/rhai-api.mdx index 5a035ec25d..bdafa98344 100644 --- a/docs/source/customizations/rhai-api.mdx +++ b/docs/source/customizations/rhai-api.mdx @@ -113,7 +113,7 @@ fn supergraph_service(service) { } ``` -Rhai throws at the `map_request` layer behave the same as `ControlFlow::Break`, which is explained in the [external extensibility section](https://www.apollographql.com/docs/router/configuration/rhai). +A Rhai `throw` at the `map_request` layer behaves the same as `ControlFlow::Break`, which is explained in the [external extensibility section](./coprocessor/#terminating-a-client-request). If the supplied status code is not a valid HTTP status code, then a `500` response code will result. 
diff --git a/docs/source/enterprise-features.mdx index 4361febc09..a29e199b75 100644 --- a/docs/source/enterprise-features.mdx +++ b/docs/source/enterprise-features.mdx @@ -16,7 +16,7 @@ Try out these Enterprise features for free with an [Enterprise trial](/graphos/o - **Real-time updates** via [GraphQL subscriptions](./executing-operations/subscription-support/) - **Authentication of inbound requests** via [JSON Web Token (JWT)](./configuration/authn-jwt/) - [**Authorization** of specific fields and types](./configuration/authorization) through the [`@requiresScopes`](./configuration/authorization#requiresscopes), [`@authenticated`](./configuration/authorization#authenticated), and [`@policy`](./configuration/authorization#policy) directives -- Redis-backed [**distributed caching** of query plans and persisted queries](./configuration/distributed-caching/) +- Redis-backed [**distributed caching** of query plans and persisted queries](./configuration/distributed-caching/) and [**subgraph entity caching**](./configuration/entity-caching/) - **Custom request handling** in any language via [external coprocessing](./customizations/coprocessor/) - **Mitigation of potentially malicious requests** via [operation limits](./configuration/operation-limits) and [safelisting with persisted queries](./configuration/persisted-queries) - **Custom instrumentation and telemetry**, including [custom attributes for spans](./configuration/telemetry/instrumentation/spans/#attributes). @@ -116,7 +116,7 @@ Follow these steps to configure an Apollo Router to use an offline Enterprise li * [`APOLLO_SUPERGRAPH_PATH`](./configuration/overview/#-s----supergraph) environment variable, containing an absolute or relative path to supergraph schema file * [`APOLLO_SUPERGRAPH_URLS`](./configuration/overview/#-s----supergraph) environment variable, containing URLs to supergraph schemas -1. (**Recommended**) Configure the router to report usage metrics to GraphOS in a best-effort basis by setting both the [`APOLLO_KEY`](./configuration/overview/#apollo_key) and [`APOLLO_GRAPH_REF`](../configuration/overview#apollo_graph_ref) environment variables. +1. (**Recommended**) Configure the router to report usage metrics to GraphOS on a best-effort basis by setting both the [`APOLLO_KEY`](./configuration/overview/#apollo_key) and [`APOLLO_GRAPH_REF`](./configuration/overview#apollo_graph_ref) environment variables. These metrics are necessary for several important GraphOS features (operations checks, field insights, operation traces, contracts). Sending them best-effort incurs no performance or uptime penalties. diff --git a/docs/source/executing-operations/subscription-multipart-protocol.mdx index b73b1b745c..00e910123e 100644 --- a/docs/source/executing-operations/subscription-multipart-protocol.mdx +++ b/docs/source/executing-operations/subscription-multipart-protocol.mdx @@ -5,7 +5,7 @@ description: For GraphQL clients communicating with the Apollo Router To execute GraphQL subscription operations on the Apollo Router, client apps do _not_ communicate over WebSocket. Instead, they use **HTTP with multipart responses**. This multipart protocol is built on the same [Incremental Delivery over HTTP](https://github.com/graphql/graphql-over-http/blob/main/rfcs/IncrementalDelivery.md) spec that the Apollo Router uses to support [the `@defer` directive](./defer-support/). 
-Use this reference if you're adding protocol support to a new GraphQL client library. Apollo Client for [Web](/react/data/subscriptions#http), [Kotlin](/kotlin/essentials/subscriptions#configuring-http-subscriptions), and [iOS](/ios/fetching/subscriptions#http) all support this protocol. +Use this reference if you're adding protocol support to a new GraphQL client library. [Apollo Client](/react/data/subscriptions#http), [Apollo Kotlin](/kotlin/essentials/subscriptions#configuring-http-subscriptions), and [Apollo iOS](/ios/fetching/subscriptions#http) all support this protocol. Apollo Client also provides network adapters for the [Relay](/react/data/subscriptions#relay) and [urql](/react/data/subscriptions#urql) libraries. ## Executing a subscription diff --git a/docs/source/executing-operations/subscription-support.mdx b/docs/source/executing-operations/subscription-support.mdx index 0fd9ba53ef..e427768274 100644 --- a/docs/source/executing-operations/subscription-support.mdx +++ b/docs/source/executing-operations/subscription-support.mdx @@ -76,7 +76,7 @@ flowchart LR; - **The client does _not_ use a WebSocket protocol.** Instead, it receives updates via [multipart HTTP responses](./subscription-multipart-protocol/). - By using HTTP for subscriptions, clients can execute _all_ GraphQL operation types over HTTP instead of using two different protocols. - - Apollo Client for [Web](/react/data/subscriptions#http), [Kotlin](/kotlin/essentials/subscriptions#configuring-http-subscriptions), and [iOS](/ios/fetching/subscriptions#http) all support GraphQL subscriptions over HTTP with minimal configuration. See each library's documentation for details. + - [Apollo Client](/react/data/subscriptions#http), [Apollo Kotlin](/kotlin/essentials/subscriptions#configuring-http-subscriptions), and [Apollo iOS](/ios/fetching/subscriptions#http) all support GraphQL subscriptions over HTTP with minimal configuration. See each library's documentation for details. Apollo Client also provides network adapters for the [Relay](/react/data/subscriptions#relay) and [urql](/react/data/subscriptions#urql) libraries. 2. When your router receives a subscription, it executes that _same_ subscription against whichever subgraph defines the requested field (`stockPricesChanged` in the example above). diff --git a/examples/jwt-claims/rhai/jwks.json b/examples/jwt-claims/rhai/jwks.json index abaae12057..41d8e019d0 100644 --- a/examples/jwt-claims/rhai/jwks.json +++ b/examples/jwt-claims/rhai/jwks.json @@ -4,7 +4,7 @@ "kty": "oct", "kid": "key1", "alg": "HS256", - "k": "c2VjcmV0Cg==", + "k": "c2VjcmV0Cg", "use": "sig" }, { diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index ffdf9763d6..a253b523ae 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.39.1 +version: 1.40.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "v1.39.1" +appVersion: "v1.40.0" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index c23c818397..6604062b01 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.39.1](https://img.shields.io/badge/Version-1.39.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.39.1](https://img.shields.io/badge/AppVersion-v1.39.1-informational?style=flat-square) +![Version: 1.40.0](https://img.shields.io/badge/Version-1.40.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.40.0](https://img.shields.io/badge/AppVersion-v1.40.0-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.39.1 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.40.0 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.39.1 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.39.1 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.40.0 --values my-values.yaml ``` _See [configuration](#configuration) below._ @@ -95,4 +95,4 @@ helm show values oci://ghcr.io/apollographql/helm-charts/router | virtualservice.enabled | bool | `false` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) +Autogenerated from chart metadata using [helm-docs v1.11.3](https://github.com/norwoodj/helm-docs/releases/v1.11.3) diff --git a/helm/chart/router/templates/virtualservice.yaml b/helm/chart/router/templates/virtualservice.yaml index 90bbc8d32a..1112750a3c 100644 --- a/helm/chart/router/templates/virtualservice.yaml +++ b/helm/chart/router/templates/virtualservice.yaml @@ -21,9 +21,14 @@ metadata: spec: hosts: - "*" + {{- if .Values.virtualservice.gatewayName }} gateways: - {{ .Values.virtualservice.gatewayName }} - {{- if .Values.virtualservice.http }} + {{- else if .Values.virtualservice.gatewayNames }} + gateways: + {{- toYaml .Values.virtualservice.gatewayNames | nindent 4 }} + {{- end }} + {{- if .Values.virtualservice.http }} http: {{- if .Values.virtualservice.http.main.enabled }} - name: "router-graphql-routes" diff --git a/helm/chart/router/values.yaml b/helm/chart/router/values.yaml index bfaa8a5000..40d4980697 100644 --- a/helm/chart/router/values.yaml +++ b/helm/chart/router/values.yaml @@ -160,7 +160,10 @@ ingress: virtualservice: enabled: false # namespace: "" - # gatewayName: "" + # gatewayName: "" # Deprecated in favor of gatewayNames + # gatewayNames: [] + # - "gateway-1" + # - "gateway-2" # http: # main: # # set enabled to true to add diff --git a/licenses.html b/licenses.html index 505bf9994d..e320383d1b 100644 --- a/licenses.html +++ b/licenses.html @@ -44,8 +44,8 @@

Third Party Licenses

Overview of licenses:

-  • Apache License 2.0 (512)
-  • MIT License (160)
+  • Apache License 2.0 (511)
+  • MIT License (151)
  • BSD 3-Clause "New" or "Revised" License (12)
  • ISC License (11)
  • BSD 2-Clause "Simplified" License (3)
  • @@ -260,6 +260,7 @@

    Used by:

  • pin-project
  • pin-project-internal
  • pin-project-lite
+  • portable-atomic
  • sync_wrapper
  • zstd-safe
  • zstd-sys
  • @@ -448,9 +449,7 @@

    Apache License 2.0

    Used by:

                                      Apache License
    @@ -641,7 +640,427 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2022 Jacob Pratt et al. + Copyright 2022 Jacob Pratt et al. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +
    + +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    +                                 Apache License
    +                           Version 2.0, January 2004
    +                        http://www.apache.org/licenses/
    +
    +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +   1. Definitions.
    +
    +      "License" shall mean the terms and conditions for use, reproduction,
    +      and distribution as defined by Sections 1 through 9 of this document.
    +
    +      "Licensor" shall mean the copyright owner or entity authorized by
    +      the copyright owner that is granting the License.
    +
    +      "Legal Entity" shall mean the union of the acting entity and all
    +      other entities that control, are controlled by, or are under common
    +      control with that entity. For the purposes of this definition,
    +      "control" means (i) the power, direct or indirect, to cause the
    +      direction or management of such entity, whether by contract or
    +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +      outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +      "You" (or "Your") shall mean an individual or Legal Entity
    +      exercising permissions granted by this License.
    +
    +      "Source" form shall mean the preferred form for making modifications,
    +      including but not limited to software source code, documentation
    +      source, and configuration files.
    +
    +      "Object" form shall mean any form resulting from mechanical
    +      transformation or translation of a Source form, including but
    +      not limited to compiled object code, generated documentation,
    +      and conversions to other media types.
    +
    +      "Work" shall mean the work of authorship, whether in Source or
    +      Object form, made available under the License, as indicated by a
    +      copyright notice that is included in or attached to the work
    +      (an example is provided in the Appendix below).
    +
    +      "Derivative Works" shall mean any work, whether in Source or Object
    +      form, that is based on (or derived from) the Work and for which the
    +      editorial revisions, annotations, elaborations, or other modifications
    +      represent, as a whole, an original work of authorship. For the purposes
    +      of this License, Derivative Works shall not include works that remain
    +      separable from, or merely link (or bind by name) to the interfaces of,
    +      the Work and Derivative Works thereof.
    +
    +      "Contribution" shall mean any work of authorship, including
    +      the original version of the Work and any modifications or additions
    +      to that Work or Derivative Works thereof, that is intentionally
    +      submitted to Licensor for inclusion in the Work by the copyright owner
    +      or by an individual or Legal Entity authorized to submit on behalf of
    +      the copyright owner. For the purposes of this definition, "submitted"
    +      means any form of electronic, verbal, or written communication sent
    +      to the Licensor or its representatives, including but not limited to
    +      communication on electronic mailing lists, source code control systems,
    +      and issue tracking systems that are managed by, or on behalf of, the
    +      Licensor for the purpose of discussing and improving the Work, but
    +      excluding communication that is conspicuously marked or otherwise
    +      designated in writing by the copyright owner as "Not a Contribution."
    +
    +      "Contributor" shall mean Licensor and any individual or Legal Entity
    +      on behalf of whom a Contribution has been received by Licensor and
    +      subsequently incorporated within the Work.
    +
    +   2. Grant of Copyright License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      copyright license to reproduce, prepare Derivative Works of,
    +      publicly display, publicly perform, sublicense, and distribute the
    +      Work and such Derivative Works in Source or Object form.
    +
    +   3. Grant of Patent License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      (except as stated in this section) patent license to make, have made,
    +      use, offer to sell, sell, import, and otherwise transfer the Work,
    +      where such license applies only to those patent claims licensable
    +      by such Contributor that are necessarily infringed by their
    +      Contribution(s) alone or by combination of their Contribution(s)
    +      with the Work to which such Contribution(s) was submitted. If You
    +      institute patent litigation against any entity (including a
    +      cross-claim or counterclaim in a lawsuit) alleging that the Work
    +      or a Contribution incorporated within the Work constitutes direct
    +      or contributory patent infringement, then any patent licenses
    +      granted to You under this License for that Work shall terminate
    +      as of the date such litigation is filed.
    +
    +   4. Redistribution. You may reproduce and distribute copies of the
    +      Work or Derivative Works thereof in any medium, with or without
    +      modifications, and in Source or Object form, provided that You
    +      meet the following conditions:
    +
    +      (a) You must give any other recipients of the Work or
    +          Derivative Works a copy of this License; and
    +
    +      (b) You must cause any modified files to carry prominent notices
    +          stating that You changed the files; and
    +
    +      (c) You must retain, in the Source form of any Derivative Works
    +          that You distribute, all copyright, patent, trademark, and
    +          attribution notices from the Source form of the Work,
    +          excluding those notices that do not pertain to any part of
    +          the Derivative Works; and
    +
    +      (d) If the Work includes a "NOTICE" text file as part of its
    +          distribution, then any Derivative Works that You distribute must
    +          include a readable copy of the attribution notices contained
    +          within such NOTICE file, excluding those notices that do not
    +          pertain to any part of the Derivative Works, in at least one
    +          of the following places: within a NOTICE text file distributed
    +          as part of the Derivative Works; within the Source form or
    +          documentation, if provided along with the Derivative Works; or,
    +          within a display generated by the Derivative Works, if and
    +          wherever such third-party notices normally appear. The contents
    +          of the NOTICE file are for informational purposes only and
    +          do not modify the License. You may add Your own attribution
    +          notices within Derivative Works that You distribute, alongside
    +          or as an addendum to the NOTICE text from the Work, provided
    +          that such additional attribution notices cannot be construed
    +          as modifying the License.
    +
    +      You may add Your own copyright statement to Your modifications and
    +      may provide additional or different license terms and conditions
    +      for use, reproduction, or distribution of Your modifications, or
    +      for any such Derivative Works as a whole, provided Your use,
    +      reproduction, and distribution of the Work otherwise complies with
    +      the conditions stated in this License.
    +
    +   5. Submission of Contributions. Unless You explicitly state otherwise,
    +      any Contribution intentionally submitted for inclusion in the Work
    +      by You to the Licensor shall be under the terms and conditions of
    +      this License, without any additional terms or conditions.
    +      Notwithstanding the above, nothing herein shall supersede or modify
    +      the terms of any separate license agreement you may have executed
    +      with Licensor regarding such Contributions.
    +
    +   6. Trademarks. This License does not grant permission to use the trade
    +      names, trademarks, service marks, or product names of the Licensor,
    +      except as required for reasonable and customary use in describing the
    +      origin of the Work and reproducing the content of the NOTICE file.
    +
    +   7. Disclaimer of Warranty. Unless required by applicable law or
    +      agreed to in writing, Licensor provides the Work (and each
    +      Contributor provides its Contributions) on an "AS IS" BASIS,
    +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +      implied, including, without limitation, any warranties or conditions
    +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +      PARTICULAR PURPOSE. You are solely responsible for determining the
    +      appropriateness of using or redistributing the Work and assume any
    +      risks associated with Your exercise of permissions under this License.
    +
    +   8. Limitation of Liability. In no event and under no legal theory,
    +      whether in tort (including negligence), contract, or otherwise,
    +      unless required by applicable law (such as deliberate and grossly
    +      negligent acts) or agreed to in writing, shall any Contributor be
    +      liable to You for damages, including any direct, indirect, special,
    +      incidental, or consequential damages of any character arising as a
    +      result of this License or out of the use or inability to use the
    +      Work (including but not limited to damages for loss of goodwill,
    +      work stoppage, computer failure or malfunction, or any and all
    +      other commercial damages or losses), even if such Contributor
    +      has been advised of the possibility of such damages.
    +
    +   9. Accepting Warranty or Additional Liability. While redistributing
    +      the Work or Derivative Works thereof, You may choose to offer,
    +      and charge a fee for, acceptance of support, warranty, indemnity,
    +      or other liability obligations and/or rights consistent with this
    +      License. However, in accepting such obligations, You may act only
    +      on Your own behalf and on Your sole responsibility, not on behalf
    +      of any other Contributor, and only if You agree to indemnify,
    +      defend, and hold each Contributor harmless for any liability
    +      incurred by, or claims asserted against, such Contributor by reason
    +      of your accepting any such warranty or additional liability.
    +
    +   END OF TERMS AND CONDITIONS
    +
    +   APPENDIX: How to apply the Apache License to your work.
    +
    +      To apply the Apache License to your work, attach the following
    +      boilerplate notice, with the fields enclosed by brackets "[]"
    +      replaced with your own identifying information. (Don't include
    +      the brackets!)  The text should be enclosed in the appropriate
    +      comment syntax for the file format. We also recommend that a
    +      file or class name and description of purpose be included on the
    +      same "printed page" as the copyright notice for easier
    +      identification within third-party archives.
    +
    +   Copyright 2023 Jacob Pratt
    +
    +   Licensed under the Apache License, Version 2.0 (the "License");
    +   you may not use this file except in compliance with the License.
    +   You may obtain a copy of the License at
    +
    +       http://www.apache.org/licenses/LICENSE-2.0
    +
    +   Unless required by applicable law or agreed to in writing, software
    +   distributed under the License is distributed on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +   See the License for the specific language governing permissions and
    +   limitations under the License.
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    +                                 Apache License
    +                           Version 2.0, January 2004
    +                        http://www.apache.org/licenses/
    +
    +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +   1. Definitions.
    +
    +      "License" shall mean the terms and conditions for use, reproduction,
    +      and distribution as defined by Sections 1 through 9 of this document.
    +
    +      "Licensor" shall mean the copyright owner or entity authorized by
    +      the copyright owner that is granting the License.
    +
    +      "Legal Entity" shall mean the union of the acting entity and all
    +      other entities that control, are controlled by, or are under common
    +      control with that entity. For the purposes of this definition,
    +      "control" means (i) the power, direct or indirect, to cause the
    +      direction or management of such entity, whether by contract or
    +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +      outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +      "You" (or "Your") shall mean an individual or Legal Entity
    +      exercising permissions granted by this License.
    +
    +      "Source" form shall mean the preferred form for making modifications,
    +      including but not limited to software source code, documentation
    +      source, and configuration files.
    +
    +      "Object" form shall mean any form resulting from mechanical
    +      transformation or translation of a Source form, including but
    +      not limited to compiled object code, generated documentation,
    +      and conversions to other media types.
    +
    +      "Work" shall mean the work of authorship, whether in Source or
    +      Object form, made available under the License, as indicated by a
    +      copyright notice that is included in or attached to the work
    +      (an example is provided in the Appendix below).
    +
    +      "Derivative Works" shall mean any work, whether in Source or Object
    +      form, that is based on (or derived from) the Work and for which the
    +      editorial revisions, annotations, elaborations, or other modifications
    +      represent, as a whole, an original work of authorship. For the purposes
    +      of this License, Derivative Works shall not include works that remain
    +      separable from, or merely link (or bind by name) to the interfaces of,
    +      the Work and Derivative Works thereof.
    +
    +      "Contribution" shall mean any work of authorship, including
    +      the original version of the Work and any modifications or additions
    +      to that Work or Derivative Works thereof, that is intentionally
    +      submitted to Licensor for inclusion in the Work by the copyright owner
    +      or by an individual or Legal Entity authorized to submit on behalf of
    +      the copyright owner. For the purposes of this definition, "submitted"
    +      means any form of electronic, verbal, or written communication sent
    +      to the Licensor or its representatives, including but not limited to
    +      communication on electronic mailing lists, source code control systems,
    +      and issue tracking systems that are managed by, or on behalf of, the
    +      Licensor for the purpose of discussing and improving the Work, but
    +      excluding communication that is conspicuously marked or otherwise
    +      designated in writing by the copyright owner as "Not a Contribution."
    +
    +      "Contributor" shall mean Licensor and any individual or Legal Entity
    +      on behalf of whom a Contribution has been received by Licensor and
    +      subsequently incorporated within the Work.
    +
    +   2. Grant of Copyright License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      copyright license to reproduce, prepare Derivative Works of,
    +      publicly display, publicly perform, sublicense, and distribute the
    +      Work and such Derivative Works in Source or Object form.
    +
    +   3. Grant of Patent License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      (except as stated in this section) patent license to make, have made,
    +      use, offer to sell, sell, import, and otherwise transfer the Work,
    +      where such license applies only to those patent claims licensable
    +      by such Contributor that are necessarily infringed by their
    +      Contribution(s) alone or by combination of their Contribution(s)
    +      with the Work to which such Contribution(s) was submitted. If You
    +      institute patent litigation against any entity (including a
    +      cross-claim or counterclaim in a lawsuit) alleging that the Work
    +      or a Contribution incorporated within the Work constitutes direct
    +      or contributory patent infringement, then any patent licenses
    +      granted to You under this License for that Work shall terminate
    +      as of the date such litigation is filed.
    +
    +   4. Redistribution. You may reproduce and distribute copies of the
    +      Work or Derivative Works thereof in any medium, with or without
    +      modifications, and in Source or Object form, provided that You
    +      meet the following conditions:
    +
    +      (a) You must give any other recipients of the Work or
    +          Derivative Works a copy of this License; and
    +
    +      (b) You must cause any modified files to carry prominent notices
    +          stating that You changed the files; and
    +
    +      (c) You must retain, in the Source form of any Derivative Works
    +          that You distribute, all copyright, patent, trademark, and
    +          attribution notices from the Source form of the Work,
    +          excluding those notices that do not pertain to any part of
    +          the Derivative Works; and
    +
    +      (d) If the Work includes a "NOTICE" text file as part of its
    +          distribution, then any Derivative Works that You distribute must
    +          include a readable copy of the attribution notices contained
    +          within such NOTICE file, excluding those notices that do not
    +          pertain to any part of the Derivative Works, in at least one
    +          of the following places: within a NOTICE text file distributed
    +          as part of the Derivative Works; within the Source form or
    +          documentation, if provided along with the Derivative Works; or,
    +          within a display generated by the Derivative Works, if and
    +          wherever such third-party notices normally appear. The contents
    +          of the NOTICE file are for informational purposes only and
    +          do not modify the License. You may add Your own attribution
    +          notices within Derivative Works that You distribute, alongside
    +          or as an addendum to the NOTICE text from the Work, provided
    +          that such additional attribution notices cannot be construed
    +          as modifying the License.
    +
    +      You may add Your own copyright statement to Your modifications and
    +      may provide additional or different license terms and conditions
    +      for use, reproduction, or distribution of Your modifications, or
    +      for any such Derivative Works as a whole, provided Your use,
    +      reproduction, and distribution of the Work otherwise complies with
    +      the conditions stated in this License.
    +
    +   5. Submission of Contributions. Unless You explicitly state otherwise,
    +      any Contribution intentionally submitted for inclusion in the Work
    +      by You to the Licensor shall be under the terms and conditions of
    +      this License, without any additional terms or conditions.
    +      Notwithstanding the above, nothing herein shall supersede or modify
    +      the terms of any separate license agreement you may have executed
    +      with Licensor regarding such Contributions.
    +
    +   6. Trademarks. This License does not grant permission to use the trade
    +      names, trademarks, service marks, or product names of the Licensor,
    +      except as required for reasonable and customary use in describing the
    +      origin of the Work and reproducing the content of the NOTICE file.
    +
    +   7. Disclaimer of Warranty. Unless required by applicable law or
    +      agreed to in writing, Licensor provides the Work (and each
    +      Contributor provides its Contributions) on an "AS IS" BASIS,
    +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +      implied, including, without limitation, any warranties or conditions
    +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +      PARTICULAR PURPOSE. You are solely responsible for determining the
    +      appropriateness of using or redistributing the Work and assume any
    +      risks associated with Your exercise of permissions under this License.
    +
    +   8. Limitation of Liability. In no event and under no legal theory,
    +      whether in tort (including negligence), contract, or otherwise,
    +      unless required by applicable law (such as deliberate and grossly
    +      negligent acts) or agreed to in writing, shall any Contributor be
    +      liable to You for damages, including any direct, indirect, special,
    +      incidental, or consequential damages of any character arising as a
    +      result of this License or out of the use or inability to use the
    +      Work (including but not limited to damages for loss of goodwill,
    +      work stoppage, computer failure or malfunction, or any and all
    +      other commercial damages or losses), even if such Contributor
    +      has been advised of the possibility of such damages.
    +
    +   9. Accepting Warranty or Additional Liability. While redistributing
    +      the Work or Derivative Works thereof, You may choose to offer,
    +      and charge a fee for, acceptance of support, warranty, indemnity,
    +      or other liability obligations and/or rights consistent with this
    +      License. However, in accepting such obligations, You may act only
    +      on Your own behalf and on Your sole responsibility, not on behalf
    +      of any other Contributor, and only if You agree to indemnify,
    +      defend, and hold each Contributor harmless for any liability
    +      incurred by, or claims asserted against, such Contributor by reason
    +      of your accepting any such warranty or additional liability.
    +
    +   END OF TERMS AND CONDITIONS
    +
    +   APPENDIX: How to apply the Apache License to your work.
    +
    +      To apply the Apache License to your work, attach the following
    +      boilerplate notice, with the fields enclosed by brackets "[]"
    +      replaced with your own identifying information. (Don't include
    +      the brackets!)  The text should be enclosed in the appropriate
    +      comment syntax for the file format. We also recommend that a
    +      file or class name and description of purpose be included on the
    +      same "printed page" as the copyright notice for easier
    +      identification within third-party archives.
    +
    +   Copyright 2023 Jacob Pratt et al.
     
        Licensed under the Apache License, Version 2.0 (the "License");
        you may not use this file except in compliance with the License.
    @@ -660,7 +1079,8 @@ 

    Used by:

    Apache License 2.0

    Used by:

                                      Apache License
    @@ -851,7 +1271,7 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2023 Jacob Pratt et al. + Copyright 2024 Jacob Pratt et al. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -2361,8 +2781,6 @@

    Used by:

  • clap_derive
  • clap_lex
  • opentelemetry-proto
-  • structopt
-  • structopt-derive
                                 Apache License
                            Version 2.0, January 2004
@@ -3428,9 +3846,11 @@ 

Used by:

  • humantime
  • quick-error
  • resolv-conf
-  • terminal_size
+  • serde_spanned
  • tokio-io-timeout
+  • toml
  • toml_datetime
+  • toml_edit
  • trust-dns-proto
  • trust-dns-resolver
  • unreachable
  • @@ -3842,7 +4262,7 @@

    Used by:

    Apache License 2.0

    Used by:

                                     Apache License
                                Version 2.0, January 2004
    @@ -5659,216 +6079,7 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2016 Sean McArthur - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -
    - -
  • -

    Apache License 2.0

    -

    Used by:

    - -
                                  Apache License
    -                        Version 2.0, January 2004
    -                     http://www.apache.org/licenses/
    -
    -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -1. Definitions.
    -
    -   "License" shall mean the terms and conditions for use, reproduction,
    -   and distribution as defined by Sections 1 through 9 of this document.
    -
    -   "Licensor" shall mean the copyright owner or entity authorized by
    -   the copyright owner that is granting the License.
    -
    -   "Legal Entity" shall mean the union of the acting entity and all
    -   other entities that control, are controlled by, or are under common
    -   control with that entity. For the purposes of this definition,
    -   "control" means (i) the power, direct or indirect, to cause the
    -   direction or management of such entity, whether by contract or
    -   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -   outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -   "You" (or "Your") shall mean an individual or Legal Entity
    -   exercising permissions granted by this License.
    -
    -   "Source" form shall mean the preferred form for making modifications,
    -   including but not limited to software source code, documentation
    -   source, and configuration files.
    -
    -   "Object" form shall mean any form resulting from mechanical
    -   transformation or translation of a Source form, including but
    -   not limited to compiled object code, generated documentation,
    -   and conversions to other media types.
    -
    -   "Work" shall mean the work of authorship, whether in Source or
    -   Object form, made available under the License, as indicated by a
    -   copyright notice that is included in or attached to the work
    -   (an example is provided in the Appendix below).
    -
    -   "Derivative Works" shall mean any work, whether in Source or Object
    -   form, that is based on (or derived from) the Work and for which the
    -   editorial revisions, annotations, elaborations, or other modifications
    -   represent, as a whole, an original work of authorship. For the purposes
    -   of this License, Derivative Works shall not include works that remain
    -   separable from, or merely link (or bind by name) to the interfaces of,
    -   the Work and Derivative Works thereof.
    -
    -   "Contribution" shall mean any work of authorship, including
    -   the original version of the Work and any modifications or additions
    -   to that Work or Derivative Works thereof, that is intentionally
    -   submitted to Licensor for inclusion in the Work by the copyright owner
    -   or by an individual or Legal Entity authorized to submit on behalf of
    -   the copyright owner. For the purposes of this definition, "submitted"
    -   means any form of electronic, verbal, or written communication sent
    -   to the Licensor or its representatives, including but not limited to
    -   communication on electronic mailing lists, source code control systems,
    -   and issue tracking systems that are managed by, or on behalf of, the
    -   Licensor for the purpose of discussing and improving the Work, but
    -   excluding communication that is conspicuously marked or otherwise
    -   designated in writing by the copyright owner as "Not a Contribution."
    -
    -   "Contributor" shall mean Licensor and any individual or Legal Entity
    -   on behalf of whom a Contribution has been received by Licensor and
    -   subsequently incorporated within the Work.
    -
    -2. Grant of Copyright License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   copyright license to reproduce, prepare Derivative Works of,
    -   publicly display, publicly perform, sublicense, and distribute the
    -   Work and such Derivative Works in Source or Object form.
    -
    -3. Grant of Patent License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   (except as stated in this section) patent license to make, have made,
    -   use, offer to sell, sell, import, and otherwise transfer the Work,
    -   where such license applies only to those patent claims licensable
    -   by such Contributor that are necessarily infringed by their
    -   Contribution(s) alone or by combination of their Contribution(s)
    -   with the Work to which such Contribution(s) was submitted. If You
    -   institute patent litigation against any entity (including a
    -   cross-claim or counterclaim in a lawsuit) alleging that the Work
    -   or a Contribution incorporated within the Work constitutes direct
    -   or contributory patent infringement, then any patent licenses
    -   granted to You under this License for that Work shall terminate
    -   as of the date such litigation is filed.
    -
    -4. Redistribution. You may reproduce and distribute copies of the
    -   Work or Derivative Works thereof in any medium, with or without
    -   modifications, and in Source or Object form, provided that You
    -   meet the following conditions:
    -
    -   (a) You must give any other recipients of the Work or
    -       Derivative Works a copy of this License; and
    -
    -   (b) You must cause any modified files to carry prominent notices
    -       stating that You changed the files; and
    -
    -   (c) You must retain, in the Source form of any Derivative Works
    -       that You distribute, all copyright, patent, trademark, and
    -       attribution notices from the Source form of the Work,
    -       excluding those notices that do not pertain to any part of
    -       the Derivative Works; and
    -
    -   (d) If the Work includes a "NOTICE" text file as part of its
    -       distribution, then any Derivative Works that You distribute must
    -       include a readable copy of the attribution notices contained
    -       within such NOTICE file, excluding those notices that do not
    -       pertain to any part of the Derivative Works, in at least one
    -       of the following places: within a NOTICE text file distributed
    -       as part of the Derivative Works; within the Source form or
    -       documentation, if provided along with the Derivative Works; or,
    -       within a display generated by the Derivative Works, if and
    -       wherever such third-party notices normally appear. The contents
    -       of the NOTICE file are for informational purposes only and
    -       do not modify the License. You may add Your own attribution
    -       notices within Derivative Works that You distribute, alongside
    -       or as an addendum to the NOTICE text from the Work, provided
    -       that such additional attribution notices cannot be construed
    -       as modifying the License.
    -
    -   You may add Your own copyright statement to Your modifications and
    -   may provide additional or different license terms and conditions
    -   for use, reproduction, or distribution of Your modifications, or
    -   for any such Derivative Works as a whole, provided Your use,
    -   reproduction, and distribution of the Work otherwise complies with
    -   the conditions stated in this License.
    -
    -5. Submission of Contributions. Unless You explicitly state otherwise,
    -   any Contribution intentionally submitted for inclusion in the Work
    -   by You to the Licensor shall be under the terms and conditions of
    -   this License, without any additional terms or conditions.
    -   Notwithstanding the above, nothing herein shall supersede or modify
    -   the terms of any separate license agreement you may have executed
    -   with Licensor regarding such Contributions.
    -
    -6. Trademarks. This License does not grant permission to use the trade
    -   names, trademarks, service marks, or product names of the Licensor,
    -   except as required for reasonable and customary use in describing the
    -   origin of the Work and reproducing the content of the NOTICE file.
    -
    -7. Disclaimer of Warranty. Unless required by applicable law or
    -   agreed to in writing, Licensor provides the Work (and each
    -   Contributor provides its Contributions) on an "AS IS" BASIS,
    -   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -   implied, including, without limitation, any warranties or conditions
    -   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -   PARTICULAR PURPOSE. You are solely responsible for determining the
    -   appropriateness of using or redistributing the Work and assume any
    -   risks associated with Your exercise of permissions under this License.
    -
    -8. Limitation of Liability. In no event and under no legal theory,
    -   whether in tort (including negligence), contract, or otherwise,
    -   unless required by applicable law (such as deliberate and grossly
    -   negligent acts) or agreed to in writing, shall any Contributor be
    -   liable to You for damages, including any direct, indirect, special,
    -   incidental, or consequential damages of any character arising as a
    -   result of this License or out of the use or inability to use the
    -   Work (including but not limited to damages for loss of goodwill,
    -   work stoppage, computer failure or malfunction, or any and all
    -   other commercial damages or losses), even if such Contributor
    -   has been advised of the possibility of such damages.
    -
    -9. Accepting Warranty or Additional Liability. While redistributing
    -   the Work or Derivative Works thereof, You may choose to offer,
    -   and charge a fee for, acceptance of support, warranty, indemnity,
    -   or other liability obligations and/or rights consistent with this
    -   License. However, in accepting such obligations, You may act only
    -   on Your own behalf and on Your sole responsibility, not on behalf
    -   of any other Contributor, and only if You agree to indemnify,
    -   defend, and hold each Contributor harmless for any liability
    -   incurred by, or claims asserted against, such Contributor by reason
    -   of your accepting any such warranty or additional liability.
    -
    -END OF TERMS AND CONDITIONS
    -
    -APPENDIX: How to apply the Apache License to your work.
    -
    -   To apply the Apache License to your work, attach the following
    -   boilerplate notice, with the fields enclosed by brackets "[]"
    -   replaced with your own identifying information. (Don't include
    -   the brackets!)  The text should be enclosed in the appropriate
    -   comment syntax for the file format. We also recommend that a
    -   file or class name and description of purpose be included on the
    -   same "printed page" as the copyright notice for easier
    -   identification within third-party archives.
    -
    -Copyright 2017 http-rs authors
    +Copyright 2016 Sean McArthur
     
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
    @@ -5887,7 +6098,7 @@ 

    Used by:

    Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -6077,7 +6288,7 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2017 quininer kel +Copyright 2017 http-rs authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -6096,9 +6307,7 @@

    Used by:

    Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -6288,7 +6497,7 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2017-2020 Dirkjan Ochtman +Copyright 2017 quininer kel Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -6307,7 +6516,9 @@

    Used by:

    Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -6497,7 +6708,7 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2018 The pin-utils authors +Copyright 2017-2020 Dirkjan Ochtman Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -6516,8 +6727,7 @@

    Used by:

    Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -6707,7 +6917,7 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2018-2022 RustCrypto Developers +Copyright 2018 The pin-utils authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -6726,7 +6936,8 @@

    Used by:

    Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -6916,13 +7127,13 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2019 The CryptoCorrosion Contributors +Copyright 2018-2022 RustCrypto Developers Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -6935,7 +7146,7 @@

    Used by:

    Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -7125,13 +7336,13 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2019-2020 CreepySkeleton <creepy-skeleton@yandex.ru> +Copyright 2019 The CryptoCorrosion Contributors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -7841,7 +8052,6 @@

    Used by:

  • hdrhistogram
  • heck
  • heck
-  • hermit-abi
  • hermit-abi
  • httparse
  • humantime-serde
  • @@ -7851,7 +8061,7 @@

    Used by:

  • idna
  • if_chain
  • indexmap
-  • indexmap
+  • indexmap
  • inventory
  • io-lifetimes
  • ipconfig
  • @@ -7956,7 +8166,6 @@

    Used by:

  • threadpool
  • tikv-jemalloc-sys
  • tikv-jemallocator
-  • toml
  • toml_edit
  • triomphe
  • try_match
  • @@ -8413,7 +8622,6 @@

    Used by:

  • linked-hash-map
  • lru-cache
  • minimal-lexical
-  • vec_map
  •                               Apache License
                             Version 2.0, January 2004
    @@ -10087,7 +10295,7 @@ 

    Used by:

    Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -10265,31 +10473,6 @@ 

    Used by:

    of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright 2019-2020 CreepySkeleton <creepy-skeleton@yandex.ru> - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
  • @@ -11393,6 +11576,7 @@

    Apache License 2.0

    Used by:

    Apache License
    @@ -13299,34 +13483,6 @@ 

    Used by:

    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -
    -
  • -
  • -

    MIT License

    -

    Used by:

    - -
    Copyright (c) 2015 David Cuddeback
    -
    -Permission is hereby granted, free of charge, to any person obtaining
    -a copy of this software and associated documentation files (the
    -"Software"), to deal in the Software without restriction, including
    -without limitation the rights to use, copy, modify, merge, publish,
    -distribute, sublicense, and/or sell copies of the Software, and to
    -permit persons to whom the Software is furnished to do so, subject to
    -the following conditions:
    -
    -The above copyright notice and this permission notice shall be
    -included in all copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
    -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     
  • @@ -13487,34 +13643,6 @@

    Used by:

    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -
  • - -
  • -

    MIT License

    -

    Used by:

    - -
    Copyright (c) 2015-2019 Doug Tangren
    -
    -Permission is hereby granted, free of charge, to any person obtaining
    -a copy of this software and associated documentation files (the
    -"Software"), to deal in the Software without restriction, including
    -without limitation the rights to use, copy, modify, merge, publish,
    -distribute, sublicense, and/or sell copies of the Software, and to
    -permit persons to whom the Software is furnished to do so, subject to
    -the following conditions:
    -
    -The above copyright notice and this permission notice shall be
    -included in all copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
    -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     
  • @@ -13667,7 +13795,6 @@

    Used by:

    Copyright (c) 2017 Redox OS Developers
     
    @@ -14398,35 +14525,6 @@ 

    Used by:

    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    -
  • -
  • -

    MIT License

    -

    Used by:

    - -
    MIT License
    -
    -Copyright (c) 2016 Martin Geisler
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -SOFTWARE.
    -
  • MIT License

    @@ -14478,35 +14576,6 @@

    Used by:

    The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -SOFTWARE.
    -
    -

    MIT License

    Used by:

    -
    -MIT License
    -
    -Copyright (c) 2018 Benjamin Sago
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    @@ -14939,6 +15008,7 @@ 

    Used by:

    difflib
    jsonschema
    lazy-regex-proc_macros
    +number_prefix
    serde_v8
    v8
    valuable
    @@ -15142,35 +15212,6 @@

    Used by:

    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    -
    -

    MIT License

    Used by:

    -
    -The MIT License (MIT)
    -
    -Copyright (c) 2014 Benjamin Sago
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -SOFTWARE.
     
    @@ -15267,7 +15308,6 @@

    MIT License

    Used by:

    The MIT License (MIT)
     
    @@ -15390,38 +15430,7 @@ 

    Used by:

    MIT License

    Used by:

    -
    -The MIT License (MIT)
    -
    -Copyright (c) 2015 Danny Guo
    -Copyright (c) 2016 Titus Wormer <tituswormer@gmail.com>
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -SOFTWARE.
    -
    -
    -

    MIT License

    Used by:

    -
    The MIT License (MIT)
     
    @@ -15498,35 +15507,6 @@ 

    Used by:

    The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -SOFTWARE.
    -
    -

    MIT License

    Used by:

    -
    -The MIT License (MIT)
    -
    -Copyright (c) 2015-2016 Kevin B. Knapp
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    @@ -15689,11 +15669,9 @@ 

    Used by:

    MIT License

    Used by:

    The MIT License (MIT)
     
    @@ -15918,6 +15896,7 @@ 

    Used by:

    aho-corasick
    byteorder
    globset
    +memchr
    regex-automata
    same-file
    termcolor
diff --git a/scripts/install.sh b/scripts/install.sh
index 82ec9b8b04..f89b6e93a8 100755
--- a/scripts/install.sh
+++ b/scripts/install.sh
@@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa
 
 # Router version defined in apollo-router's Cargo.toml
 # Note: Change this line manually during the release steps.
-PACKAGE_VERSION="v1.39.1"
+PACKAGE_VERSION="v1.40.0"
 
 download_binary() {
     downloader --check
diff --git a/xtask/Cargo.lock b/xtask/Cargo.lock
index 39f620c1fb..8ac7e2800f 100644
--- a/xtask/Cargo.lock
+++ b/xtask/Cargo.lock
@@ -216,9 +216,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 
 [[package]]
 name = "chrono"
-version = "0.4.33"
+version = "0.4.34"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f13690e35a5e4ace198e7beea2895d29f3a9cc55015fcebe6336bd2010af9eb"
+checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b"
 dependencies = [
  "android-tzdata",
  "iana-time-zone",
@@ -228,9 +228,9 @@ dependencies = [
 
 [[package]]
 name = "clap"
-version = "4.4.18"
+version = "4.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c"
+checksum = "80c21025abd42669a92efc996ef13cfb2c5c627858421ea58d5c3b331a6c134f"
 dependencies = [
  "clap_builder",
  "clap_derive",
@@ -238,9 +238,9 @@ dependencies = [
 
 [[package]]
 name = "clap_builder"
-version = "4.4.18"
+version = "4.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7"
+checksum = "458bf1f341769dfcf849846f65dffdf9146daa56bcd2a47cb4e1de9915567c99"
 dependencies = [
  "anstream",
  "anstyle",
@@ -250,9 +250,9 @@ dependencies = [
 
 [[package]]
 name = "clap_derive"
-version = "4.4.7"
+version = "4.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442"
+checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47"
 dependencies = [
  "heck",
  "proc-macro2",
@@ -262,9 +262,9 @@ dependencies = [
 
 [[package]]
 name = "clap_lex"
-version = "0.6.0"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1"
+checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce"
 
 [[package]]
 name = "colorchoice"
@@ -790,9 +790,9 @@ checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6"
 
 [[package]]
 name = "itertools"
-version = "0.12.0"
+version = "0.12.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0"
+checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569"
 dependencies = [
  "either",
 ]
@@ -1282,18 +1282,18 @@ dependencies = [
 
 [[package]]
 name = "serde"
-version = "1.0.195"
+version = "1.0.196"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02"
+checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.195"
+version = "1.0.196"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c"
+checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -1383,9 +1383,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
 
 [[package]]
 name = "strsim"
-version = "0.10.0"
+version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
+checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01"
 
 [[package]]
 name = "syn"
@@ -1480,9 +1480,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
 
 [[package]]
 name = "tokio"
-version = "1.35.1"
+version = "1.36.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104"
+checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931"
 dependencies = [
  "backtrace",
  "bytes",
diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml
index 7ecc54ab1b..81eab1a545 100644
--- a/xtask/Cargo.toml
+++ b/xtask/Cargo.toml
@@ -11,17 +11,17 @@ publish = false
 [dependencies]
 anyhow = "1"
 camino = "1"
-clap = { version = "4.4.18", features = ["derive"] }
+clap = { version = "4.5.0", features = ["derive"] }
 cargo_metadata = "0.18.1"
 # Only use the `clock` features of `chrono` to avoid the `time` dependency
 # impacted by CVE-2020-26235. https://github.com/chronotope/chrono/issues/602
 # and https://github.com/chronotope/chrono/issues/1073 will explain more.
-chrono = { version = "0.4.33", default-features = false, features = ["clock"] }
+chrono = { version = "0.4.34", default-features = false, features = ["clock"] }
 console = "0.15.8"
 dialoguer = "0.11.0"
 flate2 = "1"
 graphql_client = { version = "0.13.0", features = ["reqwest-rustls"] }
-itertools = "0.12.0"
+itertools = "0.12.1"
 libc = "0.2"
 memorable-wordlist = "0.1.7"
 nu-ansi-term = "0.49"
@@ -32,12 +32,12 @@ reqwest = { version = "0.11", default-features = false, features = [
     "rustls-tls",
     "rustls-tls-native-roots",
 ] }
-serde = { version = "1.0.195", features = ["derive"] }
+serde = { version = "1.0.196", features = ["derive"] }
 serde_json = "1"
 tar = "0.4"
 tempfile = "3"
 tinytemplate = "1.2.1"
-tokio = "1.35.1"
+tokio = "1.36.0"
 which = "5.0.0"
 walkdir = "2.4.0"