diff --git a/.circleci/config.yml b/.circleci/config.yml
index 1fd3ee4294..159d53b96f 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -3,8 +3,9 @@ version: 2.1
# These "CircleCI Orbs" are reusable bits of configuration that can be shared
# across projects. See https://circleci.com/orbs/ for more information.
orbs:
- gh: circleci/github-cli@2.2.0
- slack: circleci/slack@4.12.5
+ gh: circleci/github-cli@2.3.0
+ slack: circleci/slack@4.12.6
+ secops: apollo/circleci-secops-orb@2.0.7
executors:
amd_linux_build: &amd_linux_build_executor
@@ -21,8 +22,8 @@ executors:
amd_linux_test: &amd_linux_test_executor
docker:
- image: cimg/base:stable
- - image: cimg/redis:7.2.3
- - image: jaegertracing/all-in-one:1.51.0
+ - image: cimg/redis:7.2.4
+ - image: jaegertracing/all-in-one:1.53.0
resource_class: xlarge
environment:
CARGO_BUILD_JOBS: 4
@@ -1011,3 +1012,19 @@ workflows:
ignore: /.*/
tags:
only: /v.*/
+
+ security-scans:
+ when:
+ not: << pipeline.parameters.nightly >>
+ jobs:
+ - secops/gitleaks:
+ context:
+ - secops-oidc
+ - github-orb
+      git-base-revision: <<#pipeline.git.base_revision>><< pipeline.git.base_revision >><</pipeline.git.base_revision>>
+ git-revision: << pipeline.git.revision >>
+ - secops/semgrep:
+ context:
+ - secops-oidc
+ - github-orb
+      git-base-revision: <<#pipeline.git.base_revision>><< pipeline.git.base_revision >><</pipeline.git.base_revision>>
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5c10416337..22c4ae21b3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,167 @@ All notable changes to Router will be documented in this file.
This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html).
+# [1.39.0] - 2024-02-05
+
+## 🚀 Features
+
+### Introduce support for progressive `@override` ([PR #4521](https://github.com/apollographql/router/pull/4521))
+
+> ⚠️ This is an [Enterprise feature](https://www.apollographql.com/blog/platform/evaluating-apollo-router-understanding-free-and-open-vs-commercial-features/) of the Apollo Router. It requires an organization with a [GraphOS Enterprise plan](https://www.apollographql.com/pricing/).
+>
+> If your organization doesn't currently have an Enterprise plan, you can test out this functionality by signing up for a free Enterprise trial.
+
+The change brings support for progressive `@override`, which allows dynamically overriding root fields and entity fields in the schema. This feature is enterprise only and requires a license key to be used.
+
+A new `label` argument is added to the `@override` directive in order to indicate the field is dynamically overridden. Labels can come in two forms:
+1) String matching the form `percent(x)`: The router resolves these labels based on the `x` value. For example, `percent(50)` will route 50% of requests to the overridden field and 50% of requests to the original field.
+2) Arbitrary string matching the regex `^[a-zA-Z][a-zA-Z0-9_\-:./]*$`: These labels are expected to be resolved externally via coprocessor. A supergraph request hook can inspect and modify the context of a request in order to inform the router which labels to use during query planning.
+
+Please consult the docs for more information on how to use this feature and how to implement a coprocessor for label resolution.
+
+By [@TrevorScheer](https://github.com/TrevorScheer) in https://github.com/apollographql/router/pull/4521
+
+### Specify trace ID formatting ([PR #4530](https://github.com/apollographql/router/pull/4530))
+
+You can specify the format of the trace ID in the response headers of the supergraph service.
+
+An example configuration using this feature:
+```yaml
+telemetry:
+ apollo:
+ client_name_header: name_header
+ client_version_header: version_header
+ exporters:
+ tracing:
+ experimental_response_trace_id:
+ enabled: true
+ header_name: trace_id
+ format: decimal # Optional, defaults to hexadecimal
+```
+
+If the format is not specified, then the trace ID will continue to be in hexadecimal format.
+
+By [@nicholascioli](https://github.com/nicholascioli) in https://github.com/apollographql/router/pull/4530
+
+### Add selector to get all baggage key values in span attributes ([Issue #4425](https://github.com/apollographql/router/issues/4425))
+
+Previously, baggage items were configured as standard attributes in `router.yaml`, and adding a new baggage item required a configuration update and router rerelease.
+
+This release supports a new configuration that enables baggage items to be added automatically as span attributes.
+
+If you have several baggage items and would like to add all of them directly as span attributes (for example, `baggage: my_item=test, my_second_item=bar`), setting `baggage: true` will automatically add two span attributes, `my_item=test` and `my_second_item=bar`.
+
+An example configuration:
+
+```yaml
+telemetry:
+ instrumentation:
+ spans:
+ router:
+ attributes:
+ baggage: true
+```
+
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/4537
+
+### Create a trace during router creation and plugin initialization ([Issue #4472](https://github.com/apollographql/router/issues/4472))
+
+When the router starts or reloads, it will now generate a trace with spans for query planner creation, schema parsing, plugin initialization, and request pipeline creation. This will help with debugging any issues during startup, especially during plugin creation.
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4480
+
+### Allow adding static attributes on specific spans in telemetry settings ([Issue #4561](https://github.com/apollographql/router/issues/4561))
+
+It is now possible to add static attributes to spans, defined in the configuration file.
+
+Example of configuration:
+
+```yaml
+telemetry:
+ instrumentation:
+ spans:
+ router:
+ attributes:
+ "my_attribute": "constant_value"
+ supergraph:
+ attributes:
+ "my_attribute": "constant_value"
+ subgraph:
+ attributes:
+ "my_attribute": "constant_value"
+```
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/4566
+
+## 🐛 Fixes
+
+### Order HPA targets to resolve OutOfSync states ([Issue #4435](https://github.com/apollographql/router/issues/4435))
+
+This update addresses an `OutOfSync` issue in ArgoCD applications when Horizontal Pod Autoscaler (HPA) is configured with both memory and CPU limits.
+Previously, the live and desired manifests within Kubernetes were not consistently sorted, leading to persistent `OutOfSync` states in ArgoCD.
+This change implements a sorting mechanism for HPA targets within the Helm chart, ensuring alignment with Kubernetes' expected order.
+This fix proactively resolves the sync discrepancies while using HPA, circumventing the need to wait for Kubernetes' issue resolution (kubernetes/kubernetes#74099).
+
+By [@cyberhck](https://github.com/cyberhck) in https://github.com/apollographql/router/pull/4436
+
+### Reactivate log events in traces ([PR #4486](https://github.com/apollographql/router/pull/4486))
+
+This fixes a regression introduced in #2999, where events were no longer sent with traces due to overly aggressive sampling.
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4486
+
+### Fix inconsistency in environment variable parsing for telemetry ([Issue #3203](https://github.com/apollographql/router/issues/3203))
+
+Previously, the router would complain when using the Rover-recommended `APOLLO_TELEMETRY_DISABLED=1` environment
+variable. Now any non-falsy value can be used, such as 1, yes, or on.
+
+By [@nicholascioli](https://github.com/nicholascioli) in https://github.com/apollographql/router/pull/4549
+
+### Store static pages in `Bytes` structure to avoid expensive allocation per request ([PR #4528](https://github.com/apollographql/router/pull/4528))
+
+The `CheckpointService` created by the `StaticPageLayer` caused a non-negligible amount of memory to be allocated on every request. The service stack gets cloned on every request, and so does the rendered template.
+
+The template is now stored in a `Bytes` struct instead which is cheap to clone.
+
+By [@xuorig](https://github.com/xuorig) in https://github.com/apollographql/router/pull/4528
+
+### Fix header propagation issues ([Issue #4312](https://github.com/apollographql/router/issues/4312)), ([Issue #4398](https://github.com/apollographql/router/issues/4398))
+
+This fixes two header propagation issues:
+* if a client request header has already been added to a subgraph request due to another header propagation rule, then it is only added once
+* `Accept`, `Accept-Encoding` and `Content-Encoding` were not in the list of reserved headers that cannot be propagated. They are now in that list because those headers are set explicitly by the Router in its subgraph requests
+
+There is a potential change in behavior: if a router deployment was accidentally relying on header propagation to compress subgraph requests, then it will not work anymore because `Content-Encoding` is not propagated anymore. Instead it should be set up from the `traffic_shaping` section of the Router configuration:
+
+```yaml
+traffic_shaping:
+ all:
+ compression: gzip
+ subgraphs: # Rules applied to requests from the router to individual subgraphs
+ products:
+ compression: identity
+```
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4535
+
+## 🧪 Experimental
+
+### Move cacheability metrics to the entity cache plugin ([Issue #4253](https://github.com/apollographql/router/issues/4253))
+
+Cacheability metrics have been moved from the telemetry plugin to the entity cache plugin.
+
+New configuration has been added:
+- Enabling or disabling the metrics
+- Setting the metrics storage TTL (default is 60s)
+- Disabling the metric's typename attribute by default. (Activating it can greatly increase the cardinality.)
+
+Cleanup and performance improvements have also been implemented.
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4469
+
+
+
# [1.38.0] - 2024-01-19
## ๐ Features
diff --git a/Cargo.lock b/Cargo.lock
index bc1f4c0aaa..85501c528f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -20,7 +20,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"apollo-router",
- "http",
+ "http 0.2.11",
"serde_json",
"tokio",
"tower",
@@ -147,9 +147,9 @@ dependencies = [
[[package]]
name = "anstream"
-version = "0.6.0"
+version = "0.6.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bff2cf94a3dbe2d57cbd56485e1bd7436455058034d6c2d47be51d4e5e4bc6ab"
+checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5"
dependencies = [
"anstyle",
"anstyle-parse",
@@ -185,19 +185,19 @@ dependencies = [
[[package]]
name = "anstyle-wincon"
-version = "3.0.0"
+version = "3.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0238ca56c96dfa37bdf7c373c8886dd591322500aceeeccdb2216fe06dc2f796"
+checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7"
dependencies = [
"anstyle",
- "windows-sys 0.48.0",
+ "windows-sys 0.52.0",
]
[[package]]
name = "anyhow"
-version = "1.0.75"
+version = "1.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6"
+checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca"
[[package]]
name = "apollo-compiler"
@@ -258,7 +258,7 @@ dependencies = [
[[package]]
name = "apollo-router"
-version = "1.38.0"
+version = "1.39.0"
dependencies = [
"access-json",
"anyhow",
@@ -271,16 +271,17 @@ dependencies = [
"aws-config",
"aws-credential-types",
"aws-sigv4",
+ "aws-smithy-runtime-api",
"aws-types",
"axum",
- "base64 0.21.5",
+ "base64 0.21.7",
"bloomfilter",
"brotli",
"buildstructor",
"bytes",
"ci_info",
- "clap 4.4.11",
- "console 0.15.7",
+ "clap 4.4.18",
+ "console 0.15.8",
"console-subscriber",
"dashmap",
"derivative",
@@ -298,7 +299,7 @@ dependencies = [
"heck 0.4.1",
"hex",
"hmac",
- "http",
+ "http 0.2.11",
"http-body",
"http-serde",
"humantime",
@@ -357,6 +358,7 @@ dependencies = [
"rustls",
"rustls-pemfile",
"schemars",
+ "semver 1.0.21",
"serde",
"serde_derive_default",
"serde_json",
@@ -411,7 +413,7 @@ dependencies = [
[[package]]
name = "apollo-router-benchmarks"
-version = "1.38.0"
+version = "1.39.0"
dependencies = [
"apollo-parser",
"apollo-router",
@@ -427,11 +429,11 @@ dependencies = [
[[package]]
name = "apollo-router-scaffold"
-version = "1.38.0"
+version = "1.39.0"
dependencies = [
"anyhow",
"cargo-scaffold",
- "clap 4.4.11",
+ "clap 4.4.18",
"copy_dir",
"regex",
"str_inflector",
@@ -507,10 +509,10 @@ dependencies = [
"mime",
"mime_guess",
"nom",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"serde",
- "syn 2.0.32",
+ "syn 2.0.48",
]
[[package]]
@@ -536,7 +538,7 @@ dependencies = [
"anyhow",
"apollo-router",
"async-trait",
- "http",
+ "http 0.2.11",
"schemars",
"serde",
"serde_json",
@@ -571,9 +573,9 @@ dependencies = [
[[package]]
name = "async-compression"
-version = "0.4.5"
+version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc2d0cfb2a7388d34f590e76686704c494ed7aaceed62ee1ba35cbf363abc2a5"
+checksum = "a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c"
dependencies = [
"brotli",
"flate2",
@@ -645,7 +647,7 @@ dependencies = [
"futures-lite 2.0.0",
"parking",
"polling 3.3.1",
- "rustix 0.38.21",
+ "rustix 0.38.30",
"slab",
"tracing",
"windows-sys 0.52.0",
@@ -684,7 +686,7 @@ dependencies = [
"cfg-if",
"event-listener 3.1.0",
"futures-lite 1.13.0",
- "rustix 0.38.21",
+ "rustix 0.38.30",
"windows-sys 0.48.0",
]
@@ -700,7 +702,7 @@ dependencies = [
"cfg-if",
"futures-core",
"futures-io",
- "rustix 0.38.21",
+ "rustix 0.38.30",
"signal-hook-registry",
"slab",
"windows-sys 0.48.0",
@@ -750,9 +752,9 @@ version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -763,13 +765,13 @@ checksum = "b4eb2cdb97421e01129ccb49169d8279ed21e829929144f4a22a6e54ac549ca1"
[[package]]
name = "async-trait"
-version = "0.1.74"
+version = "0.1.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9"
+checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -808,99 +810,100 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "aws-config"
-version = "0.56.1"
+version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc6b3804dca60326e07205179847f17a4fce45af3a1106939177ad41ac08a6de"
+checksum = "8b30c39ebe61f75d1b3785362b1586b41991873c9ab3e317a9181c246fb71d82"
dependencies = [
"aws-credential-types",
- "aws-http",
+ "aws-runtime",
"aws-sdk-sso",
+ "aws-sdk-ssooidc",
"aws-sdk-sts",
"aws-smithy-async",
- "aws-smithy-client",
"aws-smithy-http",
- "aws-smithy-http-tower",
"aws-smithy-json",
+ "aws-smithy-runtime",
+ "aws-smithy-runtime-api",
"aws-smithy-types",
"aws-types",
"bytes",
"fastrand 2.0.0",
"hex",
- "http",
+ "http 0.2.11",
"hyper",
- "ring 0.16.20",
+ "ring 0.17.5",
"time",
"tokio",
- "tower",
"tracing",
"zeroize",
]
[[package]]
name = "aws-credential-types"
-version = "0.56.1"
+version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "70a66ac8ef5fa9cf01c2d999f39d16812e90ec1467bd382cbbb74ba23ea86201"
+checksum = "33cc49dcdd31c8b6e79850a179af4c367669150c7ac0135f176c61bec81a70f7"
dependencies = [
"aws-smithy-async",
+ "aws-smithy-runtime-api",
"aws-smithy-types",
- "fastrand 2.0.0",
- "tokio",
- "tracing",
"zeroize",
]
[[package]]
-name = "aws-http"
-version = "0.56.1"
+name = "aws-runtime"
+version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3e626370f9ba806ae4c439e49675fd871f5767b093075cdf4fef16cac42ba900"
+checksum = "eb031bff99877c26c28895766f7bb8484a05e24547e370768d6cc9db514662aa"
dependencies = [
"aws-credential-types",
+ "aws-sigv4",
+ "aws-smithy-async",
"aws-smithy-http",
+ "aws-smithy-runtime-api",
"aws-smithy-types",
"aws-types",
"bytes",
- "http",
+ "fastrand 2.0.0",
+ "http 0.2.11",
"http-body",
- "lazy_static",
"percent-encoding",
"pin-project-lite",
"tracing",
+ "uuid",
]
[[package]]
-name = "aws-runtime"
-version = "0.56.1"
+name = "aws-sdk-sso"
+version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07ac5cf0ff19c1bca0cea7932e11b239d1025a45696a4f44f72ea86e2b8bdd07"
+checksum = "f486420a66caad72635bc2ce0ff6581646e0d32df02aa39dc983bfe794955a5b"
dependencies = [
"aws-credential-types",
- "aws-http",
- "aws-sigv4",
+ "aws-runtime",
"aws-smithy-async",
"aws-smithy-http",
+ "aws-smithy-json",
+ "aws-smithy-runtime",
"aws-smithy-runtime-api",
"aws-smithy-types",
"aws-types",
- "fastrand 2.0.0",
- "http",
- "percent-encoding",
+ "bytes",
+ "http 0.2.11",
+ "once_cell",
+ "regex-lite",
"tracing",
- "uuid",
]
[[package]]
-name = "aws-sdk-sso"
-version = "0.30.0"
+name = "aws-sdk-ssooidc"
+version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "903f888ff190e64f6f5c83fb0f8d54f9c20481f1dc26359bb8896f5d99908949"
+checksum = "39ddccf01d82fce9b4a15c8ae8608211ee7db8ed13a70b514bbfe41df3d24841"
dependencies = [
"aws-credential-types",
- "aws-http",
"aws-runtime",
"aws-smithy-async",
- "aws-smithy-client",
"aws-smithy-http",
"aws-smithy-json",
"aws-smithy-runtime",
@@ -908,23 +911,21 @@ dependencies = [
"aws-smithy-types",
"aws-types",
"bytes",
- "http",
- "regex",
- "tokio-stream",
+ "http 0.2.11",
+ "once_cell",
+ "regex-lite",
"tracing",
]
[[package]]
name = "aws-sdk-sts"
-version = "0.30.0"
+version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a47ad6bf01afc00423d781d464220bf69fb6a674ad6629cbbcb06d88cdc2be82"
+checksum = "1a591f8c7e6a621a501b2b5d2e88e1697fcb6274264523a6ad4d5959889a41ce"
dependencies = [
"aws-credential-types",
- "aws-http",
"aws-runtime",
"aws-smithy-async",
- "aws-smithy-client",
"aws-smithy-http",
"aws-smithy-json",
"aws-smithy-query",
@@ -933,25 +934,30 @@ dependencies = [
"aws-smithy-types",
"aws-smithy-xml",
"aws-types",
- "http",
- "regex",
+ "http 0.2.11",
+ "once_cell",
+ "regex-lite",
"tracing",
]
[[package]]
name = "aws-sigv4"
-version = "0.56.1"
+version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b7b28f4910bb956b7ab320b62e98096402354eca976c587d1eeccd523d9bac03"
+checksum = "c371c6b0ac54d4605eb6f016624fb5c7c2925d315fdf600ac1bf21b19d5f1742"
dependencies = [
+ "aws-credential-types",
"aws-smithy-http",
+ "aws-smithy-runtime-api",
+ "aws-smithy-types",
+ "bytes",
"form_urlencoded",
"hex",
"hmac",
- "http",
+ "http 0.2.11",
+ "http 1.0.0",
"once_cell",
"percent-encoding",
- "regex",
"sha2",
"time",
"tracing",
@@ -959,53 +965,28 @@ dependencies = [
[[package]]
name = "aws-smithy-async"
-version = "0.56.1"
+version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2cdb73f85528b9d19c23a496034ac53703955a59323d581c06aa27b4e4e247af"
+checksum = "72ee2d09cce0ef3ae526679b522835d63e75fb427aca5413cd371e490d52dcc6"
dependencies = [
"futures-util",
"pin-project-lite",
"tokio",
- "tokio-stream",
-]
-
-[[package]]
-name = "aws-smithy-client"
-version = "0.56.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c27b2756264c82f830a91cb4d2d485b2d19ad5bea476d9a966e03d27f27ba59a"
-dependencies = [
- "aws-smithy-async",
- "aws-smithy-http",
- "aws-smithy-http-tower",
- "aws-smithy-types",
- "bytes",
- "fastrand 2.0.0",
- "http",
- "http-body",
- "hyper",
- "hyper-rustls",
- "lazy_static",
- "pin-project-lite",
- "rustls",
- "tokio",
- "tower",
- "tracing",
]
[[package]]
name = "aws-smithy-http"
-version = "0.56.1"
+version = "0.60.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "54cdcf365d8eee60686885f750a34c190e513677db58bbc466c44c588abf4199"
+checksum = "dab56aea3cd9e1101a0a999447fb346afb680ab1406cebc44b32346e25b4117d"
dependencies = [
+ "aws-smithy-runtime-api",
"aws-smithy-types",
"bytes",
"bytes-utils",
"futures-core",
- "http",
+ "http 0.2.11",
"http-body",
- "hyper",
"once_cell",
"percent-encoding",
"pin-project-lite",
@@ -1013,36 +994,20 @@ dependencies = [
"tracing",
]
-[[package]]
-name = "aws-smithy-http-tower"
-version = "0.56.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "822de399d0ce62829a69dfa8c5cd08efdbe61a7426b953e2268f8b8b52a607bd"
-dependencies = [
- "aws-smithy-http",
- "aws-smithy-types",
- "bytes",
- "http",
- "http-body",
- "pin-project-lite",
- "tower",
- "tracing",
-]
-
[[package]]
name = "aws-smithy-json"
-version = "0.56.1"
+version = "0.60.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fb1e7ab8fa7ad10c193af7ae56d2420989e9f4758bf03601a342573333ea34f"
+checksum = "fd3898ca6518f9215f62678870064398f00031912390efd03f1f6ef56d83aa8e"
dependencies = [
"aws-smithy-types",
]
[[package]]
name = "aws-smithy-query"
-version = "0.56.1"
+version = "0.60.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "28556a3902091c1f768a34f6c998028921bdab8d47d92586f363f14a4a32d047"
+checksum = "bda4b1dfc9810e35fba8a620e900522cd1bd4f9578c446e82f49d1ce41d2e9f9"
dependencies = [
"aws-smithy-types",
"urlencoding",
@@ -1050,50 +1015,61 @@ dependencies = [
[[package]]
name = "aws-smithy-runtime"
-version = "0.56.1"
+version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "745e096b3553e7e0f40622aa04971ce52765af82bebdeeac53aa6fc82fe801e6"
+checksum = "fafdab38f40ad7816e7da5dec279400dd505160780083759f01441af1bbb10ea"
dependencies = [
"aws-smithy-async",
- "aws-smithy-client",
"aws-smithy-http",
"aws-smithy-runtime-api",
"aws-smithy-types",
"bytes",
"fastrand 2.0.0",
- "http",
+ "h2",
+ "http 0.2.11",
"http-body",
+ "hyper",
+ "hyper-rustls",
"once_cell",
"pin-project-lite",
"pin-utils",
+ "rustls",
"tokio",
"tracing",
]
[[package]]
name = "aws-smithy-runtime-api"
-version = "0.56.1"
+version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "93d0ae0c9cfd57944e9711ea610b48a963fb174a53aabacc08c5794a594b1d02"
+checksum = "c18276dd28852f34b3bf501f4f3719781f4999a51c7bff1a5c6dc8c4529adc29"
dependencies = [
"aws-smithy-async",
- "aws-smithy-http",
"aws-smithy-types",
"bytes",
- "http",
+ "http 0.2.11",
+ "pin-project-lite",
"tokio",
"tracing",
+ "zeroize",
]
[[package]]
name = "aws-smithy-types"
-version = "0.56.1"
+version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d90dbc8da2f6be461fa3c1906b20af8f79d14968fe47f2b7d29d086f62a51728"
+checksum = "bb3e134004170d3303718baa2a4eb4ca64ee0a1c0a7041dca31b38be0fb414f3"
dependencies = [
"base64-simd",
+ "bytes",
+ "bytes-utils",
+ "futures-core",
+ "http 0.2.11",
+ "http-body",
"itoa",
"num-integer",
+ "pin-project-lite",
+ "pin-utils",
"ryu",
"serde",
"time",
@@ -1101,25 +1077,24 @@ dependencies = [
[[package]]
name = "aws-smithy-xml"
-version = "0.56.1"
+version = "0.60.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e01d2dedcdd8023043716cfeeb3c6c59f2d447fce365d8e194838891794b23b6"
+checksum = "8604a11b25e9ecaf32f9aa56b9fe253c5e2f606a3477f0071e96d3155a5ed218"
dependencies = [
"xmlparser",
]
[[package]]
name = "aws-types"
-version = "0.56.1"
+version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85aa0451bf8af1bf22a4f028d5d28054507a14be43cb8ac0597a8471fba9edfe"
+checksum = "789bbe008e65636fe1b6dbbb374c40c8960d1232b96af5ff4aec349f9c4accf4"
dependencies = [
"aws-credential-types",
"aws-smithy-async",
- "aws-smithy-client",
- "aws-smithy-http",
+ "aws-smithy-runtime-api",
"aws-smithy-types",
- "http",
+ "http 0.2.11",
"rustc_version 0.4.0",
"tracing",
]
@@ -1132,12 +1107,12 @@ checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf"
dependencies = [
"async-trait",
"axum-core",
- "base64 0.21.5",
+ "base64 0.21.7",
"bitflags 1.3.2",
"bytes",
"futures-util",
"headers",
- "http",
+ "http 0.2.11",
"http-body",
"hyper",
"itoa",
@@ -1169,7 +1144,7 @@ dependencies = [
"async-trait",
"bytes",
"futures-util",
- "http",
+ "http 0.2.11",
"http-body",
"mime",
"rustversion",
@@ -1212,9 +1187,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
[[package]]
name = "base64"
-version = "0.21.5"
+version = "0.21.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9"
+checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
[[package]]
name = "base64-simd"
@@ -1351,10 +1326,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3907aac66c65520545ae3cb3c195306e20d5ed5c90bfbb992e061cf12a104d0"
dependencies = [
"lazy_static",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"str_inflector",
- "syn 2.0.32",
+ "syn 2.0.48",
"thiserror",
"try_match",
]
@@ -1399,7 +1374,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"apollo-router",
- "http",
+ "http 0.2.11",
"serde_json",
"tokio",
"tower",
@@ -1525,9 +1500,9 @@ dependencies = [
[[package]]
name = "clap"
-version = "4.4.11"
+version = "4.4.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2"
+checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c"
dependencies = [
"clap_builder",
"clap_derive",
@@ -1535,9 +1510,9 @@ dependencies = [
[[package]]
name = "clap_builder"
-version = "4.4.11"
+version = "4.4.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb"
+checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7"
dependencies = [
"anstream",
"anstyle",
@@ -1552,9 +1527,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442"
dependencies = [
"heck 0.4.1",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -1656,15 +1631,15 @@ dependencies = [
[[package]]
name = "console"
-version = "0.15.7"
+version = "0.15.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8"
+checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb"
dependencies = [
"encode_unicode",
"lazy_static",
"libc",
"unicode-width",
- "windows-sys 0.45.0",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -1739,7 +1714,7 @@ dependencies = [
"anyhow",
"apollo-router",
"async-trait",
- "http",
+ "http 0.2.11",
"tower",
"tracing",
]
@@ -1762,7 +1737,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"apollo-router",
- "http",
+ "http 0.2.11",
"serde_json",
"tokio",
"tower",
@@ -1832,7 +1807,7 @@ dependencies = [
"anes",
"cast",
"ciborium",
- "clap 4.4.11",
+ "clap 4.4.18",
"criterion-plot",
"futures",
"is-terminal",
@@ -1951,7 +1926,7 @@ version = "0.1.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096"
dependencies = [
- "quote 1.0.33",
+ "quote 1.0.35",
"syn 1.0.109",
]
@@ -1999,9 +1974,9 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -2070,8 +2045,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c65c2ffdafc1564565200967edc4851c7b55422d3913466688907efd05ea26f"
dependencies = [
"deno-proc-macro-rules-macros",
- "proc-macro2 1.0.66",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "syn 2.0.48",
]
[[package]]
@@ -2081,9 +2056,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3047b312b7451e3190865713a4dd6e1f821aed614ada219766ebc3024a690435"
dependencies = [
"once_cell",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -2168,13 +2143,13 @@ dependencies = [
"once_cell",
"pmutil",
"proc-macro-crate",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"regex",
"strum",
"strum_macros",
"syn 1.0.109",
- "syn 2.0.32",
+ "syn 2.0.48",
"thiserror",
]
@@ -2253,8 +2228,8 @@ version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"syn 1.0.109",
]
@@ -2264,9 +2239,9 @@ version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -2276,8 +2251,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321"
dependencies = [
"convert_case",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"rustc_version 0.4.0",
"syn 1.0.109",
]
@@ -2378,9 +2353,9 @@ version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -2518,16 +2493,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a"
dependencies = [
"heck 0.4.1",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
name = "env_logger"
-version = "0.10.1"
+version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95b3f3e67048839cb0d0781f445682a35113da7121f7c949db0e2be96a4fbece"
+checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580"
dependencies = [
"humantime",
"is-terminal",
@@ -2563,23 +2538,12 @@ dependencies = [
[[package]]
name = "errno"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b30f669a7961ef1631673d2766cc92f52d64f7ef354d4fe0ddfd30ed52f0f4f"
-dependencies = [
- "errno-dragonfly",
- "libc",
- "windows-sys 0.48.0",
-]
-
-[[package]]
-name = "errno-dragonfly"
-version = "0.1.2"
+version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
+checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245"
dependencies = [
- "cc",
"libc",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -2628,7 +2592,7 @@ dependencies = [
"apollo-router",
"async-trait",
"futures",
- "http",
+ "http 0.2.11",
"hyper",
"multimap 0.9.1",
"schemars",
@@ -2741,7 +2705,7 @@ dependencies = [
"anyhow",
"apollo-router",
"async-trait",
- "http",
+ "http 0.2.11",
"serde_json",
"tokio",
"tower",
@@ -2754,7 +2718,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"apollo-router",
- "http",
+ "http 0.2.11",
"serde_json",
"tokio",
"tower",
@@ -2776,9 +2740,9 @@ version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -2814,9 +2778,9 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa"
[[package]]
name = "fred"
-version = "7.1.0"
+version = "7.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9282e65613822eea90c99872c51afa1de61542215cb11f91456a93f50a5a131a"
+checksum = "b99c2b48934cd02a81032dd7428b7ae831a27794275bc94eba367418db8a9e55"
dependencies = [
"arc-swap",
"async-trait",
@@ -2832,7 +2796,7 @@ dependencies = [
"rustls",
"rustls-native-certs",
"rustls-webpki",
- "semver 1.0.18",
+ "semver 1.0.21",
"socket2 0.5.5",
"tokio",
"tokio-rustls",
@@ -2863,9 +2827,9 @@ dependencies = [
[[package]]
name = "futures"
-version = "0.3.29"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335"
+checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0"
dependencies = [
"futures-channel",
"futures-core",
@@ -2878,9 +2842,9 @@ dependencies = [
[[package]]
name = "futures-channel"
-version = "0.3.29"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb"
+checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78"
dependencies = [
"futures-core",
"futures-sink",
@@ -2888,15 +2852,15 @@ dependencies = [
[[package]]
name = "futures-core"
-version = "0.3.29"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c"
+checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"
[[package]]
name = "futures-executor"
-version = "0.3.29"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc"
+checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d"
dependencies = [
"futures-core",
"futures-task",
@@ -2906,9 +2870,9 @@ dependencies = [
[[package]]
name = "futures-io"
-version = "0.3.29"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa"
+checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1"
[[package]]
name = "futures-lite"
@@ -2942,32 +2906,32 @@ dependencies = [
[[package]]
name = "futures-macro"
-version = "0.3.29"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb"
+checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
name = "futures-sink"
-version = "0.3.29"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817"
+checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5"
[[package]]
name = "futures-task"
-version = "0.3.29"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2"
+checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
[[package]]
name = "futures-test"
-version = "0.3.29"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73ad78d6c79a3c76f8bc7496240d0586e069ed6797824fdd8c41d7c42b145b8d"
+checksum = "ce388237b32ac42eca0df1ba55ed3bbda4eaf005d7d4b5dbc0b20ab962928ac9"
dependencies = [
"futures-core",
"futures-executor",
@@ -2988,9 +2952,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c"
[[package]]
name = "futures-util"
-version = "0.3.29"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104"
+checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
dependencies = [
"futures-channel",
"futures-core",
@@ -3064,9 +3028,9 @@ version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba330b70a5341d3bc730b8e205aaee97ddab5d9c448c4f51a7c2d924266fa8f9"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -3155,8 +3119,8 @@ dependencies = [
"graphql-parser",
"heck 0.4.1",
"lazy_static",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"serde",
"serde_json",
"syn 1.0.109",
@@ -3169,7 +3133,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00bda454f3d313f909298f626115092d348bc231025699f557b27e248475f48c"
dependencies = [
"graphql_client_codegen",
- "proc-macro2 1.0.66",
+ "proc-macro2 1.0.76",
"syn 1.0.109",
]
@@ -3206,7 +3170,7 @@ dependencies = [
"futures-core",
"futures-sink",
"futures-util",
- "http",
+ "http 0.2.11",
"indexmap 2.1.0",
"slab",
"tokio",
@@ -3273,7 +3237,7 @@ dependencies = [
"bitflags 1.3.2",
"bytes",
"headers-core",
- "http",
+ "http 0.2.11",
"httpdate",
"mime",
"sha1",
@@ -3285,7 +3249,7 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429"
dependencies = [
- "http",
+ "http 0.2.11",
]
[[package]]
@@ -3362,11 +3326,11 @@ dependencies = [
[[package]]
name = "home"
-version = "0.5.5"
+version = "0.5.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb"
+checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5"
dependencies = [
- "windows-sys 0.48.0",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -3391,6 +3355,17 @@ dependencies = [
"itoa",
]
+[[package]]
+name = "http"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea"
+dependencies = [
+ "bytes",
+ "fnv",
+ "itoa",
+]
+
[[package]]
name = "http-body"
version = "0.4.6"
@@ -3398,7 +3373,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
dependencies = [
"bytes",
- "http",
+ "http 0.2.11",
"pin-project-lite",
]
@@ -3414,7 +3389,7 @@ version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f560b665ad9f1572cfcaf034f7fb84338a7ce945216d64a90fd81f046a3caee"
dependencies = [
- "http",
+ "http 0.2.11",
"serde",
]
@@ -3428,7 +3403,7 @@ dependencies = [
"async-channel 1.9.0",
"base64 0.13.1",
"futures-lite 1.13.0",
- "http",
+ "http 0.2.11",
"infer",
"pin-project-lite",
"rand 0.7.3",
@@ -3487,13 +3462,13 @@ dependencies = [
"futures-core",
"futures-util",
"h2",
- "http",
+ "http 0.2.11",
"http-body",
"httparse",
"httpdate",
"itoa",
"pin-project-lite",
- "socket2 0.4.9",
+ "socket2 0.5.5",
"tokio",
"tower-service",
"tracing",
@@ -3507,7 +3482,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590"
dependencies = [
"futures-util",
- "http",
+ "http 0.2.11",
"hyper",
"log",
"rustls",
@@ -3581,7 +3556,7 @@ version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7baab56125e25686df467fe470785512329883aab42696d661247aca2a2896e4"
dependencies = [
- "console 0.15.7",
+ "console 0.15.8",
"lazy_static",
"number_prefix",
"regex",
@@ -3629,7 +3604,7 @@ version = "1.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d64600be34b2fcfc267740a243fa7744441bb4947a619ac4e5bb6507f35fbfc"
dependencies = [
- "console 0.15.7",
+ "console 0.15.8",
"lazy_static",
"linked-hash-map",
"pest",
@@ -3700,7 +3675,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
dependencies = [
"hermit-abi 0.3.2",
- "rustix 0.38.21",
+ "rustix 0.38.30",
"windows-sys 0.48.0",
]
@@ -3796,7 +3771,7 @@ checksum = "2a071f4f7efc9a9118dfb627a0a94ef247986e1ab8606a4c806ae2b3aa3b6978"
dependencies = [
"ahash",
"anyhow",
- "base64 0.21.5",
+ "base64 0.21.7",
"bytecount",
"fancy-regex",
"fraction",
@@ -3822,7 +3797,7 @@ version = "8.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378"
dependencies = [
- "base64 0.21.5",
+ "base64 0.21.7",
"pem",
"ring 0.16.20",
"serde",
@@ -3836,7 +3811,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"apollo-router",
- "http",
+ "http 0.2.11",
"serde_json",
"tokio",
"tower",
@@ -3888,8 +3863,8 @@ version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8edfc11b8f56ce85e207e62ea21557cfa09bb24a8f6b04ae181b086ff8611c22"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"regex",
"syn 1.0.109",
]
@@ -3905,9 +3880,9 @@ dependencies = [
[[package]]
name = "libc"
-version = "0.2.151"
+version = "0.2.152"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4"
+checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7"
[[package]]
name = "libfuzzer-sys"
@@ -3987,22 +3962,22 @@ dependencies = [
[[package]]
name = "linkme"
-version = "0.3.19"
+version = "0.3.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73cd2fa5f00af00e5ed9ea726c496bf0e58cb7c54bf9f14b7e0f80b5d14a3578"
+checksum = "8b53ad6a33de58864705954edb5ad5d571a010f9e296865ed43dc72a5621b430"
dependencies = [
"linkme-impl",
]
[[package]]
name = "linkme-impl"
-version = "0.3.19"
+version = "0.3.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b43a34f4fbf8b3e0e163af8764916780c7c6fac8422183590f877a67036b85e"
+checksum = "04e542a18c94a9b6fcc7adb090fa3ba6b79ee220a16404f325672729f32a66ff"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -4013,9 +3988,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
[[package]]
name = "linux-raw-sys"
-version = "0.4.11"
+version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829"
+checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c"
[[package]]
name = "lock_api"
@@ -4089,15 +4064,15 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
[[package]]
name = "mediatype"
-version = "0.19.16"
+version = "0.19.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf0bc9784973713e4a90d515a4302991ca125a7c4516951cb607f2298cb757e5"
+checksum = "83a018c36a54f4e12c30464bbc59311f85d3f6f4d6c1b4fa4ea9db2b174ddefc"
[[package]]
name = "memchr"
-version = "2.6.4"
+version = "2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167"
+checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149"
[[package]]
name = "memoffset"
@@ -4202,8 +4177,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb"
dependencies = [
"cfg-if",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"syn 1.0.109",
]
@@ -4216,7 +4191,7 @@ dependencies = [
"bytes",
"encoding_rs",
"futures-util",
- "http",
+ "http 0.2.11",
"httparse",
"log",
"memchr",
@@ -4436,7 +4411,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"apollo-router",
- "http",
+ "http 0.2.11",
"serde_json",
"tokio",
"tower",
@@ -4503,7 +4478,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5f4ecf595095d3b641dd2761a0c3d1f175d3d6c28f38e65418d8004ea3255dd"
dependencies = [
"futures-core",
- "http",
+ "http 0.2.11",
"indexmap 1.9.3",
"itertools 0.10.5",
"once_cell",
@@ -4524,7 +4499,7 @@ checksum = "c7594ec0e11d8e33faf03530a4c49af7064ebba81c1480e01be67d90b356508b"
dependencies = [
"async-trait",
"bytes",
- "http",
+ "http 0.2.11",
"opentelemetry_api",
"reqwest",
]
@@ -4539,7 +4514,7 @@ dependencies = [
"futures-core",
"futures-util",
"headers",
- "http",
+ "http 0.2.11",
"opentelemetry",
"opentelemetry-http",
"opentelemetry-semantic-conventions",
@@ -4556,7 +4531,7 @@ checksum = "7e5e5a5c4135864099f3faafbe939eb4d7f9b80ebf68a8448da961b32a7c1275"
dependencies = [
"async-trait",
"futures-core",
- "http",
+ "http 0.2.11",
"opentelemetry-http",
"opentelemetry-proto",
"opentelemetry-semantic-conventions",
@@ -4625,7 +4600,7 @@ checksum = "eb966f01235207a6933c0aec98374fe9782df1c1d2b3d1db35c458451d138143"
dependencies = [
"async-trait",
"futures-core",
- "http",
+ "http 0.2.11",
"once_cell",
"opentelemetry",
"opentelemetry-http",
@@ -4868,9 +4843,9 @@ checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929"
dependencies = [
"pest",
"pest_meta",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -4911,9 +4886,9 @@ version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -5017,9 +4992,9 @@ version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "52a40bc70c2c58040d2d8b167ba9a5ff59fc9dab7ad44771cfde3dcfde7a09c6"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -5047,7 +5022,7 @@ dependencies = [
"cfg-if",
"concurrent-queue",
"pin-project-lite",
- "rustix 0.38.21",
+ "rustix 0.38.30",
"tracing",
"windows-sys 0.52.0",
]
@@ -5112,7 +5087,7 @@ version = "0.1.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86"
dependencies = [
- "proc-macro2 1.0.66",
+ "proc-macro2 1.0.76",
"syn 1.0.109",
]
@@ -5142,8 +5117,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
dependencies = [
"proc-macro-error-attr",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"syn 1.0.109",
"version_check",
]
@@ -5154,8 +5129,8 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"version_check",
]
@@ -5176,9 +5151,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
-version = "1.0.66"
+version = "1.0.76"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
+checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c"
dependencies = [
"unicode-ident",
]
@@ -5205,7 +5180,7 @@ dependencies = [
"anyhow",
"apollo-router",
"async-trait",
- "http",
+ "http 0.2.11",
"schemars",
"serde",
"serde_json",
@@ -5263,8 +5238,8 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4"
dependencies = [
"anyhow",
"itertools 0.10.5",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"syn 1.0.109",
]
@@ -5276,9 +5251,9 @@ checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e"
dependencies = [
"anyhow",
"itertools 0.11.0",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -5336,11 +5311,11 @@ dependencies = [
[[package]]
name = "quote"
-version = "1.0.33"
+version = "1.0.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
+checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
dependencies = [
- "proc-macro2 1.0.66",
+ "proc-macro2 1.0.76",
]
[[package]]
@@ -5490,13 +5465,13 @@ dependencies = [
[[package]]
name = "regex"
-version = "1.10.2"
+version = "1.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343"
+checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15"
dependencies = [
"aho-corasick",
"memchr",
- "regex-automata 0.4.3",
+ "regex-automata 0.4.5",
"regex-syntax 0.8.2",
]
@@ -5511,15 +5486,21 @@ dependencies = [
[[package]]
name = "regex-automata"
-version = "0.4.3"
+version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f"
+checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax 0.8.2",
]
+[[package]]
+name = "regex-lite"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "30b661b2f27137bdbc16f00eda72866a92bb28af1753ffbd56744fb6e2e9cd8e"
+
[[package]]
name = "regex-syntax"
version = "0.6.29"
@@ -5538,13 +5519,13 @@ version = "0.11.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41"
dependencies = [
- "base64 0.21.5",
+ "base64 0.21.7",
"bytes",
"encoding_rs",
"futures-core",
"futures-util",
"h2",
- "http",
+ "http 0.2.11",
"http-body",
"hyper",
"hyper-rustls",
@@ -5635,7 +5616,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"apollo-router",
- "http",
+ "http 0.2.11",
"serde_json",
"tokio",
"tower",
@@ -5647,7 +5628,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"apollo-router",
- "http",
+ "http 0.2.11",
"serde_json",
"tokio",
"tower",
@@ -5659,7 +5640,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"apollo-router",
- "http",
+ "http 0.2.11",
"serde_json",
"tokio",
"tower",
@@ -5671,7 +5652,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"apollo-router",
- "http",
+ "http 0.2.11",
"serde_json",
"tokio",
"tower",
@@ -5683,7 +5664,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"apollo-router",
- "http",
+ "http 0.2.11",
"serde_json",
"tokio",
"tower",
@@ -5695,8 +5676,8 @@ version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db74e3fdd29d969a0ec1f8e79171a6f0f71d0429293656901db382d248c4c021"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"syn 1.0.109",
]
@@ -5742,9 +5723,9 @@ dependencies = [
[[package]]
name = "router-bridge"
-version = "0.5.14+v2.6.3"
+version = "0.5.16+v2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e9a7b9f24a33123d8cab236ac916c07aeefe9f1ce422b0e7f49fc0019d3ac71"
+checksum = "224f69e11bdc5c16b582f82cd18647e305d31c9c20110635fcdf790c46405777"
dependencies = [
"anyhow",
"async-channel 1.9.0",
@@ -5827,9 +5808,9 @@ dependencies = [
[[package]]
name = "rust-embed"
-version = "8.1.0"
+version = "8.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "810294a8a4a0853d4118e3b94bb079905f2107c7fe979d8f0faae98765eb6378"
+checksum = "a82c0bbc10308ed323529fd3c1dce8badda635aa319a5ff0e6466f33b8101e3f"
dependencies = [
"rust-embed-impl",
"rust-embed-utils",
@@ -5838,22 +5819,22 @@ dependencies = [
[[package]]
name = "rust-embed-impl"
-version = "8.1.0"
+version = "8.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfc144a1273124a67b8c1d7cd19f5695d1878b31569c0512f6086f0f4676604e"
+checksum = "6227c01b1783cdfee1bcf844eb44594cd16ec71c35305bf1c9fb5aade2735e16"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"rust-embed-utils",
- "syn 2.0.32",
+ "syn 2.0.48",
"walkdir",
]
[[package]]
name = "rust-embed-utils"
-version = "8.1.0"
+version = "8.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "816ccd4875431253d6bb54b804bcff4369cbde9bae33defde25fdf6c2ef91d40"
+checksum = "8cb0a25bfbb2d4b4402179c2cf030387d9990857ce08a32592c6238db9fa8665"
dependencies = [
"sha2",
"walkdir",
@@ -5886,7 +5867,7 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
dependencies = [
- "semver 1.0.18",
+ "semver 1.0.21",
]
[[package]]
@@ -5905,15 +5886,15 @@ dependencies = [
[[package]]
name = "rustix"
-version = "0.38.21"
+version = "0.38.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3"
+checksum = "322394588aaf33c24007e8bb3238ee3e4c5c09c084ab32bc73890b99ff326bca"
dependencies = [
"bitflags 2.4.0",
"errno",
"libc",
- "linux-raw-sys 0.4.11",
- "windows-sys 0.48.0",
+ "linux-raw-sys 0.4.13",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -5946,7 +5927,7 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c"
dependencies = [
- "base64 0.21.5",
+ "base64 0.21.7",
]
[[package]]
@@ -5995,8 +5976,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd3904a4ba0a9d0211816177fd34b04c7095443f8cdacd11175064fe541c8fe2"
dependencies = [
"heck 0.3.3",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"syn 1.0.109",
]
@@ -6037,8 +6018,8 @@ version = "0.8.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c767fd6fa65d9ccf9cf026122c1b555f2ef9a4f0cea69da4d7dbc3e258d30967"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"serde_derive_internals",
"syn 1.0.109",
]
@@ -6121,9 +6102,9 @@ dependencies = [
[[package]]
name = "semver"
-version = "1.0.18"
+version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918"
+checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0"
[[package]]
name = "semver-parser"
@@ -6133,9 +6114,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
[[package]]
name = "serde"
-version = "1.0.193"
+version = "1.0.195"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89"
+checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02"
dependencies = [
"serde_derive",
]
@@ -6151,13 +6132,13 @@ dependencies = [
[[package]]
name = "serde_derive"
-version = "1.0.193"
+version = "1.0.195"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3"
+checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -6178,16 +6159,16 @@ version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"syn 1.0.109",
]
[[package]]
name = "serde_json"
-version = "1.0.108"
+version = "1.0.111"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b"
+checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4"
dependencies = [
"indexmap 2.1.0",
"itoa",
@@ -6297,9 +6278,9 @@ version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -6379,9 +6360,9 @@ dependencies = [
[[package]]
name = "similar"
-version = "2.3.0"
+version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2aeaf503862c419d66959f5d7ca015337d864e9c49485d771b732e2a20453597"
+checksum = "32fea41aca09ee824cc9724996433064c89f7777e60762749a4170a14abbfa21"
[[package]]
name = "simple_asn1"
@@ -6558,8 +6539,8 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0"
dependencies = [
"heck 0.3.3",
"proc-macro-error",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"syn 1.0.109",
]
@@ -6579,10 +6560,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0"
dependencies = [
"heck 0.4.1",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"rustversion",
- "syn 2.0.32",
+ "syn 2.0.48",
]
[[package]]
@@ -6620,19 +6601,19 @@ version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"unicode-ident",
]
[[package]]
name = "syn"
-version = "2.0.32"
+version = "2.0.48"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2"
+checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"unicode-ident",
]
@@ -6675,15 +6656,15 @@ dependencies = [
[[package]]
name = "tempfile"
-version = "3.8.1"
+version = "3.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5"
+checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa"
dependencies = [
"cfg-if",
"fastrand 2.0.0",
"redox_syscall 0.4.1",
- "rustix 0.38.21",
- "windows-sys 0.48.0",
+ "rustix 0.38.30",
+ "windows-sys 0.52.0",
]
[[package]]
@@ -6746,9 +6727,9 @@ version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ba277e77219e9eea169e8508942db1bf5d8a41ff2db9b20aab5a5aadc9fa25d"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -6778,8 +6759,8 @@ version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f972445f2c781bb6d47ee4a715db3a0e404a79d977f751fd4eb2b0d44c6eb9d"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"syn 1.0.109",
]
@@ -6800,22 +6781,22 @@ dependencies = [
[[package]]
name = "thiserror"
-version = "1.0.51"
+version = "1.0.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7"
+checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
-version = "1.0.51"
+version = "1.0.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df"
+checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -6862,7 +6843,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"apollo-router",
- "http",
+ "http 0.2.11",
"hyper",
"serde_json",
"tokio",
@@ -6954,9 +6935,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
-version = "1.35.0"
+version = "1.35.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "841d45b238a16291a4e1584e61820b8ae57d696cc5015c459c229ccc6990cc1c"
+checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104"
dependencies = [
"backtrace",
"bytes",
@@ -6988,9 +6969,9 @@ version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -7121,13 +7102,13 @@ dependencies = [
"async-stream",
"async-trait",
"axum",
- "base64 0.21.5",
+ "base64 0.21.7",
"bytes",
"flate2",
"futures-core",
"futures-util",
"h2",
- "http",
+ "http 0.2.11",
"http-body",
"hyper",
"hyper-timeout",
@@ -7154,10 +7135,10 @@ dependencies = [
"async-stream",
"async-trait",
"axum",
- "base64 0.21.5",
+ "base64 0.21.7",
"bytes",
"h2",
- "http",
+ "http 0.2.11",
"http-body",
"hyper",
"hyper-timeout",
@@ -7179,9 +7160,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07"
dependencies = [
"prettyplease",
- "proc-macro2 1.0.66",
+ "proc-macro2 1.0.76",
"prost-build",
- "quote 1.0.33",
+ "quote 1.0.35",
"syn 1.0.109",
]
@@ -7217,7 +7198,7 @@ dependencies = [
"bytes",
"futures-core",
"futures-util",
- "http",
+ "http 0.2.11",
"http-body",
"http-range-header",
"pin-project-lite",
@@ -7273,9 +7254,9 @@ version = "0.1.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -7388,7 +7369,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "258bc1c4f8e2e73a977812ab339d503e6feeb92700f6d07a6de4d321522d5c08"
dependencies = [
"lazy_static",
- "quote 1.0.33",
+ "quote 1.0.35",
"syn 1.0.109",
]
@@ -7469,9 +7450,9 @@ version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0a91713132798caecb23c977488945566875e7b61b902fb111979871cbff34e"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -7483,7 +7464,7 @@ dependencies = [
"byteorder",
"bytes",
"data-encoding",
- "http",
+ "http 0.2.11",
"httparse",
"log",
"rand 0.8.5",
@@ -7500,8 +7481,8 @@ version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6179333b981641242a768f30f371c9baccbfcc03749627000c500ab88bf4528b"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"syn 1.0.109",
]
@@ -7530,8 +7511,8 @@ version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e60147782cc30833c05fba3bab1d9b5771b2685a2557672ac96fa5d154099c0e"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
"syn 1.0.109",
]
@@ -7721,9 +7702,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "uuid"
-version = "1.6.1"
+version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560"
+checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a"
dependencies = [
"getrandom 0.2.10",
"serde",
@@ -7840,9 +7821,9 @@ dependencies = [
"bumpalo",
"log",
"once_cell",
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
"wasm-bindgen-shared",
]
@@ -7864,7 +7845,7 @@ version = "0.2.87"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d"
dependencies = [
- "quote 1.0.33",
+ "quote 1.0.35",
"wasm-bindgen-macro-support",
]
@@ -7874,9 +7855,9 @@ version = "0.2.87"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
@@ -7925,7 +7906,7 @@ dependencies = [
"either",
"home",
"once_cell",
- "rustix 0.38.21",
+ "rustix 0.38.30",
]
[[package]]
@@ -8190,7 +8171,7 @@ checksum = "13a3a53eaf34f390dd30d7b1b078287dd05df2aa2e21a589ccb80f5c7253c2e9"
dependencies = [
"assert-json-diff",
"async-trait",
- "base64 0.21.5",
+ "base64 0.21.7",
"deadpool",
"futures",
"futures-timer",
@@ -8258,9 +8239,9 @@ version = "0.7.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
@@ -8278,9 +8259,9 @@ version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
dependencies = [
- "proc-macro2 1.0.66",
- "quote 1.0.33",
- "syn 2.0.32",
+ "proc-macro2 1.0.76",
+ "quote 1.0.35",
+ "syn 2.0.48",
]
[[package]]
diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml
index affbcf5781..e3a20d4e62 100644
--- a/apollo-router-benchmarks/Cargo.toml
+++ b/apollo-router-benchmarks/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "apollo-router-benchmarks"
-version = "1.38.0"
+version = "1.39.0"
authors = ["Apollo Graph, Inc. "]
edition = "2021"
license = "Elastic-2.0"
diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml
index 480ed45d65..5622ca124d 100644
--- a/apollo-router-scaffold/Cargo.toml
+++ b/apollo-router-scaffold/Cargo.toml
@@ -1,18 +1,18 @@
[package]
name = "apollo-router-scaffold"
-version = "1.38.0"
+version = "1.39.0"
authors = ["Apollo Graph, Inc. "]
edition = "2021"
license = "Elastic-2.0"
publish = false
[dependencies]
-anyhow = "1.0.75"
-clap = { version = "4.4.11", features = ["derive"] }
+anyhow = "1.0.79"
+clap = { version = "4.4.18", features = ["derive"] }
cargo-scaffold = { version = "0.8.14", default-features = false }
regex = "1"
str_inflector = "0.12.0"
toml = "0.5.11"
[dev-dependencies]
-tempfile = "3.8.1"
+tempfile = "3.9.0"
copy_dir = "0.1.3"
diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml
index d14ea1662c..8f237b8cc0 100644
--- a/apollo-router-scaffold/templates/base/Cargo.toml
+++ b/apollo-router-scaffold/templates/base/Cargo.toml
@@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" }
apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" }
{{else}}
# Note if you update these dependencies then also update xtask/Cargo.toml
-apollo-router = "1.38.0"
+apollo-router = "1.39.0"
{{/if}}
{{/if}}
async-trait = "0.1.52"
diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml
index 619577525a..a0adb00fdb 100644
--- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml
+++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml
@@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" }
{{#if branch}}
apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" }
{{else}}
-apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.38.0" }
+apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.39.0" }
{{/if}}
{{/if}}
anyhow = "1.0.58"
diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml
index b52b0e1f8c..befb57e8ee 100644
--- a/apollo-router/Cargo.toml
+++ b/apollo-router/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "apollo-router"
-version = "1.38.0"
+version = "1.39.0"
authors = ["Apollo Graph, Inc. "]
repository = "https://github.com/apollographql/router/"
documentation = "https://docs.rs/apollo-router"
@@ -63,23 +63,23 @@ features = ["docs_rs"]
[dependencies]
askama = "0.12.1"
access-json = "0.1.0"
-anyhow = "1.0.75"
+anyhow = "1.0.79"
apollo-compiler = "=1.0.0-beta.10"
apollo-federation = "0.0.5"
arc-swap = "1.6.0"
-async-compression = { version = "0.4.5", features = [
+async-compression = { version = "0.4.6", features = [
"tokio",
"brotli",
"gzip",
"deflate",
] }
-async-trait = "0.1.74"
+async-trait = "0.1.77"
axum = { version = "0.6.20", features = ["headers", "json", "original-uri"] }
-base64 = "0.21.5"
+base64 = "0.21.7"
bloomfilter = "1.0.13"
buildstructor = "0.5.4"
bytes = "1.5.0"
-clap = { version = "4.4.11", default-features = false, features = [
+clap = { version = "4.4.18", default-features = false, features = [
"env",
"derive",
"std",
@@ -98,8 +98,8 @@ diff = "0.1.13"
directories = "5.0.1"
displaydoc = "0.2"
flate2 = "1.0.28"
-fred = { version = "7.1.0", features = ["enable-rustls"] }
-futures = { version = "0.3.29", features = ["thread-pool"] }
+fred = { version = "7.1.2", features = ["enable-rustls"] }
+futures = { version = "0.3.30", features = ["thread-pool"] }
graphql_client = "0.13.0"
hex = { version = "0.4.3", features = ["serde"] }
http = "0.2.11"
@@ -116,11 +116,11 @@ jsonpath-rust = "0.3.5"
jsonschema = { version = "0.17.1", default-features = false }
jsonwebtoken = "8.3.0"
lazy_static = "1.4.0"
-libc = "0.2.151"
-linkme = "0.3.19"
+libc = "0.2.152"
+linkme = "0.3.22"
lru = "0.12.1"
maplit = "1.0.2"
-mediatype = "0.19.16"
+mediatype = "0.19.17"
mockall = "0.11.4"
mime = "0.3.17"
multer = "2.1.0"
@@ -176,7 +176,7 @@ prost-types = "0.12.3"
proteus = "0.5.0"
rand = "0.8.5"
rhai = { version = "1.16.3", features = ["sync", "serde", "internals"] }
-regex = "1.10.2"
+regex = "1.10.3"
reqwest = { version = "0.11.23", default-features = false, features = [
"rustls-tls",
"rustls-native-certs",
@@ -184,17 +184,18 @@ reqwest = { version = "0.11.23", default-features = false, features = [
"stream",
] }
# note: this dependency should _always_ be pinned, prefix the version with an `=`
-router-bridge = "=0.5.14+v2.6.3"
-rust-embed = "8.1.0"
+router-bridge = "=0.5.16+v2.7.1"
+rust-embed = "8.2.0"
rustls = "0.21.10"
rustls-pemfile = "1.0.4"
schemars = { version = "0.8.16", features = ["url"] }
shellexpand = "3.1.0"
sha2 = "0.10.8"
-serde = { version = "1.0.193", features = ["derive", "rc"] }
+semver = "1.0.21"
+serde = { version = "1.0.195", features = ["derive", "rc"] }
serde_derive_default = "0.1"
serde_json_bytes = { version = "0.2.2", features = ["preserve_order"] }
-serde_json = { version = "1.0.108", features = [
+serde_json = { version = "1.0.111", features = [
"preserve_order",
"float_roundtrip",
] }
@@ -203,8 +204,8 @@ serde_yaml = "0.8.26"
static_assertions = "1.1.0"
strum_macros = "0.25.3"
sys-info = "0.9.1"
-thiserror = "1.0.51"
-tokio = { version = "1.35.0", features = ["full"] }
+thiserror = "1.0.56"
+tokio = { version = "1.35.1", features = ["full"] }
tokio-stream = { version = "0.1.14", features = ["sync", "net"] }
tokio-util = { version = "0.7.10", features = ["net", "codec", "time"] }
tonic = { version = "0.9.2", features = [
@@ -235,7 +236,7 @@ tracing-subscriber = { version = "0.3.18", features = ["env-filter", "json"] }
trust-dns-resolver = "0.23.2"
url = { version = "2.5.0", features = ["serde"] }
urlencoding = "2.1.3"
-uuid = { version = "1.6.1", features = ["serde", "v4"] }
+uuid = { version = "1.7.0", features = ["serde", "v4"] }
yaml-rust = "0.4.5"
wiremock = "0.5.22"
wsl = "0.1.0"
@@ -246,20 +247,21 @@ tokio-rustls = "0.24.1"
http-serde = "1.1.3"
hmac = "0.12.1"
parking_lot = "0.12.1"
-memchr = "2.6.4"
+memchr = "2.7.1"
brotli = "3.4.0"
zstd = "0.13.0"
zstd-safe = "7.0.0"
# note: AWS dependencies should always use the same version
-aws-sigv4 = "0.56.1"
-aws-credential-types = "0.56.1"
-aws-config = "0.56.1"
-aws-types = "0.56.1"
+aws-sigv4 = "1.1.4"
+aws-credential-types = "1.1.4"
+aws-config = "1.1.4"
+aws-types = "1.1.4"
+aws-smithy-runtime-api = { version = "1.1.4", features = ["client"] }
sha1 = "0.10.6"
tracing-serde = "0.1.3"
time = { version = "0.3.31", features = ["serde"] }
-similar = { version = "2.3.0", features = ["inline"] }
-console = "0.15.7"
+similar = { version = "2.4.0", features = ["inline"] }
+console = "0.15.8"
[target.'cfg(macos)'.dependencies]
uname = "0.1.1"
@@ -278,10 +280,11 @@ axum = { version = "0.6.20", features = [
"ws",
] }
ecdsa = { version = "0.16.9", features = ["signing", "pem", "pkcs8"] }
-futures-test = "0.3.29"
+fred = { version = "7.1.0", features = ["enable-rustls", "mocks"] }
+futures-test = "0.3.30"
insta = { version = "1.34.0", features = ["json", "redactions", "yaml"] }
maplit = "1.0.2"
-memchr = { version = "2.6.4", default-features = false }
+memchr = { version = "2.7.1", default-features = false }
mockall = "0.11.4"
num-traits = "0.2.17"
once_cell = "1.19.0"
@@ -300,7 +303,7 @@ rhai = { version = "1.16.3", features = [
"testing-environ",
] }
serial_test = { version = "2.0.0" }
-tempfile = "3.8.1"
+tempfile = "3.9.0"
test-log = { version = "0.2.14", default-features = false, features = [
"trace",
] }
diff --git a/apollo-router/src/axum_factory/axum_http_server_factory.rs b/apollo-router/src/axum_factory/axum_http_server_factory.rs
index 0c0dadf33f..1ba7d70669 100644
--- a/apollo-router/src/axum_factory/axum_http_server_factory.rs
+++ b/apollo-router/src/axum_factory/axum_http_server_factory.rs
@@ -42,6 +42,7 @@ use super::listeners::ListenersAndRouters;
use super::utils::decompress_request_body;
use super::utils::PropagatingMakeSpan;
use super::ListenAddrAndRouter;
+use super::ENDPOINT_CALLBACK;
use crate::axum_factory::compression::Compressor;
use crate::axum_factory::listeners::get_extra_listeners;
use crate::axum_factory::listeners::serve_router_on_listen_addr;
@@ -418,7 +419,7 @@ where
})?;
let span_mode = span_mode(configuration);
- let main_route = main_router::(configuration)
+ let mut main_route = main_router::(configuration)
.layer(middleware::from_fn(decompress_request_body))
.layer(middleware::from_fn_with_state(
(license, Instant::now(), Arc::new(AtomicU64::new(0))),
@@ -433,9 +434,20 @@ where
)
.layer(middleware::from_fn(metrics_handler));
+ if let Some(main_endpoint_layer) = ENDPOINT_CALLBACK.get() {
+ main_route = main_endpoint_layer(main_route);
+ }
+
let route = endpoints_on_main_listener
.into_iter()
- .fold(main_route, |acc, r| acc.merge(r.into_router()));
+ .fold(main_route, |acc, r| {
+ let mut router = r.into_router();
+ if let Some(main_endpoint_layer) = ENDPOINT_CALLBACK.get() {
+ router = main_endpoint_layer(router);
+ }
+
+ acc.merge(router)
+ });
let listener = configuration.supergraph.listen.clone();
Ok(ListenAddrAndRouter(listener, route))
diff --git a/apollo-router/src/axum_factory/listeners.rs b/apollo-router/src/axum_factory/listeners.rs
index 4b4ec9ff31..cfa1cf2362 100644
--- a/apollo-router/src/axum_factory/listeners.rs
+++ b/apollo-router/src/axum_factory/listeners.rs
@@ -23,6 +23,7 @@ use tower_service::Service;
use crate::axum_factory::utils::ConnectionInfo;
use crate::axum_factory::utils::InjectConnectionInfo;
+use crate::axum_factory::ENDPOINT_CALLBACK;
use crate::configuration::Configuration;
use crate::http_server_factory::Listener;
use crate::http_server_factory::NetworkStream;
@@ -90,7 +91,15 @@ pub(super) fn extra_endpoints(
mm.extend(endpoints.into_iter().map(|(listen_addr, e)| {
(
listen_addr,
- e.into_iter().map(|e| e.into_router()).collect::>(),
+ e.into_iter()
+ .map(|e| {
+ let mut router = e.into_router();
+ if let Some(main_endpoint_layer) = ENDPOINT_CALLBACK.get() {
+ router = main_endpoint_layer(router);
+ }
+ router
+ })
+ .collect::>(),
)
}));
mm
diff --git a/apollo-router/src/axum_factory/mod.rs b/apollo-router/src/axum_factory/mod.rs
index 23a32629e8..be264da75c 100644
--- a/apollo-router/src/axum_factory/mod.rs
+++ b/apollo-router/src/axum_factory/mod.rs
@@ -6,6 +6,23 @@ mod listeners;
pub(crate) mod tests;
pub(crate) mod utils;
+use std::sync::Arc;
+use std::sync::OnceLock;
+
+use axum::Router;
pub(crate) use axum_http_server_factory::span_mode;
pub(crate) use axum_http_server_factory::AxumHttpServerFactory;
pub(crate) use listeners::ListenAddrAndRouter;
+
+static ENDPOINT_CALLBACK: OnceLock Router + Send + Sync>> = OnceLock::new();
+
+/// Set a callback that may wrap or mutate `axum::Router` instances as they are added to the main router.
+/// Although part of the public API, this is not intended for use by end users, and may change at any time.
+#[doc(hidden)]
+pub fn unsupported_set_axum_router_callback(
+ callback: impl Fn(Router) -> Router + Send + Sync + 'static,
+) -> axum::response::Result<(), &'static str> {
+ ENDPOINT_CALLBACK
+ .set(Arc::new(callback))
+ .map_err(|_| "endpoint decorator was already set")
+}
diff --git a/apollo-router/src/axum_factory/tests.rs b/apollo-router/src/axum_factory/tests.rs
index dcf5f49968..9e0b5cf4c9 100644
--- a/apollo-router/src/axum_factory/tests.rs
+++ b/apollo-router/src/axum_factory/tests.rs
@@ -386,7 +386,7 @@ async fn it_displays_sandbox() {
"{}",
response.text().await.unwrap()
);
- assert_eq!(response.text().await.unwrap(), sandbox_page_content());
+ assert_eq!(response.bytes().await.unwrap(), sandbox_page_content());
}
#[tokio::test]
@@ -432,7 +432,7 @@ async fn it_displays_sandbox_with_different_supergraph_path() {
"{}",
response.text().await.unwrap()
);
- assert_eq!(response.text().await.unwrap(), sandbox_page_content());
+ assert_eq!(response.bytes().await.unwrap(), sandbox_page_content());
}
#[tokio::test]
@@ -1201,8 +1201,8 @@ async fn it_displays_homepage() {
assert_eq!(response.status(), StatusCode::OK);
assert_eq!(
- response.text().await.unwrap(),
- home_page_content(Homepage::fake_builder().enabled(false).build())
+ response.bytes().await.unwrap(),
+ home_page_content(&Homepage::fake_builder().enabled(false).build())
);
server.shutdown().await.unwrap();
}
@@ -2308,7 +2308,7 @@ async fn test_supergraph_timeout() {
// we do the entire supergraph rebuilding instead of using `from_supergraph_mock_callback_and_configuration`
// because we need the plugins to apply on the supergraph
- let plugins = create_plugins(&conf, &schema, None).await.unwrap();
+ let plugins = create_plugins(&conf, &schema, None, None).await.unwrap();
let mut builder = PluggableSupergraphServiceBuilder::new(planner)
.with_configuration(conf.clone())
diff --git a/apollo-router/src/cache/redis.rs b/apollo-router/src/cache/redis.rs
index 809e17d94a..31429810c0 100644
--- a/apollo-router/src/cache/redis.rs
+++ b/apollo-router/src/cache/redis.rs
@@ -3,6 +3,8 @@ use std::sync::Arc;
use std::time::Duration;
use fred::interfaces::EventInterface;
+#[cfg(test)]
+use fred::mocks::Mocks;
use fred::prelude::ClientLike;
use fred::prelude::KeysInterface;
use fred::prelude::RedisClient;
@@ -198,6 +200,54 @@ impl RedisCacheStorage {
})
}
+ #[cfg(test)]
+ pub(crate) async fn from_mocks(mocks: Arc) -> Result {
+ let client_config = RedisConfig {
+ mocks: Some(mocks),
+ ..Default::default()
+ };
+
+ let client = RedisClient::new(
+ client_config,
+ Some(PerformanceConfig {
+ default_command_timeout: Duration::from_millis(2),
+ ..Default::default()
+ }),
+ None,
+ Some(ReconnectPolicy::new_exponential(0, 1, 2000, 5)),
+ );
+ let _handle = client.connect();
+
+ // spawn tasks that listen for connection close or reconnect events
+ let mut error_rx = client.error_rx();
+ let mut reconnect_rx = client.reconnect_rx();
+
+ tokio::spawn(async move {
+ while let Ok(error) = error_rx.recv().await {
+ tracing::error!("Client disconnected with error: {:?}", error);
+ }
+ });
+ tokio::spawn(async move {
+ while reconnect_rx.recv().await.is_ok() {
+ tracing::info!("Redis client reconnected.");
+ }
+ });
+
+ // a TLS connection to a TCP Redis could hang, so we add a timeout
+ tokio::time::timeout(Duration::from_secs(5), client.wait_for_connect())
+ .await
+ .map_err(|_| {
+ RedisError::new(RedisErrorKind::Timeout, "timeout connecting to Redis")
+ })??;
+
+ tracing::trace!("redis connection established");
+ Ok(Self {
+ inner: Arc::new(client),
+ ttl: None,
+ namespace: None,
+ })
+ }
+
pub(crate) fn ttl(&self) -> Option {
self.ttl
}
diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap
index 170b2752cf..e3d79b384f 100644
--- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap
+++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap
@@ -1,5 +1,6 @@
---
source: apollo-router/src/configuration/tests.rs
+assertion_line: 31
expression: "&schema"
---
{
@@ -663,6 +664,113 @@ expression: "&schema"
"url"
],
"properties": {
+ "execution": {
+ "description": "The execution stage request/response configuration",
+ "default": {
+ "request": {
+ "headers": false,
+ "context": false,
+ "body": false,
+ "sdl": false,
+ "method": false,
+ "query_plan": false
+ },
+ "response": {
+ "headers": false,
+ "context": false,
+ "body": false,
+ "sdl": false,
+ "status_code": false
+ }
+ },
+ "type": "object",
+ "properties": {
+ "request": {
+ "description": "The request configuration",
+ "default": {
+ "headers": false,
+ "context": false,
+ "body": false,
+ "sdl": false,
+ "method": false,
+ "query_plan": false
+ },
+ "type": "object",
+ "properties": {
+ "body": {
+ "description": "Send the body",
+ "default": false,
+ "type": "boolean"
+ },
+ "context": {
+ "description": "Send the context",
+ "default": false,
+ "type": "boolean"
+ },
+ "headers": {
+ "description": "Send the headers",
+ "default": false,
+ "type": "boolean"
+ },
+ "method": {
+ "description": "Send the method",
+ "default": false,
+ "type": "boolean"
+ },
+ "query_plan": {
+ "description": "Send the query plan",
+ "default": false,
+ "type": "boolean"
+ },
+ "sdl": {
+ "description": "Send the SDL",
+ "default": false,
+ "type": "boolean"
+ }
+ },
+ "additionalProperties": false
+ },
+ "response": {
+ "description": "What information is passed to a router request/response stage",
+ "default": {
+ "headers": false,
+ "context": false,
+ "body": false,
+ "sdl": false,
+ "status_code": false
+ },
+ "type": "object",
+ "properties": {
+ "body": {
+ "description": "Send the body",
+ "default": false,
+ "type": "boolean"
+ },
+ "context": {
+ "description": "Send the context",
+ "default": false,
+ "type": "boolean"
+ },
+ "headers": {
+ "description": "Send the headers",
+ "default": false,
+ "type": "boolean"
+ },
+ "sdl": {
+ "description": "Send the SDL",
+ "default": false,
+ "type": "boolean"
+ },
+ "status_code": {
+ "description": "Send the HTTP status",
+ "default": false,
+ "type": "boolean"
+ }
+ },
+ "additionalProperties": false
+ }
+ }
+ },
"router": {
"description": "The router stage request/response configuration",
"default": {
@@ -1215,6 +1323,28 @@ expression: "&schema"
"type": "boolean",
"nullable": true
},
+ "metrics": {
+ "description": "Entity caching evaluation metrics",
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "description": "enables metrics evaluating the benefits of entity caching",
+ "default": false,
+ "type": "boolean"
+ },
+ "separate_per_type": {
+ "description": "Adds the entity type name to attributes. This can greatly increase the cardinality",
+ "default": false,
+ "type": "boolean"
+ },
+ "ttl": {
+ "description": "Metrics counter TTL",
+ "type": "string",
+ "nullable": true
+ }
+ },
+ "additionalProperties": false
+ },
"redis": {
"description": "Redis cache configuration",
"type": "object",
@@ -2035,6 +2165,10 @@ expression: "&schema"
},
"additionalProperties": false
},
+ "progressive_override": {
+ "description": "Configuration for the progressive override plugin",
+ "type": "object"
+ },
"rhai": {
"description": "Configuration for the Rhai Plugin",
"type": "object",
@@ -3046,6 +3180,33 @@ expression: "&schema"
]
}
]
+ },
+ "rate_limit": {
+ "description": "Log rate limiting. The limit is set per type of log message",
+ "type": "object",
+ "properties": {
+ "capacity": {
+ "description": "Number of log lines allowed in interval per message",
+ "default": 1,
+ "type": "integer",
+ "format": "uint32",
+ "minimum": 0.0
+ },
+ "enabled": {
+ "description": "Set to true to limit the rate of log messages",
+ "default": false,
+ "type": "boolean"
+ },
+ "interval": {
+ "description": "Interval for rate limiting",
+ "default": {
+ "secs": 1,
+ "nanos": 0
+ },
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
}
},
"additionalProperties": false
@@ -5024,23 +5185,6 @@ expression: "&schema"
"format": "double"
}
},
- "experimental_cache_metrics": {
- "description": "Experimental metrics to know more about caching strategies",
- "type": "object",
- "properties": {
- "enabled": {
- "description": "Enable experimental metrics",
- "default": false,
- "type": "boolean"
- },
- "ttl": {
- "description": "Potential TTL for a cache if we had one (default: 5secs)",
- "default": "5s",
- "type": "string"
- }
- },
- "additionalProperties": false
- },
"resource": {
"description": "The Open Telemetry resource",
"default": {},
@@ -5532,6 +5676,25 @@ expression: "&schema"
"default": false,
"type": "boolean"
},
+ "format": {
+ "description": "Format of the trace ID in response headers",
+ "oneOf": [
+ {
+ "description": "Format the Trace ID as a hexadecimal number\n\n(e.g. Trace ID 16 -> 00000000000000000000000000000010)",
+ "type": "string",
+ "enum": [
+ "hexadecimal"
+ ]
+ },
+ {
+ "description": "Format the Trace ID as a decimal number\n\n(e.g. Trace ID 16 -> 16)",
+ "type": "string",
+ "enum": [
+ "decimal"
+ ]
+ }
+ ]
+ },
"header_name": {
"description": "Choose the header name to expose trace_id (default: apollo-trace-id)",
"type": "string",
@@ -6022,6 +6185,12 @@ expression: "&schema"
"description": "Custom attributes that are attached to the router span.",
"type": "object",
"properties": {
+ "baggage": {
+ "description": "All key values from trace baggage.",
+ "default": null,
+ "type": "boolean",
+ "nullable": true
+ },
"dd.trace_id": {
"description": "The datadog trace ID. This can be output in logs and used to correlate traces in Datadog.",
"default": null,
@@ -6525,6 +6694,9 @@ expression: "&schema"
}
},
"additionalProperties": false
+ },
+ {
+ "type": "string"
}
]
}
@@ -7258,6 +7430,9 @@ expression: "&schema"
}
},
"additionalProperties": false
+ },
+ {
+ "type": "string"
}
]
}
@@ -7723,6 +7898,9 @@ expression: "&schema"
}
},
"additionalProperties": false
+ },
+ {
+ "type": "string"
}
]
}
@@ -7910,6 +8088,13 @@ expression: "&schema"
"enum": [
"br"
]
+ },
+ {
+ "description": "identity",
+ "type": "string",
+ "enum": [
+ "identity"
+ ]
}
],
"nullable": true
@@ -8079,6 +8264,13 @@ expression: "&schema"
"enum": [
"br"
]
+ },
+ {
+ "description": "identity",
+ "type": "string",
+ "enum": [
+ "identity"
+ ]
}
],
"nullable": true
diff --git a/apollo-router/src/context/extensions.rs b/apollo-router/src/context/extensions/mod.rs
similarity index 87%
rename from apollo-router/src/context/extensions.rs
rename to apollo-router/src/context/extensions/mod.rs
index c868d4f73f..cbee53db86 100644
--- a/apollo-router/src/context/extensions.rs
+++ b/apollo-router/src/context/extensions/mod.rs
@@ -1,3 +1,5 @@
+pub(crate) mod sync;
+
// NOTE: this module is taken from tokio's tracing span's extensions
// which is taken from https://github.com/hyperium/http/blob/master/src/extensions.rs
@@ -37,7 +39,7 @@ impl Hasher for IdHasher {
/// `Extensions` can be used by `Request` and `Response` to store
/// extra data derived from the underlying protocol.
#[derive(Default)]
-pub(crate) struct Extensions {
+pub struct Extensions {
// If extensions are never used, no need to carry around an empty HashMap.
// That's 3 words. Instead, this is only 1 word.
map: Option>,
@@ -55,7 +57,7 @@ impl Extensions {
///
/// If a extension of this type already existed, it will
/// be returned.
- pub(crate) fn insert(&mut self, val: T) -> Option {
+ pub fn insert(&mut self, val: T) -> Option {
self.map
.get_or_insert_with(Box::default)
.insert(TypeId::of::(), Box::new(val))
@@ -68,7 +70,7 @@ impl Extensions {
}
/// Get a reference to a type previously inserted on this `Extensions`.
- pub(crate) fn get(&self) -> Option<&T> {
+ pub fn get(&self) -> Option<&T> {
self.map
.as_ref()
.and_then(|map| map.get(&TypeId::of::()))
@@ -76,14 +78,15 @@ impl Extensions {
}
/// Get a mutable reference to a type previously inserted on this `Extensions`.
- pub(crate) fn get_mut(&mut self) -> Option<&mut T> {
+ pub fn get_mut(&mut self) -> Option<&mut T> {
self.map
.as_mut()
.and_then(|map| map.get_mut(&TypeId::of::()))
.and_then(|boxed| (&mut **boxed as &mut (dyn Any + 'static)).downcast_mut())
}
- pub(crate) fn contains_key(&self) -> bool {
+ /// Returns `true` if a value of this type has been stored in `Extensions`.
+ pub fn contains_key(&self) -> bool {
self.map
.as_ref()
.map(|map| map.contains_key(&TypeId::of::()))
@@ -93,7 +96,7 @@ impl Extensions {
/// Remove a type from this `Extensions`.
///
/// If a extension of this type existed, it will be returned.
- pub(crate) fn remove(&mut self) -> Option {
+ pub fn remove(&mut self) -> Option {
self.map
.as_mut()
.and_then(|map| map.remove(&TypeId::of::()))
@@ -107,7 +110,7 @@ impl Extensions {
/// Clear the `Extensions` of all inserted extensions.
#[inline]
- pub(crate) fn clear(&mut self) {
+ pub fn clear(&mut self) {
if let Some(ref mut map) = self.map {
map.clear();
}
@@ -115,13 +118,13 @@ impl Extensions {
/// Check whether the extension set is empty or not.
#[inline]
- pub(crate) fn is_empty(&self) -> bool {
+ pub fn is_empty(&self) -> bool {
self.map.as_ref().map_or(true, |map| map.is_empty())
}
/// Get the numer of extensions available.
#[inline]
- pub(crate) fn len(&self) -> usize {
+ pub fn len(&self) -> usize {
self.map.as_ref().map_or(0, |map| map.len())
}
}
diff --git a/apollo-router/src/context/extensions/sync.rs b/apollo-router/src/context/extensions/sync.rs
new file mode 100644
index 0000000000..c5350f9791
--- /dev/null
+++ b/apollo-router/src/context/extensions/sync.rs
@@ -0,0 +1,71 @@
+use std::ops::Deref;
+use std::ops::DerefMut;
+use std::sync::Arc;
+#[cfg(debug_assertions)]
+use std::time::Duration;
+#[cfg(debug_assertions)]
+use std::time::Instant;
+
+/// You can use `Extensions` to pass data between plugins that is not serializable. Such data is not accessible from Rhai or co-processors.
+///
+/// This can be accessed at any point in the request lifecycle and is useful for passing data between services.
+/// Extensions are thread safe, and must be locked for mutation.
+///
+/// For example:
+/// `context.extensions().lock().insert::(data);`
+#[derive(Default, Clone, Debug)]
+pub struct ExtensionsMutex {
+ extensions: Arc>,
+}
+
+impl ExtensionsMutex {
+ /// Locks the extensions for mutation.
+ ///
+ /// It is CRITICAL to avoid holding on to the mutex guard for too long, particularly across async calls.
+ /// Doing so may cause performance degradation or even deadlocks.
+ ///
+ /// See related clippy lint for examples:
+ pub fn lock(&self) -> ExtensionsGuard {
+ ExtensionsGuard {
+ #[cfg(debug_assertions)]
+ start: Instant::now(),
+ guard: self.extensions.lock(),
+ }
+ }
+}
+
+pub struct ExtensionsGuard<'a> {
+ #[cfg(debug_assertions)]
+ start: Instant,
+ guard: parking_lot::MutexGuard<'a, super::Extensions>,
+}
+
+impl<'a> Deref for ExtensionsGuard<'a> {
+ type Target = super::Extensions;
+
+ fn deref(&self) -> &super::Extensions {
+ &self.guard
+ }
+}
+
+impl DerefMut for ExtensionsGuard<'_> {
+ fn deref_mut(&mut self) -> &mut super::Extensions {
+ &mut self.guard
+ }
+}
+
+#[cfg(debug_assertions)]
+impl Drop for ExtensionsGuard<'_> {
+ fn drop(&mut self) {
+ // In debug builds we check that extensions is never held for too long.
+ // We only check if the current runtime is multi-threaded, because a bunch of unit tests fail the assertion and these need to be investigated separately.
+ if let Ok(runtime) = tokio::runtime::Handle::try_current() {
+ if runtime.runtime_flavor() == tokio::runtime::RuntimeFlavor::MultiThread {
+ let elapsed = self.start.elapsed();
+ if elapsed > Duration::from_millis(10) {
+ panic!("ExtensionsGuard held for {}ms. This is probably a bug that will stall the Router and cause performance problems. Run with `RUST_BACKTRACE=1` environment variable to display a backtrace", elapsed.as_millis());
+ }
+ }
+ }
+ }
+}
diff --git a/apollo-router/src/context/mod.rs b/apollo-router/src/context/mod.rs
index bd1b7a49e1..568a43671f 100644
--- a/apollo-router/src/context/mod.rs
+++ b/apollo-router/src/context/mod.rs
@@ -11,12 +11,12 @@ use dashmap::mapref::multiple::RefMulti;
use dashmap::mapref::multiple::RefMutMulti;
use dashmap::DashMap;
use derivative::Derivative;
+use extensions::sync::ExtensionsMutex;
use parking_lot::Mutex;
use serde::Deserialize;
use serde::Serialize;
use tower::BoxError;
-use self::extensions::Extensions;
use crate::json_ext::Value;
pub(crate) mod extensions;
@@ -48,7 +48,7 @@ pub struct Context {
entries: Entries,
#[serde(skip)]
- pub(crate) private_entries: Arc>,
+ extensions: ExtensionsMutex,
/// Creation time
#[serde(skip)]
@@ -71,7 +71,7 @@ impl Context {
.to_string();
Context {
entries: Default::default(),
- private_entries: Arc::new(parking_lot::Mutex::new(Extensions::default())),
+ extensions: ExtensionsMutex::default(),
created_at: Instant::now(),
busy_timer: Arc::new(Mutex::new(BusyTimer::new())),
id,
@@ -80,6 +80,18 @@ impl Context {
}
impl Context {
+ /// Returns extensions of the context.
+ ///
+ /// You can use `Extensions` to pass data between plugins that is not serializable. Such data is not accessible from Rhai or co-processors.
+ ///
+ /// It is CRITICAL to avoid holding on to the mutex guard for too long, particularly across async calls.
+ /// Doing so may cause performance degradation or even deadlocks.
+ ///
+ /// See related clippy lint for examples:
+ pub fn extensions(&self) -> &ExtensionsMutex {
+ &self.extensions
+ }
+
/// Returns true if the context contains a value for the specified key.
pub fn contains_key(&self, key: K) -> bool
where
@@ -373,4 +385,14 @@ mod test {
assert_eq!(c.get("one").unwrap(), Some(2));
assert_eq!(c.get("two").unwrap(), Some(3));
}
+
+ #[test]
+ fn context_extensions() {
+ // This is mostly tested in the extensions module.
+ let c = Context::new();
+ let mut extensions = c.extensions().lock();
+ extensions.insert(1usize);
+ let v = extensions.get::();
+ assert_eq!(v, Some(&1usize));
+ }
}
diff --git a/apollo-router/src/error.rs b/apollo-router/src/error.rs
index 87ccd4ad5f..9a6ca7aec2 100644
--- a/apollo-router/src/error.rs
+++ b/apollo-router/src/error.rs
@@ -261,7 +261,7 @@ pub(crate) enum QueryPlannerError {
/// Cache resolution failed: {0}
CacheResolverError(Arc),
- /// empty query plan. This often means an unhandled Introspection query was sent. Please file an issue to apollographql/router.
+ /// empty query plan. This behavior is unexpected and we suggest opening an issue to apollographql/router with a reproduction.
EmptyPlan(UsageReporting), // usage_reporting_signature
/// unhandled planner result
diff --git a/apollo-router/src/executable.rs b/apollo-router/src/executable.rs
index 271e55c4f4..0d5675d92e 100644
--- a/apollo-router/src/executable.rs
+++ b/apollo-router/src/executable.rs
@@ -12,6 +12,7 @@ use std::time::Duration;
use anyhow::anyhow;
use anyhow::Result;
+use clap::builder::FalseyValueParser;
use clap::ArgAction;
use clap::Args;
use clap::CommandFactory;
@@ -234,7 +235,7 @@ pub struct Opt {
apollo_uplink_poll_interval: Duration,
/// Disable sending anonymous usage information to Apollo.
- #[clap(long, env = "APOLLO_TELEMETRY_DISABLED")]
+ #[clap(long, env = "APOLLO_TELEMETRY_DISABLED", value_parser = FalseyValueParser::new())]
anonymous_telemetry_disabled: bool,
/// The timeout for an http call to Apollo uplink. Defaults to 30s.
@@ -293,6 +294,10 @@ impl Opt {
})
}
+ pub(crate) fn is_telemetry_disabled(&self) -> bool {
+ self.anonymous_telemetry_disabled
+ }
+
fn parse_endpoints(endpoints: &str) -> std::result::Result {
Ok(Endpoints::fallback(
endpoints
@@ -433,7 +438,15 @@ impl Executable {
}
copy_args_to_env();
- init_telemetry(&opt.log_level)?;
+
+ let apollo_telemetry_initialized = if graph_os() {
+ init_telemetry(&opt.log_level)?;
+ true
+ } else {
+ // Best effort init telemetry
+ init_telemetry(&opt.log_level).is_ok()
+ };
+
setup_panic_handler();
if opt.schema {
@@ -474,12 +487,14 @@ impl Executable {
None => Self::inner_start(shutdown, schema, config, license, opt).await,
};
- // We should be good to shutdown OpenTelemetry now as the router should have finished everything.
- tokio::task::spawn_blocking(move || {
- opentelemetry::global::shutdown_tracer_provider();
- meter_provider().shutdown();
- })
- .await?;
+ if apollo_telemetry_initialized {
+ // We should be good to shutdown OpenTelemetry now as the router should have finished everything.
+ tokio::task::spawn_blocking(move || {
+ opentelemetry::global::shutdown_tracer_provider();
+ meter_provider().shutdown();
+ })
+ .await?;
+ }
result
}
@@ -680,6 +695,10 @@ impl Executable {
}
}
+fn graph_os() -> bool {
+ std::env::var("APOLLO_KEY").is_ok() && std::env::var("APOLLO_GRAPH_REF").is_ok()
+}
+
fn setup_panic_handler() {
// Redirect panics to the logs.
let backtrace_env = std::env::var("RUST_BACKTRACE");
diff --git a/apollo-router/src/lib.rs b/apollo-router/src/lib.rs
index 584711997d..cef6ac055f 100644
--- a/apollo-router/src/lib.rs
+++ b/apollo-router/src/lib.rs
@@ -79,8 +79,11 @@ pub mod test_harness;
pub mod tracer;
mod uplink;
+pub use crate::axum_factory::unsupported_set_axum_router_callback;
pub use crate::configuration::Configuration;
pub use crate::configuration::ListenAddr;
+pub use crate::context::extensions::sync::ExtensionsMutex;
+pub use crate::context::extensions::Extensions;
pub use crate::context::Context;
pub use crate::executable::main;
pub use crate::executable::Executable;
diff --git a/apollo-router/src/logging/mod.rs b/apollo-router/src/logging/mod.rs
index 5f6a615892..c70612b571 100644
--- a/apollo-router/src/logging/mod.rs
+++ b/apollo-router/src/logging/mod.rs
@@ -4,6 +4,7 @@ pub(crate) mod test {
use std::sync::Mutex;
use serde_json::Value;
+ use tracing_core::LevelFilter;
use tracing_core::Subscriber;
pub(crate) struct SnapshotSubscriber {
@@ -41,7 +42,7 @@ pub(crate) mod test {
.unwrap()
.as_object_mut()
.unwrap();
- let message = fields.remove("message").unwrap();
+ let message = fields.remove("message").unwrap_or_default();
line.as_object_mut()
.unwrap()
.insert("message".to_string(), message);
@@ -56,7 +57,10 @@ pub(crate) mod test {
}
impl SnapshotSubscriber {
- pub(crate) fn create_subscriber(assertion: fn(Value)) -> impl Subscriber {
+ pub(crate) fn create_subscriber(
+ level: LevelFilter,
+ assertion: fn(Value),
+ ) -> impl Subscriber {
let collector = Self {
buffer: Arc::new(Mutex::new(Vec::new())),
assertion,
@@ -64,6 +68,7 @@ pub(crate) mod test {
tracing_subscriber::fmt()
.json()
+ .with_max_level(level)
.without_time()
.with_target(false)
.with_file(false)
@@ -81,23 +86,19 @@ pub(crate) mod test {
/// You can also use subscriber::with_default(assert_snapshot_subscriber!(), || { ... }) to assert the logs in non async code.
macro_rules! assert_snapshot_subscriber {
() => {
- $crate::logging::test::SnapshotSubscriber::create_subscriber(|yaml| {
- insta::with_settings!({sort_maps => true}, {
- // the tests here will force maps to sort
- let mut settings = insta::Settings::clone_current();
- settings.set_snapshot_suffix("logs");
- settings.set_sort_maps(true);
- settings.bind(|| {
- // runs the assertion with the changed settings enabled
- insta::assert_yaml_snapshot!(yaml);
- });
-
- });
- })
+ $crate::assert_snapshot_subscriber!(tracing_core::LevelFilter::INFO, {})
};
($redactions:tt) => {
- $crate::logging::test::SnapshotSubscriber::create_subscriber(|yaml| {
+ $crate::assert_snapshot_subscriber!(tracing_core::LevelFilter::INFO, $redactions)
+ };
+
+ ($level:expr) => {
+ $crate::assert_snapshot_subscriber!($level, {})
+ };
+
+ ($level:expr, $redactions:tt) => {
+ $crate::logging::test::SnapshotSubscriber::create_subscriber($level, |yaml| {
insta::with_settings!({sort_maps => true}, {
// the tests here will force maps to sort
let mut settings = insta::Settings::clone_current();
diff --git a/apollo-router/src/notification.rs b/apollo-router/src/notification.rs
index 700d3f5a60..c7e3b69440 100644
--- a/apollo-router/src/notification.rs
+++ b/apollo-router/src/notification.rs
@@ -391,7 +391,7 @@ struct HandleGuard
where
K: Clone,
{
- topic: K,
+ topic: Arc<K>,
pubsub_sender: mpsc::Sender>,
}
@@ -413,7 +413,7 @@ where
{
fn drop(&mut self) {
let err = self.pubsub_sender.try_send(Notification::Unsubscribe {
- topic: self.topic.clone(),
+ topic: self.topic.as_ref().clone(),
});
if let Err(err) = err {
tracing::trace!("cannot unsubscribe {err:?}");
@@ -460,7 +460,7 @@ where
) -> Self {
Self {
handle_guard: HandleGuard {
- topic,
+ topic: Arc::new(topic),
pubsub_sender,
},
msg_sender,
@@ -583,7 +583,7 @@ where
}
fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> {
- let topic = self.handle_guard.topic.clone();
+ let topic = self.handle_guard.topic.as_ref().clone();
let _ = self
.handle_guard
.pubsub_sender
@@ -614,8 +614,10 @@ async fn task(
_ = ttl_fut.next() => {
let heartbeat_error_message = heartbeat_error_message.clone();
pubsub.kill_dead_topics(heartbeat_error_message).await;
- tracing::info!(
- value.apollo_router_opened_subscriptions = pubsub.subscriptions.len() as u64,
+ u64_counter!(
+ "apollo_router_opened_subscriptions",
+ "Number of opened subscriptions",
+ pubsub.subscriptions.len() as u64
);
}
message = receiver.next() => {
diff --git a/apollo-router/src/orbiter/mod.rs b/apollo-router/src/orbiter/mod.rs
index ef4be4ac0c..30945f5e64 100644
--- a/apollo-router/src/orbiter/mod.rs
+++ b/apollo-router/src/orbiter/mod.rs
@@ -1,10 +1,10 @@
use std::collections::HashMap;
-use std::env;
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use clap::CommandFactory;
+use clap::Parser;
use http::header::CONTENT_TYPE;
use http::header::USER_AGENT;
use jsonschema::output::BasicOutput;
@@ -110,7 +110,10 @@ impl RouterSuperServiceFactory for OrbiterRouterSuperServiceFactory {
)
.await
.map(|factory| {
- if env::var("APOLLO_TELEMETRY_DISABLED").unwrap_or_default() != "true" {
+ // TODO: We should have a way to access the original CLI args here so that we can just see what the
+ // value of `anonymous_telemetry_disabled` really is instead of parsing it twice.
+ let telemetry_disabled = Opt::parse().is_telemetry_disabled();
+ if !telemetry_disabled {
let schema = factory.supergraph_creator.schema();
tokio::task::spawn(async move {
@@ -162,7 +165,7 @@ fn create_report(configuration: Arc, _schema: Arc) -> Usa
// Check the command line options. This encapsulates both env and command line functionality
// This won't work in tests so we have separate test code.
#[cfg(not(test))]
- visit_args(&mut usage, env::args().collect());
+ visit_args(&mut usage, std::env::args().collect());
UsageReport {
session_id: *SESSION_ID.get_or_init(Uuid::new_v4),
diff --git a/apollo-router/src/plugins/authentication/subgraph.rs b/apollo-router/src/plugins/authentication/subgraph.rs
index 0ad2abb7f5..68ece88fce 100644
--- a/apollo-router/src/plugins/authentication/subgraph.rs
+++ b/apollo-router/src/plugins/authentication/subgraph.rs
@@ -4,14 +4,14 @@ use std::time::SystemTime;
use aws_credential_types::provider::ProvideCredentials;
use aws_credential_types::Credentials;
-use aws_sigv4::http_request;
use aws_sigv4::http_request::sign;
use aws_sigv4::http_request::PayloadChecksumKind;
use aws_sigv4::http_request::SignableBody;
use aws_sigv4::http_request::SignableRequest;
use aws_sigv4::http_request::SigningSettings;
-use aws_sigv4::signing_params;
+use aws_smithy_runtime_api::client::identity::Identity;
use aws_types::region::Region;
+use http::HeaderMap;
use http::Request;
use hyper::Body;
use schemars::JsonSchema;
@@ -121,7 +121,7 @@ impl AWSSigV4Config {
let chain = aws_config.build().await;
if let Some(assume_role_provider) = role_provider_builder {
- Arc::new(assume_role_provider.build(chain))
+ Arc::new(assume_role_provider.build_from_provider(chain).await)
} else {
Arc::new(chain)
}
@@ -132,7 +132,7 @@ impl AWSSigV4Config {
.build()
.await;
if let Some(assume_role_provider) = role_provider_builder {
- Arc::new(assume_role_provider.build(chain))
+ Arc::new(assume_role_provider.build_from_provider(chain).await)
} else {
Arc::new(config.clone())
}
@@ -206,24 +206,24 @@ impl SigningParamsConfig {
let credentials = self.credentials().await?;
let builder = self.signing_params_builder(&credentials).await?;
let (parts, body) = req.into_parts();
- // Depending on the servicve, AWS refuses sigv4 payloads that contain specific headers.
+ // Depending on the service, AWS refuses sigv4 payloads that contain specific headers.
// We'll go with default signed headers
- let headers = Default::default();
+ let headers = HeaderMap::<&'static str>::default();
// UnsignedPayload only applies to lattice
let body_bytes = hyper::body::to_bytes(body).await?.to_vec();
let signable_request = SignableRequest::new(
- &parts.method,
- &parts.uri,
- &headers,
+ parts.method.as_str(),
+ parts.uri.to_string(),
+ headers.iter().map(|(name, value)| (name.as_str(), *value)),
match self.service_name.as_str() {
"vpc-lattice-svcs" => SignableBody::UnsignedPayload,
_ => SignableBody::Bytes(body_bytes.as_slice()),
},
- );
+ )?;
let signing_params = builder.build().expect("all required fields set");
- let (signing_instructions, _signature) = sign(signable_request, &signing_params)
+ let (signing_instructions, _signature) = sign(signable_request, &signing_params.into())
.map_err(|err| {
increment_failure_counter(subgraph_name);
let error = format!("failed to sign GraphQL body for AWS SigV4: {}", err);
@@ -232,7 +232,7 @@ impl SigningParamsConfig {
})?
.into_parts();
req = Request::::from_parts(parts, body_bytes.into());
- signing_instructions.apply_to_request(&mut req);
+ signing_instructions.apply_to_request_http0x(&mut req);
increment_success_counter(subgraph_name);
Ok(req)
}
@@ -245,23 +245,23 @@ impl SigningParamsConfig {
let credentials = self.credentials().await?;
let builder = self.signing_params_builder(&credentials).await?;
let (parts, _) = req.into_parts();
- // Depending on the servicve, AWS refuses sigv4 payloads that contain specific headers.
+ // Depending on the service, AWS refuses sigv4 payloads that contain specific headers.
// We'll go with default signed headers
- let headers = Default::default();
+ let headers = HeaderMap::<&'static str>::default();
// UnsignedPayload only applies to lattice
let signable_request = SignableRequest::new(
- &parts.method,
- &parts.uri,
- &headers,
+ parts.method.as_str(),
+ parts.uri.to_string(),
+ headers.iter().map(|(name, value)| (name.as_str(), *value)),
match self.service_name.as_str() {
"vpc-lattice-svcs" => SignableBody::UnsignedPayload,
_ => SignableBody::Bytes(&[]),
},
- );
+ )?;
let signing_params = builder.build().expect("all required fields set");
- let (signing_instructions, _signature) = sign(signable_request, &signing_params)
+ let (signing_instructions, _signature) = sign(signable_request, &signing_params.into())
.map_err(|err| {
increment_failure_counter(subgraph_name);
let error = format!("failed to sign GraphQL body for AWS SigV4: {}", err);
@@ -270,28 +270,26 @@ impl SigningParamsConfig {
})?
.into_parts();
req = Request::<()>::from_parts(parts, ());
- signing_instructions.apply_to_request(&mut req);
+ signing_instructions.apply_to_request_http0x(&mut req);
increment_success_counter(subgraph_name);
Ok(req)
}
async fn signing_params_builder<'s>(
&'s self,
- credentials: &'s Credentials,
- ) -> Result, BoxError> {
+ identity: &'s Identity,
+ ) -> Result, BoxError> {
let settings = get_signing_settings(self);
- let mut builder = http_request::SigningParams::builder()
- .access_key(credentials.access_key_id())
- .secret_key(credentials.secret_access_key())
+ let builder = aws_sigv4::sign::v4::SigningParams::builder()
+ .identity(identity)
.region(self.region.as_ref())
- .service_name(&self.service_name)
+ .name(&self.service_name)
.time(SystemTime::now())
.settings(settings);
- builder.set_security_token(credentials.session_token());
Ok(builder)
}
- async fn credentials(&self) -> Result<Credentials, BoxError> {
+ async fn credentials(&self) -> Result<Identity, BoxError> {
self.credentials_provider
.provide_credentials()
.await
@@ -301,6 +299,7 @@ impl SigningParamsConfig {
tracing::error!("{}", error);
error.into()
})
+ .map(Into::into)
}
}
@@ -374,7 +373,7 @@ impl SubgraphAuth {
ServiceBuilder::new()
.map_request(move |req: SubgraphRequest| {
let signing_params = signing_params.clone();
- req.context.private_entries.lock().insert(signing_params);
+ req.context.extensions().lock().insert(signing_params);
req
})
.service(service)
@@ -643,7 +642,7 @@ mod test {
service_name: String,
) -> hyper::Request {
let signing_params = {
- let ctx = request.context.private_entries.lock();
+ let ctx = request.context.extensions().lock();
let sp = ctx.get::();
sp.cloned().unwrap()
};
diff --git a/apollo-router/src/plugins/authorization/authenticated.rs b/apollo-router/src/plugins/authorization/authenticated.rs
index 60e1412160..fe5e359977 100644
--- a/apollo-router/src/plugins/authorization/authenticated.rs
+++ b/apollo-router/src/plugins/authorization/authenticated.rs
@@ -16,7 +16,8 @@ use crate::spec::Schema;
use crate::spec::TYPENAME;
pub(crate) const AUTHENTICATED_DIRECTIVE_NAME: &str = "authenticated";
-pub(crate) const AUTHENTICATED_SPEC_URL: &str = "https://specs.apollo.dev/authenticated/v0.1";
+pub(crate) const AUTHENTICATED_SPEC_BASE_URL: &str = "https://specs.apollo.dev/authenticated";
+pub(crate) const AUTHENTICATED_SPEC_VERSION_RANGE: &str = ">=0.1.0, <=0.1.0";
pub(crate) struct AuthenticatedCheckVisitor<'a> {
schema: &'a schema::Schema,
@@ -39,7 +40,8 @@ impl<'a> AuthenticatedCheckVisitor<'a> {
found: false,
authenticated_directive_name: Schema::directive_name(
schema,
- AUTHENTICATED_SPEC_URL,
+ AUTHENTICATED_SPEC_BASE_URL,
+ AUTHENTICATED_SPEC_VERSION_RANGE,
AUTHENTICATED_DIRECTIVE_NAME,
)?,
})
@@ -205,7 +207,8 @@ impl<'a> AuthenticatedVisitor<'a> {
current_path: Path::default(),
authenticated_directive_name: Schema::directive_name(
schema,
- AUTHENTICATED_SPEC_URL,
+ AUTHENTICATED_SPEC_BASE_URL,
+ AUTHENTICATED_SPEC_VERSION_RANGE,
AUTHENTICATED_DIRECTIVE_NAME,
)?,
})
@@ -1656,7 +1659,7 @@ mod tests {
.unwrap();*/
let mut headers: MultiMap = MultiMap::new();
headers.insert("Accept".into(), "multipart/mixed;deferSpec=20220824".into());
- context.private_entries.lock().insert(ClientRequestAccepts {
+ context.extensions().lock().insert(ClientRequestAccepts {
multipart_defer: true,
multipart_subscription: true,
json: true,
diff --git a/apollo-router/src/plugins/authorization/mod.rs b/apollo-router/src/plugins/authorization/mod.rs
index f69e0618ba..3d7a65a280 100644
--- a/apollo-router/src/plugins/authorization/mod.rs
+++ b/apollo-router/src/plugins/authorization/mod.rs
@@ -17,13 +17,16 @@ use tower::ServiceExt;
use self::authenticated::AuthenticatedCheckVisitor;
use self::authenticated::AuthenticatedVisitor;
-use self::authenticated::AUTHENTICATED_SPEC_URL;
+use self::authenticated::AUTHENTICATED_SPEC_BASE_URL;
+use self::authenticated::AUTHENTICATED_SPEC_VERSION_RANGE;
use self::policy::PolicyExtractionVisitor;
use self::policy::PolicyFilteringVisitor;
-use self::policy::POLICY_SPEC_URL;
+use self::policy::POLICY_SPEC_BASE_URL;
+use self::policy::POLICY_SPEC_VERSION_RANGE;
use self::scopes::ScopeExtractionVisitor;
use self::scopes::ScopeFilteringVisitor;
-use self::scopes::REQUIRES_SCOPES_SPEC_URL;
+use self::scopes::REQUIRES_SCOPES_SPEC_BASE_URL;
+use self::scopes::REQUIRES_SCOPES_SPEC_VERSION_RANGE;
use crate::error::QueryPlannerError;
use crate::error::ServiceBuildError;
use crate::graphql;
@@ -145,9 +148,14 @@ impl AuthorizationPlugin {
.and_then(|(_, v)| v.get("directives").and_then(|v| v.as_object()))
.and_then(|v| v.get("enabled").and_then(|v| v.as_bool()));
- let has_authorization_directives = schema.has_spec(AUTHENTICATED_SPEC_URL)
- || schema.has_spec(REQUIRES_SCOPES_SPEC_URL)
- || schema.has_spec(POLICY_SPEC_URL);
+ let has_authorization_directives = schema.has_spec(
+ AUTHENTICATED_SPEC_BASE_URL,
+ AUTHENTICATED_SPEC_VERSION_RANGE,
+ ) || schema.has_spec(
+ REQUIRES_SCOPES_SPEC_BASE_URL,
+ REQUIRES_SCOPES_SPEC_VERSION_RANGE,
+ ) || schema
+ .has_spec(POLICY_SPEC_BASE_URL, POLICY_SPEC_VERSION_RANGE);
Ok(has_config.unwrap_or(true) && has_authorization_directives)
}
@@ -280,7 +288,7 @@ impl AuthorizationPlugin {
.unwrap_or_default();
policies.sort();
- context.private_entries.lock().insert(CacheKeyMetadata {
+ context.extensions().lock().insert(CacheKeyMetadata {
is_authenticated,
scopes,
policies,
diff --git a/apollo-router/src/plugins/authorization/policy.rs b/apollo-router/src/plugins/authorization/policy.rs
index 25202d1bf8..2f4ceb9baf 100644
--- a/apollo-router/src/plugins/authorization/policy.rs
+++ b/apollo-router/src/plugins/authorization/policy.rs
@@ -30,7 +30,8 @@ pub(crate) struct PolicyExtractionVisitor<'a> {
}
pub(crate) const POLICY_DIRECTIVE_NAME: &str = "policy";
-pub(crate) const POLICY_SPEC_URL: &str = "https://specs.apollo.dev/policy/v0.1";
+pub(crate) const POLICY_SPEC_BASE_URL: &str = "https://specs.apollo.dev/policy";
+pub(crate) const POLICY_SPEC_VERSION_RANGE: &str = ">=0.1.0, <=0.1.0";
impl<'a> PolicyExtractionVisitor<'a> {
#[allow(dead_code)]
@@ -46,7 +47,8 @@ impl<'a> PolicyExtractionVisitor<'a> {
extracted_policies: HashSet::new(),
policy_directive_name: Schema::directive_name(
schema,
- POLICY_SPEC_URL,
+ POLICY_SPEC_BASE_URL,
+ POLICY_SPEC_VERSION_RANGE,
POLICY_DIRECTIVE_NAME,
)?,
})
@@ -238,7 +240,8 @@ impl<'a> PolicyFilteringVisitor<'a> {
current_path: Path::default(),
policy_directive_name: Schema::directive_name(
schema,
- POLICY_SPEC_URL,
+ POLICY_SPEC_BASE_URL,
+ POLICY_SPEC_VERSION_RANGE,
POLICY_DIRECTIVE_NAME,
)?,
})
diff --git a/apollo-router/src/plugins/authorization/scopes.rs b/apollo-router/src/plugins/authorization/scopes.rs
index 83cb428028..4348d1fa39 100644
--- a/apollo-router/src/plugins/authorization/scopes.rs
+++ b/apollo-router/src/plugins/authorization/scopes.rs
@@ -30,7 +30,8 @@ pub(crate) struct ScopeExtractionVisitor<'a> {
}
pub(crate) const REQUIRES_SCOPES_DIRECTIVE_NAME: &str = "requiresScopes";
-pub(crate) const REQUIRES_SCOPES_SPEC_URL: &str = "https://specs.apollo.dev/requiresScopes/v0.1";
+pub(crate) const REQUIRES_SCOPES_SPEC_BASE_URL: &str = "https://specs.apollo.dev/requiresScopes";
+pub(crate) const REQUIRES_SCOPES_SPEC_VERSION_RANGE: &str = ">=0.1.0, <=0.1.0";
impl<'a> ScopeExtractionVisitor<'a> {
#[allow(dead_code)]
@@ -46,7 +47,8 @@ impl<'a> ScopeExtractionVisitor<'a> {
extracted_scopes: HashSet::new(),
requires_scopes_directive_name: Schema::directive_name(
schema,
- REQUIRES_SCOPES_SPEC_URL,
+ REQUIRES_SCOPES_SPEC_BASE_URL,
+ REQUIRES_SCOPES_SPEC_VERSION_RANGE,
REQUIRES_SCOPES_DIRECTIVE_NAME,
)?,
})
@@ -236,7 +238,8 @@ impl<'a> ScopeFilteringVisitor<'a> {
current_path: Path::default(),
requires_scopes_directive_name: Schema::directive_name(
schema,
- REQUIRES_SCOPES_SPEC_URL,
+ REQUIRES_SCOPES_SPEC_BASE_URL,
+ REQUIRES_SCOPES_SPEC_VERSION_RANGE,
REQUIRES_SCOPES_DIRECTIVE_NAME,
)?,
})
diff --git a/apollo-router/src/plugins/cache/entity.rs b/apollo-router/src/plugins/cache/entity.rs
index d6dbc89d2c..2d5219a776 100644
--- a/apollo-router/src/plugins/cache/entity.rs
+++ b/apollo-router/src/plugins/cache/entity.rs
@@ -18,6 +18,7 @@ use tower_service::Service;
use tracing::Level;
use super::cache_control::CacheControl;
+use super::metrics::CacheMetricsService;
use crate::cache::redis::RedisCacheStorage;
use crate::cache::redis::RedisKey;
use crate::cache::redis::RedisValue;
@@ -44,16 +45,17 @@ pub(crate) const CONTEXT_CACHE_KEY: &str = "apollo_entity_cache::key";
register_plugin!("apollo", "experimental_entity_cache", EntityCache);
-struct EntityCache {
+pub(crate) struct EntityCache {
storage: RedisCacheStorage,
subgraphs: Arc<HashMap<String, Subgraph>>,
enabled: Option<bool>,
+ metrics: Metrics,
}
/// Configuration for entity caching
#[derive(Clone, Debug, JsonSchema, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
-struct Config {
+pub(crate) struct Config {
redis: RedisCache,
/// activates caching for all subgraphs, unless overriden in subgraph specific configuration
#[serde(default)]
@@ -61,12 +63,16 @@ struct Config {
/// Per subgraph configuration
#[serde(default)]
subgraphs: HashMap,
+
+ /// Entity caching evaluation metrics
+ #[serde(default)]
+ metrics: Metrics,
}
/// Per subgraph configuration for entity caching
#[derive(Clone, Debug, JsonSchema, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
-struct Subgraph {
+pub(crate) struct Subgraph {
/// expiration for all keys
pub(crate) ttl: Option,
@@ -78,12 +84,26 @@ struct Subgraph {
/// Per subgraph configuration for entity caching
#[derive(Clone, Debug, JsonSchema, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
-struct Ttl(
+pub(crate) struct Ttl(
#[serde(deserialize_with = "humantime_serde::deserialize")]
#[schemars(with = "String")]
- Duration,
+ pub(crate) Duration,
);
+/// Configuration for metrics evaluating the benefits of entity caching
+#[derive(Clone, Debug, Default, JsonSchema, Deserialize)]
+#[serde(rename_all = "snake_case", deny_unknown_fields)]
+struct Metrics {
+ /// enables metrics evaluating the benefits of entity caching
+ #[serde(default)]
+ pub(crate) enabled: bool,
+ /// Metrics counter TTL
+ pub(crate) ttl: Option,
+ /// Adds the entity type name to attributes. This can greatly increase the cardinality
+ #[serde(default)]
+ pub(crate) separate_per_type: bool,
+}
+
#[async_trait::async_trait]
impl Plugin for EntityCache {
type Config = Config;
@@ -98,17 +118,15 @@ impl Plugin for EntityCache {
storage,
enabled: init.config.enabled,
subgraphs: Arc::new(init.config.subgraphs),
+ metrics: init.config.metrics,
})
}
fn supergraph_service(&self, service: supergraph::BoxService) -> supergraph::BoxService {
ServiceBuilder::new()
.map_response(|mut response: supergraph::Response| {
- if let Some(cache_control) = response
- .context
- .private_entries
- .lock()
- .get::()
+ if let Some(cache_control) =
+ response.context.extensions().lock().get::()
{
let _ = cache_control.to_headers(response.response.headers_mut());
}
@@ -119,7 +137,11 @@ impl Plugin for EntityCache {
.boxed()
}
- fn subgraph_service(&self, name: &str, service: subgraph::BoxService) -> subgraph::BoxService {
+ fn subgraph_service(
+ &self,
+ name: &str,
+ mut service: subgraph::BoxService,
+ ) -> subgraph::BoxService {
let storage = self.storage.clone();
let (subgraph_ttl, subgraph_enabled) = if let Some(config) = self.subgraphs.get(name) {
@@ -136,6 +158,15 @@ impl Plugin for EntityCache {
};
let name = name.to_string();
+ if self.metrics.enabled {
+ service = CacheMetricsService::create(
+ name.to_string(),
+ service,
+ self.metrics.ttl.as_ref(),
+ self.metrics.separate_per_type,
+ );
+ }
+
if subgraph_enabled {
tower::util::BoxService::new(CacheService(Some(InnerCacheService {
service,
@@ -149,6 +180,24 @@ impl Plugin for EntityCache {
}
}
+impl EntityCache {
+ #[cfg(test)]
+ pub(crate) async fn with_mocks(
+ storage: RedisCacheStorage,
+ subgraphs: HashMap<String, Subgraph>,
+ ) -> Result<Self, BoxError>
+ where
+ Self: Sized,
+ {
+ Ok(Self {
+ storage,
+ enabled: Some(true),
+ subgraphs: Arc::new(subgraphs),
+ metrics: Metrics::default(),
+ })
+ }
+}
+
struct CacheService(Option);
struct InnerCacheService {
service: subgraph::BoxService,
@@ -260,11 +309,7 @@ async fn cache_lookup_root(
match cache_result {
Some(value) => {
- request
- .context
- .private_entries
- .lock()
- .insert(value.0.control);
+ request.context.extensions().lock().insert(value.0.control);
Ok(ControlFlow::Break(
subgraph::Response::builder()
@@ -342,12 +387,12 @@ async fn cache_lookup_entities(
}
fn update_cache_control(context: &Context, cache_control: &CacheControl) {
- if let Some(c) = context.private_entries.lock().get_mut::() {
+ if let Some(c) = context.extensions().lock().get_mut::() {
*c = c.merge(cache_control);
return;
}
//FIXME: race condition. We need an Entry API for private entries
- context.private_entries.lock().insert(cache_control.clone());
+ context.extensions().lock().insert(cache_control.clone());
}
#[derive(Clone, Debug, Serialize, Deserialize)]
diff --git a/apollo-router/src/plugins/cache/metrics.rs b/apollo-router/src/plugins/cache/metrics.rs
new file mode 100644
index 0000000000..3c1f51107c
--- /dev/null
+++ b/apollo-router/src/plugins/cache/metrics.rs
@@ -0,0 +1,276 @@
+use std::collections::HashMap;
+use std::sync::Arc;
+use std::time::Duration;
+use std::time::Instant;
+
+use bloomfilter::Bloom;
+use http::header;
+use parking_lot::Mutex;
+use serde_json_bytes::Value;
+use tower::BoxError;
+use tower_service::Service;
+
+use super::entity::hash_query;
+use super::entity::hash_vary_headers;
+use super::entity::Ttl;
+use super::entity::REPRESENTATIONS;
+use crate::services::subgraph;
+use crate::spec::TYPENAME;
+
+pub(crate) struct CacheMetricsService(Option<InnerCacheMetricsService>);
+
+impl CacheMetricsService {
+ pub(crate) fn create(
+ name: String,
+ service: subgraph::BoxService,
+ ttl: Option<&Ttl>,
+ separate_per_type: bool,
+ ) -> subgraph::BoxService {
+ tower::util::BoxService::new(CacheMetricsService(Some(InnerCacheMetricsService {
+ service,
+ name: Arc::new(name),
+ counter: Some(Arc::new(Mutex::new(CacheCounter::new(
+ ttl.map(|t| t.0).unwrap_or_else(|| Duration::from_secs(60)),
+ separate_per_type,
+ )))),
+ })))
+ }
+}
+
+pub(crate) struct InnerCacheMetricsService {
+ service: subgraph::BoxService,
+ name: Arc<String>,
+ counter: Option<Arc<Mutex<CacheCounter>>>,
+}
+
+impl Service<subgraph::Request> for CacheMetricsService {
+ type Response = subgraph::Response;
+ type Error = BoxError;
+ type Future = <subgraph::BoxService as Service<subgraph::Request>>::Future;
+
+ fn poll_ready(
+ &mut self,
+ cx: &mut std::task::Context<'_>,
+ ) -> std::task::Poll<Result<(), Self::Error>> {
+ match &mut self.0 {
+ Some(s) => s.service.poll_ready(cx),
+ None => panic!("service should have been called only once"),
+ }
+ }
+
+ fn call(&mut self, request: subgraph::Request) -> Self::Future {
+ match self.0.take() {
+ None => panic!("service should have been called only once"),
+ Some(s) => Box::pin(s.call_inner(request)),
+ }
+ }
+}
+
+impl InnerCacheMetricsService {
+ async fn call_inner(
+ mut self,
+ mut request: subgraph::Request,
+ ) -> Result<subgraph::Response, BoxError> {
+ let cache_attributes = Self::get_cache_attributes(&mut request);
+ println!(
+ "inner metrics cache attributes in root req for {}: {:?}",
+ self.name, cache_attributes
+ );
+
+ let response = self.service.call(request).await?;
+
+ if let Some(cache_attributes) = cache_attributes {
+ if let Some(counter) = &self.counter {
+ println!("inner metrics cache {}: will update metrics", self.name,);
+ Self::update_cache_metrics(&self.name, counter, &response, cache_attributes)
+ }
+ }
+
+ Ok(response)
+ }
+
+ fn get_cache_attributes(sub_request: &mut subgraph::Request) -> Option<CacheAttributes> {
+ let body = sub_request.subgraph_request.body_mut();
+ let hashed_query = hash_query(&sub_request.query_hash, body);
+ let representations = body
+ .variables
+ .get(REPRESENTATIONS)
+ .and_then(|value| value.as_array())?;
+
+ let keys = extract_cache_attributes(representations).ok()?;
+
+ Some(CacheAttributes {
+ headers: sub_request.subgraph_request.headers().clone(),
+ hashed_query: Arc::new(hashed_query),
+ representations: keys,
+ })
+ }
+
+ fn update_cache_metrics(
+ subgraph_name: &Arc<String>,
+ counter: &Mutex<CacheCounter>,
+ sub_response: &subgraph::Response,
+ cache_attributes: CacheAttributes,
+ ) {
+ let mut vary_headers = sub_response
+ .response
+ .headers()
+ .get_all(header::VARY)
+ .into_iter()
+ .filter_map(|val| {
+ val.to_str().ok().map(|v| {
+ v.to_string()
+ .split(", ")
+ .map(|s| s.to_string())
+ .collect::<Vec<String>>()
+ })
+ })
+ .flatten()
+ .collect::<Vec<String>>();
+ vary_headers.sort();
+ let vary_headers = vary_headers.join(", ");
+
+ let hashed_headers = if vary_headers.is_empty() {
+ Arc::default()
+ } else {
+ Arc::new(hash_vary_headers(&cache_attributes.headers))
+ };
+ println!("will update cache counter");
+
+ CacheCounter::record(
+ counter,
+ cache_attributes.hashed_query.clone(),
+ subgraph_name,
+ hashed_headers,
+ cache_attributes.representations,
+ );
+ }
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct CacheAttributes {
+ pub(crate) headers: http::HeaderMap,
+ pub(crate) hashed_query: Arc<String>,
+ // Typename + hashed_representation
+ pub(crate) representations: Vec<(Arc<String>, Value)>,
+}
+
+#[derive(Debug, Hash, Clone)]
+pub(crate) struct CacheKey {
+ pub(crate) representation: Value,
+ pub(crate) typename: Arc<String>,
+ pub(crate) query: Arc<String>,
+ pub(crate) subgraph_name: Arc<String>,
+ pub(crate) hashed_headers: Arc<String>,
+}
+
+// Get typename and hashed representation for each representations in the subgraph query
+pub(crate) fn extract_cache_attributes(
+ representations: &[Value],
+) -> Result<Vec<(Arc<String>, Value)>, BoxError> {
+ let mut res = Vec::new();
+ for representation in representations {
+ let opt_type = representation
+ .as_object()
+ .and_then(|o| o.get(TYPENAME))
+ .ok_or("missing __typename in representation")?;
+ let typename = opt_type.as_str().unwrap_or("");
+
+ res.push((Arc::new(typename.to_string()), representation.clone()));
+ }
+ Ok(res)
+}
+
+pub(crate) struct CacheCounter {
+ primary: Bloom<CacheKey>,
+ secondary: Bloom<CacheKey>,
+ created_at: Instant,
+ ttl: Duration,
+ per_type: bool,
+}
+
+impl CacheCounter {
+ pub(crate) fn new(ttl: Duration, per_type: bool) -> Self {
+ Self {
+ primary: Self::make_filter(),
+ secondary: Self::make_filter(),
+ created_at: Instant::now(),
+ ttl,
+ per_type,
+ }
+ }
+
+ fn make_filter() -> Bloom<CacheKey> {
+ // the filter is around 4kB in size (can be calculated with `Bloom::compute_bitmap_size`)
+ Bloom::new_for_fp_rate(10000, 0.2)
+ }
+
+ pub(crate) fn record(
+ counter: &Mutex<CacheCounter>,
+ query: Arc<String>,
+ subgraph_name: &Arc<String>,
+ hashed_headers: Arc<String>,
+ representations: Vec<(Arc<String>, Value)>,
+ ) {
+ let separate_metrics_per_type;
+ {
+ let mut c = counter.lock();
+ if c.created_at.elapsed() >= c.ttl {
+ c.clear();
+ }
+ separate_metrics_per_type = c.per_type;
+ }
+
+ // typename -> (nb of cache hits, nb of entities)
+ let mut seen: HashMap<Arc<String>, (usize, usize)> = HashMap::new();
+ let mut key = CacheKey {
+ representation: Value::Null,
+ typename: Arc::new(String::new()),
+ query,
+ subgraph_name: subgraph_name.clone(),
+ hashed_headers,
+ };
+ for (typename, representation) in representations {
+ let cache_hit;
+ key.typename = typename.clone();
+ key.representation = representation;
+
+ {
+ let mut c = counter.lock();
+ cache_hit = c.check(&key);
+ }
+
+ let seen_entry = seen.entry(typename.clone()).or_default();
+ if cache_hit {
+ seen_entry.0 += 1;
+ }
+ seen_entry.1 += 1;
+ }
+
+ for (typename, (cache_hit, total_entities)) in seen.into_iter() {
+ if separate_metrics_per_type {
+ ::tracing::info!(
+ histogram.apollo.router.operations.entity.cache_hit = (cache_hit as f64 / total_entities as f64) * 100f64,
+ entity_type = %typename,
+ subgraph = %subgraph_name,
+ );
+ } else {
+ ::tracing::info!(
+ histogram.apollo.router.operations.entity.cache_hit = (cache_hit as f64 / total_entities as f64) * 100f64,
+ subgraph = %subgraph_name,
+ );
+ }
+ }
+ }
+
+ fn check(&mut self, key: &CacheKey) -> bool {
+ self.primary.check_and_set(key) || self.secondary.check(key)
+ }
+
+ fn clear(&mut self) {
+ let secondary = std::mem::replace(&mut self.primary, Self::make_filter());
+ self.secondary = secondary;
+
+ self.created_at = Instant::now();
+ }
+}
diff --git a/apollo-router/src/plugins/cache/mod.rs b/apollo-router/src/plugins/cache/mod.rs
index a95d610a2a..084578434f 100644
--- a/apollo-router/src/plugins/cache/mod.rs
+++ b/apollo-router/src/plugins/cache/mod.rs
@@ -1,2 +1,5 @@
pub(crate) mod cache_control;
pub(crate) mod entity;
+pub(crate) mod metrics;
+#[cfg(test)]
+pub(crate) mod tests;
diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__insert-2.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__insert-2.snap
new file mode 100644
index 0000000000..e3d6799c33
--- /dev/null
+++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__insert-2.snap
@@ -0,0 +1,17 @@
+---
+source: apollo-router/src/plugins/cache/tests.rs
+expression: response
+---
+{
+ "data": {
+ "currentUser": {
+ "activeOrganization": {
+ "id": "1",
+ "creatorUser": {
+ "__typename": "User",
+ "id": 2
+ }
+ }
+ }
+ }
+}
diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__insert.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__insert.snap
new file mode 100644
index 0000000000..e3d6799c33
--- /dev/null
+++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__insert.snap
@@ -0,0 +1,17 @@
+---
+source: apollo-router/src/plugins/cache/tests.rs
+expression: response
+---
+{
+ "data": {
+ "currentUser": {
+ "activeOrganization": {
+ "id": "1",
+ "creatorUser": {
+ "__typename": "User",
+ "id": 2
+ }
+ }
+ }
+ }
+}
diff --git a/apollo-router/src/plugins/cache/tests.rs b/apollo-router/src/plugins/cache/tests.rs
new file mode 100644
index 0000000000..aedfd08562
--- /dev/null
+++ b/apollo-router/src/plugins/cache/tests.rs
@@ -0,0 +1,212 @@
+use std::collections::HashMap;
+use std::sync::Arc;
+
+use bytes::Bytes;
+use fred::error::RedisErrorKind;
+use fred::mocks::MockCommand;
+use fred::mocks::Mocks;
+use fred::prelude::RedisError;
+use fred::prelude::RedisValue;
+use parking_lot::Mutex;
+use tower::ServiceExt;
+
+use super::entity::EntityCache;
+use crate::cache::redis::RedisCacheStorage;
+use crate::plugin::test::MockSubgraph;
+use crate::services::supergraph;
+use crate::Context;
+use crate::MockedSubgraphs;
+use crate::TestHarness;
+
+const SCHEMA: &str = r#"schema
+ @core(feature: "https://specs.apollo.dev/core/v0.1")
+ @core(feature: "https://specs.apollo.dev/join/v0.1")
+ @core(feature: "https://specs.apollo.dev/inaccessible/v0.1")
+ {
+ query: Query
+ subscription: Subscription
+ }
+ directive @core(feature: String!) repeatable on SCHEMA
+ directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet) on FIELD_DEFINITION
+ directive @join__type(graph: join__Graph!, key: join__FieldSet) repeatable on OBJECT | INTERFACE
+ directive @join__owner(graph: join__Graph!) on OBJECT | INTERFACE
+ directive @join__graph(name: String!, url: String!) on ENUM_VALUE
+ directive @inaccessible on OBJECT | FIELD_DEFINITION | INTERFACE | UNION
+ scalar join__FieldSet
+ enum join__Graph {
+ USER @join__graph(name: "user", url: "http://localhost:4001/graphql")
+ ORGA @join__graph(name: "orga", url: "http://localhost:4002/graphql")
+ }
+ type Query {
+ currentUser: User @join__field(graph: USER)
+ }
+
+ type Subscription @join__type(graph: USER) {
+ userWasCreated: User
+ }
+
+ type User
+ @join__owner(graph: USER)
+ @join__type(graph: ORGA, key: "id")
+ @join__type(graph: USER, key: "id"){
+ id: ID!
+ name: String
+ activeOrganization: Organization
+ }
+ type Organization
+ @join__owner(graph: ORGA)
+ @join__type(graph: ORGA, key: "id")
+ @join__type(graph: USER, key: "id") {
+ id: ID
+ creatorUser: User
+ name: String
+ nonNullId: ID!
+ suborga: [Organization]
+ }"#;
+
+#[derive(Debug)]
+pub(crate) struct Mock1 {
+ set: Mutex<bool>,
+}
+
+impl Mock1 {
+ fn new() -> Mock1 {
+ Mock1 {
+ set: Mutex::new(false),
+ }
+ }
+}
+
+static USER_RESPONSE:&str = "{\"control\":{\"created\":1705069368},\"data\":{\"currentUser\":{\"activeOrganization\":{\"__typename\":\"Organization\",\"id\":\"1\"}}}}";
+static ORGA_RESPONSE:&str = "{\"control\":{\"created\":1705072093},\"data\":{\"creatorUser\":{\"__typename\":\"User\",\"id\":2}}}";
+impl Mocks for Mock1 {
+ fn process_command(&self, command: MockCommand) -> Result<RedisValue, RedisError> {
+ println!("received redis command: {command:?}");
+
+ match &*command.cmd {
+ "GET" => {
+ if let Some(RedisValue::Bytes(b)) = command.args.get(0) {
+ if b == &b"subgraph:user:Query:146a735f805c55554b5233253c17756deaa6ffd06696fafa4d6e3186e6efe592:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c"[..]{
+ let set = self.set.lock();
+ if *set {
+ return Ok(RedisValue::Bytes(Bytes::from(USER_RESPONSE)));
+ }
+ } else if b == &b"subgraph:orga:Organization:5811967f540d300d249ab30ae681359a7815fdb5d3dc71a94be1d491006a6b27:655f22a6af21d7ffe671d3ce4b33464a76ddfea0bf179740b15e804b11983c04:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c"[..] {
+ return Ok(RedisValue::Bytes(Bytes::from(ORGA_RESPONSE)));
+ }
+ }
+ }
+ "SET" => {
+ if let Some(RedisValue::Bytes(b)) = command.args.get(0) {
+ if b ==
+ &b"subgraph:user:Query:146a735f805c55554b5233253c17756deaa6ffd06696fafa4d6e3186e6efe592:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c"[..] {
+ let mut set = self.set.lock();
+ *set = true;
+
+ //FIXME: can't assert because the creation date changes
+ //assert_eq!(USER_RESPONSE, command.args.get(1).unwrap().as_str().unwrap(), );
+ return Ok(RedisValue::Null)
+ }
+ }
+ }
+ _ => {}
+ }
+ Err(RedisError::new(RedisErrorKind::NotFound, "mock not found"))
+ }
+}
+
+#[tokio::test]
+async fn insert() {
+ let query = "query { currentUser { activeOrganization { id creatorUser { __typename id } } } }";
+
+ let subgraphs = MockedSubgraphs([
+ ("user", MockSubgraph::builder().with_json(
+ serde_json::json!{{"query":"{currentUser{activeOrganization{__typename id}}}"}},
+ serde_json::json!{{"data": {"currentUser": { "activeOrganization": {
+ "__typename": "Organization",
+ "id": "1"
+ } }}}}
+ ).build()),
+ ("orga", MockSubgraph::builder().with_json(
+ serde_json::json!{{
+ "query": "query($representations:[_Any!]!){_entities(representations:$representations){...on Organization{creatorUser{__typename id}}}}",
+ "variables": {
+ "representations": [
+ {
+ "id": "1",
+ "__typename": "Organization",
+ }
+ ]
+ }}},
+ serde_json::json!{{"data": {
+ "_entities": [{
+ "creatorUser": {
+ "__typename": "User",
+ "id": 2
+ }
+ }]
+ }}}
+ ).build())
+ ].into_iter().collect());
+
+ let redis_cache = RedisCacheStorage::from_mocks(Arc::new(Mock1::new()))
+ .await
+ .unwrap();
+ let entity_cache = EntityCache::with_mocks(redis_cache.clone(), HashMap::new())
+ .await
+ .unwrap();
+
+ let service = TestHarness::builder()
+ .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } }))
+ .unwrap()
+ .schema(SCHEMA)
+ .extra_plugin(entity_cache)
+ .extra_plugin(subgraphs)
+ .build_supergraph()
+ .await
+ .unwrap();
+
+ let request = supergraph::Request::fake_builder()
+ .query(query)
+ .context(Context::new())
+ .build()
+ .unwrap();
+ let response = service
+ .oneshot(request)
+ .await
+ .unwrap()
+ .next_response()
+ .await
+ .unwrap();
+
+ insta::assert_json_snapshot!(response);
+
+ // Now testing without any mock subgraphs, all the data should come from the cache
+ let entity_cache = EntityCache::with_mocks(redis_cache.clone(), HashMap::new())
+ .await
+ .unwrap();
+
+ let service = TestHarness::builder()
+ .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } }))
+ .unwrap()
+ .schema(SCHEMA)
+ .extra_plugin(entity_cache)
+ .build_supergraph()
+ .await
+ .unwrap();
+
+ let request = supergraph::Request::fake_builder()
+ .query(query)
+ .context(Context::new())
+ .build()
+ .unwrap();
+ let response = service
+ .oneshot(request)
+ .await
+ .unwrap()
+ .next_response()
+ .await
+ .unwrap();
+
+ insta::assert_json_snapshot!(response);
+}
diff --git a/apollo-router/src/plugins/coprocessor/execution.rs b/apollo-router/src/plugins/coprocessor/execution.rs
new file mode 100644
index 0000000000..9bcbf4ce82
--- /dev/null
+++ b/apollo-router/src/plugins/coprocessor/execution.rs
@@ -0,0 +1,1015 @@
+use std::ops::ControlFlow;
+use std::sync::Arc;
+
+use futures::future;
+use futures::stream;
+use schemars::JsonSchema;
+use serde::Deserialize;
+use serde::Serialize;
+use tower::BoxError;
+use tower::ServiceBuilder;
+use tower_service::Service;
+
+use super::externalize_header_map;
+use super::*;
+use crate::graphql;
+use crate::layers::async_checkpoint::OneShotAsyncCheckpointLayer;
+use crate::layers::ServiceBuilderExt;
+use crate::plugins::coprocessor::EXTERNAL_SPAN_NAME;
+use crate::response;
+use crate::services::execution;
+
+/// What information is passed to a router request/response stage
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)]
+#[serde(default, deny_unknown_fields)]
+pub(super) struct ExecutionRequestConf {
+ /// Send the headers
+ pub(super) headers: bool,
+ /// Send the context
+ pub(super) context: bool,
+ /// Send the body
+ pub(super) body: bool,
+ /// Send the SDL
+ pub(super) sdl: bool,
+ /// Send the method
+ pub(super) method: bool,
+ /// Send the query plan
+ pub(super) query_plan: bool,
+}
+
+/// What information is passed to a router request/response stage
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)]
+#[serde(default, deny_unknown_fields)]
+pub(super) struct ExecutionResponseConf {
+ /// Send the headers
+ pub(super) headers: bool,
+ /// Send the context
+ pub(super) context: bool,
+ /// Send the body
+ pub(super) body: bool,
+ /// Send the SDL
+ pub(super) sdl: bool,
+ /// Send the HTTP status
+ pub(super) status_code: bool,
+}
+
+#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize, JsonSchema)]
+#[serde(default)]
+pub(super) struct ExecutionStage {
+ /// The request configuration
+ pub(super) request: ExecutionRequestConf,
+ // /// The response configuration
+ pub(super) response: ExecutionResponseConf,
+}
+
+impl ExecutionStage {
+ pub(crate) fn as_service<C>(
+ &self,
+ http_client: C,
+ service: execution::BoxService,
+ coprocessor_url: String,
+ sdl: Arc<String>,
+ ) -> execution::BoxService
+ where
+ C: Service<hyper::Request<Body>, Response = hyper::Response<Body>, Error = BoxError>
+ + Clone
+ + Send
+ + Sync
+ + 'static,
+ <C as tower::Service<hyper::Request<Body>>>::Future: Send + 'static,
+ {
+ let request_layer = (self.request != Default::default()).then_some({
+ let request_config = self.request.clone();
+ let coprocessor_url = coprocessor_url.clone();
+ let http_client = http_client.clone();
+ let sdl = sdl.clone();
+
+ OneShotAsyncCheckpointLayer::new(move |request: execution::Request| {
+ let request_config = request_config.clone();
+ let coprocessor_url = coprocessor_url.clone();
+ let http_client = http_client.clone();
+ let sdl = sdl.clone();
+
+ async move {
+ let mut succeeded = true;
+ let result = process_execution_request_stage(
+ http_client,
+ coprocessor_url,
+ sdl,
+ request,
+ request_config,
+ )
+ .await
+ .map_err(|error| {
+ succeeded = false;
+ tracing::error!(
+ "external extensibility: execution request stage error: {error}"
+ );
+ error
+ });
+
+ u64_counter!(
+ "apollo.router.operations.coprocessor",
+ "Total operations with co-processors enabled",
+ 1,
+ "coprocessor.stage" = PipelineStep::ExecutionRequest,
+ "coprocessor.succeeded" = succeeded
+ );
+ result
+ }
+ })
+ });
+
+ let response_layer = (self.response != Default::default()).then_some({
+ let response_config = self.response.clone();
+
+ MapFutureLayer::new(move |fut| {
+ let coprocessor_url = coprocessor_url.clone();
+ let sdl: Arc<String> = sdl.clone();
+ let http_client = http_client.clone();
+ let response_config = response_config.clone();
+
+ async move {
+ let response: execution::Response = fut.await?;
+
+ let mut succeeded = true;
+ let result = process_execution_response_stage(
+ http_client,
+ coprocessor_url,
+ sdl,
+ response,
+ response_config,
+ )
+ .await
+ .map_err(|error| {
+ succeeded = false;
+ tracing::error!(
+ "external extensibility: router response stage error: {error}"
+ );
+ error
+ });
+
+ u64_counter!(
+ "apollo.router.operations.coprocessor",
+ "Total operations with co-processors enabled",
+ 1,
+ "coprocessor.stage" = PipelineStep::ExecutionResponse,
+ "coprocessor.succeeded" = succeeded
+ );
+ result
+ }
+ })
+ });
+
+ fn external_service_span() -> impl Fn(&execution::Request) -> tracing::Span + Clone {
+ move |_request: &execution::Request| {
+ tracing::info_span!(
+ EXTERNAL_SPAN_NAME,
+ "external service" = stringify!(execution::Request),
+ "otel.kind" = "INTERNAL"
+ )
+ }
+ }
+
+ ServiceBuilder::new()
+ .instrument(external_service_span())
+ .option_layer(request_layer)
+ .option_layer(response_layer)
+ .service(service)
+ .boxed()
+ }
+}
+
+async fn process_execution_request_stage<C>(
+ http_client: C,
+ coprocessor_url: String,
+ sdl: Arc<String>,
+ mut request: execution::Request,
+ request_config: ExecutionRequestConf,
+) -> Result<ControlFlow<execution::Response, execution::Request>, BoxError>
+where
+ C: Service<hyper::Request<Body>, Response = hyper::Response<Body>, Error = BoxError>
+ + Clone
+ + Send
+ + Sync
+ + 'static,
+ <C as tower::Service<hyper::Request<Body>>>::Future: Send + 'static,
+{
+ // Call into our out of process processor with a body of our body
+ // First, extract the data we need from our request and prepare our
+ // external call. Use our configuration to figure out which data to send.
+ let (parts, body) = request.supergraph_request.into_parts();
+ let bytes = Bytes::from(serde_json::to_vec(&body)?);
+
+ let headers_to_send = request_config
+ .headers
+ .then(|| externalize_header_map(&parts.headers))
+ .transpose()?;
+
+ let body_to_send = request_config
+ .body
+ .then(|| serde_json::from_slice::<serde_json::Value>(&bytes))
+ .transpose()?;
+ let context_to_send = request_config.context.then(|| request.context.clone());
+ let sdl_to_send = request_config.sdl.then(|| sdl.clone().to_string());
+ let method = request_config.method.then(|| parts.method.to_string());
+ let query_plan = request_config
+ .query_plan
+ .then(|| request.query_plan.clone());
+
+ let payload = Externalizable::execution_builder()
+ .stage(PipelineStep::ExecutionRequest)
+ .control(Control::default())
+ .id(request.context.id.clone())
+ .and_headers(headers_to_send)
+ .and_body(body_to_send)
+ .and_context(context_to_send)
+ .and_method(method)
+ .and_sdl(sdl_to_send)
+ .and_query_plan(query_plan)
+ .build();
+
+ tracing::debug!(?payload, "externalized output");
+ let guard = request.context.enter_active_request();
+ let start = Instant::now();
+ let co_processor_result = payload.call(http_client, &coprocessor_url).await;
+ let duration = start.elapsed().as_secs_f64();
+ drop(guard);
+ tracing::info!(
+ histogram.apollo.router.operations.coprocessor.duration = duration,
+ coprocessor.stage = %PipelineStep::ExecutionRequest,
+ );
+
+ tracing::debug!(?co_processor_result, "co-processor returned");
+ let co_processor_output = co_processor_result?;
+ validate_coprocessor_output(&co_processor_output, PipelineStep::ExecutionRequest)?;
+ // unwrap is safe here because validate_coprocessor_output made sure control is available
+ let control = co_processor_output.control.expect("validated above; qed");
+
+ // Thirdly, we need to interpret the control flow which may have been
+ // updated by our co-processor and decide if we should proceed or stop.
+
+ if matches!(control, Control::Break(_)) {
+ // Ensure the code is a valid http status code
+ let code = control.get_http_status()?;
+
+ let res = {
+ let graphql_response: crate::graphql::Response =
+ serde_json::from_value(co_processor_output.body.unwrap_or(serde_json::Value::Null))
+ .unwrap_or_else(|error| {
+ crate::graphql::Response::builder()
+ .errors(vec![Error::builder()
+ .message(format!(
+ "couldn't deserialize coprocessor output body: {error}"
+ ))
+ .extension_code("EXTERNAL_DESERIALIZATION_ERROR")
+ .build()])
+ .build()
+ });
+
+ let mut http_response = http::Response::builder()
+ .status(code)
+ .body(stream::once(future::ready(graphql_response)).boxed())?;
+ if let Some(headers) = co_processor_output.headers {
+ *http_response.headers_mut() = internalize_header_map(headers)?;
+ }
+
+ let execution_response = execution::Response {
+ response: http_response,
+ context: request.context,
+ };
+
+ if let Some(context) = co_processor_output.context {
+ for (key, value) in context.try_into_iter()? {
+ execution_response
+ .context
+ .upsert_json_value(key, move |_current| value);
+ }
+ }
+
+ execution_response
+ };
+ return Ok(ControlFlow::Break(res));
+ }
+
+ // Finally, process our reply and act on the contents. Our processing logic is
+ // that we replace "bits" of our incoming request with the updated bits if they
+ // are present in our co_processor_output.
+
+ let new_body: crate::graphql::Request = match co_processor_output.body {
+ Some(value) => serde_json::from_value(value)?,
+ None => body,
+ };
+
+ request.supergraph_request = http::Request::from_parts(parts, new_body);
+
+ if let Some(context) = co_processor_output.context {
+ for (key, value) in context.try_into_iter()? {
+ request
+ .context
+ .upsert_json_value(key, move |_current| value);
+ }
+ }
+
+ if let Some(headers) = co_processor_output.headers {
+ *request.supergraph_request.headers_mut() = internalize_header_map(headers)?;
+ }
+
+ if let Some(uri) = co_processor_output.uri {
+ *request.supergraph_request.uri_mut() = uri.parse()?;
+ }
+
+ Ok(ControlFlow::Continue(request))
+}
+
+async fn process_execution_response_stage<C>(
+ http_client: C,
+ coprocessor_url: String,
+ sdl: Arc<String>,
+ response: execution::Response,
+ response_config: ExecutionResponseConf,
+) -> Result<execution::Response, BoxError>
+where
+ C: Service<hyper::Request<Body>, Response = hyper::Response<Body>, Error = BoxError>
+ + Clone
+ + Send
+ + Sync
+ + 'static,
+ <C as tower::Service<hyper::Request<Body>>>::Future: Send + 'static,
+{
+ // split the response into parts + body
+ let (mut parts, body) = response.response.into_parts();
+
+ // we split the body (which is a stream) into first response + rest of responses,
+ // for which we will implement mapping later
+ let (first, rest): (Option<response::Response>, graphql::ResponseStream) =
+ body.into_future().await;
+
+ // If first is None, we return an error
+ let first = first.ok_or_else(|| {
+ BoxError::from("Coprocessor cannot convert body into future due to problem with first part")
+ })?;
+
+ // Now we process our first chunk of response
+ // Encode headers, body, status, context, sdl to create a payload
+ let headers_to_send = response_config
+ .headers
+ .then(|| externalize_header_map(&parts.headers))
+ .transpose()?;
+ let body_to_send = response_config
+ .body
+ .then(|| serde_json::to_value(&first).expect("serialization will not fail"));
+ let status_to_send = response_config.status_code.then(|| parts.status.as_u16());
+ let context_to_send = response_config.context.then(|| response.context.clone());
+ let sdl_to_send = response_config.sdl.then(|| sdl.clone().to_string());
+
+ let payload = Externalizable::execution_builder()
+ .stage(PipelineStep::ExecutionResponse)
+ .id(response.context.id.clone())
+ .and_headers(headers_to_send)
+ .and_body(body_to_send)
+ .and_context(context_to_send)
+ .and_status_code(status_to_send)
+ .and_sdl(sdl_to_send.clone())
+ .and_has_next(first.has_next)
+ .build();
+
+ // Second, call our co-processor and get a reply.
+ tracing::debug!(?payload, "externalized output");
+ let guard = response.context.enter_active_request();
+ let start = Instant::now();
+ let co_processor_result = payload.call(http_client.clone(), &coprocessor_url).await;
+ let duration = start.elapsed().as_secs_f64();
+ drop(guard);
+ tracing::info!(
+ histogram.apollo.router.operations.coprocessor.duration = duration,
+ coprocessor.stage = %PipelineStep::ExecutionResponse,
+ );
+
+ tracing::debug!(?co_processor_result, "co-processor returned");
+ let co_processor_output = co_processor_result?;
+
+ validate_coprocessor_output(&co_processor_output, PipelineStep::ExecutionResponse)?;
+
+ // Third, process our reply and act on the contents. Our processing logic is
+ // that we replace "bits" of our incoming response with the updated bits if they
+ // are present in our co_processor_output. If they aren't present, just use the
+ // bits that we sent to the co_processor.
+ let new_body: crate::response::Response = match co_processor_output.body {
+ Some(value) => serde_json::from_value(value)?,
+ None => first,
+ };
+
+ if let Some(control) = co_processor_output.control {
+ parts.status = control.get_http_status()?
+ }
+
+ if let Some(context) = co_processor_output.context {
+ for (key, value) in context.try_into_iter()? {
+ response
+ .context
+ .upsert_json_value(key, move |_current| value);
+ }
+ }
+
+ if let Some(headers) = co_processor_output.headers {
+ parts.headers = internalize_header_map(headers)?;
+ }
+
+ // Clone all the bits we need
+ let context = response.context.clone();
+ let map_context = response.context.clone();
+
+ // Map the rest of our body to process subsequent chunks of response
+ let mapped_stream = rest
+ .then(move |deferred_response| {
+ let generator_client = http_client.clone();
+ let generator_coprocessor_url = coprocessor_url.clone();
+ let generator_map_context = map_context.clone();
+ let generator_sdl_to_send = sdl_to_send.clone();
+ let generator_id = map_context.id.clone();
+
+ async move {
+ let body_to_send = response_config.body.then(|| {
+ serde_json::to_value(&deferred_response).expect("serialization will not fail")
+ });
+ let context_to_send = response_config
+ .context
+ .then(|| generator_map_context.clone());
+
+ // Note: We deliberately DO NOT send headers or status_code even if the user has
+ // requested them. That's because they are meaningless on a deferred response and
+ // providing them will be a source of confusion.
+ let payload = Externalizable::execution_builder()
+ .stage(PipelineStep::ExecutionResponse)
+ .id(generator_id)
+ .and_body(body_to_send)
+ .and_context(context_to_send)
+ .and_sdl(generator_sdl_to_send)
+ .and_has_next(deferred_response.has_next)
+ .build();
+
+ // Second, call our co-processor and get a reply.
+ tracing::debug!(?payload, "externalized output");
+ let guard = generator_map_context.enter_active_request();
+ let co_processor_result = payload
+ .call(generator_client, &generator_coprocessor_url)
+ .await;
+ drop(guard);
+ tracing::debug!(?co_processor_result, "co-processor returned");
+ let co_processor_output = co_processor_result?;
+
+ validate_coprocessor_output(&co_processor_output, PipelineStep::ExecutionResponse)?;
+
+ // Third, process our reply and act on the contents. Our processing logic is
+ // that we replace "bits" of our incoming response with the updated bits if they
+ // are present in our co_processor_output. If they aren't present, just use the
+ // bits that we sent to the co_processor.
+ let new_deferred_response: crate::response::Response =
+ match co_processor_output.body {
+ Some(value) => serde_json::from_value(value)?,
+ None => deferred_response,
+ };
+
+ if let Some(context) = co_processor_output.context {
+ for (key, value) in context.try_into_iter()? {
+ generator_map_context.upsert_json_value(key, move |_current| value);
+ }
+ }
+
+ // We return the deferred_response into our stream of response chunks
+ Ok(new_deferred_response)
+ }
+ })
+ .map(|res: Result<response::Response, BoxError>| match res {
+ Ok(response) => response,
+ Err(e) => {
+ tracing::error!("coprocessor error handling deferred execution response: {e}");
+ response::Response::builder()
+ .error(
+ Error::builder()
+ .message("Internal error handling deferred response")
+ .extension_code("INTERNAL_ERROR")
+ .build(),
+ )
+ .build()
+ }
+ });
+
+ // Create our response stream which consists of our first body chained with the
+ // rest of the responses in our mapped stream.
+ let stream = once(ready(new_body)).chain(mapped_stream).boxed();
+
+ // Finally, return a response which has a Body that wraps our stream of response chunks.
+ Ok(execution::Response {
+ context,
+ response: http::Response::from_parts(parts, stream),
+ })
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use futures::future::BoxFuture;
+ use http::StatusCode;
+ use hyper::Body;
+ use serde_json::json;
+ use tower::BoxError;
+ use tower::ServiceExt;
+
+ use super::super::*;
+ use super::*;
+ use crate::plugin::test::MockExecutionService;
+ use crate::plugin::test::MockHttpClientService;
+ use crate::services::execution;
+
+ #[allow(clippy::type_complexity)]
+ pub(crate) fn mock_with_callback(
+ callback: fn(
+ hyper::Request<Body>,
+ ) -> BoxFuture<'static, Result<hyper::Response<Body>, BoxError>>,
+ ) -> MockHttpClientService {
+ let mut mock_http_client = MockHttpClientService::new();
+ mock_http_client.expect_clone().returning(move || {
+ let mut mock_http_client = MockHttpClientService::new();
+
+ mock_http_client.expect_clone().returning(move || {
+ let mut mock_http_client = MockHttpClientService::new();
+ mock_http_client.expect_call().returning(callback);
+ mock_http_client
+ });
+ mock_http_client
+ });
+
+ mock_http_client
+ }
+
+ #[allow(clippy::type_complexity)]
+ fn mock_with_deferred_callback(
+ callback: fn(
+ hyper::Request<Body>,
+ ) -> BoxFuture<'static, Result<hyper::Response<Body>, BoxError>>,
+ ) -> MockHttpClientService {
+ let mut mock_http_client = MockHttpClientService::new();
+ mock_http_client.expect_clone().returning(move || {
+ let mut mock_http_client = MockHttpClientService::new();
+ mock_http_client.expect_clone().returning(move || {
+ let mut mock_http_client = MockHttpClientService::new();
+ mock_http_client.expect_clone().returning(move || {
+ let mut mock_http_client = MockHttpClientService::new();
+ mock_http_client.expect_call().returning(callback);
+ mock_http_client
+ });
+ mock_http_client
+ });
+ mock_http_client
+ });
+
+ mock_http_client
+ }
+
+ #[tokio::test]
+ async fn external_plugin_execution_request() {
+ let execution_stage = ExecutionStage {
+ request: ExecutionRequestConf {
+ headers: false,
+ context: false,
+ body: true,
+ sdl: false,
+ method: false,
+ query_plan: false,
+ },
+ response: Default::default(),
+ };
+
+ // This will never be called because we will fail at the coprocessor.
+ let mut mock_execution_service = MockExecutionService::new();
+
+ mock_execution_service
+ .expect_call()
+ .returning(|req: execution::Request| {
+ // Let's assert that the subgraph request has been transformed as it should have.
+ assert_eq!(
+ req.supergraph_request.headers().get("cookie").unwrap(),
+ "tasty_cookie=strawberry"
+ );
+
+ assert_eq!(
+ req.context
+ .get::<&str, u8>("this-is-a-test-context")
+ .unwrap()
+ .unwrap(),
+ 42
+ );
+
+ // The subgraph uri should have changed
+ assert_eq!(
+ Some("MyQuery"),
+ req.supergraph_request.body().operation_name.as_deref()
+ );
+
+ // The query should have changed
+ assert_eq!(
+ "query Long {\n me {\n name\n}\n}",
+ req.supergraph_request.body().query.as_ref().unwrap()
+ );
+
+ Ok(execution::Response::builder()
+ .data(json!({ "test": 1234_u32 }))
+ .errors(Vec::new())
+ .extensions(crate::json_ext::Object::new())
+ .context(req.context)
+ .build()
+ .unwrap())
+ });
+
+ let mock_http_client = mock_with_callback(move |_: hyper::Request<Body>| {
+ Box::pin(async {
+ Ok(hyper::Response::builder()
+ .body(Body::from(
+ r#"{
+ "version": 1,
+ "stage": "ExecutionRequest",
+ "control": "continue",
+ "headers": {
+ "cookie": [
+ "tasty_cookie=strawberry"
+ ],
+ "content-type": [
+ "application/json"
+ ],
+ "host": [
+ "127.0.0.1:4000"
+ ],
+ "apollo-federation-include-trace": [
+ "ftv1"
+ ],
+ "apollographql-client-name": [
+ "manual"
+ ],
+ "accept": [
+ "*/*"
+ ],
+ "user-agent": [
+ "curl/7.79.1"
+ ],
+ "content-length": [
+ "46"
+ ]
+ },
+ "body": {
+ "query": "query Long {\n me {\n name\n}\n}",
+ "operationName": "MyQuery"
+ },
+ "context": {
+ "entries": {
+ "accepts-json": false,
+ "accepts-wildcard": true,
+ "accepts-multipart": false,
+ "this-is-a-test-context": 42
+ }
+ },
+ "serviceName": "service name shouldn't change",
+ "uri": "http://thisurihaschanged"
+ }"#,
+ ))
+ .unwrap())
+ })
+ });
+
+ let service = execution_stage.as_service(
+ mock_http_client,
+ mock_execution_service.boxed(),
+ "http://test".to_string(),
+ Arc::new("".to_string()),
+ );
+
+ let request = execution::Request::fake_builder().build();
+
+ assert_eq!(
+ serde_json_bytes::json!({ "test": 1234_u32 }),
+ service
+ .oneshot(request)
+ .await
+ .unwrap()
+ .response
+ .into_body()
+ .next()
+ .await
+ .unwrap()
+ .data
+ .unwrap()
+ );
+ }
+
+ #[tokio::test]
+ async fn external_plugin_execution_request_controlflow_break() {
+ let execution_stage = ExecutionStage {
+ request: ExecutionRequestConf {
+ headers: false,
+ context: false,
+ body: true,
+ sdl: false,
+ method: false,
+ query_plan: false,
+ },
+ response: Default::default(),
+ };
+
+ // This will never be called because we will fail at the coprocessor.
+ let mock_execution_service = MockExecutionService::new();
+
+ let mock_http_client = mock_with_callback(move |_: hyper::Request<Body>| {
+ Box::pin(async {
+ Ok(hyper::Response::builder()
+ .body(Body::from(
+ r#"{
+ "version": 1,
+ "stage": "ExecutionRequest",
+ "control": {
+ "break": 200
+ },
+ "body": {
+ "errors": [{ "message": "my error message" }]
+ },
+ "context": {
+ "entries": {
+ "testKey": true
+ }
+ },
+ "headers": {
+ "aheader": ["a value"]
+ }
+ }"#,
+ ))
+ .unwrap())
+ })
+ });
+
+ let service = execution_stage.as_service(
+ mock_http_client,
+ mock_execution_service.boxed(),
+ "http://test".to_string(),
+ Arc::new("".to_string()),
+ );
+
+ let request = execution::Request::fake_builder().build();
+
+ let crate::services::execution::Response {
+ mut response,
+ context,
+ } = service.oneshot(request).await.unwrap();
+
+ assert!(context.get::<_, bool>("testKey").unwrap().unwrap());
+
+ let value = response.headers().get("aheader").unwrap();
+
+ assert_eq!(value, "a value");
+
+ assert_eq!(
+ response.body_mut().next().await.unwrap().errors[0]
+ .message
+ .as_str(),
+ "my error message"
+ );
+ }
+
+ #[tokio::test]
+ async fn external_plugin_execution_response() {
+ let execution_stage = ExecutionStage {
+ response: ExecutionResponseConf {
+ headers: true,
+ context: true,
+ body: true,
+ sdl: true,
+ status_code: false,
+ },
+ request: Default::default(),
+ };
+
+ let mut mock_execution_service = MockExecutionService::new();
+
+ mock_execution_service
+ .expect_call()
+ .returning(|req: execution::Request| {
+ Ok(execution::Response::builder()
+ .data(json!({ "test": 1234_u32 }))
+ .errors(Vec::new())
+ .extensions(crate::json_ext::Object::new())
+ .context(req.context)
+ .build()
+ .unwrap())
+ });
+
+ let mock_http_client = mock_with_deferred_callback(move |res: hyper::Request<Body>| {
+ Box::pin(async {
+ let deserialized_response: Externalizable<serde_json::Value> =
+ serde_json::from_slice(&hyper::body::to_bytes(res.into_body()).await.unwrap())
+ .unwrap();
+
+ assert_eq!(EXTERNALIZABLE_VERSION, deserialized_response.version);
+ assert_eq!(
+ PipelineStep::ExecutionResponse.to_string(),
+ deserialized_response.stage
+ );
+
+ assert_eq!(
+ json! {{"data":{ "test": 1234_u32 }}},
+ deserialized_response.body.unwrap()
+ );
+
+ let input = json!(
+ {
+ "version": 1,
+ "stage": "ExecutionResponse",
+ "control": {
+ "break": 400
+ },
+ "id": "1b19c05fdafc521016df33148ad63c1b",
+ "headers": {
+ "cookie": [
+ "tasty_cookie=strawberry"
+ ],
+ "content-type": [
+ "application/json"
+ ],
+ "host": [
+ "127.0.0.1:4000"
+ ],
+ "apollo-federation-include-trace": [
+ "ftv1"
+ ],
+ "apollographql-client-name": [
+ "manual"
+ ],
+ "accept": [
+ "*/*"
+ ],
+ "user-agent": [
+ "curl/7.79.1"
+ ],
+ "content-length": [
+ "46"
+ ]
+ },
+ "body": {
+ "data": { "test": 42 }
+ },
+ "context": {
+ "entries": {
+ "accepts-json": false,
+ "accepts-wildcard": true,
+ "accepts-multipart": false,
+ "this-is-a-test-context": 42
+ }
+ },
+ "sdl": "the sdl shouldn't change"
+ });
+ Ok(hyper::Response::builder()
+ .body(Body::from(serde_json::to_string(&input).unwrap()))
+ .unwrap())
+ })
+ });
+
+ let service = execution_stage.as_service(
+ mock_http_client,
+ mock_execution_service.boxed(),
+ "http://test".to_string(),
+ Arc::new("".to_string()),
+ );
+
+ let request = execution::Request::fake_builder().build();
+
+ let mut res = service.oneshot(request).await.unwrap();
+
+ // Let's assert that the router request has been transformed as it should have.
+ assert_eq!(res.response.status(), StatusCode::BAD_REQUEST);
+ assert_eq!(
+ res.response.headers().get("cookie").unwrap(),
+ "tasty_cookie=strawberry"
+ );
+
+ assert_eq!(
+ res.context
+ .get::<&str, u8>("this-is-a-test-context")
+ .unwrap()
+ .unwrap(),
+ 42
+ );
+
+ let body = res.response.body_mut().next().await.unwrap();
+ // the body should have changed:
+ assert_eq!(
+ serde_json::to_value(&body).unwrap(),
+ json!({ "data": { "test": 42_u32 } }),
+ );
+ }
+
+ #[tokio::test]
+ async fn multi_part() {
+ let execution_stage = ExecutionStage {
+ response: ExecutionResponseConf {
+ headers: true,
+ context: true,
+ body: true,
+ sdl: true,
+ status_code: false,
+ },
+ request: Default::default(),
+ };
+
+ let mut mock_execution_service = MockExecutionService::new();
+
+ mock_execution_service
+ .expect_call()
+ .returning(|req: execution::Request| {
+ Ok(execution::Response::fake_stream_builder()
+ .response(
+ graphql::Response::builder()
+ .data(json!({ "test": 1 }))
+ .has_next(true)
+ .build(),
+ )
+ .response(
+ graphql::Response::builder()
+ .data(json!({ "test": 2 }))
+ .has_next(true)
+ .build(),
+ )
+ .response(
+ graphql::Response::builder()
+ .data(json!({ "test": 3 }))
+ .has_next(false)
+ .build(),
+ )
+ .context(req.context)
+ .build()
+ .unwrap())
+ });
+
+ let mock_http_client = mock_with_deferred_callback(move |res: hyper::Request<Body>| {
+ Box::pin(async {
+ let mut deserialized_response: Externalizable<serde_json::Value> =
+ serde_json::from_slice(&hyper::body::to_bytes(res.into_body()).await.unwrap())
+ .unwrap();
+ assert_eq!(EXTERNALIZABLE_VERSION, deserialized_response.version);
+ assert_eq!(
+ PipelineStep::ExecutionResponse.to_string(),
+ deserialized_response.stage
+ );
+
+ // Copy the has_next from the body into the data for checking later
+ deserialized_response
+ .body
+ .as_mut()
+ .unwrap()
+ .as_object_mut()
+ .unwrap()
+ .get_mut("data")
+ .unwrap()
+ .as_object_mut()
+ .unwrap()
+ .insert(
+ "has_next".to_string(),
+ serde_json::Value::from(deserialized_response.has_next.unwrap_or_default()),
+ );
+
+ Ok(hyper::Response::builder()
+ .body(Body::from(
+ serde_json::to_string(&deserialized_response).unwrap_or_default(),
+ ))
+ .unwrap())
+ })
+ });
+
+ let service = execution_stage.as_service(
+ mock_http_client,
+ mock_execution_service.boxed(),
+ "http://test".to_string(),
+ Arc::new("".to_string()),
+ );
+
+ let request = execution::Request::fake_builder()
+ //.query("foo")
+ .build();
+
+ let mut res = service.oneshot(request).await.unwrap();
+
+ let body = res.response.body_mut().next().await.unwrap();
+ assert_eq!(
+ serde_json::to_value(&body).unwrap(),
+ json!({ "data": { "test": 1, "has_next": true }, "hasNext": true }),
+ );
+ let body = res.response.body_mut().next().await.unwrap();
+ assert_eq!(
+ serde_json::to_value(&body).unwrap(),
+ json!({ "data": { "test": 2, "has_next": true }, "hasNext": true }),
+ );
+ let body = res.response.body_mut().next().await.unwrap();
+ assert_eq!(
+ serde_json::to_value(&body).unwrap(),
+ json!({ "data": { "test": 3, "has_next": false }, "hasNext": false }),
+ );
+ }
+}
diff --git a/apollo-router/src/plugins/coprocessor/mod.rs b/apollo-router/src/plugins/coprocessor/mod.rs
index d32943aba4..a79ff4da2b 100644
--- a/apollo-router/src/plugins/coprocessor/mod.rs
+++ b/apollo-router/src/plugins/coprocessor/mod.rs
@@ -51,6 +51,7 @@ use crate::services::trust_dns_connector::AsyncHyperResolver;
#[cfg(test)]
mod test;
+mod execution;
mod supergraph;
pub(crate) const EXTERNAL_SPAN_NAME: &str = "external_plugin";
@@ -105,6 +106,13 @@ impl Plugin for CoprocessorPlugin {
self.supergraph_service(service)
}
+ fn execution_service(
+ &self,
+ service: services::execution::BoxService,
+ ) -> services::execution::BoxService {
+ self.execution_service(service)
+ }
+
fn subgraph_service(&self, name: &str, service: subgraph::BoxService) -> subgraph::BoxService {
self.subgraph_service(name, service)
}
@@ -180,6 +188,18 @@ where
)
}
+ fn execution_service(
+ &self,
+ service: services::execution::BoxService,
+ ) -> services::execution::BoxService {
+ self.configuration.execution.as_service(
+ self.http_client.clone(),
+ service,
+ self.configuration.url.clone(),
+ self.sdl.clone(),
+ )
+ }
+
fn subgraph_service(&self, name: &str, service: subgraph::BoxService) -> subgraph::BoxService {
self.configuration.subgraph.all.as_service(
self.http_client.clone(),
@@ -273,6 +293,9 @@ struct Conf {
/// The supergraph stage request/response configuration
#[serde(default)]
supergraph: supergraph::SupergraphStage,
+ /// The execution stage request/response configuration
+ #[serde(default)]
+ execution: execution::ExecutionStage,
/// The subgraph stage request/response configuration
#[serde(default)]
subgraph: SubgraphStages,
diff --git a/apollo-router/src/plugins/headers.rs b/apollo-router/src/plugins/headers.rs
index 5be415624c..73a15a0b13 100644
--- a/apollo-router/src/plugins/headers.rs
+++ b/apollo-router/src/plugins/headers.rs
@@ -1,11 +1,15 @@
use std::collections::HashMap;
+use std::collections::HashSet;
use std::sync::Arc;
use std::task::Context;
use std::task::Poll;
use access_json::JSONQuery;
use http::header::HeaderName;
+use http::header::ACCEPT;
+use http::header::ACCEPT_ENCODING;
use http::header::CONNECTION;
+use http::header::CONTENT_ENCODING;
use http::header::CONTENT_LENGTH;
use http::header::CONTENT_TYPE;
use http::header::HOST;
@@ -16,7 +20,6 @@ use http::header::TRAILER;
use http::header::TRANSFER_ENCODING;
use http::header::UPGRADE;
use http::HeaderValue;
-use lazy_static::lazy_static;
use regex::Regex;
use schemars::JsonSchema;
use serde::Deserialize;
@@ -235,11 +238,15 @@ impl Plugin for Headers {
struct HeadersLayer {
operations: Arc<Vec<Operation>>,
+ reserved_headers: Arc<HashSet<&'static HeaderName>>,
}
impl HeadersLayer {
fn new(operations: Arc<Vec<Operation>>) -> Self {
- Self { operations }
+ Self {
+ operations,
+ reserved_headers: Arc::new(RESERVED_HEADERS.iter().collect()),
+ }
}
}
@@ -250,35 +257,37 @@ impl Layer for HeadersLayer {
HeadersService {
inner,
operations: self.operations.clone(),
+ reserved_headers: self.reserved_headers.clone(),
}
}
}
struct HeadersService<S> {
inner: S,
operations: Arc<Vec<Operation>>,
+ reserved_headers: Arc<HashSet<&'static HeaderName>>,
}
-lazy_static! {
- // Headers from https://datatracker.ietf.org/doc/html/rfc2616#section-13.5.1
- // These are not propagated by default using a regex match as they will not make sense for the
- // second hop.
- // In addition because our requests are not regular proxy requests content-type, content-length
- // and host are also in the exclude list.
- static ref RESERVED_HEADERS: Vec<HeaderName> = [
- CONNECTION,
- PROXY_AUTHENTICATE,
- PROXY_AUTHORIZATION,
- TE,
- TRAILER,
- TRANSFER_ENCODING,
- UPGRADE,
- CONTENT_LENGTH,
- CONTENT_TYPE,
- HOST,
- HeaderName::from_static("keep-alive")
- ]
- .into();
-}
+// Headers from https://datatracker.ietf.org/doc/html/rfc2616#section-13.5.1
+// These are not propagated by default using a regex match as they will not make sense for the
+// second hop.
+// In addition because our requests are not regular proxy requests content-type, content-length
+// and host are also in the exclude list.
+static RESERVED_HEADERS: [HeaderName; 14] = [
+ CONNECTION,
+ PROXY_AUTHENTICATE,
+ PROXY_AUTHORIZATION,
+ TE,
+ TRAILER,
+ TRANSFER_ENCODING,
+ UPGRADE,
+ CONTENT_LENGTH,
+ CONTENT_TYPE,
+ CONTENT_ENCODING,
+ HOST,
+ ACCEPT,
+ ACCEPT_ENCODING,
+ HeaderName::from_static("keep-alive"),
+];
impl Service for HeadersService
where
@@ -293,6 +302,15 @@ where
}
fn call(&mut self, mut req: SubgraphRequest) -> Self::Future {
+ self.modify_request(&mut req);
+ self.inner.call(req)
+ }
+}
+
+impl<S> HeadersService<S> {
+ fn modify_request(&self, req: &mut SubgraphRequest) {
+ let mut already_propagated: HashSet<&str> = HashSet::new();
+
for operation in &*self.operations {
match operation {
Operation::Insert(insert_config) => match insert_config {
@@ -358,7 +376,7 @@ where
.drain()
.filter_map(|(name, value)| {
name.and_then(|name| {
- (RESERVED_HEADERS.contains(&name)
+ (self.reserved_headers.contains(&name)
|| !matching.is_match(name.as_str()))
.then_some((name, value))
})
@@ -372,33 +390,55 @@ where
rename,
default,
}) => {
- let headers = req.subgraph_request.headers_mut();
- let values = req.supergraph_request.headers().get_all(named);
- if values.iter().count() == 0 {
- if let Some(default) = default {
- headers.append(rename.as_ref().unwrap_or(named), default.clone());
- }
- } else {
- for value in values {
- headers.append(rename.as_ref().unwrap_or(named), value.clone());
+ if !already_propagated.contains(named.as_str()) {
+ let headers = req.subgraph_request.headers_mut();
+ let values = req.supergraph_request.headers().get_all(named);
+ if values.iter().count() == 0 {
+ if let Some(default) = default {
+ headers.append(rename.as_ref().unwrap_or(named), default.clone());
+ }
+ } else {
+ for value in values {
+ headers.append(rename.as_ref().unwrap_or(named), value.clone());
+ }
}
+ already_propagated.insert(named.as_str());
}
}
Operation::Propagate(Propagate::Matching { matching }) => {
+ let mut previous_name = None;
let headers = req.subgraph_request.headers_mut();
req.supergraph_request
.headers()
.iter()
.filter(|(name, _)| {
- !RESERVED_HEADERS.contains(name) && matching.is_match(name.as_str())
+ !self.reserved_headers.contains(*name)
+ && matching.is_match(name.as_str())
})
.for_each(|(name, value)| {
- headers.append(name, value.clone());
+ if !already_propagated.contains(name.as_str()) {
+ headers.append(name, value.clone());
+
+ // we have to do this because we don't want to propagate headers that are accounted for in the
+ // `already_propagated` set, but in the iteration here we might go through the same header
+ // multiple times
+ match previous_name {
+ None => previous_name = Some(name),
+ Some(previous) => {
+ if previous != name {
+ already_propagated.insert(previous.as_str());
+ previous_name = Some(name);
+ }
+ }
+ }
+ }
});
+ if let Some(name) = previous_name {
+ already_propagated.insert(name.as_str());
+ }
}
}
}
- self.inner.call(req)
}
}
@@ -752,6 +792,140 @@ mod test {
Ok(())
}
+ #[tokio::test]
+ async fn test_propagate_reserved() -> Result<(), BoxError> {
+ let service = HeadersService {
+ inner: MockSubgraphService::new(),
+ operations: Arc::new(vec![Operation::Propagate(Propagate::Matching {
+ matching: Regex::from_str(".*")?,
+ })]),
+ reserved_headers: Arc::new(RESERVED_HEADERS.iter().collect()),
+ };
+
+ let mut request = SubgraphRequest {
+ supergraph_request: Arc::new(
+ http::Request::builder()
+ .header("da", "vda")
+ .header("db", "vdb")
+ .header("db", "vdb")
+ .header("db", "vdb2")
+ .header(HOST, "host")
+ .header(CONTENT_LENGTH, "2")
+ .header(CONTENT_TYPE, "graphql")
+ .header(CONTENT_ENCODING, "identity")
+ .header(ACCEPT, "application/json")
+ .header(ACCEPT_ENCODING, "gzip")
+ .body(
+ Request::builder()
+ .query("query")
+ .operation_name("my_operation_name")
+ .build(),
+ )
+ .expect("expecting valid request"),
+ ),
+ subgraph_request: http::Request::builder()
+ .header("aa", "vaa")
+ .header("ab", "vab")
+ .header("ac", "vac")
+ .header(HOST, "rhost")
+ .header(CONTENT_LENGTH, "22")
+ .header(CONTENT_TYPE, "graphql")
+ .body(Request::builder().query("query").build())
+ .expect("expecting valid request"),
+ operation_kind: OperationKind::Query,
+ context: Context::new(),
+ subgraph_name: String::from("test").into(),
+ subscription_stream: None,
+ connection_closed_signal: None,
+ query_hash: Default::default(),
+ authorization: Default::default(),
+ };
+ service.modify_request(&mut request);
+ let headers = request
+ .subgraph_request
+ .headers()
+ .iter()
+ .map(|(name, value)| (name.as_str(), value.to_str().unwrap()))
+ .collect::<Vec<_>>();
+ assert_eq!(
+ headers,
+ vec![
+ ("aa", "vaa"),
+ ("ab", "vab"),
+ ("ac", "vac"),
+ ("host", "rhost"),
+ ("content-length", "22"),
+ ("content-type", "graphql"),
+ ("da", "vda"),
+ ("db", "vdb"),
+ ("db", "vdb"),
+ ("db", "vdb2"),
+ ]
+ );
+
+ Ok(())
+ }
+
+ #[tokio::test]
+ async fn test_propagate_multiple_matching_rules() -> Result<(), BoxError> {
+ let service = HeadersService {
+ inner: MockSubgraphService::new(),
+ operations: Arc::new(vec![
+ Operation::Propagate(Propagate::Named {
+ named: HeaderName::from_static("dc"),
+ rename: None,
+ default: None,
+ }),
+ Operation::Propagate(Propagate::Matching {
+ matching: Regex::from_str("dc")?,
+ }),
+ ]),
+ reserved_headers: Arc::new(RESERVED_HEADERS.iter().collect()),
+ };
+
+ let mut request = SubgraphRequest {
+ supergraph_request: Arc::new(
+ http::Request::builder()
+ .header("da", "vda")
+ .header("db", "vdb")
+ .header("dc", "vdb2")
+ .body(
+ Request::builder()
+ .query("query")
+ .operation_name("my_operation_name")
+ .build(),
+ )
+ .expect("expecting valid request"),
+ ),
+ subgraph_request: http::Request::builder()
+ .header("aa", "vaa")
+ .header("ab", "vab")
+ .header("ac", "vac")
+ .body(Request::builder().query("query").build())
+ .expect("expecting valid request"),
+ operation_kind: OperationKind::Query,
+ context: Context::new(),
+ subgraph_name: String::from("test").into(),
+ subscription_stream: None,
+ connection_closed_signal: None,
+ query_hash: Default::default(),
+ authorization: Default::default(),
+ };
+ service.modify_request(&mut request);
+ let headers = request
+ .subgraph_request
+ .headers()
+ .iter()
+ .map(|(name, value)| (name.as_str(), value.to_str().unwrap()))
+ .collect::<Vec<_>>();
+ assert_eq!(
+ headers,
+ vec![("aa", "vaa"), ("ab", "vab"), ("ac", "vac"), ("dc", "vdb2"),]
+ );
+
+ Ok(())
+ }
+
fn example_response(_: SubgraphRequest) -> Result<SubgraphResponse, BoxError> {
Ok(SubgraphResponse::new_from_response(
http::Response::default(),
diff --git a/apollo-router/src/plugins/include_subgraph_errors.rs b/apollo-router/src/plugins/include_subgraph_errors.rs
index 72abeac89e..ae3d25b061 100644
--- a/apollo-router/src/plugins/include_subgraph_errors.rs
+++ b/apollo-router/src/plugins/include_subgraph_errors.rs
@@ -197,7 +197,7 @@ mod test {
let mut builder = PluggableSupergraphServiceBuilder::new(planner);
- let plugins = create_plugins(&Configuration::default(), &schema, None)
+ let plugins = create_plugins(&Configuration::default(), &schema, None, None)
.await
.unwrap();
diff --git a/apollo-router/src/plugins/mod.rs b/apollo-router/src/plugins/mod.rs
index a0fdcd2c03..6a0ec63274 100644
--- a/apollo-router/src/plugins/mod.rs
+++ b/apollo-router/src/plugins/mod.rs
@@ -30,6 +30,7 @@ mod forbid_mutations;
mod headers;
mod include_subgraph_errors;
pub(crate) mod override_url;
+pub(crate) mod progressive_override;
mod record_replay;
pub(crate) mod rhai;
pub(crate) mod subscription;
diff --git a/apollo-router/src/plugins/progressive_override/mod.rs b/apollo-router/src/plugins/progressive_override/mod.rs
new file mode 100644
index 0000000000..a339e99d35
--- /dev/null
+++ b/apollo-router/src/plugins/progressive_override/mod.rs
@@ -0,0 +1,276 @@
+use std::collections::HashMap;
+use std::collections::HashSet;
+use std::sync::Arc;
+
+use apollo_compiler::schema::ExtendedType;
+use apollo_compiler::Schema;
+use dashmap::DashMap;
+use schemars::JsonSchema;
+use serde::Deserialize;
+use sha2::Digest;
+use sha2::Sha256;
+use tower::BoxError;
+use tower::ServiceBuilder;
+use tower::ServiceExt;
+
+use crate::services::layers::query_analysis::ParsedDocument;
+use self::visitor::OverrideLabelVisitor;
+use crate::plugin::Plugin;
+use crate::plugin::PluginInit;
+use crate::register_plugin;
+use crate::services::*;
+use crate::spec;
+use crate::spec::query::traverse;
+
+pub(crate) mod visitor;
+pub(crate) const UNRESOLVED_LABELS_KEY: &str = "apollo_override::unresolved_labels";
+pub(crate) const LABELS_TO_OVERRIDE_KEY: &str = "apollo_override::labels_to_override";
+
+pub(crate) const JOIN_FIELD_DIRECTIVE_NAME: &str = "join__field";
+pub(crate) const JOIN_SPEC_BASE_URL: &str = "https://specs.apollo.dev/join";
+pub(crate) const JOIN_SPEC_VERSION_RANGE: &str = ">=0.4.0, <=0.4.0";
+pub(crate) const OVERRIDE_LABEL_ARG_NAME: &str = "overrideLabel";
+
+/// Configuration for the progressive override plugin
+#[derive(Debug, Default, Deserialize, JsonSchema)]
+pub(crate) struct Config {}
+
+pub(crate) struct ProgressiveOverridePlugin {
+ enabled: bool,
+ schema: Schema,
+ labels_from_schema: LabelsFromSchema,
+ // We have to visit each operation to find out which labels from the schema
+ // are relevant for any given operation. This allows us to minimize the
+ // number of labels we ultimately send to the query planner. Since these
+ // labels are a component of the query plan cache key, it's important we
+ // don't "overprovide" any labels, since doing so can explode the number of
+ // cache entries per operation.
+ labels_per_operation_cache: Arc<DashMap<String, Vec<Arc<String>>>>,
+}
+
+type LabelsFromSchema = (
+ Arc<HashMap<Arc<String>, Arc<f64>>>,
+ Arc<HashSet<Arc<String>>>,
+);
+
+fn collect_labels_from_schema(schema: &Schema) -> LabelsFromSchema {
+ let Some(join_field_directive_name_in_schema) = spec::Schema::directive_name(
+ schema,
+ JOIN_SPEC_BASE_URL,
+ JOIN_SPEC_VERSION_RANGE,
+ JOIN_FIELD_DIRECTIVE_NAME,
+ ) else {
+ tracing::debug!("No join spec >=v0.4 found in the schema. No labels will be overridden.");
+ return (Arc::new(HashMap::new()), Arc::new(HashSet::new()));
+ };
+
+ let all_override_labels = schema
+ .types
+ .values()
+ .filter_map(|extended_type| {
+ if let ExtendedType::Object(object_type) = extended_type {
+ Some(object_type)
+ } else {
+ None
+ }
+ })
+ .flat_map(|object_type| &object_type.fields)
+ .filter_map(|(_, field)| {
+ let join_field_directives = field
+ .directives
+ .iter()
+ .filter(|d| d.name.as_str() == join_field_directive_name_in_schema)
+ .collect::<Vec<_>>();
+ if !join_field_directives.is_empty() {
+ Some(join_field_directives)
+ } else {
+ None
+ }
+ })
+ .flatten()
+ .filter_map(|join_directive| {
+ if let Some(override_label_arg) =
+ join_directive.argument_by_name(OVERRIDE_LABEL_ARG_NAME)
+ {
+ override_label_arg
+ .as_str()
+ .map(|str| Arc::new(str.to_string()))
+ } else {
+ None
+ }
+ })
+ .collect::<HashSet<_>>();
+
+ let (percentages, other_labels): (HashSet<_>, HashSet<_>) = all_override_labels
+ .into_iter()
+ .partition(|label| label.starts_with("percent("));
+
+ let static_percentages = percentages
+ .into_iter()
+ .filter_map(|unparsed_label| {
+ unparsed_label
+ .strip_prefix("percent(")
+ .and_then(|unparsed_label| unparsed_label.strip_suffix(')'))
+ .and_then(|percent_as_string| percent_as_string.parse::<f64>().ok())
+ .map(|parsed_float| (Arc::new(unparsed_label.to_string()), Arc::new(parsed_float)))
+ })
+ .collect::<HashMap<_, _>>();
+
+ tracing::debug!("static_percentages: {:?}", &static_percentages);
+ (Arc::new(static_percentages), Arc::new(other_labels))
+}
+
+#[async_trait::async_trait]
+impl Plugin for ProgressiveOverridePlugin {
+ type Config = Config;
+
+ async fn new(init: PluginInit<Self::Config>) -> Result<Self, BoxError> {
+ let schema = Schema::parse(&*init.supergraph_sdl, "schema.graphql")
+ .expect("Unexpectedly failed to parse supergraph");
+ let labels_from_schema = collect_labels_from_schema(&schema);
+ let enabled = !labels_from_schema.0.is_empty() || !labels_from_schema.1.is_empty();
+ Ok(ProgressiveOverridePlugin {
+ enabled,
+ schema,
+ labels_from_schema,
+ // we have to visit each operation to find out which labels from the schema are relevant.
+ labels_per_operation_cache: Arc::new(DashMap::new()),
+ })
+ }
+
+ // Add all arbitrary labels (non-percentage-based labels) from the schema to
+ // the context so coprocessors can resolve their values
+ fn router_service(&self, service: router::BoxService) -> router::BoxService {
+ if !self.enabled {
+ service
+ } else {
+ let (_, arbitrary_labels) = self.labels_from_schema.clone();
+ ServiceBuilder::new()
+ .map_request(move |request: router::Request| {
+ let _ = request
+ .context
+ .insert(UNRESOLVED_LABELS_KEY, arbitrary_labels.clone());
+ request
+ })
+ .service(service)
+ .boxed()
+ }
+ }
+
+ // Here we'll do a few things:
+ // 1. "Roll the dice" for all of our percentage-based labels and collect the
+ // subset that will be enabled for this request
+ // 2. Collect any externally-resolved labels from the context
+ // 3. Filter the set of labels to only those that are relevant to the
+ // operation
+ // 4. Add the filtered, sorted set of labels to the context for use by the
+ // query planner
+ fn supergraph_service(&self, service: supergraph::BoxService) -> supergraph::BoxService {
+ if !self.enabled {
+ service
+ } else {
+ let (percentage_labels, _) = self.labels_from_schema.clone();
+ let labels_per_operation_cache = self.labels_per_operation_cache.clone();
+
+ let schema = self.schema.clone();
+ ServiceBuilder::new()
+ .map_request(move |request: supergraph::Request| {
+ // evaluate each percentage-based label in the schema
+ let percentage_override_labels =
+ percentage_labels.iter().filter_map(|(label, percentage)| {
+ if rand::random::<f64>() * 100.0 >= **percentage {
+ None
+ } else {
+ Some(label.clone())
+ }
+ });
+
+ // collect any externally-resolved labels from the context
+ let externally_overridden_labels = request
+ .context
+ .get::<_, Vec<Arc<String>>>(LABELS_TO_OVERRIDE_KEY)
+ .unwrap_or_default()
+ .unwrap_or_default();
+
+ let crate::graphql::Request {query, operation_name, ..} = request.supergraph_request.body();
+ let operation_hash = hash_operation(query, operation_name);
+
+ let maybe_parsed_doc = request.context.extensions().lock().get::<ParsedDocument>().cloned();
+ if let Some(parsed_doc) = maybe_parsed_doc {
+ // we have to visit the operation to find out which subset
+ // of labels are relevant unless we've already cached that
+ // work
+ let relevant_labels = labels_per_operation_cache
+ .entry(operation_hash)
+ .or_insert_with(|| {
+ OverrideLabelVisitor::new(&schema)
+ .map(|mut visitor| {
+ let _ = traverse::document(&mut visitor, &parsed_doc.ast);
+ visitor.override_labels.into_iter().collect::<Vec<_>>()
+ })
+ .unwrap_or_default()
+ })
+ .clone();
+
+ if !relevant_labels.is_empty() {
+ u64_counter!(
+ "apollo.router.operations.override.query",
+ "query with overridden fields",
+ 1,
+ query.label_count = relevant_labels.len() as i64
+ );
+ }
+
+ if !externally_overridden_labels.is_empty() {
+ u64_counter!(
+ "apollo.router.operations.override.external",
+ "override label(s) resolved by coprocessor/rhai",
+ 1
+ );
+ }
+
+ // the intersection of all provided labels (percentage and
+ // external) and the labels relevant to this operation is
+ // the set of labels we'll send to the query planner
+ let mut overridden_labels_for_operation = percentage_override_labels
+ .chain(externally_overridden_labels)
+ .filter(|l| relevant_labels.contains(l))
+ .collect::<Vec<_>>();
+ overridden_labels_for_operation.sort();
+ // note: this only dedupes as expected since the vec is
+ // sorted immediately before
+ overridden_labels_for_operation.dedup();
+
+ tracing::debug!("ProgressiveOverridePlugin: overridden labels: {:?}", &overridden_labels_for_operation);
+
+ let _ = request
+ .context
+ .insert(LABELS_TO_OVERRIDE_KEY, overridden_labels_for_operation);
+
+ } else {
+ tracing::error!("No parsed document found in the context. All override labels will be ignored.");
+ }
+
+ request
+ })
+ .service(service)
+ .boxed()
+ }
+ }
+}
+
+fn hash_operation(operation: &Option<String>, operation_name: &Option<String>) -> String {
+ let mut digest = Sha256::new();
+ if let Some(operation) = operation {
+ digest.update(operation.as_bytes());
+ }
+ if let Some(operation_name) = operation_name {
+ digest.update(operation_name.as_bytes());
+ }
+ hex::encode(digest.finalize().as_slice())
+}
+
+register_plugin!("apollo", "progressive_override", ProgressiveOverridePlugin);
+
+#[cfg(test)]
+mod tests;
diff --git a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap
new file mode 100644
index 0000000000..905007adba
--- /dev/null
+++ b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap
@@ -0,0 +1,33 @@
+---
+source: apollo-router/src/plugins/progressive_override/tests.rs
+assertion_line: 253
+expression: query_plan
+---
+{
+ "data": null,
+ "extensions": {
+ "apolloQueryPlan": {
+ "object": {
+ "kind": "QueryPlan",
+ "node": {
+ "kind": "Fetch",
+ "serviceName": "Subgraph2",
+ "variableUsages": [],
+ "operation": "{percent0{foo}}",
+ "operationName": null,
+ "operationKind": "query",
+ "id": null,
+ "inputRewrites": null,
+ "outputRewrites": null,
+ "schemaAwareHash": "098137301a64979dbc957ad4134e40a6c4bcf3be50a08e984661e576c78d4a1b",
+ "authorization": {
+ "is_authenticated": false,
+ "scopes": [],
+ "policies": []
+ }
+ }
+ },
+ "text": "QueryPlan {\n Fetch(service: \"Subgraph2\") {\n {\n percent0 {\n foo\n }\n }\n },\n}"
+ }
+ }
+}
diff --git a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap
new file mode 100644
index 0000000000..6db5314f4a
--- /dev/null
+++ b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap
@@ -0,0 +1,79 @@
+---
+source: apollo-router/src/plugins/progressive_override/tests.rs
+assertion_line: 262
+expression: query_plan
+---
+{
+ "data": {
+ "percent100": null
+ },
+ "extensions": {
+ "apolloQueryPlan": {
+ "object": {
+ "kind": "QueryPlan",
+ "node": {
+ "kind": "Sequence",
+ "nodes": [
+ {
+ "kind": "Fetch",
+ "serviceName": "Subgraph1",
+ "variableUsages": [],
+ "operation": "{percent100{__typename id}}",
+ "operationName": null,
+ "operationKind": "query",
+ "id": null,
+ "inputRewrites": null,
+ "outputRewrites": null,
+ "schemaAwareHash": "3560295fe31a2e33da9dba4d775a6fd64727cb953cf790575bc83c27a486f8d2",
+ "authorization": {
+ "is_authenticated": false,
+ "scopes": [],
+ "policies": []
+ }
+ },
+ {
+ "kind": "Flatten",
+ "path": [
+ "percent100"
+ ],
+ "node": {
+ "kind": "Fetch",
+ "serviceName": "Subgraph2",
+ "requires": [
+ {
+ "kind": "InlineFragment",
+ "typeCondition": "T",
+ "selections": [
+ {
+ "kind": "Field",
+ "name": "__typename"
+ },
+ {
+ "kind": "Field",
+ "name": "id"
+ }
+ ]
+ }
+ ],
+ "variableUsages": [],
+ "operation": "query($representations:[_Any!]!){_entities(representations:$representations){...on T{foo}}}",
+ "operationName": null,
+ "operationKind": "query",
+ "id": null,
+ "inputRewrites": null,
+ "outputRewrites": null,
+ "schemaAwareHash": "0eb7c697f1b6125db1db0b099e0707e3a2984e4efb4aaee1d3bde8430b34bc6f",
+ "authorization": {
+ "is_authenticated": false,
+ "scopes": [],
+ "policies": []
+ }
+ }
+ }
+ ]
+ }
+ },
+ "text": "QueryPlan {\n Sequence {\n Fetch(service: \"Subgraph1\") {\n {\n percent100 {\n __typename\n id\n }\n }\n },\n Flatten(path: \"percent100\") {\n Fetch(service: \"Subgraph2\") {\n {\n ... on T {\n __typename\n id\n }\n } =>\n {\n ... on T {\n foo\n }\n }\n },\n },\n },\n}"
+ }
+ }
+}
diff --git a/apollo-router/src/plugins/progressive_override/testdata/supergraph.graphql b/apollo-router/src/plugins/progressive_override/testdata/supergraph.graphql
new file mode 100644
index 0000000000..e5ffb347c0
--- /dev/null
+++ b/apollo-router/src/plugins/progressive_override/testdata/supergraph.graphql
@@ -0,0 +1,98 @@
+schema
+ @link(url: "https://specs.apollo.dev/link/v1.0")
+ @link(url: "https://specs.apollo.dev/join/v0.4", for: EXECUTION) {
+ query: Query
+}
+
+directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE
+
+directive @join__field(
+ graph: join__Graph
+ requires: join__FieldSet
+ provides: join__FieldSet
+ type: String
+ external: Boolean
+ override: String
+ usedOverridden: Boolean
+ overrideLabel: String
+) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION
+
+directive @join__graph(name: String!, url: String!) on ENUM_VALUE
+
+directive @join__implements(
+ graph: join__Graph!
+ interface: String!
+) repeatable on OBJECT | INTERFACE
+
+directive @join__type(
+ graph: join__Graph!
+ key: join__FieldSet
+ extension: Boolean! = false
+ resolvable: Boolean! = true
+ isInterfaceObject: Boolean! = false
+) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR
+
+directive @join__unionMember(
+ graph: join__Graph!
+ member: String!
+) repeatable on UNION
+
+directive @link(
+ url: String
+ as: String
+ for: link__Purpose
+ import: [link__Import]
+) repeatable on SCHEMA
+
+scalar join__FieldSet
+
+enum join__Graph {
+ SUBGRAPH1 @join__graph(name: "Subgraph1", url: "https://Subgraph1")
+ SUBGRAPH2 @join__graph(name: "Subgraph2", url: "https://Subgraph2")
+}
+
+scalar link__Import
+
+enum link__Purpose {
+ """
+ \`SECURITY\` features provide metadata necessary to securely resolve fields.
+ """
+ SECURITY
+
+ """
+ \`EXECUTION\` features provide metadata necessary for operation execution.
+ """
+ EXECUTION
+}
+
+type Query @join__type(graph: SUBGRAPH1) @join__type(graph: SUBGRAPH2) {
+ percent100: T
+ @join__field(
+ graph: SUBGRAPH1
+ override: "Subgraph2"
+ overrideLabel: "percent(100)"
+ )
+ @join__field(graph: SUBGRAPH2, overrideLabel: "percent(100)")
+ percent0: T
+ @join__field(
+ graph: SUBGRAPH1
+ override: "Subgraph2"
+ overrideLabel: "percent(0)"
+ )
+ @join__field(graph: SUBGRAPH2, overrideLabel: "percent(0)")
+}
+
+type T
+ @join__type(graph: SUBGRAPH1, key: "id")
+ @join__type(graph: SUBGRAPH2, key: "id") {
+ id: ID
+ foo: Int
+ @join__field(graph: SUBGRAPH1, override: "Subgraph2", overrideLabel: "foo")
+ @join__field(graph: SUBGRAPH2, overrideLabel: "foo")
+ bar: Int
+ @join__field(graph: SUBGRAPH1, override: "Subgraph2", overrideLabel: "bar")
+ @join__field(graph: SUBGRAPH2, overrideLabel: "bar")
+ baz: Int
+ @join__field(graph: SUBGRAPH1, override: "Subgraph2", overrideLabel: "baz")
+ @join__field(graph: SUBGRAPH2, overrideLabel: "baz")
+}
diff --git a/apollo-router/src/plugins/progressive_override/testdata/supergraph_no_usages.graphql b/apollo-router/src/plugins/progressive_override/testdata/supergraph_no_usages.graphql
new file mode 100644
index 0000000000..c0f8ed51a0
--- /dev/null
+++ b/apollo-router/src/plugins/progressive_override/testdata/supergraph_no_usages.graphql
@@ -0,0 +1,80 @@
+schema
+ @link(url: "https://specs.apollo.dev/link/v1.0")
+ @link(url: "https://specs.apollo.dev/join/v0.4", for: EXECUTION) {
+ query: Query
+}
+
+directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE
+
+directive @join__field(
+ graph: join__Graph
+ requires: join__FieldSet
+ provides: join__FieldSet
+ type: String
+ external: Boolean
+ override: String
+ usedOverridden: Boolean
+ overrideLabel: String
+) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION
+
+directive @join__graph(name: String!, url: String!) on ENUM_VALUE
+
+directive @join__implements(
+ graph: join__Graph!
+ interface: String!
+) repeatable on OBJECT | INTERFACE
+
+directive @join__type(
+ graph: join__Graph!
+ key: join__FieldSet
+ extension: Boolean! = false
+ resolvable: Boolean! = true
+ isInterfaceObject: Boolean! = false
+) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR
+
+directive @join__unionMember(
+ graph: join__Graph!
+ member: String!
+) repeatable on UNION
+
+directive @link(
+ url: String
+ as: String
+ for: link__Purpose
+ import: [link__Import]
+) repeatable on SCHEMA
+
+scalar join__FieldSet
+
+enum join__Graph {
+ SUBGRAPH1 @join__graph(name: "Subgraph1", url: "https://Subgraph1")
+ SUBGRAPH2 @join__graph(name: "Subgraph2", url: "https://Subgraph2")
+}
+
+scalar link__Import
+
+enum link__Purpose {
+ """
+ \`SECURITY\` features provide metadata necessary to securely resolve fields.
+ """
+ SECURITY
+
+ """
+ \`EXECUTION\` features provide metadata necessary for operation execution.
+ """
+ EXECUTION
+}
+
+type Query @join__type(graph: SUBGRAPH1) @join__type(graph: SUBGRAPH2) {
+ percent100: T @join__field(graph: SUBGRAPH1, override: "Subgraph2")
+ percent0: T @join__field(graph: SUBGRAPH1, override: "Subgraph2")
+}
+
+type T
+ @join__type(graph: SUBGRAPH1, key: "id")
+ @join__type(graph: SUBGRAPH2, key: "id") {
+ id: ID
+ foo: Int @join__field(graph: SUBGRAPH1, override: "Subgraph2")
+ bar: Int @join__field(graph: SUBGRAPH1, override: "Subgraph2")
+ baz: Int @join__field(graph: SUBGRAPH1, override: "Subgraph2")
+}
diff --git a/apollo-router/src/plugins/progressive_override/tests.rs b/apollo-router/src/plugins/progressive_override/tests.rs
new file mode 100644
index 0000000000..c8853a9c3f
--- /dev/null
+++ b/apollo-router/src/plugins/progressive_override/tests.rs
@@ -0,0 +1,339 @@
+use std::sync::Arc;
+
+use apollo_compiler::ast::Document;
+use tower::ServiceExt;
+
+use crate::metrics::FutureMetricsExt;
+use crate::plugin::test::MockRouterService;
+use crate::plugin::test::MockSupergraphService;
+use crate::plugin::Plugin;
+use crate::plugin::PluginInit;
+use crate::plugins::progressive_override::Config;
+use crate::plugins::progressive_override::ProgressiveOverridePlugin;
+use crate::plugins::progressive_override::LABELS_TO_OVERRIDE_KEY;
+use crate::plugins::progressive_override::UNRESOLVED_LABELS_KEY;
+use crate::services::layers::query_analysis::ParsedDocument;
+use crate::services::layers::query_analysis::ParsedDocumentInner;
+use crate::services::router;
+use crate::services::supergraph;
+use crate::services::RouterResponse;
+use crate::services::SupergraphResponse;
+use crate::Context;
+use crate::TestHarness;
+
+const SCHEMA: &str = include_str!("testdata/supergraph.graphql");
+const SCHEMA_NO_USAGES: &str = include_str!("testdata/supergraph_no_usages.graphql");
+
+#[tokio::test]
+async fn plugin_disables_itself_with_no_progressive_override_usages() {
+ let plugin = ProgressiveOverridePlugin::new(PluginInit::fake_new(
+ Config {},
+ Arc::new(SCHEMA_NO_USAGES.to_string()),
+ ))
+ .await
+ .unwrap();
+
+ assert!(!plugin.enabled);
+}
+
+#[tokio::test]
+async fn plugin_enables_itself_with_progressive_override_usages() {
+ let plugin = ProgressiveOverridePlugin::new(PluginInit::fake_new(
+ Config {},
+ Arc::new(SCHEMA.to_string()),
+ ))
+ .await
+ .unwrap();
+
+ assert!(plugin.enabled);
+}
+
+#[tokio::test]
+async fn plugin_router_service_adds_all_arbitrary_labels_to_context() {
+ // This test ensures that the _router_service_ adds all of the arbitrary
+ // labels to the context so coprocessors can resolve them. At this stage,
+ // there's no concern about any percentage-based labels yet.
+ let mut mock_service = MockRouterService::new();
+ mock_service.expect_call().returning(move |request| {
+ let labels_on_context = request
+ .context
+ .get::<_, Vec<Arc<String>>>(UNRESOLVED_LABELS_KEY)
+ .unwrap()
+ .unwrap();
+
+ // this plugin handles the percent-based labels, so we don't want to add
+ // those to the context for other coprocessors to resolve
+ assert!(!labels_on_context.contains(&Arc::new("percent(0)".to_string())));
+ assert!(!labels_on_context.contains(&Arc::new("percent(100)".to_string())));
+ assert!(labels_on_context.len() == 3);
+ assert!(vec!["bar", "baz", "foo"]
+ .into_iter()
+ .all(|s| labels_on_context.contains(&Arc::new(s.to_string()))));
+ RouterResponse::fake_builder().build()
+ });
+
+ let service_stack = ProgressiveOverridePlugin::new(PluginInit::fake_new(
+ Config {},
+ Arc::new(SCHEMA.to_string()),
+ ))
+ .await
+ .unwrap()
+ .router_service(mock_service.boxed());
+
+ let _ = service_stack
+ .oneshot(router::Request::fake_builder().build().unwrap())
+ .await;
+}
+
+struct LabelAssertions {
+ query: &'static str,
+ expected_labels: Vec<&'static str>,
+ absent_labels: Vec<&'static str>,
+ labels_from_coprocessors: Vec<&'static str>,
+}
+
+// We're testing a few things with this function. For a given query, we want to
+// assert:
+// 1. The expected labels are present in the context
+// 2. The absent labels are not present in the context
+//
+// Additionally, we can simulate the inclusion of any other labels that may have
+// been provided by "coprocessors".
+async fn assert_expected_and_absent_labels_for_supergraph_service(
+ label_assertions: LabelAssertions,
+) {
+ let LabelAssertions {
+ query,
+ expected_labels,
+ absent_labels,
+ labels_from_coprocessors,
+ } = label_assertions;
+
+ let mut mock_service = MockSupergraphService::new();
+
+ mock_service.expect_call().returning(move |request| {
+ let labels_to_override = request
+ .context
+ .get::<_, Vec<String>>(LABELS_TO_OVERRIDE_KEY)
+ .unwrap()
+ .unwrap();
+
+ for label in &expected_labels {
+ assert!(labels_to_override.contains(&label.to_string()));
+ }
+ for label in &absent_labels {
+ assert!(!labels_to_override.contains(&label.to_string()));
+ }
+ SupergraphResponse::fake_builder().build()
+ });
+
+ let service_stack = ProgressiveOverridePlugin::new(PluginInit::fake_new(
+ Config {},
+ Arc::new(SCHEMA.to_string()),
+ ))
+ .await
+ .unwrap()
+ .supergraph_service(mock_service.boxed());
+
+ // plugin depends on the parsed document being in the context so we'll add
+ // it ourselves for testing purposes
+ let parsed_doc: ParsedDocument = Arc::from(ParsedDocumentInner {
+ ast: Document::parse(query, "query.graphql").unwrap(),
+ ..Default::default()
+ });
+
+ let context = Context::new();
+ context
+ .extensions()
+ .lock()
+ .insert::<ParsedDocument>(parsed_doc);
+
+ context
+ .insert(
+ LABELS_TO_OVERRIDE_KEY,
+ labels_from_coprocessors
+ .iter()
+ .map(|s| s.to_string())
+ .collect::<Vec<String>>(),
+ )
+ .unwrap();
+
+ let request = supergraph::Request::fake_builder()
+ .context(context)
+ .query(query)
+ .build()
+ .unwrap();
+
+ let _ = service_stack.oneshot(request).await;
+}
+
+#[tokio::test]
+async fn plugin_supergraph_service_adds_percent_labels_to_context() {
+ let label_assertions = LabelAssertions {
+ query: "{ percent100 { foo } }",
+ expected_labels: vec!["percent(100)"],
+ absent_labels: vec!["percent(0)"],
+ labels_from_coprocessors: vec![],
+ };
+ assert_expected_and_absent_labels_for_supergraph_service(label_assertions).await;
+}
+
+#[tokio::test]
+async fn plugin_supergraph_service_trims_extraneous_labels() {
+ let label_assertions = LabelAssertions {
+ query: "{ percent100 { foo } }",
+ // the foo label is relevant to the `foo` field (and resolved by the
+ // "coprocessor"), so we expect it to be preserved
+ expected_labels: vec!["percent(100)", "foo"],
+ // `baz` exists in the schema but is not relevant to this query, so we expect it to be trimmed
+ // `bogus` is not in the schema at all, so we expect it to be trimmed
+ absent_labels: vec!["percent(0)", "bogus", "baz"],
+ labels_from_coprocessors: vec!["foo", "baz", "bogus"],
+ };
+ assert_expected_and_absent_labels_for_supergraph_service(label_assertions).await;
+}
+
+#[tokio::test]
+async fn plugin_supergraph_service_trims_0pc_label() {
+ let label_assertions = LabelAssertions {
+ query: "{ percent0 { foo } }",
+ expected_labels: vec!["foo"],
+ // the router will always resolve percent(0) to false
+ absent_labels: vec!["percent(0)"],
+ labels_from_coprocessors: vec!["foo"],
+ };
+ assert_expected_and_absent_labels_for_supergraph_service(label_assertions).await;
+}
+
+async fn get_json_query_plan(query: &str) -> serde_json::Value {
+ let parsed_doc: ParsedDocument = Arc::from(ParsedDocumentInner {
+ ast: Document::parse(query, "query.graphql").unwrap(),
+ ..Default::default()
+ });
+
+ let context: Context = Context::new();
+ context
+ .extensions()
+ .lock()
+ .insert::<ParsedDocument>(parsed_doc);
+
+ let request = supergraph::Request::fake_builder()
+ .query(query)
+ .context(context)
+ .header("Apollo-Expose-Query-Plan", "true")
+ .build()
+ .unwrap();
+
+ let supergraph_service = TestHarness::builder()
+ .configuration_json(serde_json::json! {{
+ "plugins": {
+ "experimental.expose_query_plan": true
+ }
+ }})
+ .unwrap()
+ .schema(SCHEMA)
+ .build_supergraph()
+ .await
+ .unwrap();
+
+ let response = supergraph_service
+ .oneshot(request)
+ .await
+ .unwrap()
+ .next_response()
+ .await
+ .unwrap();
+
+ serde_json::to_value(response).unwrap()
+}
+
+#[tokio::test]
+async fn non_overridden_field_yields_expected_query_plan() {
+ // `percent0` and `foo` should both be resolved in `Subgraph2`
+ let query_plan = get_json_query_plan("{ percent0 { foo } }").await;
+ insta::assert_json_snapshot!(query_plan);
+}
+
+#[tokio::test]
+async fn overridden_field_yields_expected_query_plan() {
+ // `percent100` should be overridden to `Subgraph1` while `foo` is not, so
+ // we expect a query plan with 2 fetches: the first to `Subgraph1` and a
+ // serial fetch after to resolve `foo` in `Subgraph2`
+ let query_plan = get_json_query_plan("{ percent100 { foo } }").await;
+ insta::assert_json_snapshot!(query_plan);
+}
+
+async fn query_with_labels(query: &str, labels_from_coprocessors: Vec<&str>) {
+ let mut mock_service = MockSupergraphService::new();
+ mock_service
+ .expect_call()
+ .returning(|_| SupergraphResponse::fake_builder().build());
+
+ let service_stack = ProgressiveOverridePlugin::new(PluginInit::fake_new(
+ Config {},
+ Arc::new(SCHEMA.to_string()),
+ ))
+ .await
+ .unwrap()
+ .supergraph_service(mock_service.boxed());
+
+ // plugin depends on the parsed document being in the context so we'll add
+ // it ourselves for testing purposes
+ let parsed_doc: ParsedDocument = Arc::from(ParsedDocumentInner {
+ ast: Document::parse(query, "query.graphql").unwrap(),
+ ..Default::default()
+ });
+
+ let context = Context::new();
+ context
+ .extensions()
+ .lock()
+ .insert::<ParsedDocument>(parsed_doc);
+
+ context
+ .insert(
+ LABELS_TO_OVERRIDE_KEY,
+ labels_from_coprocessors
+ .iter()
+ .map(|s| s.to_string())
+ .collect::<Vec<String>>(),
+ )
+ .unwrap();
+
+ let request = supergraph::Request::fake_builder()
+ .context(context)
+ .query(query)
+ .build()
+ .unwrap();
+
+ let _ = service_stack.oneshot(request).await;
+}
+
+#[tokio::test]
+async fn query_with_overridden_labels_metrics() {
+ async {
+ query_with_labels("{ percent100 { foo } }", vec![]).await;
+ assert_counter!(
+ "apollo.router.operations.override.query",
+ 1,
+ query.label_count = 2
+ );
+ }
+ .with_metrics()
+ .await;
+}
+
+#[tokio::test]
+async fn query_with_externally_resolved_labels_metrics() {
+ async {
+ query_with_labels("{ percent100 { foo } }", vec!["foo"]).await;
+ assert_counter!(
+ "apollo.router.operations.override.query",
+ 1,
+ query.label_count = 2
+ );
+ assert_counter!("apollo.router.operations.override.external", 1);
+ }
+ .with_metrics()
+ .await;
+}
diff --git a/apollo-router/src/plugins/progressive_override/visitor.rs b/apollo-router/src/plugins/progressive_override/visitor.rs
new file mode 100644
index 0000000000..540db9e469
--- /dev/null
+++ b/apollo-router/src/plugins/progressive_override/visitor.rs
@@ -0,0 +1,185 @@
+//! Progressive override operation/schema traversal
+use std::collections::HashSet;
+use std::sync::Arc;
+
+use apollo_compiler::ast;
+use apollo_compiler::schema;
+use tower::BoxError;
+
+use super::JOIN_FIELD_DIRECTIVE_NAME;
+use super::JOIN_SPEC_BASE_URL;
+use super::JOIN_SPEC_VERSION_RANGE;
+use super::OVERRIDE_LABEL_ARG_NAME;
+use crate::spec::query::traverse;
+use crate::spec::Schema;
+
+impl<'a> OverrideLabelVisitor<'a> {
+ pub(crate) fn new(schema: &'a schema::Schema) -> Option<Self> {
+ Some(Self {
+ schema,
+ override_labels: HashSet::new(),
+ join_field_directive_name: Schema::directive_name(
+ schema,
+ JOIN_SPEC_BASE_URL,
+ JOIN_SPEC_VERSION_RANGE,
+ JOIN_FIELD_DIRECTIVE_NAME,
+ )?,
+ })
+ }
+}
+
+impl<'a> traverse::Visitor for OverrideLabelVisitor<'a> {
+ fn schema(&self) -> &apollo_compiler::Schema {
+ self.schema
+ }
+
+ fn operation(
+ &mut self,
+ root_type: &str,
+ node: &ast::OperationDefinition,
+ ) -> Result<(), BoxError> {
+ traverse::operation(self, root_type, node)
+ }
+
+ fn field(
+ &mut self,
+ _parent_type: &str,
+ field_def: &ast::FieldDefinition,
+ node: &ast::Field,
+ ) -> Result<(), BoxError> {
+ let new_override_labels = field_def
+ .directives
+ .iter()
+ .filter_map(|d| {
+ if d.name.as_str() == self.join_field_directive_name {
+ Some(d.arguments.iter().filter_map(|arg| {
+ if arg.name == OVERRIDE_LABEL_ARG_NAME {
+ arg.value.as_str().map(|s| Arc::new(s.to_string()))
+ } else {
+ None
+ }
+ }))
+ } else {
+ None
+ }
+ })
+ .flatten();
+ self.override_labels.extend(new_override_labels);
+
+ traverse::field(self, field_def, node)
+ }
+}
+
+pub(crate) struct OverrideLabelVisitor<'a> {
+ schema: &'a schema::Schema,
+ pub(crate) override_labels: HashSet<Arc<String>>,
+ join_field_directive_name: String,
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+
+ use apollo_compiler::ast::Document;
+ use apollo_compiler::Schema;
+
+ use crate::plugins::progressive_override::visitor::OverrideLabelVisitor;
+ use crate::spec::query::traverse;
+
+ const SCHEMA: &str = r#"
+ schema
+ @link(url: "https://specs.apollo.dev/link/v1.0")
+ @link(url: "https://specs.apollo.dev/join/v0.4", for: EXECUTION)
+ {
+ query: Query
+ }
+
+ directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE
+
+ directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION
+
+ directive @join__graph(name: String!, url: String!) on ENUM_VALUE
+
+ directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE
+
+ directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR
+
+ directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION
+
+ directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA
+
+ scalar join__FieldSet
+
+ enum join__Graph {
+ SUBGRAPH1 @join__graph(name: "Subgraph1", url: "https://Subgraph1")
+ SUBGRAPH2 @join__graph(name: "Subgraph2", url: "https://Subgraph2")
+ }
+
+ scalar link__Import
+
+ enum link__Purpose {
+ """
+ `SECURITY` features provide metadata necessary to securely resolve fields.
+ """
+ SECURITY
+
+ """
+ `EXECUTION` features provide metadata necessary for operation execution.
+ """
+ EXECUTION
+ }
+
+ type Query
+ @join__type(graph: SUBGRAPH1)
+ @join__type(graph: SUBGRAPH2)
+ {
+ t: T @join__field(graph: SUBGRAPH1)
+ t2: T @join__field(graph: SUBGRAPH1, override: "Subgraph2", overrideLabel: "foo2") @join__field(graph: SUBGRAPH2, overrideLabel: "foo2")
+ }
+
+ type T
+ @join__type(graph: SUBGRAPH1, key: "k")
+ @join__type(graph: SUBGRAPH2, key: "k")
+ {
+ k: ID
+ a: Int @join__field(graph: SUBGRAPH1, override: "Subgraph2", overrideLabel: "foo") @join__field(graph: SUBGRAPH2, overrideLabel: "foo")
+ b: Int @join__field(graph: SUBGRAPH2)
+ }
+ "#;
+
+ #[test]
+ fn collects() {
+ let schema = Schema::parse(SCHEMA, "supergraph.graphql").expect("parse schema");
+ let operation_string = "{ t { k a b } }";
+ let operation =
+ Document::parse(operation_string, "query.graphql").expect("parse operation");
+
+ let mut visitor = OverrideLabelVisitor::new(&schema).expect("create visitor");
+
+ traverse::document(&mut visitor, &operation).unwrap();
+
+ assert_eq!(
+ visitor.override_labels,
+ vec![Arc::new("foo".to_string())].into_iter().collect()
+ );
+ }
+
+ #[test]
+ fn collects2() {
+ let schema = Schema::parse(SCHEMA, "supergraph.graphql").expect("parse schema");
+ let operation_string = "{ t { k a b } t2 }";
+ let operation =
+ Document::parse(operation_string, "query.graphql").expect("parse operation");
+
+ let mut visitor = OverrideLabelVisitor::new(&schema).expect("create visitor");
+
+ traverse::document(&mut visitor, &operation).unwrap();
+
+ assert_eq!(
+ visitor.override_labels,
+ vec![Arc::new("foo".to_string()), Arc::new("foo2".to_string())]
+ .into_iter()
+ .collect()
+ );
+ }
+}
diff --git a/apollo-router/src/plugins/record_replay/record.rs b/apollo-router/src/plugins/record_replay/record.rs
index 21dffb8b08..fbd9150bda 100644
--- a/apollo-router/src/plugins/record_replay/record.rs
+++ b/apollo-router/src/plugins/record_replay/record.rs
@@ -103,11 +103,7 @@ impl Plugin for Record {
let context = res.context.clone();
let after_complete = once(async move {
- let recording = context
- .private_entries
- .lock()
- .get_mut::()
- .cloned();
+ let recording = context.extensions().lock().get_mut::().cloned();
if let Some(mut recording) = recording {
let res_headers = externalize_header_map(&headers)?;
@@ -172,7 +168,7 @@ impl Plugin for Record {
let recording_enabled =
if req.supergraph_request.headers().contains_key(RECORD_HEADER) {
- req.context.private_entries.lock().insert(Recording {
+ req.context.extensions().lock().insert(Recording {
supergraph_sdl: supergraph_sdl.clone().to_string(),
client_request: Default::default(),
client_response: Default::default(),
@@ -193,8 +189,7 @@ impl Plugin for Record {
let method = req.supergraph_request.method().to_string();
let uri = req.supergraph_request.uri().to_string();
- if let Some(recording) =
- req.context.private_entries.lock().get_mut::()
+ if let Some(recording) = req.context.extensions().lock().get_mut::()
{
recording.client_request = RequestDetails {
query,
@@ -211,7 +206,7 @@ impl Plugin for Record {
.map_response(|res: supergraph::Response| {
let context = res.context.clone();
res.map_stream(move |chunk| {
- if let Some(recording) = context.private_entries.lock().get_mut::() {
+ if let Some(recording) = context.extensions().lock().get_mut::() {
recording.client_response.chunks.push(chunk.clone());
}
@@ -225,7 +220,7 @@ impl Plugin for Record {
fn execution_service(&self, service: execution::BoxService) -> execution::BoxService {
ServiceBuilder::new()
.map_request(|req: execution::Request| {
- if let Some(recording) = req.context.private_entries.lock().get_mut::() {
+ if let Some(recording) = req.context.extensions().lock().get_mut::() {
recording.formatted_query_plan = req.query_plan.formatted_query_plan.clone();
}
req
@@ -281,7 +276,7 @@ impl Plugin for Record {
};
if let Some(recording) =
- res.context.private_entries.lock().get_mut::()
+ res.context.extensions().lock().get_mut::()
{
if recording.subgraph_fetches.is_none() {
recording.subgraph_fetches = Some(Default::default());
diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs
index 92741aae62..0dc8f8d228 100644
--- a/apollo-router/src/plugins/telemetry/config.rs
+++ b/apollo-router/src/plugins/telemetry/config.rs
@@ -110,28 +110,6 @@ pub(crate) struct MetricsCommon {
pub(crate) resource: BTreeMap,
/// Custom buckets for histograms
pub(crate) buckets: Vec<f64>,
- /// Experimental metrics to know more about caching strategies
- pub(crate) experimental_cache_metrics: ExperimentalCacheMetricsConf,
-}
-
-#[derive(Clone, Debug, Deserialize, JsonSchema)]
-#[serde(deny_unknown_fields, default)]
-pub(crate) struct ExperimentalCacheMetricsConf {
- /// Enable experimental metrics
- pub(crate) enabled: bool,
- #[serde(with = "humantime_serde")]
- #[schemars(with = "String")]
- /// Potential TTL for a cache if we had one (default: 5secs)
- pub(crate) ttl: Duration,
-}
-
-impl Default for ExperimentalCacheMetricsConf {
- fn default() -> Self {
- Self {
- enabled: false,
- ttl: Duration::from_secs(5),
- }
- }
}
impl Default for MetricsCommon {
@@ -144,7 +122,6 @@ impl Default for MetricsCommon {
buckets: vec![
0.001, 0.005, 0.015, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 1.0, 5.0, 10.0,
],
- experimental_cache_metrics: ExperimentalCacheMetricsConf::default(),
}
}
}
@@ -181,6 +158,22 @@ pub(crate) struct ExposeTraceId {
#[schemars(with = "Option")]
#[serde(deserialize_with = "deserialize_option_header_name")]
pub(crate) header_name: Option,
+ /// Format of the trace ID in response headers
+ pub(crate) format: TraceIdFormat,
+}
+
+#[derive(Clone, Default, Debug, Deserialize, JsonSchema)]
+#[serde(deny_unknown_fields, rename_all = "lowercase")]
+pub(crate) enum TraceIdFormat {
+ /// Format the Trace ID as a hexadecimal number
+ ///
+ /// (e.g. Trace ID 16 -> 00000000000000000000000000000010)
+ #[default]
+ Hexadecimal,
+ /// Format the Trace ID as a decimal number
+ ///
+ /// (e.g. Trace ID 16 -> 16)
+ Decimal,
}
/// Configure propagation of traces. In general you won't have to do this as these are automatically configured
diff --git a/apollo-router/src/plugins/telemetry/config_new/attributes.rs b/apollo-router/src/plugins/telemetry/config_new/attributes.rs
index e9e4717b50..be3dc29216 100644
--- a/apollo-router/src/plugins/telemetry/config_new/attributes.rs
+++ b/apollo-router/src/plugins/telemetry/config_new/attributes.rs
@@ -9,6 +9,7 @@ use http::StatusCode;
use http::Uri;
use opentelemetry::Key;
use opentelemetry::KeyValue;
+use opentelemetry_api::baggage::BaggageExt;
use opentelemetry_semantic_conventions::trace::CLIENT_ADDRESS;
use opentelemetry_semantic_conventions::trace::CLIENT_PORT;
use opentelemetry_semantic_conventions::trace::GRAPHQL_DOCUMENT;
@@ -34,6 +35,8 @@ use serde::Deserialize;
#[cfg(test)]
use serde::Serialize;
use tower::BoxError;
+use tracing::Span;
+use tracing_opentelemetry::OpenTelemetrySpanExt;
use crate::axum_factory::utils::ConnectionInfo;
use crate::context::OPERATION_KIND;
@@ -87,6 +90,9 @@ pub(crate) struct RouterAttributes {
#[serde(rename = "trace_id")]
trace_id: Option<bool>,
+ /// All key values from trace baggage.
+ baggage: Option<bool>,
+
/// Http attributes from Open Telemetry semantic conventions.
#[serde(flatten)]
common: HttpCommonAttributes,
@@ -545,6 +551,13 @@ impl Selectors for RouterAttributes {
));
}
}
+ if let Some(true) = &self.baggage {
+ let context = Span::current().context();
+ let baggage = context.baggage();
+ for (key, (value, _)) in baggage {
+ attrs.push_back(KeyValue::new(key.clone(), value.clone()));
+ }
+ }
attrs
}
@@ -970,6 +983,8 @@ mod test {
use opentelemetry::trace::TraceId;
use opentelemetry::trace::TraceState;
use opentelemetry::Context;
+ use opentelemetry_api::baggage::BaggageExt;
+ use opentelemetry_api::KeyValue;
use opentelemetry_semantic_conventions::trace::CLIENT_ADDRESS;
use opentelemetry_semantic_conventions::trace::CLIENT_PORT;
use opentelemetry_semantic_conventions::trace::GRAPHQL_DOCUMENT;
@@ -1030,6 +1045,10 @@ mod test {
);
let _context = Context::current()
.with_remote_span_context(span_context)
+ .with_baggage(vec![
+ KeyValue::new("baggage_key", "baggage_value"),
+ KeyValue::new("baggage_key_bis", "baggage_value_bis"),
+ ])
.attach();
let span = span!(tracing::Level::INFO, "test");
let _guard = span.enter();
@@ -1037,11 +1056,13 @@ mod test {
let attributes = RouterAttributes {
datadog_trace_id: Some(true),
trace_id: Some(true),
+ baggage: Some(true),
common: Default::default(),
server: Default::default(),
};
let attributes =
attributes.on_request(&router::Request::fake_builder().build().unwrap());
+
assert_eq!(
attributes
.iter()
@@ -1058,6 +1079,23 @@ mod test {
.map(|key_val| &key_val.value),
Some(&"42".into())
);
+ assert_eq!(
+ attributes
+ .iter()
+ .find(
+ |key_val| key_val.key == opentelemetry::Key::from_static_str("baggage_key")
+ )
+ .map(|key_val| &key_val.value),
+ Some(&"baggage_value".into())
+ );
+ assert_eq!(
+ attributes
+ .iter()
+ .find(|key_val| key_val.key
+ == opentelemetry::Key::from_static_str("baggage_key_bis"))
+ .map(|key_val| &key_val.value),
+ Some(&"baggage_value_bis".into())
+ );
});
}
diff --git a/apollo-router/src/plugins/telemetry/config_new/logging.rs b/apollo-router/src/plugins/telemetry/config_new/logging.rs
index 52fd02f364..71de7c8fcc 100644
--- a/apollo-router/src/plugins/telemetry/config_new/logging.rs
+++ b/apollo-router/src/plugins/telemetry/config_new/logging.rs
@@ -1,5 +1,6 @@
use std::collections::BTreeMap;
use std::io::IsTerminal;
+use std::time::Duration;
use schemars::gen::SchemaGenerator;
use schemars::schema::InstanceType;
@@ -104,12 +105,39 @@ pub(crate) struct StdOut {
pub(crate) enabled: bool,
/// The format to log to stdout.
pub(crate) format: Format,
+ /// Log rate limiting. The limit is set per type of log message
+ pub(crate) rate_limit: RateLimit,
}
+
impl Default for StdOut {
fn default() -> Self {
StdOut {
enabled: true,
format: Format::default(),
+ rate_limit: RateLimit::default(),
+ }
+ }
+}
+
+#[derive(Deserialize, JsonSchema, Clone, Debug)]
+#[serde(deny_unknown_fields, default)]
+pub(crate) struct RateLimit {
+ /// Set to true to limit the rate of log messages
+ pub(crate) enabled: bool,
+ /// Number of log lines allowed in interval per message
+ pub(crate) capacity: u32,
+ /// Interval for rate limiting
+ #[serde(deserialize_with = "humantime_serde::deserialize")]
+ #[schemars(with = "String")]
+ pub(crate) interval: Duration,
+}
+
+impl Default for RateLimit {
+ fn default() -> Self {
+ RateLimit {
+ enabled: false,
+ capacity: 1,
+ interval: Duration::from_secs(1),
}
}
}
@@ -127,6 +155,8 @@ pub(crate) struct File {
pub(crate) format: Format,
/// The period to rollover the log file.
pub(crate) rollover: Rollover,
+ /// Log rate limiting. The limit is set per type of log message
+ pub(crate) rate_limit: Option,
}
/// The format for logging.
diff --git a/apollo-router/src/plugins/telemetry/config_new/selectors.rs b/apollo-router/src/plugins/telemetry/config_new/selectors.rs
index 44e272c62e..3791cf74b5 100644
--- a/apollo-router/src/plugins/telemetry/config_new/selectors.rs
+++ b/apollo-router/src/plugins/telemetry/config_new/selectors.rs
@@ -132,6 +132,7 @@ pub(crate) enum RouterSelector {
/// Optional default value.
default: Option,
},
+ Static(String),
}
#[derive(Deserialize, JsonSchema, Clone, Debug)]
@@ -236,6 +237,7 @@ pub(crate) enum SupergraphSelector {
/// Optional default value.
default: Option,
},
+ Static(String),
}
#[derive(Deserialize, JsonSchema, Clone, Debug)]
@@ -403,6 +405,7 @@ pub(crate) enum SubgraphSelector {
/// Optional default value.
default: Option,
},
+ Static(String),
}
impl Selector for RouterSelector {
@@ -437,6 +440,7 @@ impl Selector for RouterSelector {
RouterSelector::Baggage {
baggage, default, ..
} => get_baggage(baggage).or_else(|| default.maybe_to_otel_value()),
+ RouterSelector::Static(val) => Some(val.clone().into()),
// Related to Response
_ => None,
}
@@ -558,10 +562,12 @@ impl Selector for SupergraphSelector {
SupergraphSelector::Baggage {
baggage, default, ..
} => get_baggage(baggage).or_else(|| default.maybe_to_otel_value()),
+
SupergraphSelector::Env { env, default, .. } => std::env::var(env)
.ok()
.or_else(|| default.clone())
.map(opentelemetry::Value::from),
+ SupergraphSelector::Static(val) => Some(val.clone().into()),
// For response
_ => None,
}
@@ -727,10 +733,12 @@ impl Selector for SubgraphSelector {
default,
..
} => get_baggage(baggage_name).or_else(|| default.maybe_to_otel_value()),
+
SubgraphSelector::Env { env, default, .. } => std::env::var(env)
.ok()
.or_else(|| default.clone())
.map(opentelemetry::Value::from),
+ SubgraphSelector::Static(val) => Some(val.clone().into()),
// For response
_ => None,
@@ -824,6 +832,21 @@ mod test {
use crate::plugins::telemetry::config_new::selectors::TraceIdFormat;
use crate::plugins::telemetry::config_new::Selector;
+ #[test]
+ fn router_static() {
+ let selector = RouterSelector::Static("test_static".to_string());
+ assert_eq!(
+ selector
+ .on_request(
+ &crate::services::RouterRequest::fake_builder()
+ .build()
+ .unwrap()
+ )
+ .unwrap(),
+ "test_static".into()
+ );
+ }
+
#[test]
fn router_request_header() {
let selector = RouterSelector::RequestHeader {
@@ -947,6 +970,22 @@ mod test {
None
);
}
+
+ #[test]
+ fn supergraph_static() {
+ let selector = SupergraphSelector::Static("test_static".to_string());
+ assert_eq!(
+ selector
+ .on_request(
+ &crate::services::SupergraphRequest::fake_builder()
+ .build()
+ .unwrap()
+ )
+ .unwrap(),
+ "test_static".into()
+ );
+ }
+
#[test]
fn supergraph_response_header() {
let selector = SupergraphSelector::ResponseHeader {
@@ -988,6 +1027,25 @@ mod test {
);
}
+ #[test]
+ fn subgraph_static() {
+ let selector = SubgraphSelector::Static("test_static".to_string());
+ assert_eq!(
+ selector
+ .on_request(
+ &crate::services::SubgraphRequest::fake_builder()
+ .supergraph_request(Arc::new(
+ http::Request::builder()
+ .body(crate::request::Request::builder().build())
+ .unwrap()
+ ))
+ .build()
+ )
+ .unwrap(),
+ "test_static".into()
+ );
+ }
+
#[test]
fn subgraph_supergraph_request_header() {
let selector = SubgraphSelector::SupergraphRequestHeader {
diff --git a/apollo-router/src/plugins/telemetry/dynamic_attribute.rs b/apollo-router/src/plugins/telemetry/dynamic_attribute.rs
index e6a1c91c4c..cab03565a6 100644
--- a/apollo-router/src/plugins/telemetry/dynamic_attribute.rs
+++ b/apollo-router/src/plugins/telemetry/dynamic_attribute.rs
@@ -21,7 +21,7 @@ impl LogAttributes {
&self.attributes
}
- fn insert(&mut self, kv: KeyValue) {
+ pub(crate) fn insert(&mut self, kv: KeyValue) {
self.attributes.push_back(kv);
}
diff --git a/apollo-router/src/plugins/telemetry/fmt_layer.rs b/apollo-router/src/plugins/telemetry/fmt_layer.rs
index 7f3a4eed93..e5c6035c8b 100644
--- a/apollo-router/src/plugins/telemetry/fmt_layer.rs
+++ b/apollo-router/src/plugins/telemetry/fmt_layer.rs
@@ -33,14 +33,18 @@ pub(crate) fn create_fmt_layer(
config: &config::Conf,
) -> Box + Send + Sync> {
match &config.exporters.logging.stdout {
- StdOut { enabled, format } if *enabled => match format {
+ StdOut {
+ enabled,
+ format,
+ rate_limit,
+ } if *enabled => match format {
Format::Json(format_config) => {
let format = Json::new(
config.exporters.logging.common.to_resource(),
format_config.clone(),
);
FmtLayer::new(
- FilteringFormatter::new(format, filter_metric_events),
+ FilteringFormatter::new(format, filter_metric_events, rate_limit),
std::io::stdout,
)
.boxed()
@@ -52,7 +56,7 @@ pub(crate) fn create_fmt_layer(
format_config.clone(),
);
FmtLayer::new(
- FilteringFormatter::new(format, filter_metric_events),
+ FilteringFormatter::new(format, filter_metric_events, rate_limit),
std::io::stdout,
)
.boxed()
@@ -257,6 +261,7 @@ mod tests {
use super::*;
use crate::plugins::telemetry::config_new::logging::JsonFormat;
+ use crate::plugins::telemetry::config_new::logging::RateLimit;
use crate::plugins::telemetry::config_new::logging::TextFormat;
use crate::plugins::telemetry::dynamic_attribute::DynAttribute;
@@ -333,7 +338,7 @@ mod tests {
let buff = LogBuffer::default();
let format = Text::default();
let fmt_layer = FmtLayer::new(
- FilteringFormatter::new(format, filter_metric_events),
+ FilteringFormatter::new(format, filter_metric_events, &RateLimit::default()),
buff.clone(),
)
.boxed();
@@ -350,7 +355,7 @@ mod tests {
let buff = LogBuffer::default();
let format = Text::default();
let fmt_layer = FmtLayer::new(
- FilteringFormatter::new(format, filter_metric_events),
+ FilteringFormatter::new(format, filter_metric_events, &RateLimit::default()),
buff.clone(),
)
.boxed();
@@ -368,7 +373,7 @@ mod tests {
let buff = LogBuffer::default();
let format = Json::default();
let fmt_layer = FmtLayer::new(
- FilteringFormatter::new(format, filter_metric_events),
+ FilteringFormatter::new(format, filter_metric_events, &RateLimit::default()),
buff.clone(),
)
.boxed();
@@ -385,7 +390,7 @@ mod tests {
let buff = LogBuffer::default();
let format = Json::default();
let fmt_layer = FmtLayer::new(
- FilteringFormatter::new(format, filter_metric_events),
+ FilteringFormatter::new(format, filter_metric_events, &RateLimit::default()),
buff.clone(),
)
.boxed();
@@ -409,7 +414,7 @@ mod tests {
};
let format = Json::new(Default::default(), json_format);
let fmt_layer = FmtLayer::new(
- FilteringFormatter::new(format, filter_metric_events),
+ FilteringFormatter::new(format, filter_metric_events, &RateLimit::default()),
buff.clone(),
)
.boxed();
@@ -434,7 +439,7 @@ mod tests {
};
let format = Text::new(Default::default(), text_format);
let fmt_layer = FmtLayer::new(
- FilteringFormatter::new(format, filter_metric_events),
+ FilteringFormatter::new(format, filter_metric_events, &RateLimit::default()),
buff.clone(),
)
.boxed();
diff --git a/apollo-router/src/plugins/telemetry/formatters/mod.rs b/apollo-router/src/plugins/telemetry/formatters/mod.rs
index d7f002f763..eaff3737b2 100644
--- a/apollo-router/src/plugins/telemetry/formatters/mod.rs
+++ b/apollo-router/src/plugins/telemetry/formatters/mod.rs
@@ -2,18 +2,25 @@
pub(crate) mod json;
pub(crate) mod text;
+use std::collections::HashMap;
use std::collections::LinkedList;
use std::fmt;
+use std::time::Instant;
use opentelemetry::sdk::Resource;
+use opentelemetry_api::KeyValue;
+use parking_lot::Mutex;
use serde_json::Number;
use tracing::Subscriber;
+use tracing_core::callsite::Identifier;
use tracing_subscriber::fmt::format::Writer;
use tracing_subscriber::fmt::FormatEvent;
use tracing_subscriber::fmt::FormatFields;
use tracing_subscriber::layer::Context;
use tracing_subscriber::registry::LookupSpan;
+use super::config_new::logging::RateLimit;
+use super::dynamic_attribute::LogAttributes;
use crate::metrics::layer::METRIC_PREFIX_COUNTER;
use crate::metrics::layer::METRIC_PREFIX_HISTOGRAM;
use crate::metrics::layer::METRIC_PREFIX_MONOTONIC_COUNTER;
@@ -44,14 +51,21 @@ pub(crate) const EXCLUDED_ATTRIBUTES: [&str; 5] = [
pub(crate) struct FilteringFormatter<T, F> {
inner: T,
filter_fn: F,
+ rate_limiter: Mutex<HashMap<Identifier, RateCounter>>,
+ config: RateLimit,
}
impl<T, F> FilteringFormatter<T, F>
where
F: Fn(&tracing::Event<'_>) -> bool,
{
- pub(crate) fn new(inner: T, filter_fn: F) -> Self {
- Self { inner, filter_fn }
+ pub(crate) fn new(inner: T, filter_fn: F, rate_limit: &RateLimit) -> Self {
+ Self {
+ inner,
+ filter_fn,
+ rate_limiter: Mutex::new(HashMap::new()),
+ config: rate_limit.clone(),
+ }
}
}
@@ -69,6 +83,32 @@ where
event: &tracing::Event<'_>,
) -> fmt::Result {
if (self.filter_fn)(event) {
+ match self.rate_limit(event) {
+ RateResult::Deny => return Ok(()),
+
+ RateResult::Allow => {}
+ RateResult::AllowSkipped(skipped) => {
+ if let Some(span) = event
+ .parent()
+ .and_then(|id| ctx.span(id))
+ .or_else(|| ctx.lookup_current())
+ {
+ let mut extensions = span.extensions_mut();
+ match extensions.get_mut::<LogAttributes>() {
+ None => {
+ let mut attributes = LogAttributes::default();
+ attributes
+ .insert(KeyValue::new("skipped_messages", skipped as i64));
+ extensions.insert(attributes);
+ }
+ Some(attributes) => {
+ attributes
+ .insert(KeyValue::new("skipped_messages", skipped as i64));
+ }
+ }
+ }
+ }
+ }
self.inner.format_event(ctx, writer, event)
} else {
Ok(())
@@ -92,6 +132,32 @@ where
W: std::fmt::Write,
{
if (self.filter_fn)(event) {
+ match self.rate_limit(event) {
+ RateResult::Deny => return Ok(()),
+
+ RateResult::Allow => {}
+ RateResult::AllowSkipped(skipped) => {
+ if let Some(span) = event
+ .parent()
+ .and_then(|id| ctx.span(id))
+ .or_else(|| ctx.lookup_current())
+ {
+ let mut extensions = span.extensions_mut();
+ match extensions.get_mut::<LogAttributes>() {
+ None => {
+ let mut attributes = LogAttributes::default();
+ attributes
+ .insert(KeyValue::new("skipped_messages", skipped as i64));
+ extensions.insert(attributes);
+ }
+ Some(attributes) => {
+ attributes
+ .insert(KeyValue::new("skipped_messages", skipped as i64));
+ }
+ }
+ }
+ }
+ }
self.inner.format_event(ctx, writer, event)
} else {
Ok(())
@@ -99,6 +165,61 @@ where
}
}
+enum RateResult {
+ Allow,
+ AllowSkipped(u32),
+ Deny,
+}
+impl<T, F> FilteringFormatter<T, F> {
+ fn rate_limit(&self, event: &tracing::Event<'_>) -> RateResult {
+ if self.config.enabled {
+ let now = Instant::now();
+ if let Some(counter) = self
+ .rate_limiter
+ .lock()
+ .get_mut(&event.metadata().callsite())
+ {
+ if now - counter.last < self.config.interval {
+ counter.count += 1;
+
+ if counter.count >= self.config.capacity {
+ return RateResult::Deny;
+ }
+ } else {
+ if counter.count > self.config.capacity {
+ let skipped = counter.count - self.config.capacity;
+ counter.last = now;
+ counter.count += 1;
+
+ return RateResult::AllowSkipped(skipped);
+ }
+
+ counter.last = now;
+ counter.count += 1;
+ }
+
+ return RateResult::Allow;
+ }
+
+ // this is racy but not a very large issue, we can accept an initial burst
+ self.rate_limiter.lock().insert(
+ event.metadata().callsite(),
+ RateCounter {
+ last: now,
+ count: 1,
+ },
+ );
+ }
+
+ RateResult::Allow
+ }
+}
+
+struct RateCounter {
+ last: Instant,
+ count: u32,
+}
+
// Function to filter metric event for the filter formatter
pub(crate) fn filter_metric_events(event: &tracing::Event<'_>) -> bool {
!event.metadata().fields().iter().any(|f| {
diff --git a/apollo-router/src/plugins/telemetry/logging/mod.rs b/apollo-router/src/plugins/telemetry/logging/mod.rs
new file mode 100644
index 0000000000..e417dad3c2
--- /dev/null
+++ b/apollo-router/src/plugins/telemetry/logging/mod.rs
@@ -0,0 +1,233 @@
+//TODO move telemetry logging functionality to this file
+#[cfg(test)]
+mod test {
+ use std::any::TypeId;
+ use std::sync::Arc;
+
+ use tower::BoxError;
+ use tower::ServiceBuilder;
+ use tower_service::Service;
+ use tracing_futures::WithSubscriber;
+
+ use crate::assert_snapshot_subscriber;
+ use crate::graphql;
+ use crate::plugin::Plugin;
+ use crate::plugin::PluginInit;
+ use crate::plugins::telemetry::Telemetry;
+ use crate::services::router;
+ use crate::services::subgraph;
+ use crate::services::supergraph;
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_router_service() {
+ let test_harness: PluginTestHarness<Telemetry> = PluginTestHarness::builder().build().await;
+
+ async {
+ let mut response = test_harness
+ .call_router(
+ router::Request::fake_builder()
+ .body("query { foo }")
+ .build()
+ .expect("expecting valid request"),
+ |_r| {
+ tracing::info!("response");
+ router::Response::fake_builder()
+ .header("custom-header", "val1")
+ .data(serde_json::json!({"data": "res"}))
+ .build()
+ .expect("expecting valid response")
+ },
+ )
+ .await
+ .expect("expecting successful response");
+
+ response.next_response().await;
+ }
+ .with_subscriber(assert_snapshot_subscriber!())
+ .await
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_supergraph_service() {
+ let test_harness: PluginTestHarness<Telemetry> = PluginTestHarness::builder().build().await;
+
+ async {
+ let mut response = test_harness
+ .call_supergraph(
+ supergraph::Request::fake_builder()
+ .query("query { foo }")
+ .variable("a", "b")
+ .build()
+ .expect("expecting valid request"),
+ |_r| {
+ tracing::info!("response");
+ supergraph::Response::fake_builder()
+ .header("custom-header", "val1")
+ .data(serde_json::json!({"data": "res"}))
+ .build()
+ .expect("expecting valid response")
+ },
+ )
+ .await
+ .expect("expecting successful response");
+
+ response.next_response().await;
+ }
+ .with_subscriber(assert_snapshot_subscriber!())
+ .await
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_subgraph_service() {
+ let test_harness: PluginTestHarness<Telemetry> = PluginTestHarness::builder().build().await;
+
+ async {
+ test_harness
+ .call_subgraph(
+ subgraph::Request::fake_builder()
+ .subgraph_name("subgraph")
+ .subgraph_request(http::Request::new(
+ graphql::Request::fake_builder()
+ .query("query { foo }")
+ .build(),
+ ))
+ .build(),
+ |_r| {
+ tracing::info!("response");
+ subgraph::Response::fake2_builder()
+ .header("custom-header", "val1")
+ .data(serde_json::json!({"data": "res"}).to_string())
+ .build()
+ .expect("expecting valid response")
+ },
+ )
+ .await
+ .expect("expecting successful response");
+ }
+ .with_subscriber(assert_snapshot_subscriber!())
+ .await
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_when_header() {
+ let test_harness: PluginTestHarness<Telemetry> = PluginTestHarness::builder()
+ .yaml(include_str!(
+ "testdata/experimental_when_header.router.yaml"
+ ))
+ .build()
+ .await;
+
+ async {
+ let mut response = test_harness
+ .call_supergraph(
+ supergraph::Request::fake_builder()
+ .header("custom-header1", "val1")
+ .header("custom-header2", "val2")
+ .query("query { foo }")
+ .build()
+ .expect("expecting valid request"),
+ |_r| {
+ tracing::info!("response");
+ supergraph::Response::fake_builder()
+ .header("custom-header1", "val1")
+ .header("custom-header2", "val2")
+ .data(serde_json::json!({"data": "res"}))
+ .build()
+ .expect("expecting valid response")
+ },
+ )
+ .await
+ .expect("expecting successful response");
+
+ response.next_response().await;
+ }
+ .with_subscriber(assert_snapshot_subscriber!())
+ .await
+ }
+
+ // Maybe factor this out after making it more usable
+ // The difference with this and the `TestHarness` is that this has much less of the router being wired up and is useful for testing a single plugin in isolation.
+ // In particular the `TestHarness` isn't good for testing things with logging.
+ // For now let's try and increase the coverage of the telemetry plugin using this and see how it goes.
+
+ struct PluginTestHarness<T: Plugin> {
+ plugin: T,
+ }
+ #[buildstructor::buildstructor]
+ impl<T: Plugin> PluginTestHarness<T> {
+ #[builder]
+ async fn new(yaml: Option<&'static str>, supergraph: Option<&'static str>) -> Self {
+ let factory = crate::plugin::plugins()
+ .find(|factory| factory.type_id == TypeId::of::<T>())
+ .expect("plugin not registered");
+ let name = &factory.name.replace("apollo.", "");
+ let config = yaml
+ .map(|yaml| serde_yaml::from_str::<serde_yaml::Value>(yaml).unwrap())
+ .map(|mut config| {
+ config
+ .as_mapping_mut()
+ .expect("invalid yaml")
+ .remove(&serde_yaml::Value::String(name.to_string()))
+ .expect("no config for plugin")
+ })
+ .unwrap_or_else(|| serde_yaml::Value::Mapping(Default::default()));
+
+ let supergraph_sdl = supergraph
+ .map(|s| Arc::new(s.to_string()))
+ .unwrap_or_default();
+ let plugin = T::new(PluginInit {
+ config: serde_yaml::from_value(config).expect("config was invalid"),
+ supergraph_sdl,
+ notify: Default::default(),
+ })
+ .await
+ .expect("failed to initialize plugin");
+
+ Self { plugin }
+ }
+
+ #[allow(dead_code)]
+ async fn call_router(
+ &self,
+ request: router::Request,
+ response_fn: fn(router::Request) -> router::Response,
+ ) -> Result<router::Response, BoxError> {
+ let service: router::BoxService = router::BoxService::new(
+ ServiceBuilder::new()
+ .service_fn(move |req: router::Request| async move { Ok((response_fn)(req)) }),
+ );
+
+ self.plugin.router_service(service).call(request).await
+ }
+
+ async fn call_supergraph(
+ &self,
+ request: supergraph::Request,
+ response_fn: fn(supergraph::Request) -> supergraph::Response,
+ ) -> Result<supergraph::Response, BoxError> {
+ let service: supergraph::BoxService =
+ supergraph::BoxService::new(ServiceBuilder::new().service_fn(
+ move |req: supergraph::Request| async move { Ok((response_fn)(req)) },
+ ));
+
+ self.plugin.supergraph_service(service).call(request).await
+ }
+
+ async fn call_subgraph(
+ &self,
+ request: subgraph::Request,
+ response_fn: fn(subgraph::Request) -> subgraph::Response,
+ ) -> Result<subgraph::Response, BoxError> {
+ let name = request.subgraph_name.clone();
+ let service: subgraph::BoxService =
+ subgraph::BoxService::new(ServiceBuilder::new().service_fn(
+ move |req: subgraph::Request| async move { Ok((response_fn)(req)) },
+ ));
+
+ self.plugin
+ .subgraph_service(&name.expect("subgraph name must be populated"), service)
+ .call(request)
+ .await
+ }
+ }
+}
diff --git a/apollo-router/src/plugins/telemetry/logging/snapshots/apollo_router__plugins__telemetry__logging__test__router_service@logs.snap b/apollo-router/src/plugins/telemetry/logging/snapshots/apollo_router__plugins__telemetry__logging__test__router_service@logs.snap
new file mode 100644
index 0000000000..c5fb1f0cf1
--- /dev/null
+++ b/apollo-router/src/plugins/telemetry/logging/snapshots/apollo_router__plugins__telemetry__logging__test__router_service@logs.snap
@@ -0,0 +1,24 @@
+---
+source: apollo-router/src/plugins/telemetry/logging/mod.rs
+expression: yaml
+---
+- fields: {}
+ level: INFO
+ message: response
+ span:
+ http.flavor: HTTP/1.1
+ http.method: GET
+ http.request.method: GET
+ http.route: "http://example.com/"
+ name: router
+ otel.kind: INTERNAL
+ trace_id: ""
+ spans:
+ - http.flavor: HTTP/1.1
+ http.method: GET
+ http.request.method: GET
+ http.route: "http://example.com/"
+ name: router
+ otel.kind: INTERNAL
+ trace_id: ""
+
diff --git a/apollo-router/src/plugins/telemetry/logging/snapshots/apollo_router__plugins__telemetry__logging__test__subgraph_service@logs.snap b/apollo-router/src/plugins/telemetry/logging/snapshots/apollo_router__plugins__telemetry__logging__test__subgraph_service@logs.snap
new file mode 100644
index 0000000000..b554825f63
--- /dev/null
+++ b/apollo-router/src/plugins/telemetry/logging/snapshots/apollo_router__plugins__telemetry__logging__test__subgraph_service@logs.snap
@@ -0,0 +1,20 @@
+---
+source: apollo-router/src/plugins/telemetry/logging/mod.rs
+expression: yaml
+---
+- fields: {}
+ level: INFO
+ message: response
+ span:
+ apollo.subgraph.name: subgraph
+ graphql.document: "query { foo }"
+ graphql.operation.name: ""
+ name: subgraph
+ otel.kind: INTERNAL
+ spans:
+ - apollo.subgraph.name: subgraph
+ graphql.document: "query { foo }"
+ graphql.operation.name: ""
+ name: subgraph
+ otel.kind: INTERNAL
+
diff --git a/apollo-router/src/plugins/telemetry/logging/snapshots/apollo_router__plugins__telemetry__logging__test__supergraph_service@logs.snap b/apollo-router/src/plugins/telemetry/logging/snapshots/apollo_router__plugins__telemetry__logging__test__supergraph_service@logs.snap
new file mode 100644
index 0000000000..0b616aeb85
--- /dev/null
+++ b/apollo-router/src/plugins/telemetry/logging/snapshots/apollo_router__plugins__telemetry__logging__test__supergraph_service@logs.snap
@@ -0,0 +1,20 @@
+---
+source: apollo-router/src/plugins/telemetry/logging/mod.rs
+expression: yaml
+---
+- fields: {}
+ level: INFO
+ message: response
+ span:
+ apollo_private.field_level_instrumentation_ratio: 0.01
+ apollo_private.graphql.variables: "{\"a\":\"\"}"
+ graphql.document: "query { foo }"
+ name: supergraph
+ otel.kind: INTERNAL
+ spans:
+ - apollo_private.field_level_instrumentation_ratio: 0.01
+ apollo_private.graphql.variables: "{\"a\":\"\"}"
+ graphql.document: "query { foo }"
+ name: supergraph
+ otel.kind: INTERNAL
+
diff --git a/apollo-router/src/plugins/telemetry/logging/snapshots/apollo_router__plugins__telemetry__logging__test__when_header@logs.snap b/apollo-router/src/plugins/telemetry/logging/snapshots/apollo_router__plugins__telemetry__logging__test__when_header@logs.snap
new file mode 100644
index 0000000000..967cfa8405
--- /dev/null
+++ b/apollo-router/src/plugins/telemetry/logging/snapshots/apollo_router__plugins__telemetry__logging__test__when_header@logs.snap
@@ -0,0 +1,48 @@
+---
+source: apollo-router/src/plugins/telemetry/logging/mod.rs
+expression: yaml
+---
+- fields:
+ http.request.headers: "{\"content-type\": \"application/json\", \"custom-header1\": \"val1\", \"custom-header2\": \"val2\"}"
+ level: INFO
+ message: Supergraph request headers
+- fields:
+ http.request.body: "Request { query: Some(\"query { foo }\"), operation_name: None, variables: {}, extensions: {} }"
+ level: INFO
+ message: Supergraph request body
+- fields: {}
+ level: INFO
+ message: response
+ span:
+ apollo_private.field_level_instrumentation_ratio: 0.01
+ apollo_private.graphql.variables: "{}"
+ graphql.document: "query { foo }"
+ name: supergraph
+ otel.kind: INTERNAL
+ spans:
+ - apollo_private.field_level_instrumentation_ratio: 0.01
+ apollo_private.graphql.variables: "{}"
+ graphql.document: "query { foo }"
+ name: supergraph
+ otel.kind: INTERNAL
+- fields:
+ http.response.headers: "{\"custom-header1\": \"val1\", \"custom-header2\": \"val2\"}"
+ level: INFO
+ message: Supergraph response headers
+ span:
+ apollo_private.field_level_instrumentation_ratio: 0.01
+ apollo_private.graphql.variables: "{}"
+ graphql.document: "query { foo }"
+ name: supergraph
+ otel.kind: INTERNAL
+ spans:
+ - apollo_private.field_level_instrumentation_ratio: 0.01
+ apollo_private.graphql.variables: "{}"
+ graphql.document: "query { foo }"
+ name: supergraph
+ otel.kind: INTERNAL
+- fields:
+ http.response.body: "Response { label: None, data: Some(Object({\"data\": String(\"res\")})), path: None, errors: [], extensions: {}, has_next: None, subscribed: None, created_at: None, incremental: [] }"
+ level: INFO
+ message: Supergraph GraphQL response
+
diff --git a/apollo-router/src/plugins/telemetry/logging/testdata/experimental_when_header.router.yaml b/apollo-router/src/plugins/telemetry/logging/testdata/experimental_when_header.router.yaml
new file mode 100644
index 0000000000..55c0431beb
--- /dev/null
+++ b/apollo-router/src/plugins/telemetry/logging/testdata/experimental_when_header.router.yaml
@@ -0,0 +1,9 @@
+telemetry:
+ exporters:
+ logging:
+ experimental_when_header:
+ - name: "custom-header1"
+ match: "^val.*"
+ headers: true
+ body: true
+
diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs
index a0f5da8e55..d52c65bb11 100644
--- a/apollo-router/src/plugins/telemetry/mod.rs
+++ b/apollo-router/src/plugins/telemetry/mod.rs
@@ -10,7 +10,6 @@ use std::time::Instant;
use ::tracing::info_span;
use ::tracing::Span;
use axum::headers::HeaderName;
-use bloomfilter::Bloom;
use config_new::Selectors;
use dashmap::DashMap;
use futures::future::ready;
@@ -39,7 +38,6 @@ use opentelemetry::trace::TracerProvider;
use opentelemetry::Key;
use opentelemetry::KeyValue;
use opentelemetry_semantic_conventions::trace::HTTP_REQUEST_METHOD;
-use parking_lot::Mutex;
use rand::Rng;
use router_bridge::planner::UsageReporting;
use serde_json_bytes::json;
@@ -61,6 +59,7 @@ use self::apollo_exporter::Sender;
use self::config::Conf;
use self::config::Sampler;
use self::config::SamplerOption;
+use self::config::TraceIdFormat;
use self::config_new::spans::Spans;
use self::metrics::apollo::studio::SingleTypeStat;
use self::metrics::AttributesForwardConf;
@@ -80,9 +79,6 @@ use crate::metrics::filter::FilterMeterProvider;
use crate::metrics::meter_provider;
use crate::plugin::Plugin;
use crate::plugin::PluginInit;
-use crate::plugins::cache::entity::hash_query;
-use crate::plugins::cache::entity::hash_vary_headers;
-use crate::plugins::cache::entity::REPRESENTATIONS;
use crate::plugins::telemetry::apollo::ForwardHeaders;
use crate::plugins::telemetry::apollo_exporter::proto::reports::trace::node::Id::ResponseName;
use crate::plugins::telemetry::apollo_exporter::proto::reports::StatsContext;
@@ -119,7 +115,6 @@ use crate::services::SubgraphRequest;
use crate::services::SubgraphResponse;
use crate::services::SupergraphRequest;
use crate::services::SupergraphResponse;
-use crate::spec::TYPENAME;
use crate::tracer::TraceId;
use crate::Context;
use crate::ListenAddr;
@@ -132,6 +127,7 @@ pub(crate) mod dynamic_attribute;
mod endpoint;
mod fmt_layer;
pub(crate) mod formatters;
+mod logging;
pub(crate) mod metrics;
mod otlp;
pub(crate) mod reload;
@@ -172,7 +168,7 @@ pub(crate) struct Telemetry {
public_meter_provider: Option<FilterMeterProvider>,
public_prometheus_meter_provider: Option<FilterMeterProvider>,
private_meter_provider: Option<FilterMeterProvider>,
- counter: Option<Arc<Mutex<CacheCounter>>>,
+ is_active: bool,
}
#[derive(Debug)]
@@ -238,27 +234,8 @@ impl Plugin for Telemetry {
let field_level_instrumentation_ratio =
config.calculate_field_level_instrumentation_ratio()?;
- // TODO move cache metrics to cache plugin.
let metrics_builder = Self::create_metrics_builder(&config)?;
- let counter = if config
- .exporters
- .metrics
- .common
- .experimental_cache_metrics
- .enabled
- {
- Some(Arc::new(Mutex::new(CacheCounter::new(
- config
- .exporters
- .metrics
- .common
- .experimental_cache_metrics
- .ttl,
- ))))
- } else {
- None
- };
let (sampling_filter_ratio, tracer_provider) = Self::create_tracer_provider(&config)?;
if config.instrumentation.spans.mode == SpanMode::Deprecated {
@@ -281,7 +258,7 @@ impl Plugin for Telemetry {
.map(FilterMeterProvider::public),
sampling_filter_ratio,
config: Arc::new(config),
- counter,
+ is_active: false,
})
}
@@ -456,7 +433,7 @@ impl Plugin for Telemetry {
.map_response(move |mut resp: SupergraphResponse| {
let config = config_map_res_first.clone();
if let Some(usage_reporting) =
- resp.context.private_entries.lock().get::<UsageReporting>()
+ resp.context.extensions().lock().get::<UsageReporting>()
{
// Record the operation signature on the router span
Span::current().record(
@@ -472,15 +449,30 @@ impl Plugin for Telemetry {
.unwrap_or_else(|| DEFAULT_EXPOSE_TRACE_ID_HEADER_NAME.clone())
});
+ // Append the trace ID with the right format, based on the config
+ let format_id = |trace: TraceId| {
+ let id = match config.exporters.tracing.response_trace_id.format {
+ TraceIdFormat::Hexadecimal => format!("{:032x}", trace.to_u128()),
+ TraceIdFormat::Decimal => format!("{}", trace.to_u128()),
+ };
+
+ HeaderValue::from_str(&id).ok()
+ };
if let (Some(header_name), Some(trace_id)) = (
expose_trace_id_header,
- TraceId::maybe_new().and_then(|t| HeaderValue::from_str(&t.to_string()).ok()),
+ TraceId::maybe_new().and_then(format_id),
) {
resp.response.headers_mut().append(header_name, trace_id);
}
if resp.context.contains_key(LOGGING_DISPLAY_HEADERS) {
- ::tracing::info!(http.response.headers = ?resp.response.headers(), "Supergraph response headers");
+ let sorted_headers = resp
+ .response
+ .headers()
+ .iter()
+ .map(|(k, v)| (k.as_str(), v))
+ .collect::<BTreeMap<_, _>>();
+ ::tracing::info!(http.response.headers = ?sorted_headers, "Supergraph response headers");
}
let display_body = resp.context.contains_key(LOGGING_DISPLAY_BODY);
resp.map_stream(move |gql_response| {
@@ -560,22 +552,10 @@ impl Plugin for Telemetry {
let subgraph_metrics_conf_req = self.create_subgraph_metrics_conf(name);
let subgraph_metrics_conf_resp = subgraph_metrics_conf_req.clone();
let subgraph_name = ByteString::from(name);
- let cache_metrics_enabled = self.counter.is_some();
- let counter = self.counter.clone();
let name = name.to_owned();
- let subgraph_name_arc = Arc::new(name.to_owned());
ServiceBuilder::new()
.instrument(move |req: &SubgraphRequest| span_mode.create_subgraph(name.as_str(), req))
- .map_request(move |mut req: SubgraphRequest| {
- let cache_attributes = cache_metrics_enabled
- .then(|| Self::get_cache_attributes(subgraph_name_arc.clone(), &mut req))
- .flatten();
- if let Some(cache_attributes) = cache_attributes {
- req.context.private_entries.lock().insert(cache_attributes);
- }
-
- request_ftv1(req)
- })
+ .map_request(move |req: SubgraphRequest| request_ftv1(req))
.map_response(move |resp| store_ftv1(&subgraph_name, resp))
.map_future_with_request_data(
move |sub_request: &SubgraphRequest| {
@@ -583,7 +563,7 @@ impl Plugin for Telemetry {
subgraph_metrics_conf_req.as_ref(),
sub_request,
);
- let cache_attributes = sub_request.context.private_entries.lock().remove();
+
let custom_attributes = config
.instrumentation
.spans
@@ -591,21 +571,12 @@ impl Plugin for Telemetry {
.attributes
.on_request(sub_request);
- (
- sub_request.context.clone(),
- cache_attributes,
- custom_attributes,
- )
+ (sub_request.context.clone(), custom_attributes)
},
- move |(context, cache_attributes, custom_attributes): (
- Context,
- Option<CacheAttributes>,
- LinkedList<KeyValue>,
- ),
+ move |(context, custom_attributes): (Context, LinkedList<KeyValue>),
f: BoxFuture<'static, Result<SubgraphResponse, BoxError>>|,
let subgraph_attribute = subgraph_attribute.clone();
let subgraph_metrics_conf = subgraph_metrics_conf_resp.clone();
- let counter = counter.clone();
let conf = conf.clone();
// Using Instant because it is guaranteed to be monotonically increasing.
let now = Instant::now();
@@ -643,8 +614,6 @@ impl Plugin for Telemetry {
subgraph_attribute,
subgraph_metrics_conf.as_ref(),
now,
- counter,
- cache_attributes,
&result,
);
result
@@ -662,6 +631,10 @@ impl Plugin for Telemetry {
impl Telemetry {
pub(crate) fn activate(&mut self) {
+ if self.is_active {
+ return;
+ }
+
// Only apply things if we were executing in the context of a vanilla the Apollo executable.
// Users that are rolling their own routers will need to set up telemetry themselves.
if let Some(hot_tracer) = OPENTELEMETRY_TRACER_HANDLE.get() {
@@ -694,6 +667,7 @@ impl Telemetry {
self.reload_metrics();
reload_fmt(create_fmt_layer(&self.config));
+ self.is_active = true;
}
fn create_propagator(config: &config::Conf) -> TextMapCompositePropagator {
@@ -824,7 +798,7 @@ impl Telemetry {
) -> Result<SupergraphResponse, BoxError> {
let mut metric_attrs = {
context
- .private_entries
+ .extensions()
.lock()
.get::<MetricsAttributes>()
.cloned()
@@ -922,7 +896,13 @@ impl Telemetry {
let (should_log_headers, should_log_body) = config.exporters.logging.should_log(req);
if should_log_headers {
- ::tracing::info!(http.request.headers = ?req.supergraph_request.headers(), "Supergraph request headers");
+ let sorted_headers = req
+ .supergraph_request
+ .headers()
+ .iter()
+ .map(|(k, v)| (k.as_str(), v))
+ .collect::<BTreeMap<_, _>>();
+ ::tracing::info!(http.request.headers = ?sorted_headers, "Supergraph request headers");
let _ = req.context.insert(LOGGING_DISPLAY_HEADERS, true);
}
@@ -949,11 +929,11 @@ impl Telemetry {
attributes.extend(router_attributes_conf.get_attributes_from_context(context));
let _ = context
- .private_entries
+ .extensions()
.lock()
.insert(MetricsAttributes(attributes));
if rand::thread_rng().gen_bool(field_level_instrumentation_ratio) {
- context.private_entries.lock().insert(EnableSubgraphFtv1);
+ context.extensions().lock().insert(EnableSubgraphFtv1);
}
}
@@ -997,63 +977,6 @@ impl Telemetry {
})
}
- fn get_cache_attributes(
- subgraph_name: Arc<String>,
- sub_request: &mut Request,
- ) -> Option<CacheAttributes> {
- let body = sub_request.subgraph_request.body_mut();
- let hashed_query = hash_query(&sub_request.query_hash, body);
- let representations = body
- .variables
- .get(REPRESENTATIONS)
- .and_then(|value| value.as_array())?;
-
- let keys = extract_cache_attributes(representations).ok()?;
-
- Some(CacheAttributes {
- subgraph_name,
- headers: sub_request.subgraph_request.headers().clone(),
- hashed_query: Arc::new(hashed_query),
- representations: keys,
- })
- }
-
- fn update_cache_metrics(
- counter: Arc<Mutex<CacheCounter>>,
- sub_response: &SubgraphResponse,
- cache_attributes: CacheAttributes,
- ) {
- let mut vary_headers = sub_response
- .response
- .headers()
- .get_all(header::VARY)
- .into_iter()
- .filter_map(|val| {
- val.to_str().ok().map(|v| {
- v.to_string()
- .split(", ")
- .map(|s| s.to_string())
- .collect::<Vec<String>>()
- })
- })
- .flatten()
- .collect::<Vec<String>>();
- vary_headers.sort();
- let vary_headers = vary_headers.join(", ");
-
- let hashed_headers = if vary_headers.is_empty() {
- Arc::default()
- } else {
- Arc::new(hash_vary_headers(&cache_attributes.headers))
- };
- counter.lock().record(
- cache_attributes.hashed_query.clone(),
- cache_attributes.subgraph_name.clone(),
- hashed_headers,
- cache_attributes.representations,
- );
- }
-
fn store_subgraph_request_attributes(
attribute_forward_config: &AttributesForwardConf,
sub_request: &Request,
@@ -1067,7 +990,7 @@ impl Telemetry {
.extend(attribute_forward_config.get_attributes_from_context(&sub_request.context));
sub_request
.context
- .private_entries
+ .extensions()
.lock()
.insert(SubgraphMetricsAttributes(attributes)); //.unwrap();
}
@@ -1078,13 +1001,11 @@ impl Telemetry {
subgraph_attribute: KeyValue,
attribute_forward_config: &AttributesForwardConf,
now: Instant,
- counter: Option<Arc<Mutex<CacheCounter>>>,
- cache_attributes: Option<CacheAttributes>,
result: &Result,
) {
let mut metric_attrs = {
context
- .private_entries
+ .extensions()
.lock()
.get::<SubgraphMetricsAttributes>()
.cloned()
@@ -1108,21 +1029,6 @@ impl Telemetry {
match &result {
Ok(response) => {
- if let Some(cache_attributes) = cache_attributes {
- if let Ok(cache_control) = response
- .response
- .headers()
- .get(header::CACHE_CONTROL)
- .ok_or(())
- .and_then(|val| val.to_str().map(|v| v.to_string()).map_err(|_| ()))
- {
- metric_attrs.push(KeyValue::new("cache_control", cache_control));
- }
-
- if let Some(counter) = counter {
- Self::update_cache_metrics(counter, response, cache_attributes)
- }
- }
metric_attrs.push(KeyValue::new(
"status",
response.response.status().as_u16().to_string(),
@@ -1311,11 +1217,8 @@ impl Telemetry {
operation_kind: OperationKind,
operation_subtype: Option,
) {
- let metrics = if let Some(usage_reporting) = context
- .private_entries
- .lock()
- .get::<UsageReporting>()
- .cloned()
+ let metrics = if let Some(usage_reporting) =
+ context.extensions().lock().get::<UsageReporting>().cloned()
{
let licensed_operation_count =
licensed_operation_count(&usage_reporting.stats_report_key);
@@ -1630,113 +1533,6 @@ impl Telemetry {
}
}
-#[derive(Debug, Clone)]
-struct CacheAttributes {
- subgraph_name: Arc<String>,
- headers: http::HeaderMap,
- hashed_query: Arc<String>,
- // Typename + hashed_representation
- representations: Vec<(Arc<String>, Value)>,
-}
-
-#[derive(Debug, Hash, Clone)]
-struct CacheKey {
- representation: Value,
- typename: Arc<String>,
- query: Arc<String>,
- subgraph_name: Arc<String>,
- hashed_headers: Arc<String>,
-}
-
-// Get typename and hashed representation for each representations in the subgraph query
-fn extract_cache_attributes(
- representations: &[Value],
-) -> Result<Vec<(Arc<String>, Value)>, BoxError> {
- let mut res = Vec::new();
- for representation in representations {
- let opt_type = representation
- .as_object()
- .and_then(|o| o.get(TYPENAME))
- .ok_or("missing __typename in representation")?;
- let typename = opt_type.as_str().unwrap_or("");
-
- res.push((Arc::new(typename.to_string()), representation.clone()));
- }
- Ok(res)
-}
-
-struct CacheCounter {
- primary: Bloom<CacheKey>,
- secondary: Bloom<CacheKey>,
- created_at: Instant,
- ttl: Duration,
-}
-
-impl CacheCounter {
- fn new(ttl: Duration) -> Self {
- Self {
- primary: Self::make_filter(),
- secondary: Self::make_filter(),
- created_at: Instant::now(),
- ttl,
- }
- }
-
- fn make_filter() -> Bloom<CacheKey> {
- // the filter is around 4kB in size (can be calculated with `Bloom::compute_bitmap_size`)
- Bloom::new_for_fp_rate(10000, 0.2)
- }
-
- fn record(
- &mut self,
- query: Arc<String>,
- subgraph_name: Arc<String>,
- hashed_headers: Arc<String>,
- representations: Vec<(Arc<String>, Value)>,
- ) {
- if self.created_at.elapsed() >= self.ttl {
- self.clear();
- }
-
- // typename -> (nb of cache hits, nb of entities)
- let mut seen: HashMap<Arc<String>, (usize, usize)> = HashMap::new();
- for (typename, representation) in representations {
- let cache_hit = self.check(&CacheKey {
- representation,
- typename: typename.clone(),
- query: query.clone(),
- subgraph_name: subgraph_name.clone(),
- hashed_headers: hashed_headers.clone(),
- });
-
- let seen_entry = seen.entry(typename.clone()).or_default();
- if cache_hit {
- seen_entry.0 += 1;
- }
- seen_entry.1 += 1;
- }
-
- for (typename, (cache_hit, total_entities)) in seen.into_iter() {
- ::tracing::info!(
- histogram.apollo.router.operations.entity.cache_hit = (cache_hit as f64 / total_entities as f64) * 100f64,
- entity_type = %typename,
- subgraph = %subgraph_name,
- );
- }
- }
-
- fn check(&mut self, key: &CacheKey) -> bool {
- self.primary.check_and_set(key) || self.secondary.check(key)
- }
-
- fn clear(&mut self) {
- let secondary = std::mem::replace(&mut self.primary, Self::make_filter());
- self.secondary = secondary;
-
- self.created_at = Instant::now();
- }
-}
-
fn filter_headers(headers: &HeaderMap, forward_rules: &ForwardHeaders) -> String {
if let ForwardHeaders::None = forward_rules {
return String::from("{}");
@@ -1871,7 +1667,7 @@ register_plugin!("apollo", "telemetry", Telemetry);
fn request_ftv1(mut req: SubgraphRequest) -> SubgraphRequest {
if req
.context
- .private_entries
+ .extensions()
.lock()
.contains_key::()
&& Span::current().context().span().span_context().is_sampled()
@@ -1887,7 +1683,7 @@ fn store_ftv1(subgraph_name: &ByteString, resp: SubgraphResponse) -> SubgraphRes
// Stash the FTV1 data
if resp
.context
- .private_entries
+ .extensions()
.lock()
.contains_key::()
{
diff --git a/apollo-router/src/plugins/telemetry/reload.rs b/apollo-router/src/plugins/telemetry/reload.rs
index 93f566d05e..8f8bf54d10 100644
--- a/apollo-router/src/plugins/telemetry/reload.rs
+++ b/apollo-router/src/plugins/telemetry/reload.rs
@@ -27,6 +27,7 @@ use tracing_subscriber::EnvFilter;
use tracing_subscriber::Registry;
use super::config::SamplerOption;
+use super::config_new::logging::RateLimit;
use super::dynamic_attribute::DynAttributeLayer;
use super::fmt_layer::FmtLayer;
use super::formatters::json::Json;
@@ -39,6 +40,7 @@ use crate::plugins::telemetry::formatters::filter_metric_events;
use crate::plugins::telemetry::formatters::text::Text;
use crate::plugins::telemetry::formatters::FilteringFormatter;
use crate::plugins::telemetry::tracing::reload::ReloadTracer;
+use crate::router_factory::STARTING_SPAN_NAME;
pub(crate) type LayeredRegistry = Layered<DynAttributeLayer, Registry>;
@@ -84,13 +86,13 @@ pub(crate) fn init_telemetry(log_level: &str) -> Result<()> {
// We choose json or plain based on tty
let fmt = if std::io::stdout().is_terminal() {
FmtLayer::new(
- FilteringFormatter::new(Text::default(), filter_metric_events),
+ FilteringFormatter::new(Text::default(), filter_metric_events, &RateLimit::default()),
std::io::stdout,
)
.boxed()
} else {
FmtLayer::new(
- FilteringFormatter::new(Json::default(), filter_metric_events),
+ FilteringFormatter::new(Json::default(), filter_metric_events, &RateLimit::default()),
std::io::stdout,
)
.boxed()
@@ -133,6 +135,10 @@ pub(super) fn reload_fmt(layer: Box + Send + Sync>) {
}
}
+pub(crate) fn apollo_opentelemetry_initialized() -> bool {
+ OPENTELEMETRY_TRACER_HANDLE.get().is_some()
+}
+
pub(crate) struct SamplingFilter;
#[allow(dead_code)]
@@ -175,9 +181,9 @@ where
meta: &tracing::Metadata<'_>,
cx: &tracing_subscriber::layer::Context<'_, S>,
) -> bool {
- // we ignore events
+ // we ignore metric events
if !meta.is_span() {
- return false;
+ return meta.fields().iter().any(|f| f.name() == "message");
}
// if there's an exsting otel context set by the client request, and it is sampled,
@@ -197,6 +203,11 @@ where
return spanref.is_sampled();
}
+ // always sample the router loading trace
+ if meta.name() == STARTING_SPAN_NAME {
+ return true;
+ }
+
// we only make the sampling decision on the root span. If we reach here for any other span,
// it means that the parent span was not enabled, so we should not enable this span either
if meta.name() != REQUEST_SPAN_NAME && meta.name() != ROUTER_SPAN_NAME {
diff --git a/apollo-router/src/plugins/traffic_shaping/mod.rs b/apollo-router/src/plugins/traffic_shaping/mod.rs
index 918795b986..817de0979b 100644
--- a/apollo-router/src/plugins/traffic_shaping/mod.rs
+++ b/apollo-router/src/plugins/traffic_shaping/mod.rs
@@ -534,6 +534,7 @@ mod test {
for (name, plugin) in create_plugins(
&config,
&schema,
+ None,
Some(vec![(APOLLO_TRAFFIC_SHAPING.to_string(), plugin)]),
)
.await
diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs
index d5eb735fbe..19190331b7 100644
--- a/apollo-router/src/query_planner/bridge_query_planner.rs
+++ b/apollo-router/src/query_planner/bridge_query_planner.rs
@@ -9,6 +9,7 @@ use std::time::Instant;
use apollo_compiler::ast;
use futures::future::BoxFuture;
use router_bridge::planner::IncrementalDeliverySupport;
+use router_bridge::planner::PlanOptions;
use router_bridge::planner::PlanSuccess;
use router_bridge::planner::Planner;
use router_bridge::planner::QueryPlannerConfig;
@@ -32,6 +33,7 @@ use crate::json_ext::Path;
use crate::plugins::authorization::AuthorizationPlugin;
use crate::plugins::authorization::CacheKeyMetadata;
use crate::plugins::authorization::UnauthorizedPaths;
+use crate::plugins::progressive_override::LABELS_TO_OVERRIDE_KEY;
use crate::query_planner::labeler::add_defer_labels;
use crate::services::layers::query_analysis::ParsedDocument;
use crate::services::layers::query_analysis::ParsedDocumentInner;
@@ -347,6 +349,7 @@ impl BridgeQueryPlanner {
operation: Option<String>,
key: CacheKeyMetadata,
selections: Query,
+ plan_options: PlanOptions,
) -> Result<QueryPlannerContent, QueryPlannerError> {
fn is_validation_error(errors: &PlanErrors) -> bool {
errors.errors.iter().all(|err| err.validation_error)
@@ -399,7 +402,7 @@ impl BridgeQueryPlanner {
let planner_result = match self
.planner
- .plan(filtered_query.clone(), operation.clone())
+ .plan(filtered_query.clone(), operation.clone(), plan_options)
.await
.map_err(QueryPlannerError::RouterBridgeError)?
.into_result()
@@ -511,7 +514,7 @@ impl Service for BridgeQueryPlanner {
} = req;
let metadata = context
- .private_entries
+ .extensions()
.lock()
.get::<CacheKeyMetadata>()
.cloned()
@@ -520,7 +523,7 @@ impl Service for BridgeQueryPlanner {
let fut = async move {
let start = Instant::now();
- let mut doc = match context.private_entries.lock().get::