diff --git a/.cargo/config b/.cargo/config.toml
similarity index 100%
rename from .cargo/config
rename to .cargo/config.toml
diff --git a/.circleci/config.yml b/.circleci/config.yml
index b512d02af2..acd03f9460 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -43,20 +43,13 @@ executors:
environment:
CARGO_BUILD_JOBS: 8
RUST_TEST_THREADS: 8
- arm_macos_build: &arm_macos_build_executor
+ macos_build: &macos_build_executor
macos:
# See https://circleci.com/docs/xcode-policy along with the support matrix
# at https://circleci.com/docs/using-macos#supported-xcode-versions.
# We use the major.minor notation to bring in compatible patches.
xcode: 14.2
resource_class: macos.m1.medium.gen1
- intel_macos_build: &intel_macos_build_executor
- macos:
- # See https://circleci.com/docs/xcode-policy along with the support matrix
- # at https://circleci.com/docs/using-macos#supported-xcode-versions.
- # We use the major.minor notation to bring in compatible patches.
- xcode: 14.2
- resource_class: macos.x86.medium.gen2
macos_test: &macos_test_executor
macos:
# See https://circleci.com/docs/xcode-policy along with the support matrix
@@ -167,7 +160,7 @@ commands:
- when:
condition:
or:
- - equal: [ *arm_macos_build_executor, << parameters.platform >> ]
+ - equal: [ *macos_build_executor, << parameters.platform >> ]
- equal: [ *macos_test_executor, << parameters.platform >> ]
steps:
- run:
@@ -178,20 +171,7 @@ commands:
- run:
name: Write arch
command: |
- echo 'osx-aarch64' >> ~/.arch
- - when:
- condition:
- equal: [ *intel_macos_build_executor, << parameters.platform >> ]
- steps:
- - run:
- name: Make link to md5
- command: |
- mkdir -p ~/.local/aliases
- ln -s /sbin/md5 ~/.local/aliases/md5sum
- - run:
- name: Write arch
- command: |
- echo 'osx-x86' >> ~/.arch
+ echo 'osx' >> ~/.arch
- when:
condition:
or:
@@ -264,8 +244,7 @@ commands:
- when:
condition:
or:
- - equal: [ *intel_macos_build_executor, << parameters.platform >> ]
- - equal: [ *arm_macos_build_executor, << parameters.platform >> ]
+ - equal: [ *macos_build_executor, << parameters.platform >> ]
- equal: [ *macos_test_executor, << parameters.platform >> ]
steps:
- run:
@@ -306,8 +285,7 @@ commands:
- when:
condition:
or:
- - equal: [ *intel_macos_build_executor, << parameters.platform >> ]
- - equal: [ *arm_macos_build_executor, << parameters.platform >> ]
+ - equal: [ *macos_build_executor, << parameters.platform >> ]
- equal: [ *macos_test_executor, << parameters.platform >> ]
steps:
- run:
@@ -356,6 +334,15 @@ commands:
name: Special case for Windows because of ssh-agent
command: |
printf "[net]\ngit-fetch-with-cli = true" >> ~/.cargo/Cargo.toml
+ - when:
+ condition:
+ or:
+ - equal: [ *macos_build_executor, << parameters.platform >> ]
+ steps:
+ - run:
+ name: Special case for OSX x86_64 builds
+ command: |
+ rustup target add x86_64-apple-darwin
install_extra_tools:
steps:
@@ -421,7 +408,7 @@ commands:
# Create list of kube versions
CURRENT_KUBE_VERSIONS=$(curl -s -L https://raw.githubusercontent.com/kubernetes/website/main/data/releases/schedule.yaml \
| yq -o json '.' \
- | jq --raw-output '.schedules[] | select((now | strftime("%Y-%m-%dT00:00:00Z")) as $date | .releaseDate < $date and .endOfLifeDate > $date) | .previousPatches[].release')
+ | jq --raw-output '.schedules[] | select((now | strftime("%Y-%m-%dT00:00:00Z")) as $date | .releaseDate < $date and .endOfLifeDate > $date) | select(.previousPatches != null) | .previousPatches[].release')
TEMPLATE_DIR=$(mktemp -d)
MINOR_VERSION="${kube_version%.*}"
@@ -608,8 +595,7 @@ jobs:
- when:
condition:
or:
- - equal: [ *intel_macos_build_executor, << parameters.platform >> ]
- - equal: [ *arm_macos_build_executor, << parameters.platform >> ]
+ - equal: [ *macos_build_executor, << parameters.platform >> ]
steps:
- when:
@@ -619,13 +605,28 @@ jobs:
- run: cargo xtask release prepare nightly
- run:
command: >
- cargo xtask dist
+ cargo xtask dist --target aarch64-apple-darwin
+ - run:
+ command: >
+ cargo xtask dist --target x86_64-apple-darwin
- run:
command: >
mkdir -p artifacts
- run:
command: >
cargo xtask package
+ --target aarch64-apple-darwin
+ --apple-team-id ${APPLE_TEAM_ID}
+ --apple-username ${APPLE_USERNAME}
+ --cert-bundle-base64 ${MACOS_CERT_BUNDLE_BASE64}
+ --cert-bundle-password ${MACOS_CERT_BUNDLE_PASSWORD}
+ --keychain-password ${MACOS_KEYCHAIN_PASSWORD}
+ --notarization-password ${MACOS_NOTARIZATION_PASSWORD}
+ --output artifacts/
+ - run:
+ command: >
+ cargo xtask package
+ --target x86_64-apple-darwin
--apple-team-id ${APPLE_TEAM_ID}
--apple-username ${APPLE_USERNAME}
--cert-bundle-base64 ${MACOS_CERT_BUNDLE_BASE64}
@@ -958,7 +959,7 @@ workflows:
matrix:
parameters:
platform:
- [ intel_macos_build, arm_macos_build, windows_build, amd_linux_build, arm_linux_build ]
+ [ macos_build, windows_build, amd_linux_build, arm_linux_build ]
- secops/wiz-docker:
context:
- platform-docker-ro
@@ -1055,7 +1056,7 @@ workflows:
matrix:
parameters:
platform:
- [ intel_macos_build, arm_macos_build, windows_build, amd_linux_build, arm_linux_build ]
+ [ macos_build, windows_build, amd_linux_build, arm_linux_build ]
filters:
branches:
ignore: /.*/
diff --git a/.gitignore b/.gitignore
index ad362c28b0..c8cb2b01a8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
# Generated by Cargo
# will have compiled files and executables
**/target/
+.cargo_check
# These are backup files generated by rustfmt
**/*.rs.bk
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ad0441a09b..ef575a10b5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,154 @@ All notable changes to Router will be documented in this file.
This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html).
+# [1.45.0] - 2024-04-22
+
+## 🚀 Features
+
+### Query validation process with Rust ([PR #4551](https://github.com/apollographql/router/pull/4551))
+
+The router has been updated with a new Rust-based query validation process using `apollo-compiler` from the `apollo-rs` project. It replaces the JavaScript implementation in the query planner. It improves query planner performance by moving the validation out of the query planner and into the router service, which frees up space in the query planner cache.
+
+Because validation now happens earlier in the router service and not in the query planner, error paths in the query planner are no longer encountered. Some messages in error responses returned from invalid queries should now be more clear.
+
+We've tested the new validation process by running it for months in production, concurrently with the JavaScript implementation, and have now completely transitioned to the Rust-based implementation.
+
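+As a rough illustration (not the router's internal code), validating an operation against a schema with `apollo-compiler` looks like the following sketch; the schema and operation strings here are hypothetical:
+
+```rust
+use apollo_compiler::{ExecutableDocument, Schema};
+
+fn main() {
+    // Parse and validate a (hypothetical) schema.
+    let schema = Schema::parse_and_validate("type Query { hello: String }", "schema.graphql")
+        .expect("schema should be valid");
+
+    // `goodbye` is not defined on `Query`, so validation reports an error.
+    match ExecutableDocument::parse_and_validate(&schema, "{ goodbye }", "query.graphql") {
+        Ok(_) => println!("operation is valid"),
+        Err(with_errors) => eprintln!("validation errors:\n{}", with_errors.errors),
+    }
+}
+```
+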
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4551
+
+### Add support for SHA256 hashing in Rhai ([Issue #4939](https://github.com/apollographql/router/issues/4939))
+
+The router supports a new `sha256` module to create SHA256 hashes in Rhai scripts. The module supports the `sha256::digest` function.
+
+An example script that uses the module:
+
+```rhai
+fn supergraph_service(service){
+ service.map_request(|request|{
+ log_info("hello world");
+ let sha = sha256::digest("hello world");
+ log_info(sha);
+ });
+}
+```
+
+
+By [@lleadbet](https://github.com/lleadbet) in https://github.com/apollographql/router/pull/4940
+
+### Subgraph support for query batching ([Issue #2002](https://github.com/apollographql/router/issues/2002))
+
+As an extension to the ongoing work to support [client-side query batching in the router](https://github.com/apollographql/router/issues/126), the router now supports batching of subgraph requests. Each subgraph batch request retains the same external format as a client batch request. This optimization reduces the number of round-trip requests from the router to subgraphs.
+
+Also, batching in the router is now a generally available feature: the `experimental_batching` router configuration option has been deprecated and is replaced by the `batching` option.
+
+Previously, the router preserved the concept of a batch until a `RouterRequest` finished processing. From that point, the router converted each batch request item into a separate `SupergraphRequest`, and the router planned and executed those requests concurrently within the router, then reassembled them into a batch of `RouterResponse` to return to the client. Now with the implementation in this release, the concept of a batch is extended so that batches are issued to configured subgraphs (all or named). Each batch request item is planned and executed separately, but the queries issued to subgraphs are optimally assembled into batches which observe the query constraints of the various batch items.
+
+To configure subgraph batching, you can enable `batching.subgraph.all` for all subgraphs. You can also enable batching per subgraph with `batching.subgraph.subgraphs.*`. For example:
+
+```yaml
+batching:
+ enabled: true
+ mode: batch_http_link
+ subgraph:
+ # Enable batching on all subgraphs
+ all:
+ enabled: true
+```
+
+```yaml
+batching:
+ enabled: true
+ mode: batch_http_link
+ subgraph:
+ # Disable batching on all subgraphs
+ all:
+ enabled: false
+ # Configure (override) batching support per subgraph
+ subgraphs:
+ subgraph_1:
+ enabled: true
+ subgraph_2:
+ enabled: true
+```
+
+Note: `all` can be overridden by `subgraphs`. This applies in general for all router subgraph configuration options.
+
+To learn more, see [query batching in Apollo docs](https://www.apollographql.com/docs/router/executing-operations/query-batching/).
+
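+For reference, a batch keeps the standard `batch_http_link` client format: a JSON array of GraphQL requests. A hypothetical two-operation batch:
+
+```json
+[
+  { "query": "query A { me { id } }" },
+  { "query": "query B { topProducts { name } }" }
+]
+```
+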
+By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/4661
+
+## 🐛 Fixes
+
+### Update `rustls` to v0.21.11, the latest v0.21.x patch ([PR #4993](https://github.com/apollographql/router/pull/4993))
+
+While the Router **does** use `rustls`, [RUSTSEC-2024-0336] (also known as [CVE-2024-32650] and [GHSA-6g7w-8wpp-frhj]) **DOES NOT affect the Router** since it uses `tokio-rustls` which is specifically called out in the advisory as **unaffected**.
+
+Despite the lack of impact, we have updated `rustls` from v0.21.10 to [rustls v0.21.11], which includes the patch.
+
+[RUSTSEC-2024-0336]: https://rustsec.org/advisories/RUSTSEC-2024-0336.html
+[CVE-2024-32650]: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-32650
+[GHSA-6g7w-8wpp-frhj]: https://github.com/advisories/GHSA-6g7w-8wpp-frhj
+[rustls v0.21.11]: https://github.com/rustls/rustls/releases/tag/v%2F0.21.11
+
+By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/4993
+
+### Performance improvements for Apollo usage report field generation ([PR #4951](https://github.com/apollographql/router/pull/4951))
+
+The performance of generating Apollo usage report signatures, stats keys, and referenced fields has been improved.
+
+By [@bonnici](https://github.com/bonnici) in https://github.com/apollographql/router/pull/4951
+
+### Apply alias rewrites to arrays ([PR #4958](https://github.com/apollographql/router/pull/4958))
+
+The automatic aliasing rules introduced in [#2489](https://github.com/apollographql/router/pull/2489) to support `@interfaceObject` are now properly applied to lists.
+
+By [@o0ignition0o](https://github.com/o0ignition0o) in https://github.com/apollographql/router/pull/4958
+
+### Fix compatibility of coprocessor metric creation ([PR #4930](https://github.com/apollographql/router/pull/4930))
+
+Previously, the router's execution stage created coprocessor metrics differently than other stages. This produced metrics with slight incompatibilities.
+
+This release fixes the issue by creating coprocessor metrics in the same way as all other stages.
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4930
+
+## 📚 Documentation
+
+### Documentation updates for caching and metrics instruments ([PR #4872](https://github.com/apollographql/router/pull/4872))
+
+Router documentation has been updated for a couple of topics:
+- [Performance improvements vs. stability concerns](https://www.apollographql.com/docs/router/configuration/in-memory-caching#performance-improvements-vs-stability) when using the router's operation cache
+- [Overview of standard and custom metrics instruments](https://www.apollographql.com/docs/router/configuration/telemetry/instrumentation/instruments)
+
+By [@smyrick](https://github.com/smyrick) in https://github.com/apollographql/router/pull/4872
+
+## 🧪 Experimental
+
+### Experimental: Introduce a pool of query planners ([PR #4897](https://github.com/apollographql/router/pull/4897))
+
+The router supports a new experimental feature: a pool of query planners to parallelize query planning.
+
+You can configure query planner pools with the `supergraph.query_planning.experimental_parallelism` option:
+
+```yaml
+supergraph:
+ query_planning:
+ experimental_parallelism: auto # number of available CPUs
+```
+
+Its value is the number of query planners that run in parallel, and its default value is `1`. You can set it to the special value `auto` to automatically set it equal to the number of available CPUs.
+
+You can discuss and comment about query planner pools in this [GitHub discussion](https://github.com/apollographql/router/discussions/4917).
+
+By [@xuorig](https://github.com/xuorig) and [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/4897
+
+### Experimental: Rust implementation of Apollo usage report field generation ([PR #4796](https://github.com/apollographql/router/pull/4796))
+
+The router supports a new experimental Rust implementation for generating the stats report keys and referenced fields that are sent in Apollo usage reports. This implementation is one part of the effort to replace the router-bridge with native Rust code.
+
+The feature is configured with the `experimental_apollo_metrics_generation_mode` setting. We recommend that you use its default value, so we can verify that it generates the same payloads as the previous implementation.
+
+By [@bonnici](https://github.com/bonnici) in https://github.com/apollographql/router/pull/4796
+
# [1.44.0] - 2024-04-12
## 🚀 Features
@@ -132,7 +280,7 @@ Additionally, the router now verifies that a TTL is configured for all subgraphs
By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4882
-### Helm: include all standard labels in pod spec but complete sentence that stands on its own ([PR #4862](https://github.com/apollographql/router/pull/4862))
+### Helm: include all standard labels in pod spec ([PR #4862](https://github.com/apollographql/router/pull/4862))
The templates for the router's Helm chart have been updated so that the `helm.sh/chart`, `app.kubernetes.io/version`, and `app.kubernetes.io/managed-by` labels are now included on pods, as they already were for all other resources created by the Helm chart.
@@ -140,7 +288,7 @@ The specific change to the template is that the pod spec template now uses the `
By [@glasser](https://github.com/glasser) in https://github.com/apollographql/router/pull/4862
-### Persisted queries return 4xx errors ([PR #4887](https://github.com/apollographql/router/pull/4887)
+### Persisted queries return 4xx errors ([PR #4887](https://github.com/apollographql/router/pull/4887))
Previously, sending an invalid persisted query request could return a 200 status code to the client when they should have returned errors. These requests now return errors as 4xx status codes:
diff --git a/Cargo.lock b/Cargo.lock
index 470b383139..156bf8d353 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -87,9 +87,9 @@ dependencies = [
[[package]]
name = "ahash"
-version = "0.8.6"
+version = "0.8.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a"
+checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
dependencies = [
"cfg-if",
"const-random",
@@ -192,9 +192,9 @@ checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1"
[[package]]
name = "apollo-compiler"
-version = "1.0.0-beta.14"
+version = "1.0.0-beta.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00ed7af048c0beb66c9201c032b25a81b1b450397ddb2cb277ca57bcae2d9e13"
+checksum = "175659cea0232b38bfacd1505aed00221cc4028d848699ce9e3422c6bf87d90a"
dependencies = [
"apollo-parser",
"ariadne",
@@ -220,28 +220,28 @@ dependencies = [
[[package]]
name = "apollo-federation"
-version = "0.0.9"
+version = "0.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45c675747dd20db0f124d07b9764265b3ae67afbdd1044345673c184888cd018"
+checksum = "e9fc457f3e836a60ea3d4e1a25a8b42c5c62ddf13a2131c194d94f752c7a1475"
dependencies = [
"apollo-compiler",
"derive_more",
"indexmap 2.2.3",
"lazy_static",
"petgraph",
- "salsa",
- "serde_json",
- "strum 0.26.1",
+ "serde_json_bytes",
+ "strum 0.26.2",
"strum_macros 0.26.1",
"thiserror",
+ "time",
"url",
]
[[package]]
name = "apollo-parser"
-version = "0.7.6"
+version = "0.7.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e8111fa921e363466724e8cc80ef703ffbdfc5db64f826c604f7378641b12da"
+checksum = "6bb7c8a9776825e5524b5ab3a7f478bf091a054180f244dff85814452cb87d90"
dependencies = [
"memchr",
"rowan",
@@ -250,7 +250,7 @@ dependencies = [
[[package]]
name = "apollo-router"
-version = "1.44.0"
+version = "1.45.0"
dependencies = [
"access-json",
"anyhow",
@@ -258,6 +258,7 @@ dependencies = [
"apollo-federation",
"arc-swap",
"askama",
+ "async-channel 1.9.0",
"async-compression",
"async-trait",
"aws-config",
@@ -291,7 +292,7 @@ dependencies = [
"futures",
"futures-test",
"graphql_client",
- "heck 0.4.1",
+ "heck",
"hex",
"hmac",
"http 0.2.11",
@@ -337,7 +338,7 @@ dependencies = [
"opentelemetry-zipkin",
"opentelemetry_api",
"p256 0.13.2",
- "parking_lot 0.12.1",
+ "parking_lot",
"paste",
"pin-project-lite",
"prometheus",
@@ -410,7 +411,7 @@ dependencies = [
[[package]]
name = "apollo-router-benchmarks"
-version = "1.44.0"
+version = "1.45.0"
dependencies = [
"apollo-parser",
"apollo-router",
@@ -426,7 +427,7 @@ dependencies = [
[[package]]
name = "apollo-router-scaffold"
-version = "1.44.0"
+version = "1.45.0"
dependencies = [
"anyhow",
"cargo-scaffold",
@@ -941,7 +942,7 @@ dependencies = [
"hex",
"hmac",
"http 0.2.11",
- "http 1.0.0",
+ "http 1.1.0",
"once_cell",
"percent-encoding",
"sha2",
@@ -1034,7 +1035,7 @@ dependencies = [
"aws-smithy-types",
"bytes",
"http 0.2.11",
- "http 1.0.0",
+ "http 1.1.0",
"pin-project-lite",
"tokio",
"tracing",
@@ -1505,7 +1506,7 @@ version = "4.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47"
dependencies = [
- "heck 0.4.1",
+ "heck",
"proc-macro2 1.0.76",
"quote 1.0.35",
"syn 2.0.48",
@@ -1632,23 +1633,21 @@ checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f"
[[package]]
name = "const-random"
-version = "0.1.15"
+version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "368a7a772ead6ce7e1de82bfb04c485f3db8ec744f72925af5735e29a22cc18e"
+checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359"
dependencies = [
"const-random-macro",
- "proc-macro-hack",
]
[[package]]
name = "const-random-macro"
-version = "0.1.15"
+version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d7d6ab3c3a2282db210df5f02c4dab6e0a7057af0fb7ebd4070f30fe05c0ddb"
+checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e"
dependencies = [
"getrandom 0.2.10",
"once_cell",
- "proc-macro-hack",
"tiny-keccak",
]
@@ -1909,9 +1908,9 @@ dependencies = [
[[package]]
name = "curve25519-dalek"
-version = "4.0.0"
+version = "4.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f711ade317dd348950a9910f81c5947e3d8907ebd2b83f76203ff1807e6a2bc2"
+checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348"
dependencies = [
"cfg-if",
"cpufeatures",
@@ -1954,7 +1953,7 @@ dependencies = [
"hashbrown 0.14.1",
"lock_api",
"once_cell",
- "parking_lot_core 0.9.8",
+ "parking_lot_core",
"serde",
]
@@ -2039,7 +2038,7 @@ dependencies = [
"libc",
"log",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"pin-project",
"serde",
"serde_json",
@@ -2221,7 +2220,7 @@ dependencies = [
"backtrace",
"lazy_static",
"mintex",
- "parking_lot 0.12.1",
+ "parking_lot",
"rustc-hash",
"serde",
"serde_json",
@@ -2449,7 +2448,7 @@ version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a"
dependencies = [
- "heck 0.4.1",
+ "heck",
"proc-macro2 1.0.76",
"quote 1.0.35",
"syn 2.0.48",
@@ -2607,9 +2606,9 @@ dependencies = [
[[package]]
name = "fiat-crypto"
-version = "0.1.20"
+version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77"
+checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f"
[[package]]
name = "filetime"
@@ -2747,7 +2746,7 @@ dependencies = [
"futures",
"lazy_static",
"log",
- "parking_lot 0.12.1",
+ "parking_lot",
"rand 0.8.5",
"redis-protocol",
"rustls",
@@ -3080,7 +3079,7 @@ checksum = "a40f793251171991c4eb75bd84bc640afa8b68ff6907bc89d3b712a22f700506"
dependencies = [
"graphql-introspection-query",
"graphql-parser",
- "heck 0.4.1",
+ "heck",
"lazy_static",
"proc-macro2 1.0.76",
"quote 1.0.35",
@@ -3215,15 +3214,6 @@ dependencies = [
"http 0.2.11",
]
-[[package]]
-name = "heck"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c"
-dependencies = [
- "unicode-segmentation",
-]
-
[[package]]
name = "heck"
version = "0.4.1"
@@ -3311,9 +3301,9 @@ dependencies = [
[[package]]
name = "http"
-version = "1.0.0"
+version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea"
+checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258"
dependencies = [
"bytes",
"fnv",
@@ -3422,7 +3412,7 @@ dependencies = [
"httpdate",
"itoa",
"pin-project-lite",
- "socket2 0.4.9",
+ "socket2 0.5.5",
"tokio",
"tower-service",
"tracing",
@@ -3749,7 +3739,7 @@ dependencies = [
"memchr",
"num-cmp",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"percent-encoding",
"regex",
"serde",
@@ -4353,6 +4343,15 @@ dependencies = [
"libc",
]
+[[package]]
+name = "num_threads"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "number_prefix"
version = "0.4.0"
@@ -4761,17 +4760,6 @@ version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e"
-[[package]]
-name = "parking_lot"
-version = "0.11.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
-dependencies = [
- "instant",
- "lock_api",
- "parking_lot_core 0.8.6",
-]
-
[[package]]
name = "parking_lot"
version = "0.12.1"
@@ -4779,21 +4767,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
"lock_api",
- "parking_lot_core 0.9.8",
-]
-
-[[package]]
-name = "parking_lot_core"
-version = "0.8.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
-dependencies = [
- "cfg-if",
- "instant",
- "libc",
- "redox_syscall 0.2.16",
- "smallvec",
- "winapi",
+ "parking_lot_core",
]
[[package]]
@@ -5150,12 +5124,6 @@ dependencies = [
"toml_edit 0.19.14",
]
-[[package]]
-name = "proc-macro-hack"
-version = "0.5.20+deprecated"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
-
[[package]]
name = "proc-macro2"
version = "0.4.30"
@@ -5184,7 +5152,7 @@ dependencies = [
"fnv",
"lazy_static",
"memchr",
- "parking_lot 0.12.1",
+ "parking_lot",
"protobuf",
"thiserror",
]
@@ -5231,7 +5199,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270"
dependencies = [
"bytes",
- "heck 0.4.1",
+ "heck",
"itertools 0.10.5",
"lazy_static",
"log",
@@ -5734,9 +5702,9 @@ dependencies = [
[[package]]
name = "router-bridge"
-version = "0.5.17+v2.7.2"
+version = "0.5.18+v2.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f183e217179b38a4283e76ca62e3149ebe96512e9b1bd6b3933abab863f9a2c"
+checksum = "673a5f56dd761938c87c89d33affb6f53e0129457d14bf12389f0cb4ebe74cfd"
dependencies = [
"anyhow",
"async-channel 1.9.0",
@@ -5761,13 +5729,24 @@ dependencies = [
name = "router-fuzz"
version = "0.0.0"
dependencies = [
+ "anyhow",
+ "apollo-compiler",
"apollo-parser",
+ "apollo-router",
"apollo-smith",
+ "async-trait",
"env_logger",
+ "http 0.2.11",
"libfuzzer-sys",
"log",
"reqwest",
+ "router-bridge",
+ "schemars",
+ "serde",
"serde_json",
+ "serde_json_bytes",
+ "tokio",
+ "tower",
]
[[package]]
@@ -5910,9 +5889,9 @@ dependencies = [
[[package]]
name = "rustls"
-version = "0.21.10"
+version = "0.21.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba"
+checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4"
dependencies = [
"log",
"ring 0.17.5",
@@ -5963,35 +5942,6 @@ version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
-[[package]]
-name = "salsa"
-version = "0.16.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4b84d9f96071f3f3be0dc818eae3327625d8ebc95b58da37d6850724f31d3403"
-dependencies = [
- "crossbeam-utils",
- "indexmap 1.9.3",
- "lock_api",
- "log",
- "oorandom",
- "parking_lot 0.11.2",
- "rustc-hash",
- "salsa-macros",
- "smallvec",
-]
-
-[[package]]
-name = "salsa-macros"
-version = "0.16.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd3904a4ba0a9d0211816177fd34b04c7095443f8cdacd11175064fe541c8fe2"
-dependencies = [
- "heck 0.3.3",
- "proc-macro2 1.0.76",
- "quote 1.0.35",
- "syn 1.0.109",
-]
-
[[package]]
name = "same-file"
version = "1.0.6"
@@ -6279,7 +6229,7 @@ dependencies = [
"futures",
"lazy_static",
"log",
- "parking_lot 0.12.1",
+ "parking_lot",
"serial_test_derive",
]
@@ -6536,9 +6486,9 @@ dependencies = [
[[package]]
name = "strum"
-version = "0.26.1"
+version = "0.26.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "723b93e8addf9aa965ebe2d11da6d7540fa2283fcea14b3371ff055f7ba13f5f"
+checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29"
[[package]]
name = "strum_macros"
@@ -6546,7 +6496,7 @@ version = "0.25.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0"
dependencies = [
- "heck 0.4.1",
+ "heck",
"proc-macro2 1.0.76",
"quote 1.0.35",
"rustversion",
@@ -6559,7 +6509,7 @@ version = "0.26.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a3417fc93d76740d974a01654a09777cb500428cc874ca9f45edfe0c4d4cd18"
dependencies = [
- "heck 0.4.1",
+ "heck",
"proc-macro2 1.0.76",
"quote 1.0.35",
"rustversion",
@@ -6858,7 +6808,9 @@ checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749"
dependencies = [
"deranged",
"itoa",
+ "libc",
"num-conv",
+ "num_threads",
"powerfmt",
"serde",
"time-core",
@@ -6926,7 +6878,7 @@ dependencies = [
"libc",
"mio",
"num_cpus",
- "parking_lot 0.12.1",
+ "parking_lot",
"pin-project-lite",
"signal-hook-registry",
"socket2 0.5.5",
@@ -7413,7 +7365,7 @@ dependencies = [
"ipconfig",
"lru-cache",
"once_cell",
- "parking_lot 0.12.1",
+ "parking_lot",
"rand 0.8.5",
"resolv-conf",
"smallvec",
@@ -7602,12 +7554,6 @@ dependencies = [
"tinyvec",
]
-[[package]]
-name = "unicode-segmentation"
-version = "1.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36"
-
[[package]]
name = "unicode-width"
version = "0.1.10"
@@ -8185,7 +8131,7 @@ version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96"
dependencies = [
- "curve25519-dalek 4.0.0",
+ "curve25519-dalek 4.1.2",
"rand_core 0.6.4",
"serde",
"zeroize",
diff --git a/Cargo.toml b/Cargo.toml
index 4861dbc154..dfebd2c8b7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -41,3 +41,33 @@ incremental = false
[profile.release-dhat]
inherits = "release"
debug = 1
+
+# Dependencies used in more than one place are specified here in order to keep versions in sync:
+# https://doc.rust-lang.org/cargo/reference/workspaces.html#the-dependencies-table
+[workspace.dependencies]
+apollo-compiler = "=1.0.0-beta.16"
+apollo-parser = "0.7.6"
+apollo-smith = { version = "0.5.0", features = ["parser-impl"] }
+async-trait = "0.1.77"
+http = "0.2.11"
+once_cell = "1.19.0"
+reqwest = { version = "0.11.24", default-features = false, features = [
+ "rustls-tls",
+ "rustls-native-certs",
+ "gzip",
+ "json",
+ "stream",
+] }
+
+# note: this dependency should _always_ be pinned, prefix the version with an `=`
+router-bridge = "=0.5.18+v2.7.2"
+
+schemars = { version = "0.8.16", features = ["url"] }
+serde = { version = "1.0.197", features = ["derive", "rc"] }
+serde_json = { version = "1.0.114", features = [
+ "preserve_order",
+ "float_roundtrip",
+] }
+serde_json_bytes = { version = "0.2.2", features = ["preserve_order"] }
+tokio = { version = "1.36.0", features = ["full"] }
+tower = { version = "0.4.13", features = ["full"] }
diff --git a/RELEASE_CHECKLIST.md b/RELEASE_CHECKLIST.md
index 45fbc8cf54..c69c852b47 100644
--- a/RELEASE_CHECKLIST.md
+++ b/RELEASE_CHECKLIST.md
@@ -50,6 +50,7 @@ Make sure you have the following software installed and available in your `PATH`
- `gh`: [The GitHub CLI](https://cli.github.com/)
- `cargo`: [Cargo & Rust Installation](https://doc.rust-lang.org/cargo/getting-started/installation.html)
+ - `helm`: see <https://helm.sh/docs/intro/install/>
- `helm-docs`: see <https://github.com/norwoodj/helm-docs>
- `cargo-about`: install with `cargo install --locked cargo-about`
- `cargo-deny`: install with `cargo install --locked cargo-deny`
@@ -166,7 +167,7 @@ Start following the steps below to start a release PR. The process is **not ful
6. Run the release automation script using this command to use the environment variable set previously:
```
- cargo xtask release prepare "${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}"
+ cargo xtask release prepare --pre-release "${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}"
```
Running this command will:
@@ -175,41 +176,39 @@ Start following the steps below to start a release PR. The process is **not ful
- Run our compliance checks and update the `licenses.html` file as appropriate.
- Ensure we're not using any incompatible licenses in the release.
- Currently, it will also do one step which we will **immediately undo** in the next step, since it is not desireable for pre-release versions:
-
- - Migrate the current set of `/.changesets/*.md` files into `/CHANGELOG.md` using the version specified.
-
-7. Revert the changes to the `CHANGELOG.md` made in the last step since we don't finalize the changelog from the `.changesets` until the final release is prepared. (This really could be replaced with a `--skip-changesets` flag.)
-
- ```
- git checkout -- .changesets/ CHANGELOG.md
- ```
-
-8. Now, review and stage he changes produced by the previous step. This is most safely done using the `--patch` (or `-p`) flag to `git add` (`-u` ignores untracked files).
+7. Now, review and stage the changes produced by the previous step. This is most safely done using the `--patch` (or `-p`) flag to `git add` (`-u` ignores untracked files).
```
git add -up .
```
-9. Now commit those changes locally, using a brief message:
+8. Now commit those changes locally, using a brief message:
```
git commit -m "prep release: v${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}"
```
-10. Push this commit up to the existing release PR:
+9. Push this commit up to the existing release PR:
- ```
- git push "${APOLLO_ROUTER_RELEASE_GIT_ORIGIN}" "${APOLLO_ROUTER_RELEASE_VERSION}"
- ```
+ ```
+ git push "${APOLLO_ROUTER_RELEASE_GIT_ORIGIN}" "${APOLLO_ROUTER_RELEASE_VERSION}"
+ ```
-10. Git tag & push the pre-release:
+10. Git tag the current commit and push the branch and the pre-release tag simultaneously:
This process will kick off the bulk of the release process on CircleCI, including building each architecture on its own infrastructure and notarizing the macOS binary.
```
git tag -a "v${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}" -m "${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}" && \
- git push "${APOLLO_ROUTER_RELEASE_GIT_ORIGIN}" "v${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}"
+ git push "${APOLLO_ROUTER_RELEASE_GIT_ORIGIN}" "${APOLLO_ROUTER_RELEASE_VERSION}" "v${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}"
+ ```
+
+11. Finally, publish the Crate from your local computer (this also needs to be moved to CI, but requires changing the release containers to be Rust-enabled and to restore the caches):
+
+ > Note: This command may appear unnecessarily specific, but it will help avoid publishing a version to Crates.io that doesn't match what you're currently releasing. (e.g., in the event that you've changed branches in another window)
+
+ ```
+ cargo publish -p apollo-router@"${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}"
```
### Preparing the final release
@@ -442,8 +441,10 @@ Start following the steps below to start a release PR. The process is **not ful
17. Finally, publish the Crate from your local computer from the `main` branch (this also needs to be moved to CI, but requires changing the release containers to be Rust-enabled and to restore the caches):
+ > Note: This command may appear unnecessarily specific, but it will help avoid publishing a version to Crates.io that doesn't match what you're currently releasing. (e.g., in the event that you've changed branches in another window)
+
```
- cargo publish -p apollo-router
+ cargo publish -p apollo-router@"${APOLLO_ROUTER_RELEASE_VERSION}"
```
18. (Optional) To have a "social banner" for this release, run [this `htmlq` command](https://crates.io/crates/htmlq) (`cargo install htmlq`, or on macOS `brew install htmlq`; it's `jq` for HTML), open the link it produces, copy the image to your clipboard:
diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml
index 45cb1924d1..f33eea0f89 100644
--- a/apollo-router-benchmarks/Cargo.toml
+++ b/apollo-router-benchmarks/Cargo.toml
@@ -1,25 +1,23 @@
[package]
name = "apollo-router-benchmarks"
-version = "1.44.0"
+version = "1.45.0"
authors = ["Apollo Graph, Inc. <packages@apollographql.com>"]
edition = "2021"
license = "Elastic-2.0"
publish = false
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
[dev-dependencies]
apollo-router = { path = "../apollo-router" }
criterion = { version = "0.5", features = ["async_tokio", "async_futures"] }
memory-stats = "1.1.0"
-once_cell = "1"
-serde_json = { version = "1", features = ["preserve_order", "float_roundtrip"] }
-tokio = { version = "1", features = ["full"] }
-tower = "0.4"
+once_cell.workspace = true
+serde_json.workspace = true
+tokio.workspace = true
+tower.workspace = true
[build-dependencies]
-apollo-smith = { version = "0.5.0", features = ["parser-impl"] }
-apollo-parser = "0.7.6"
+apollo-smith.workspace = true
+apollo-parser.workspace = true
arbitrary = "1.3.2"
[[bench]]
diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml
index ff6054dca0..e7772cd9a1 100644
--- a/apollo-router-scaffold/Cargo.toml
+++ b/apollo-router-scaffold/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "apollo-router-scaffold"
-version = "1.44.0"
+version = "1.45.0"
authors = ["Apollo Graph, Inc. <packages@apollographql.com>"]
edition = "2021"
license = "Elastic-2.0"
diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml
index 56e237bfce..47ff9108a6 100644
--- a/apollo-router-scaffold/templates/base/Cargo.toml
+++ b/apollo-router-scaffold/templates/base/Cargo.toml
@@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" }
apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" }
{{else}}
# Note if you update these dependencies then also update xtask/Cargo.toml
-apollo-router = "1.44.0"
+apollo-router = "1.45.0"
{{/if}}
{{/if}}
async-trait = "0.1.52"
diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml
index 0f1d41f37b..bdace89219 100644
--- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml
+++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml
@@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" }
{{#if branch}}
apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" }
{{else}}
-apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.44.0" }
+apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0" }
{{/if}}
{{/if}}
anyhow = "1.0.58"
diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml
index cdd8c23bc0..8070eee9ee 100644
--- a/apollo-router/Cargo.toml
+++ b/apollo-router/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "apollo-router"
-version = "1.44.0"
+version = "1.45.0"
authors = ["Apollo Graph, Inc. <packages@apollographql.com>"]
repository = "https://github.com/apollographql/router/"
documentation = "https://docs.rs/apollo-router"
@@ -64,16 +64,17 @@ features = ["docs_rs"]
askama = "0.12.1"
access-json = "0.1.0"
anyhow = "1.0.80"
-apollo-compiler = "=1.0.0-beta.14"
-apollo-federation = "=0.0.9"
+apollo-compiler.workspace = true
+apollo-federation = "=0.0.11"
arc-swap = "1.6.0"
+async-channel = "1.9.0"
async-compression = { version = "0.4.6", features = [
"tokio",
"brotli",
"gzip",
"deflate",
] }
-async-trait = "0.1.77"
+async-trait.workspace = true
axum = { version = "0.6.20", features = ["headers", "json", "original-uri"] }
base64 = "0.21.7"
bloomfilter = "1.0.13"
@@ -103,7 +104,7 @@ fred = { version = "7.1.2", features = ["enable-rustls"] }
futures = { version = "0.3.30", features = ["thread-pool"] }
graphql_client = "0.13.0"
hex = { version = "0.4.3", features = ["serde"] }
-http = "0.2.11"
+http.workspace = true
http-body = "0.4.6"
heck = "0.4.1"
humantime = "2.1.0"
@@ -178,37 +179,28 @@ proteus = "0.5.0"
rand = "0.8.5"
rhai = { version = "=1.17.1", features = ["sync", "serde", "internals"] }
regex = "1.10.3"
-reqwest = { version = "0.11.24", default-features = false, features = [
- "rustls-tls",
- "rustls-native-certs",
- "gzip",
- "json",
- "stream",
-] }
+reqwest.workspace = true
# note: this dependency should _always_ be pinned, prefix the version with an `=`
-router-bridge = "=0.5.17+v2.7.2"
+router-bridge = "=0.5.18+v2.7.2"
rust-embed = "8.2.0"
-rustls = "0.21.10"
+rustls = "0.21.11"
rustls-native-certs = "0.6.3"
rustls-pemfile = "1.0.4"
-schemars = { version = "0.8.16", features = ["url"] }
+schemars.workspace = true
shellexpand = "3.1.0"
sha2 = "0.10.8"
semver = "1.0.22"
-serde = { version = "1.0.197", features = ["derive", "rc"] }
+serde.workspace = true
serde_derive_default = "0.1"
-serde_json_bytes = { version = "0.2.2", features = ["preserve_order"] }
-serde_json = { version = "1.0.114", features = [
- "preserve_order",
- "float_roundtrip",
-] }
+serde_json_bytes.workspace = true
+serde_json.workspace = true
serde_urlencoded = "0.7.1"
serde_yaml = "0.8.26"
static_assertions = "1.1.0"
strum_macros = "0.25.3"
sys-info = "0.9.1"
thiserror = "1.0.57"
-tokio = { version = "1.36.0", features = ["full"] }
+tokio.workspace = true
tokio-stream = { version = "0.1.14", features = ["sync", "net"] }
tokio-util = { version = "0.7.10", features = ["net", "codec", "time"] }
tonic = { version = "0.9.2", features = [
@@ -217,7 +209,7 @@ tonic = { version = "0.9.2", features = [
"tls-roots",
"gzip",
] }
-tower = { version = "0.4.13", features = ["full"] }
+tower.workspace = true
tower-http = { version = "0.4.4", features = [
"add-extension",
"trace",
@@ -272,7 +264,9 @@ uname = "0.1.1"
[target.'cfg(unix)'.dependencies]
uname = "0.1.1"
-hyperlocal = { version = "0.8.0", default-features = false, features = ["client"] }
+hyperlocal = { version = "0.8.0", default-features = false, features = [
+ "client",
+] }
[target.'cfg(target_os = "linux")'.dependencies]
tikv-jemallocator = "0.5"
@@ -292,10 +286,15 @@ maplit = "1.0.2"
memchr = { version = "2.7.1", default-features = false }
mockall = "0.11.4"
num-traits = "0.2.18"
-once_cell = "1.19.0"
+once_cell.workspace = true
opentelemetry-stdout = { version = "0.1.0", features = ["trace"] }
opentelemetry = { version = "0.20.0", features = ["testing"] }
-opentelemetry-proto = { version="0.5.0", features = ["metrics", "trace", "gen-tonic-messages", "with-serde"] }
+opentelemetry-proto = { version = "0.5.0", features = [
+ "metrics",
+ "trace",
+ "gen-tonic-messages",
+ "with-serde",
+] }
p256 = "0.13.2"
rand_core = "0.6.4"
reqwest = { version = "0.11.24", default-features = false, features = [
@@ -332,12 +331,15 @@ wiremock = "0.5.22"
rstack = { version = "0.3.3", features = ["dw"], default-features = false }
[target.'cfg(unix)'.dev-dependencies]
-hyperlocal = { version = "0.8.0", default-features = false, features = ["client", "server"] }
+hyperlocal = { version = "0.8.0", default-features = false, features = [
+ "client",
+ "server",
+] }
[build-dependencies]
tonic-build = "0.9.2"
basic-toml = "0.1"
-serde_json = "1.0.114"
+serde_json.workspace = true
[[test]]
name = "integration_tests"
@@ -350,3 +352,6 @@ harness = false
[[bench]]
name = "deeply_nested"
harness = false
+
+[[example]]
+name = "planner"
diff --git a/apollo-router/examples/.skipconfigvalidation b/apollo-router/examples/.skipconfigvalidation
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/apollo-router/examples/planner.rs b/apollo-router/examples/planner.rs
new file mode 100644
index 0000000000..43e53e9261
--- /dev/null
+++ b/apollo-router/examples/planner.rs
@@ -0,0 +1,66 @@
+use std::ops::ControlFlow;
+
+use anyhow::Result;
+use apollo_router::layers::ServiceBuilderExt;
+use apollo_router::plugin::Plugin;
+use apollo_router::plugin::PluginInit;
+use apollo_router::register_plugin;
+use apollo_router::services::execution;
+use apollo_router::services::supergraph;
+use tower::BoxError;
+use tower::ServiceBuilder;
+use tower::ServiceExt;
+
+#[derive(Debug)]
+struct DoNotExecute {
+ #[allow(dead_code)]
+ configuration: bool,
+}
+
+#[async_trait::async_trait]
+impl Plugin for DoNotExecute {
+ type Config = bool;
+
+ async fn new(init: PluginInit<Self::Config>) -> Result<Self, BoxError> {
+ Ok(Self {
+ configuration: init.config,
+ })
+ }
+
+ fn supergraph_service(&self, service: supergraph::BoxService) -> supergraph::BoxService {
+ ServiceBuilder::new()
+ .map_request(|mut req: supergraph::Request| {
+ let body = req.supergraph_request.body_mut();
+ body.query = body.query.as_ref().map(|query| {
+ let query_name = format!("query Query{} ", rand::random::<usize>());
+ query.replacen("query ", query_name.as_str(), 1)
+ });
+ req
+ })
+ .service(service)
+ .boxed()
+ }
+
+ fn execution_service(&self, service: execution::BoxService) -> execution::BoxService {
+ ServiceBuilder::new()
+ .checkpoint(|req: execution::Request| {
+ Ok(ControlFlow::Break(
+ execution::Response::fake_builder()
+ .context(req.context)
+ .build()
+ .unwrap(),
+ ))
+ })
+ .service(service)
+ .boxed()
+ }
+}
+
+register_plugin!("apollo-test", "do_not_execute", DoNotExecute);
+
+// Run this benchmark with cargo run --release --example planner -- --hot-reload -s <path-to-a-supergraph-schema> -c ./apollo-router/examples/router.yaml
+// You can then send operations to it with `ab` or `hey` or any tool you like:
+// hey -n 1000 -c 10 -m POST -H 'Content-Type: application/json' -D 'path/to/an/anonymous/operation' http://localhost:4100
+fn main() -> Result<()> {
+ apollo_router::main()
+}
diff --git a/apollo-router/examples/router.yaml b/apollo-router/examples/router.yaml
new file mode 100644
index 0000000000..6936d57e32
--- /dev/null
+++ b/apollo-router/examples/router.yaml
@@ -0,0 +1,13 @@
+supergraph:
+ listen: 0.0.0.0:4100
+ introspection: true
+ query_planning:
+ experimental_parallelism: auto # or any number
+plugins:
+ experimental.expose_query_plan: true
+ apollo-test.do_not_execute: true
+experimental_graphql_validation_mode: both
+sandbox:
+ enabled: true
+homepage:
+ enabled: false
diff --git a/apollo-router/feature_discussions.json b/apollo-router/feature_discussions.json
index 59f5a84608..446162650a 100644
--- a/apollo-router/feature_discussions.json
+++ b/apollo-router/feature_discussions.json
@@ -2,10 +2,9 @@
"experimental": {
"experimental_retry": "https://github.com/apollographql/router/discussions/2241",
"experimental_response_trace_id": "https://github.com/apollographql/router/discussions/2147",
- "experimental_when_header": "https://github.com/apollographql/router/discussions/1961",
- "experimental_batching": "https://github.com/apollographql/router/discussions/3840"
+ "experimental_when_header": "https://github.com/apollographql/router/discussions/1961"
},
"preview": {
"preview_entity_cache": "https://github.com/apollographql/router/discussions/4592"
}
-}
\ No newline at end of file
+}
diff --git a/apollo-router/src/apollo_studio_interop/mod.rs b/apollo-router/src/apollo_studio_interop/mod.rs
new file mode 100644
index 0000000000..7cd27c3a7d
--- /dev/null
+++ b/apollo-router/src/apollo_studio_interop/mod.rs
@@ -0,0 +1,530 @@
+//! Generation of usage reporting fields
+use std::collections::hash_map::Entry;
+use std::collections::HashMap;
+use std::collections::HashSet;
+use std::fmt;
+
+use apollo_compiler::ast::Argument;
+use apollo_compiler::ast::DirectiveList;
+use apollo_compiler::ast::Name;
+use apollo_compiler::ast::OperationType;
+use apollo_compiler::ast::Value;
+use apollo_compiler::ast::VariableDefinition;
+use apollo_compiler::executable::Field;
+use apollo_compiler::executable::Fragment;
+use apollo_compiler::executable::FragmentSpread;
+use apollo_compiler::executable::InlineFragment;
+use apollo_compiler::executable::Operation;
+use apollo_compiler::executable::Selection;
+use apollo_compiler::executable::SelectionSet;
+use apollo_compiler::validation::Valid;
+use apollo_compiler::ExecutableDocument;
+use apollo_compiler::Node;
+use apollo_compiler::Schema;
+use router_bridge::planner::ReferencedFieldsForType;
+use router_bridge::planner::UsageReporting;
+
+/// The result of the generate_usage_reporting function which contains a UsageReporting struct and
+/// functions that allow comparison with another ComparableUsageReporting or UsageReporting object.
+pub(crate) struct ComparableUsageReporting {
+ /// The UsageReporting fields
+ pub(crate) result: UsageReporting,
+}
+
+/// Enum specifying the result of a comparison.
+pub(crate) enum UsageReportingComparisonResult {
+ /// The UsageReporting instances are the same
+ Equal,
+ /// The stats_report_key in the UsageReporting instances are different
+ StatsReportKeyNotEqual,
+ /// The referenced_fields in the UsageReporting instances are different. When comparing referenced
+ /// fields, we ignore the ordering of field names.
+ ReferencedFieldsNotEqual,
+ /// Both the stats_report_key and referenced_fields in the UsageReporting instances are different.
+ BothNotEqual,
+}
+
+impl ComparableUsageReporting {
+ /// Compare this to another UsageReporting.
+ pub(crate) fn compare(&self, other: &UsageReporting) -> UsageReportingComparisonResult {
+ let sig_equal = self.result.stats_report_key == other.stats_report_key;
+ let refs_equal = self.compare_referenced_fields(&other.referenced_fields_by_type);
+ match (sig_equal, refs_equal) {
+ (true, true) => UsageReportingComparisonResult::Equal,
+ (false, true) => UsageReportingComparisonResult::StatsReportKeyNotEqual,
+ (true, false) => UsageReportingComparisonResult::ReferencedFieldsNotEqual,
+ (false, false) => UsageReportingComparisonResult::BothNotEqual,
+ }
+ }
+
+ fn compare_referenced_fields(
+ &self,
+ other_ref_fields: &HashMap<String, ReferencedFieldsForType>,
+ ) -> bool {
+ let self_ref_fields = &self.result.referenced_fields_by_type;
+ if self_ref_fields.len() != other_ref_fields.len() {
+ return false;
+ }
+
+ for (name, self_refs) in self_ref_fields.iter() {
+ let maybe_other_refs = other_ref_fields.get(name);
+ if let Some(other_refs) = maybe_other_refs {
+ if self_refs.is_interface != other_refs.is_interface {
+ return false;
+ }
+
+ let self_field_names_set: HashSet<_> =
+ self_refs.field_names.clone().into_iter().collect();
+ let other_field_names_set: HashSet<_> =
+ other_refs.field_names.clone().into_iter().collect();
+ if self_field_names_set != other_field_names_set {
+ return false;
+ }
+ } else {
+ return false;
+ }
+ }
+
+ true
+ }
+}
+
+/// Generate a ComparableUsageReporting containing the stats_report_key (a normalized version of the operation signature)
+/// and referenced fields of an operation. The document used to generate the signature and for the references can be
+/// different to handle cases where the operation has been filtered, but we want to keep the same signature.
+pub(crate) fn generate_usage_reporting(
+ signature_doc: &ExecutableDocument,
+ references_doc: &ExecutableDocument,
+ operation_name: &Option<String>,
+ schema: &Valid<Schema>,
+) -> ComparableUsageReporting {
+ let mut generator = UsageReportingGenerator {
+ signature_doc,
+ references_doc,
+ operation_name,
+ schema,
+ fragments_map: HashMap::new(),
+ fields_by_type: HashMap::new(),
+ fields_by_interface: HashMap::new(),
+ fragment_spread_set: HashSet::new(),
+ };
+
+ generator.generate()
+}
+
+struct UsageReportingGenerator<'a> {
+ signature_doc: &'a ExecutableDocument,
+ references_doc: &'a ExecutableDocument,
+ operation_name: &'a Option<String>,
+ schema: &'a Valid<Schema>,
+ fragments_map: HashMap<String, Node<Fragment>>,
+ fields_by_type: HashMap<String, HashSet<String>>,
+ fields_by_interface: HashMap<String, bool>,
+ fragment_spread_set: HashSet<Name>,
+}
+
+impl UsageReportingGenerator<'_> {
+ fn generate(&mut self) -> ComparableUsageReporting {
+ ComparableUsageReporting {
+ result: UsageReporting {
+ stats_report_key: self.generate_stats_report_key(),
+ referenced_fields_by_type: self.generate_apollo_reporting_refs(),
+ },
+ }
+ }
+
+ fn generate_stats_report_key(&mut self) -> String {
+ self.fragments_map.clear();
+
+ match self
+ .signature_doc
+ .get_operation(self.operation_name.as_deref())
+ .ok()
+ {
+ None => "".to_string(),
+ Some(operation) => {
+ self.extract_signature_fragments(&operation.selection_set);
+ self.format_operation_for_report(operation)
+ }
+ }
+ }
+
+ fn extract_signature_fragments(&mut self, selection_set: &SelectionSet) {
+ for selection in &selection_set.selections {
+ match selection {
+ Selection::Field(field) => {
+ self.extract_signature_fragments(&field.selection_set);
+ }
+ Selection::InlineFragment(fragment) => {
+ self.extract_signature_fragments(&fragment.selection_set);
+ }
+ Selection::FragmentSpread(fragment_node) => {
+ let fragment_name = fragment_node.fragment_name.to_string();
+ if let Entry::Vacant(e) = self.fragments_map.entry(fragment_name) {
+ if let Some(fragment) = self
+ .signature_doc
+ .fragments
+ .get(&fragment_node.fragment_name)
+ {
+ e.insert(fragment.clone());
+ }
+ }
+ }
+ }
+ }
+ }
+
+ fn format_operation_for_report(&self, operation: &Node<Operation>) -> String {
+ // The result in the name of the operation
+ let op_name = match &operation.name {
+ None => "-".into(),
+ Some(node) => node.to_string(),
+ };
+ let mut result = format!("# {}\n", op_name);
+
+ // Followed by a sorted list of fragments
+ let mut sorted_fragments: Vec<_> = self.fragments_map.iter().collect();
+ sorted_fragments.sort_by_key(|&(k, _)| k);
+
+ sorted_fragments.into_iter().for_each(|(_, f)| {
+ result.push_str(&ApolloReportingSignatureFormatter::Fragment(f).to_string())
+ });
+
+ // Followed by the operation
+ result.push_str(&ApolloReportingSignatureFormatter::Operation(operation).to_string());
+
+ result
+ }
+
+ fn generate_apollo_reporting_refs(&mut self) -> HashMap<String, ReferencedFieldsForType> {
+ self.fragments_map.clear();
+ self.fields_by_type.clear();
+ self.fields_by_interface.clear();
+
+ match self
+ .references_doc
+ .get_operation(self.operation_name.as_deref())
+ .ok()
+ {
+ None => HashMap::new(),
+ Some(operation) => {
+ let operation_type = match operation.operation_type {
+ OperationType::Query => "Query",
+ OperationType::Mutation => "Mutation",
+ OperationType::Subscription => "Subscription",
+ };
+ self.extract_fields(operation_type, &operation.selection_set);
+
+ self.fields_by_type
+ .iter()
+ .filter_map(|(type_name, field_names)| {
+ if field_names.is_empty() {
+ None
+ } else {
+ let refs = ReferencedFieldsForType {
+ field_names: field_names.iter().cloned().collect(),
+ is_interface: *self
+ .fields_by_interface
+ .get(type_name)
+ .unwrap_or(&false),
+ };
+
+ Some((type_name.clone(), refs))
+ }
+ })
+ .collect()
+ }
+ }
+ }
+
+ fn extract_fields(&mut self, parent_type: &str, selection_set: &SelectionSet) {
+ if !self.fields_by_interface.contains_key(parent_type) {
+ let field_schema_type = self.schema.types.get(parent_type);
+ let is_interface = field_schema_type.is_some_and(|t| t.is_interface());
+ self.fields_by_interface
+ .insert(parent_type.into(), is_interface);
+ }
+
+ for selection in &selection_set.selections {
+ match selection {
+ Selection::Field(field) => {
+ self.fields_by_type
+ .entry(parent_type.into())
+ .or_default()
+ .insert(field.name.to_string());
+
+ let field_type = field.selection_set.ty.to_string();
+ self.extract_fields(&field_type, &field.selection_set);
+ }
+ Selection::InlineFragment(fragment) => {
+ let frag_type_name = match fragment.type_condition.clone() {
+ Some(fragment_type) => fragment_type.to_string(),
+ None => parent_type.into(),
+ };
+ self.extract_fields(&frag_type_name, &fragment.selection_set);
+ }
+ Selection::FragmentSpread(fragment) => {
+ if !self.fragment_spread_set.contains(&fragment.fragment_name) {
+ self.fragment_spread_set
+ .insert(fragment.fragment_name.clone());
+
+ if let Some(fragment) =
+ self.references_doc.fragments.get(&fragment.fragment_name)
+ {
+ let fragment_type = fragment.selection_set.ty.to_string();
+ self.extract_fields(&fragment_type, &fragment.selection_set);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+enum ApolloReportingSignatureFormatter<'a> {
+ Operation(&'a Node<Operation>),
+ Fragment(&'a Node<Fragment>),
+ Argument(&'a Node<Argument>),
+ Field(&'a Node<Field>),
+}
+
+impl<'a> fmt::Display for ApolloReportingSignatureFormatter<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ ApolloReportingSignatureFormatter::Operation(operation) => {
+ format_operation(operation, f)
+ }
+ ApolloReportingSignatureFormatter::Fragment(fragment) => format_fragment(fragment, f),
+ ApolloReportingSignatureFormatter::Argument(argument) => format_argument(argument, f),
+ ApolloReportingSignatureFormatter::Field(field) => format_field(field, f),
+ }
+ }
+}
+
+fn format_operation(operation: &Node<Operation>, f: &mut fmt::Formatter) -> fmt::Result {
+ let shorthand = operation.operation_type == OperationType::Query
+ && operation.name.is_none()
+ && operation.variables.is_empty()
+ && operation.directives.is_empty();
+
+ if !shorthand {
+ f.write_str(operation.operation_type.name())?;
+ if let Some(name) = &operation.name {
+ write!(f, " {}", name)?;
+ }
+
+ // print variables sorted by name
+ if !operation.variables.is_empty() {
+ f.write_str("(")?;
+ let mut sorted_variables = operation.variables.clone();
+ sorted_variables.sort_by(|a, b| a.name.cmp(&b.name));
+ for (index, variable) in sorted_variables.iter().enumerate() {
+ if index != 0 {
+ f.write_str(",")?;
+ }
+ format_variable(variable, f)?;
+ }
+ f.write_str(")")?;
+ }
+
+ // In the JS implementation, only the fragment directives are sorted
+ format_directives(&operation.directives, false, f)?;
+ }
+
+ format_selection_set(&operation.selection_set, f)
+}
+
+fn format_selection_set(selection_set: &SelectionSet, f: &mut fmt::Formatter) -> fmt::Result {
+ // print selection set sorted by name with fields followed by named fragments followed by inline fragments
+ let mut fields: Vec<&Node<Field>> = Vec::new();
+ let mut named_fragments: Vec<&Node<FragmentSpread>> = Vec::new();
+ let mut inline_fragments: Vec<&Node<InlineFragment>> = Vec::new();
+ for selection in selection_set.selections.iter() {
+ match selection {
+ Selection::Field(field) => {
+ fields.push(field);
+ }
+ Selection::FragmentSpread(fragment_spread) => {
+ named_fragments.push(fragment_spread);
+ }
+ Selection::InlineFragment(inline_fragment) => {
+ inline_fragments.push(inline_fragment);
+ }
+ }
+ }
+
+ if !fields.is_empty() || !named_fragments.is_empty() || !inline_fragments.is_empty() {
+ fields.sort_by(|&a, &b| a.name.cmp(&b.name));
+ named_fragments.sort_by(|&a, &b| a.fragment_name.cmp(&b.fragment_name));
+ // Note that inline fragments are not sorted in the JS implementation
+
+ f.write_str("{")?;
+
+ for (i, &field) in fields.iter().enumerate() {
+ let field_str = ApolloReportingSignatureFormatter::Field(field).to_string();
+ f.write_str(&field_str)?;
+
+ // We need to insert a space if this is not the last field and it ends in an alphanumeric character
+ if i < fields.len() - 1
+ && field_str
+ .chars()
+ .last()
+ .map_or(false, |c| c.is_alphanumeric())
+ {
+ f.write_str(" ")?;
+ }
+ }
+
+ for &frag in named_fragments.iter() {
+ format_fragment_spread(frag, f)?;
+ }
+
+ for &frag in inline_fragments.iter() {
+ format_inline_fragment(frag, f)?;
+ }
+
+ f.write_str("}")?;
+ }
+
+ Ok(())
+}
+
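+// For example (see `test_fragments` below): a selection set containing fields, named
+// fragment spreads, and inline fragments prints the sorted fields first, then the
+// spreads sorted by fragment name, then the inline fragments in their original order,
+// so the `interfaceResponse` selection there prints as `sharedField` followed by
+// `...aaaInterfaceFragment...bbbInterfaceFragment` and then its inline fragments.
+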
+fn format_variable(arg: &Node<VariableDefinition>, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "${}:{}", arg.name, arg.ty)?;
+ if let Some(value) = &arg.default_value {
+ f.write_str("=")?;
+ format_value(value, f)?;
+ }
+ format_directives(&arg.directives, false, f)
+}
+
+fn format_argument(arg: &Node<Argument>, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{}:", arg.name)?;
+ format_value(&arg.value, f)
+}
+
+fn format_field(field: &Node<Field>, f: &mut fmt::Formatter) -> fmt::Result {
+ f.write_str(&field.name)?;
+
+ let mut sorted_args = field.arguments.clone();
+ if !sorted_args.is_empty() {
+ sorted_args.sort_by(|a, b| a.name.cmp(&b.name));
+
+ f.write_str("(")?;
+
+ // The graphql-js implementation will use newlines and indentation instead of commas if the length of the "arg line" is
+ // over 80 characters. This "arg line" includes the alias followed by ": " if the field has an alias (which is never
+ // the case for now), followed by all argument names and values separated by ": ", surrounded with brackets. Our usage
+ // reporting plugin replaces all newlines + indentation with a single space, so we have to replace commas with spaces if
+ // the line length is too long.
+ let arg_strings: Vec<String> = sorted_args
+ .iter()
+ .map(|a| ApolloReportingSignatureFormatter::Argument(a).to_string())
+ .collect();
+ // Adjust for incorrect spacing generated by the argument formatter - 2 extra characters for the surrounding brackets, plus
+ // 2 extra characters per argument for the separating space and the space between the argument name and value.
+ let original_line_length =
+ 2 + arg_strings.iter().map(|s| s.len()).sum::<usize>() + (arg_strings.len() * 2);
+ let separator = if original_line_length > 80 { " " } else { "," };
+
+ for (index, arg_string) in arg_strings.iter().enumerate() {
+ f.write_str(arg_string)?;
+
+ // We only need to insert a separating space if it's not the last arg and the string ends in an alphanumeric character.
+ // If the separator is a comma, we always insert it when it's not the last arg.
+ if index < arg_strings.len() - 1
+ && (separator == ","
+ || arg_string
+ .chars()
+ .last()
+ .map_or(true, |c| c.is_alphanumeric()))
+ {
+ f.write_str(separator)?;
+ }
+ }
+ f.write_str(")")?;
+ }
+
+ // In the JS implementation, only the fragment directives are sorted
+ format_directives(&field.directives, false, f)?;
+ format_selection_set(&field.selection_set, f)
+}
+
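+// For example: the arg line in `test_comma_edge_case` below fits within 80 characters
+// and stays comma-separated, `enumInputQuery(anotherStr:"",enumInput:SOME_VALUE_1,stringInput:"")`,
+// while the long arg lines in `test_field_arg_comma_or_space` are space-separated, e.g.
+// `enumInputQuery(enumInput:$enumInputWithAVryLongNameSoLineLengthIsOver80 inputType:$enumInputType)`.
+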
+fn format_fragment_spread(
+ fragment_spread: &Node<FragmentSpread>,
+ f: &mut fmt::Formatter,
+) -> fmt::Result {
+ write!(f, "...{}", fragment_spread.fragment_name)?;
+ format_directives(&fragment_spread.directives, true, f)
+}
+
+fn format_inline_fragment(
+ inline_fragment: &Node<InlineFragment>,
+ f: &mut fmt::Formatter,
+) -> fmt::Result {
+ if let Some(type_name) = &inline_fragment.type_condition {
+ write!(f, "...on {}", type_name)?;
+ } else {
+ f.write_str("...")?;
+ }
+
+ format_directives(&inline_fragment.directives, true, f)?;
+ format_selection_set(&inline_fragment.selection_set, f)
+}
+
+fn format_fragment(fragment: &Node<Fragment>, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(
+ f,
+ "fragment {} on {}",
+ &fragment.name.to_string(),
+ &fragment.selection_set.ty.to_string()
+ )?;
+ format_directives(&fragment.directives, true, f)?;
+ format_selection_set(&fragment.selection_set, f)
+}
+
+fn format_directives(
+ directives: &DirectiveList,
+ sorted: bool,
+ f: &mut fmt::Formatter,
+) -> fmt::Result {
+ let mut sorted_directives = directives.clone();
+ if sorted {
+ sorted_directives.sort_by(|a, b| a.name.cmp(&b.name));
+ }
+
+ for directive in sorted_directives.iter() {
+ write!(f, "@{}", directive.name)?;
+
+ let mut sorted_args = directive.arguments.clone();
+ if !sorted_args.is_empty() {
+ sorted_args.sort_by(|a, b| a.name.cmp(&b.name));
+
+ f.write_str("(")?;
+
+ for (index, argument) in sorted_args.iter().enumerate() {
+ if index != 0 {
+ f.write_str(",")?;
+ }
+ f.write_str(&ApolloReportingSignatureFormatter::Argument(argument).to_string())?;
+ }
+
+ f.write_str(")")?;
+ }
+ }
+
+ Ok(())
+}
+
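+// For example (see `test_directives` below): directives on a fragment spread are
+// sorted, so `... Fragment1 @withArgs(arg1: "test") @noArgs` prints as
+// `...Fragment1@noArgs@withArgs(arg1:"")`, while directives on the operation itself
+// keep their original order: `@withArgs(arg1:"",arg2:"")@noArgs`.
+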
+fn format_value(value: &Value, f: &mut fmt::Formatter) -> fmt::Result {
+ match value {
+ Value::String(_) => f.write_str("\"\""),
+ Value::Float(_) | Value::Int(_) => f.write_str("0"),
+ Value::Object(_) => f.write_str("{}"),
+ Value::List(_) => f.write_str("[]"),
+ rest => f.write_str(&rest.to_string()),
+ }
+}
+
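+// For example (see `test_inline_values` below): a populated input object like
+// `inputTypeQuery(input: { inputString: "foo", inputInt: 42, ... })` is reported as
+// `inputTypeQuery(input:{})`, scalar literals like `idInput: "a1"` and `intInput: 1`
+// become `idInput:""` and `intInput:0`, and enum values such as `SOME_VALUE_1` pass
+// through unchanged.
+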
+#[cfg(test)]
+mod tests;
diff --git a/apollo-router/src/apollo_studio_interop/testdata/schema_interop.graphql b/apollo-router/src/apollo_studio_interop/testdata/schema_interop.graphql
new file mode 100644
index 0000000000..e41e500782
--- /dev/null
+++ b/apollo-router/src/apollo_studio_interop/testdata/schema_interop.graphql
@@ -0,0 +1,230 @@
+schema
+ @link(url: "https://specs.apollo.dev/link/v1.0")
+ @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION)
+{
+ query: Query
+ mutation: Mutation
+ subscription: Subscription
+}
+
+directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE
+
+directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION
+
+directive @join__graph(name: String!, url: String!) on ENUM_VALUE
+
+directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE
+
+directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR
+
+directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION
+
+directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA
+
+directive @noArgs on FIELD | FRAGMENT_DEFINITION | FRAGMENT_SPREAD | INLINE_FRAGMENT | MUTATION | QUERY | SUBSCRIPTION
+
+directive @withArgs(arg1: String = "Default", arg2: String, arg3: Boolean, arg4: Int, arg5: [ID]) on FIELD | FRAGMENT_DEFINITION | FRAGMENT_SPREAD | INLINE_FRAGMENT | MUTATION | QUERY | SUBSCRIPTION
+
+interface AnInterface
+ @join__type(graph: MAIN)
+{
+ sharedField: String!
+}
+
+input AnotherInputType
+ @join__type(graph: MAIN)
+{
+ anotherInput: ID!
+}
+
+type BasicResponse
+ @join__type(graph: MAIN)
+{
+ id: Int!
+ nullableId: Int
+}
+
+type BasicTypesResponse
+ @join__type(graph: MAIN)
+{
+ nullableId: ID
+ nonNullId: ID!
+ nullableInt: Int
+ nonNullInt: Int!
+ nullableString: String
+ nonNullString: String!
+ nullableFloat: Float
+ nonNullFloat: Float!
+ nullableBoolean: Boolean
+ nonNullBoolean: Boolean!
+}
+
+input EnumInputType
+ @join__type(graph: MAIN)
+{
+ enumInput: SomeEnum!
+ enumListInput: [SomeEnum!]!
+ nestedEnumType: [NestedEnumInputType]
+}
+
+type EverythingResponse
+ @join__type(graph: MAIN)
+{
+ id: Int!
+ nullableId: Int
+ basicTypes: BasicTypesResponse
+ enumResponse: SomeEnum
+ interfaceResponse: AnInterface
+ interfaceImplementationResponse: InterfaceImplementation2
+ unionResponse: UnionType
+ unionType2Response: UnionType2
+ listOfBools: [Boolean!]!
+ listOfInterfaces: [AnInterface]
+ listOfUnions: [UnionType]
+ objectTypeWithInputField(boolInput: Boolean, secondInput: Boolean!): ObjectTypeResponse
+ listOfObjects: [ObjectTypeResponse]
+}
+
+input InputType
+ @join__type(graph: MAIN)
+{
+ inputString: String!
+ inputInt: Int!
+ inputBoolean: Boolean
+ nestedType: NestedInputType!
+ enumInput: SomeEnum
+ listInput: [Int!]!
+ nestedTypeList: [NestedInputType]
+}
+
+input InputTypeWithDefault
+ @join__type(graph: MAIN)
+{
+ nonNullId: ID!
+ nonNullIdWithDefault: ID! = "id"
+ nullableId: ID
+ nullableIdWithDefault: ID = "id"
+}
+
+type InterfaceImplementation1 implements AnInterface
+ @join__implements(graph: MAIN, interface: "AnInterface")
+ @join__type(graph: MAIN)
+{
+ sharedField: String!
+ implementation1Field: Int!
+}
+
+type InterfaceImplementation2 implements AnInterface
+ @join__implements(graph: MAIN, interface: "AnInterface")
+ @join__type(graph: MAIN)
+{
+ sharedField: String!
+ implementation2Field: Float!
+}
+
+scalar join__FieldSet
+
+enum join__Graph {
+ MAIN @join__graph(name: "main", url: "http://localhost:4001/graphql")
+}
+
+scalar link__Import
+
+enum link__Purpose {
+ """
+ `SECURITY` features provide metadata necessary to securely resolve fields.
+ """
+ SECURITY
+
+ """
+ `EXECUTION` features provide metadata necessary for operation execution.
+ """
+ EXECUTION
+}
+
+type Mutation
+ @join__type(graph: MAIN)
+{
+ noInputMutation: EverythingResponse!
+}
+
+input NestedEnumInputType
+ @join__type(graph: MAIN)
+{
+ someEnum: SomeEnum
+}
+
+input NestedInputType
+ @join__type(graph: MAIN)
+{
+ someFloat: Float!
+ someNullableFloat: Float
+}
+
+type ObjectTypeResponse
+ @join__type(graph: MAIN)
+{
+ stringField: String!
+ intField: Int!
+ nullableField: String
+}
+
+type Query
+ @join__type(graph: MAIN)
+{
+ inputTypeQuery(input: InputType!): EverythingResponse!
+ scalarInputQuery(listInput: [String!]!, stringInput: String!, nullableStringInput: String, intInput: Int!, floatInput: Float!, boolInput: Boolean!, enumInput: SomeEnum, idInput: ID!): EverythingResponse!
+ noInputQuery: EverythingResponse!
+ basicInputTypeQuery(input: NestedInputType!): EverythingResponse!
+ anotherInputTypeQuery(input: AnotherInputType): EverythingResponse!
+ enumInputQuery(enumInput: SomeEnum, inputType: EnumInputType, stringInput: String, anotherStr: String): EverythingResponse!
+ basicResponseQuery: BasicResponse!
+ scalarResponseQuery: String
+ defaultArgQuery(stringInput: String! = "default", inputType: AnotherInputType = {anotherInput: "inputDefault"}): BasicResponse!
+ inputTypeDefaultQuery(input: InputTypeWithDefault): BasicResponse!
+ sortQuery(listInput: [String!]!, stringInput: String!, nullableStringInput: String, INTInput: Int!, floatInput: Float!, boolInput: Boolean!, enumInput: SomeEnum, idInput: ID!): SortResponse!
+}
+
+enum SomeEnum
+ @join__type(graph: MAIN)
+{
+ SOME_VALUE_1 @join__enumValue(graph: MAIN)
+ SOME_VALUE_2 @join__enumValue(graph: MAIN)
+ SOME_VALUE_3 @join__enumValue(graph: MAIN)
+}
+
+type SortResponse
+ @join__type(graph: MAIN)
+{
+ id: Int!
+ nullableId: Int
+ zzz: Int
+ aaa: Int
+ CCC: Int
+}
+
+type Subscription
+ @join__type(graph: MAIN)
+{
+ noInputSubscription: EverythingResponse!
+}
+
+union UnionType
+ @join__type(graph: MAIN)
+ @join__unionMember(graph: MAIN, member: "UnionType1")
+ @join__unionMember(graph: MAIN, member: "UnionType2")
+ = UnionType1 | UnionType2
+
+type UnionType1
+ @join__type(graph: MAIN)
+{
+ unionType1Field: String!
+ nullableString: String
+}
+
+type UnionType2
+ @join__type(graph: MAIN)
+{
+ unionType2Field: String!
+ nullableString: String
+}
\ No newline at end of file
diff --git a/apollo-router/src/apollo_studio_interop/tests.rs b/apollo-router/src/apollo_studio_interop/tests.rs
new file mode 100644
index 0000000000..ac2dd78215
--- /dev/null
+++ b/apollo-router/src/apollo_studio_interop/tests.rs
@@ -0,0 +1,1445 @@
+use apollo_compiler::Schema;
+use router_bridge::planner::PlanOptions;
+use router_bridge::planner::Planner;
+use router_bridge::planner::QueryPlannerConfig;
+use test_log::test;
+
+use super::*;
+
+// Generate the signature and referenced fields using router-bridge to confirm that the expected values we use are correct.
+// We can remove this when we no longer use the bridge, but we should keep the Rust implementation verifications.
+async fn assert_bridge_results(
+ schema_str: &str,
+ query_str: &str,
+ expected_sig: &str,
+ expected_refs: &HashMap<String, ReferencedFieldsForType>,
+) {
+ let planner =
+ Planner::<serde_json::Value>::new(schema_str.to_string(), QueryPlannerConfig::default())
+ .await
+ .unwrap();
+ let plan = planner
+ .plan(query_str.to_string(), None, PlanOptions::default())
+ .await
+ .unwrap();
+ let bridge_result = ComparableUsageReporting {
+ result: plan.usage_reporting,
+ };
+ let expected_result = UsageReporting {
+ stats_report_key: expected_sig.to_string(),
+ referenced_fields_by_type: expected_refs.clone(),
+ };
+ assert!(matches!(
+ bridge_result.compare(&expected_result),
+ UsageReportingComparisonResult::Equal
+ ));
+}
+
+fn assert_expected_results(
+ actual: &ComparableUsageReporting,
+ expected_sig: &str,
+ expected_refs: &HashMap<String, ReferencedFieldsForType>,
+) {
+ let expected_result = UsageReporting {
+ stats_report_key: expected_sig.to_string(),
+ referenced_fields_by_type: expected_refs.clone(),
+ };
+ assert!(matches!(
+ actual.compare(&expected_result),
+ UsageReportingComparisonResult::Equal
+ ));
+}
+
+#[test(tokio::test)]
+async fn test_complex_query() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"query UnusedQuery {
+ noInputQuery {
+ enumResponse
+ }
+ }
+
+ fragment UnusedFragment on EverythingResponse {
+ enumResponse
+ }
+
+ fragment Fragment2 on EverythingResponse {
+ basicTypes {
+ nullableFloat
+ }
+ }
+
+ query TransformedQuery {
+
+
+ scalarInputQuery(idInput: "a1", listInput: [], boolInput: true, intInput: 1, stringInput: "x", floatInput: 1.2) @skip(if: false) @include(if: true) {
+ ...Fragment2,
+
+
+ objectTypeWithInputField(boolInput: true, secondInput: false) {
+ stringField
+ __typename
+ intField
+ }
+
+ enumResponse
+ interfaceResponse {
+ sharedField
+ ... on InterfaceImplementation2 {
+ implementation2Field
+ }
+ ... on InterfaceImplementation1 {
+ implementation1Field
+ }
+ }
+ ...Fragment1,
+ }
+ }
+
+ fragment Fragment1 on EverythingResponse {
+ basicTypes {
+ nonNullFloat
+ }
+ }"#;
+
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(&doc, &doc, &Some("TransformedQuery".into()), &schema);
+
+ let expected_sig = "# TransformedQuery\nfragment Fragment1 on EverythingResponse{basicTypes{nonNullFloat}}fragment Fragment2 on EverythingResponse{basicTypes{nullableFloat}}query TransformedQuery{scalarInputQuery(boolInput:true floatInput:0 idInput:\"\"intInput:0 listInput:[]stringInput:\"\")@skip(if:false)@include(if:true){enumResponse interfaceResponse{sharedField...on InterfaceImplementation2{implementation2Field}...on InterfaceImplementation1{implementation1Field}}objectTypeWithInputField(boolInput:true,secondInput:false){__typename intField stringField}...Fragment1...Fragment2}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["scalarInputQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "BasicTypesResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["nullableFloat".into(), "nonNullFloat".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec![
+ "basicTypes".into(),
+ "objectTypeWithInputField".into(),
+ "enumResponse".into(),
+ "interfaceResponse".into(),
+ ],
+ is_interface: false,
+ },
+ ),
+ (
+ "AnInterface".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["sharedField".into()],
+ is_interface: true,
+ },
+ ),
+ (
+ "ObjectTypeResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["stringField".into(), "__typename".into(), "intField".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "InterfaceImplementation1".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["implementation1Field".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "InterfaceImplementation2".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["implementation2Field".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+
+ // the router-bridge planner will throw errors on unused fragments/queries so we remove them here
+ let sanitised_query_str = r#"fragment Fragment2 on EverythingResponse {
+ basicTypes {
+ nullableFloat
+ }
+ }
+
+ query TransformedQuery {
+
+
+ scalarInputQuery(idInput: "a1", listInput: [], boolInput: true, intInput: 1, stringInput: "x", floatInput: 1.2) @skip(if: false) @include(if: true) {
+ ...Fragment2,
+
+
+ objectTypeWithInputField(boolInput: true, secondInput: false) {
+ stringField
+ __typename
+ intField
+ }
+
+ enumResponse
+ interfaceResponse {
+ sharedField
+ ... on InterfaceImplementation2 {
+ implementation2Field
+ }
+ ... on InterfaceImplementation1 {
+ implementation1Field
+ }
+ }
+ ...Fragment1,
+ }
+ }
+
+ fragment Fragment1 on EverythingResponse {
+ basicTypes {
+ nonNullFloat
+ }
+ }"#;
+
+ assert_bridge_results(
+ schema_str,
+ sanitised_query_str,
+ expected_sig,
+ &expected_refs,
+ )
+ .await;
+}
+
+#[test(tokio::test)]
+async fn test_complex_references() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"query Query($secondInput: Boolean!) {
+ scalarResponseQuery
+ noInputQuery {
+ basicTypes {
+ nonNullId
+ nonNullInt
+ }
+ enumResponse
+ interfaceImplementationResponse {
+ sharedField
+ implementation2Field
+ }
+ interfaceResponse {
+ ... on InterfaceImplementation1 {
+ implementation1Field
+ sharedField
+ }
+ ... on InterfaceImplementation2 {
+ implementation2Field
+ sharedField
+ }
+ }
+ listOfUnions {
+ ... on UnionType1 {
+ nullableString
+ }
+ }
+ objectTypeWithInputField(secondInput: $secondInput) {
+ intField
+ }
+ }
+ basicInputTypeQuery(input: { someFloat: 1 }) {
+ unionResponse {
+ ... on UnionType1 {
+ nullableString
+ }
+ }
+ unionType2Response {
+ unionType2Field
+ }
+ listOfObjects {
+ stringField
+ }
+ }
+ }"#;
+
+ let schema: Valid<Schema> = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(&doc, &doc, &Some("Query".into()), &schema);
+
+ let expected_sig = "# Query\nquery Query($secondInput:Boolean!){basicInputTypeQuery(input:{}){listOfObjects{stringField}unionResponse{...on UnionType1{nullableString}}unionType2Response{unionType2Field}}noInputQuery{basicTypes{nonNullId nonNullInt}enumResponse interfaceImplementationResponse{implementation2Field sharedField}interfaceResponse{...on InterfaceImplementation1{implementation1Field sharedField}...on InterfaceImplementation2{implementation2Field sharedField}}listOfUnions{...on UnionType1{nullableString}}objectTypeWithInputField(secondInput:$secondInput){intField}}scalarResponseQuery}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec![
+ "scalarResponseQuery".into(),
+ "noInputQuery".into(),
+ "basicInputTypeQuery".into(),
+ ],
+ is_interface: false,
+ },
+ ),
+ (
+ "BasicTypesResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["nonNullId".into(), "nonNullInt".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "ObjectTypeResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["intField".into(), "stringField".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "UnionType2".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["unionType2Field".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec![
+ "basicTypes".into(),
+ "enumResponse".into(),
+ "interfaceImplementationResponse".into(),
+ "interfaceResponse".into(),
+ "listOfUnions".into(),
+ "objectTypeWithInputField".into(),
+ "unionResponse".into(),
+ "unionType2Response".into(),
+ "listOfObjects".into(),
+ ],
+ is_interface: false,
+ },
+ ),
+ (
+ "InterfaceImplementation1".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["implementation1Field".into(), "sharedField".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "UnionType1".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["nullableString".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "InterfaceImplementation2".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["sharedField".into(), "implementation2Field".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_basic_whitespace() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"query MyQuery {
+ noInputQuery {
+ id
+ }
+ }"#;
+
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(&doc, &doc, &Some("MyQuery".into()), &schema);
+
+ let expected_sig = "# MyQuery\nquery MyQuery{noInputQuery{id}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["noInputQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["id".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_anonymous_query() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"query {
+ noInputQuery {
+ id
+ }
+ }"#;
+
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(&doc, &doc, &None, &schema);
+
+ let expected_sig = "# -\n{noInputQuery{id}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["noInputQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["id".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_anonymous_mutation() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"mutation {
+ noInputMutation {
+ id
+ }
+ }"#;
+
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(&doc, &doc, &None, &schema);
+
+ let expected_sig = "# -\nmutation{noInputMutation{id}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "Mutation".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["noInputMutation".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["id".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_anonymous_subscription() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str: &str = r#"subscription {
+ noInputSubscription {
+ id
+ }
+ }"#;
+
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(&doc, &doc, &None, &schema);
+
+ let expected_sig = "# -\nsubscription{noInputSubscription{id}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "Subscription".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["noInputSubscription".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["id".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_ordered_fields_and_variables() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"query VariableScalarInputQuery($idInput: ID!, $boolInput: Boolean!, $floatInput: Float!, $intInput: Int!, $listInput: [String!]!, $stringInput: String!, $nullableStringInput: String) {
+ sortQuery(
+ idInput: $idInput
+ boolInput: $boolInput
+ floatInput: $floatInput
+ INTInput: $intInput
+ listInput: $listInput
+ stringInput: $stringInput
+ nullableStringInput: $nullableStringInput
+ ) {
+ zzz
+ CCC
+ nullableId
+ aaa
+ id
+ }
+ }"#;
+
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(
+ &doc,
+ &doc,
+ &Some("VariableScalarInputQuery".into()),
+ &schema,
+ );
+
+ let expected_sig = "# VariableScalarInputQuery\nquery VariableScalarInputQuery($boolInput:Boolean!,$floatInput:Float!,$idInput:ID!,$intInput:Int!,$listInput:[String!]!,$nullableStringInput:String,$stringInput:String!){sortQuery(INTInput:$intInput boolInput:$boolInput floatInput:$floatInput idInput:$idInput listInput:$listInput nullableStringInput:$nullableStringInput stringInput:$stringInput){CCC aaa id nullableId zzz}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["sortQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "SortResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec![
+ "aaa".into(),
+ "CCC".into(),
+ "id".into(),
+ "nullableId".into(),
+ "zzz".into(),
+ ],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_fragments() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"query FragmentQuery {
+ noInputQuery {
+ listOfBools
+ interfaceResponse {
+ sharedField
+ ... on InterfaceImplementation2 {
+ implementation2Field
+ }
+ ...bbbInterfaceFragment
+ ...aaaInterfaceFragment
+ ... {
+ ... on InterfaceImplementation1 {
+ implementation1Field
+ }
+ }
+ ... on InterfaceImplementation1 {
+ implementation1Field
+ }
+ }
+ unionResponse {
+ ... on UnionType2 {
+ unionType2Field
+ }
+ ... on UnionType1 {
+ unionType1Field
+ }
+ }
+ ...zzzFragment
+ ...aaaFragment
+ ...ZZZFragment
+ }
+ }
+
+ fragment zzzFragment on EverythingResponse {
+ listOfInterfaces {
+ sharedField
+ }
+ }
+
+ fragment ZZZFragment on EverythingResponse {
+ listOfInterfaces {
+ sharedField
+ }
+ }
+
+ fragment aaaFragment on EverythingResponse {
+ listOfInterfaces {
+ sharedField
+ }
+ }
+
+ fragment UnusedFragment on InterfaceImplementation2 {
+ sharedField
+ implementation2Field
+ }
+
+ fragment bbbInterfaceFragment on InterfaceImplementation2 {
+ sharedField
+ implementation2Field
+ }
+
+ fragment aaaInterfaceFragment on InterfaceImplementation1 {
+ sharedField
+ }"#;
+
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(&doc, &doc, &Some("FragmentQuery".into()), &schema);
+
+ let expected_sig = "# FragmentQuery\nfragment ZZZFragment on EverythingResponse{listOfInterfaces{sharedField}}fragment aaaFragment on EverythingResponse{listOfInterfaces{sharedField}}fragment aaaInterfaceFragment on InterfaceImplementation1{sharedField}fragment bbbInterfaceFragment on InterfaceImplementation2{implementation2Field sharedField}fragment zzzFragment on EverythingResponse{listOfInterfaces{sharedField}}query FragmentQuery{noInputQuery{interfaceResponse{sharedField...aaaInterfaceFragment...bbbInterfaceFragment...on InterfaceImplementation2{implementation2Field}...{...on InterfaceImplementation1{implementation1Field}}...on InterfaceImplementation1{implementation1Field}}listOfBools unionResponse{...on UnionType2{unionType2Field}...on UnionType1{unionType1Field}}...ZZZFragment...aaaFragment...zzzFragment}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "UnionType1".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["unionType1Field".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "UnionType2".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["unionType2Field".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["noInputQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec![
+ "listOfInterfaces".into(),
+ "listOfBools".into(),
+ "interfaceResponse".into(),
+ "unionResponse".into(),
+ ],
+ is_interface: false,
+ },
+ ),
+ (
+ "InterfaceImplementation1".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["sharedField".into(), "implementation1Field".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "AnInterface".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["sharedField".into()],
+ is_interface: true,
+ },
+ ),
+ (
+ "InterfaceImplementation2".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["sharedField".into(), "implementation2Field".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+
+ // the router-bridge planner will throw errors on unused fragments/queries so we remove them here
+ let sanitised_query_str = r#"query FragmentQuery {
+ noInputQuery {
+ listOfBools
+ interfaceResponse {
+ sharedField
+ ... on InterfaceImplementation2 {
+ implementation2Field
+ }
+ ...bbbInterfaceFragment
+ ...aaaInterfaceFragment
+ ... {
+ ... on InterfaceImplementation1 {
+ implementation1Field
+ }
+ }
+ ... on InterfaceImplementation1 {
+ implementation1Field
+ }
+ }
+ unionResponse {
+ ... on UnionType2 {
+ unionType2Field
+ }
+ ... on UnionType1 {
+ unionType1Field
+ }
+ }
+ ...zzzFragment
+ ...aaaFragment
+ ...ZZZFragment
+ }
+ }
+
+ fragment zzzFragment on EverythingResponse {
+ listOfInterfaces {
+ sharedField
+ }
+ }
+
+ fragment ZZZFragment on EverythingResponse {
+ listOfInterfaces {
+ sharedField
+ }
+ }
+
+ fragment aaaFragment on EverythingResponse {
+ listOfInterfaces {
+ sharedField
+ }
+ }
+
+ fragment bbbInterfaceFragment on InterfaceImplementation2 {
+ sharedField
+ implementation2Field
+ }
+
+ fragment aaaInterfaceFragment on InterfaceImplementation1 {
+ sharedField
+ }"#;
+ assert_bridge_results(
+ schema_str,
+ sanitised_query_str,
+ expected_sig,
+ &expected_refs,
+ )
+ .await;
+}
+
+#[test(tokio::test)]
+async fn test_directives() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"fragment Fragment1 on InterfaceImplementation1 {
+ sharedField
+ implementation1Field
+ }
+
+ fragment Fragment2 on InterfaceImplementation2 @withArgs(arg2: "" arg1: "test" arg3: true arg5: [1,2] arg4: 2) @noArgs {
+ sharedField
+ implementation2Field
+ }
+
+ query DirectiveQuery @withArgs(arg2: "" arg1: "test") @noArgs {
+ noInputQuery {
+ enumResponse @withArgs(arg3: false arg5: [1,2] arg4: 2) @noArgs
+ unionResponse {
+ ... on UnionType1 @withArgs(arg2: "" arg1: "test") @noArgs {
+ unionType1Field
+ }
+ }
+ interfaceResponse {
+ ... Fragment1 @withArgs(arg1: "test") @noArgs
+ ... Fragment2
+ }
+ }
+ }"#;
+
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(&doc, &doc, &Some("DirectiveQuery".into()), &schema);
+
+ let expected_sig = "# DirectiveQuery\nfragment Fragment1 on InterfaceImplementation1{implementation1Field sharedField}fragment Fragment2 on InterfaceImplementation2@noArgs@withArgs(arg1:\"\",arg2:\"\",arg3:true,arg4:0,arg5:[]){implementation2Field sharedField}query DirectiveQuery@withArgs(arg1:\"\",arg2:\"\")@noArgs{noInputQuery{enumResponse@withArgs(arg3:false,arg4:0,arg5:[])@noArgs interfaceResponse{...Fragment1@noArgs@withArgs(arg1:\"\")...Fragment2}unionResponse{...on UnionType1@noArgs@withArgs(arg1:\"\",arg2:\"\"){unionType1Field}}}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "UnionType1".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["unionType1Field".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["noInputQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec![
+ "enumResponse".into(),
+ "interfaceResponse".into(),
+ "unionResponse".into(),
+ ],
+ is_interface: false,
+ },
+ ),
+ (
+ "InterfaceImplementation1".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["sharedField".into(), "implementation1Field".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "InterfaceImplementation2".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["sharedField".into(), "implementation2Field".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_aliases() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"query AliasQuery {
+ xxAlias: enumInputQuery(enumInput: SOME_VALUE_1) {
+ aliased: enumResponse
+ }
+ aaAlias: enumInputQuery(enumInput: SOME_VALUE_2) {
+ aliasedAgain: enumResponse
+ }
+ ZZAlias: enumInputQuery(enumInput: SOME_VALUE_3) {
+ enumResponse
+ }
+ }"#;
+
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(&doc, &doc, &Some("AliasQuery".into()), &schema);
+
+ let expected_sig = "# AliasQuery\nquery AliasQuery{enumInputQuery(enumInput:SOME_VALUE_1){enumResponse}enumInputQuery(enumInput:SOME_VALUE_2){enumResponse}enumInputQuery(enumInput:SOME_VALUE_3){enumResponse}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["enumResponse".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["enumInputQuery".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_inline_values() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"query InlineInputTypeQuery {
+ inputTypeQuery(input: {
+ inputString: "foo",
+ inputInt: 42,
+ inputBoolean: null,
+ nestedType: { someFloat: 4.2 },
+ enumInput: SOME_VALUE_1,
+ nestedTypeList: [ { someFloat: 4.2, someNullableFloat: null } ],
+ listInput: [1, 2, 3]
+ }) {
+ enumResponse
+ }
+ }"#;
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated =
+ generate_usage_reporting(&doc, &doc, &Some("InlineInputTypeQuery".into()), &schema);
+
+ let expected_sig = "# InlineInputTypeQuery\nquery InlineInputTypeQuery{inputTypeQuery(input:{}){enumResponse}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["enumResponse".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["inputTypeQuery".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_root_type_fragment() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"query SomeQuery {
+ ... on Query {
+ ... {
+ basicResponseQuery {
+ id
+ }
+ }
+ }
+ noInputQuery {
+ enumResponse
+ }
+ }"#;
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(&doc, &doc, &None, &schema);
+
+ let expected_sig = "# SomeQuery\nquery SomeQuery{noInputQuery{enumResponse}...on Query{...{basicResponseQuery{id}}}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "BasicResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["id".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["basicResponseQuery".into(), "noInputQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["enumResponse".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_directive_arg_spacing() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"query {
+ basicResponseQuery {
+ id @withArgs(arg1: "")
+ id
+ }
+ }"#;
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(&doc, &doc, &None, &schema);
+
+ let expected_sig = "# -\n{basicResponseQuery{id@withArgs(arg1:\"\")id}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "BasicResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["id".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["basicResponseQuery".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_operation_with_single_variable() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"query QueryWithVar($input_enum: SomeEnum) {
+ enumInputQuery(enumInput: $input_enum) {
+ listOfBools
+ }
+ }"#;
+
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(&doc, &doc, &Some("QueryWithVar".into()), &schema);
+
+ let expected_sig = "# QueryWithVar\nquery QueryWithVar($input_enum:SomeEnum){enumInputQuery(enumInput:$input_enum){listOfBools}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["enumInputQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["listOfBools".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_operation_with_multiple_variables() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"query QueryWithVars($stringInput: String!, $floatInput: Float!, $boolInput: Boolean!) {
+ scalarInputQuery(listInput: ["x"], stringInput: $stringInput, intInput: 6, floatInput: $floatInput, boolInput: $boolInput, idInput: "y") {
+ enumResponse
+ }
+ inputTypeQuery(input: { inputInt: 2, inputString: "z", listInput: [], nestedType: { someFloat: 5 }}) {
+ enumResponse
+ }
+ }"#;
+
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(&doc, &doc, &Some("QueryWithVars".into()), &schema);
+
+ let expected_sig = "# QueryWithVars\nquery QueryWithVars($boolInput:Boolean!,$floatInput:Float!,$stringInput:String!){inputTypeQuery(input:{}){enumResponse}scalarInputQuery(boolInput:$boolInput floatInput:$floatInput idInput:\"\"intInput:0 listInput:[]stringInput:$stringInput){enumResponse}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["scalarInputQuery".into(), "inputTypeQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["enumResponse".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_field_arg_comma_or_space() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"query QueryArgLength($StringInputWithAVeryyyLongNameSoLineLengthIs80: String!, $inputType: AnotherInputType, $enumInputWithAVryLongNameSoLineLengthIsOver80: SomeEnum, $enumInputType: EnumInputType) {
+ enumInputQuery (enumInput:$enumInputWithAVryLongNameSoLineLengthIsOver80,inputType:$enumInputType) {
+ enumResponse
+ }
+ defaultArgQuery(stringInput:$StringInputWithAVeryyyLongNameSoLineLengthIs80,inputType:$inputType) {
+ id
+ }
+ }"#;
+
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(&doc, &doc, &Some("QueryArgLength".into()), &schema);
+
+ // Both enumInputQuery and defaultArgQuery have arg lines over 80 characters here, so their arguments are separated by
+ // spaces (which are converted from newlines in the original implementation) rather than commas; comma separation for
+ // short arg lines is covered by test_comma_edge_case.
+ let expected_sig = "# QueryArgLength\nquery QueryArgLength($StringInputWithAVeryyyLongNameSoLineLengthIs80:String!,$enumInputType:EnumInputType,$enumInputWithAVryLongNameSoLineLengthIsOver80:SomeEnum,$inputType:AnotherInputType){defaultArgQuery(inputType:$inputType stringInput:$StringInputWithAVeryyyLongNameSoLineLengthIs80){id}enumInputQuery(enumInput:$enumInputWithAVryLongNameSoLineLengthIsOver80 inputType:$enumInputType){enumResponse}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["enumInputQuery".into(), "defaultArgQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["enumResponse".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "BasicResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["id".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_operation_arg_always_commas() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"query QueryArgLength($enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80: SomeEnum, $enumInputType: EnumInputType) {
+ enumInputQuery (enumInput:$enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80,inputType:$enumInputType) {
+ enumResponse
+ }
+ }"#;
+
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated = generate_usage_reporting(&doc, &doc, &Some("QueryArgLength".into()), &schema);
+
+ // operation variables shouldn't ever be converted to spaces, since the line length check only applies to field argument
+ // lines in the original implementation
+ let expected_sig = "# QueryArgLength\nquery QueryArgLength($enumInputType:EnumInputType,$enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80:SomeEnum){enumInputQuery(enumInput:$enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80 inputType:$enumInputType){enumResponse}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["enumInputQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["enumResponse".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_comma_edge_case() {
+ let schema_str = include_str!("testdata/schema_interop.graphql");
+
+ let query_str = r#"query QueryCommaEdgeCase {
+ enumInputQuery (anotherStr:"",enumInput:SOME_VALUE_1,stringInput:"") {
+ enumResponse
+ }
+ }"#;
+
+ let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+ let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+ let generated =
+ generate_usage_reporting(&doc, &doc, &Some("QueryCommaEdgeCase".into()), &schema);
+
+ let expected_sig = "# QueryCommaEdgeCase\nquery QueryCommaEdgeCase{enumInputQuery(anotherStr:\"\",enumInput:SOME_VALUE_1,stringInput:\"\"){enumResponse}}";
+ let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["enumInputQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "EverythingResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["enumResponse".into()],
+ is_interface: false,
+ },
+ ),
+ ]);
+
+ assert_expected_results(&generated, expected_sig, &expected_refs);
+ assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_compare() {
+ let source = ComparableUsageReporting {
+ result: UsageReporting {
+ stats_report_key: "# -\n{basicResponseQuery{field1 field2}}".into(),
+ referenced_fields_by_type: HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["basicResponseQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "SomeResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["field1".into(), "field2".into()],
+ is_interface: false,
+ },
+ ),
+ ]),
+ },
+ };
+
+ // Same signature and ref fields should match
+ assert!(matches!(
+ source.compare(&UsageReporting {
+ stats_report_key: source.result.stats_report_key.clone(),
+ referenced_fields_by_type: source.result.referenced_fields_by_type.clone(),
+ }),
+ UsageReportingComparisonResult::Equal
+ ));
+
+ // Reordered signature should not match
+ assert!(matches!(
+ source.compare(&UsageReporting {
+ stats_report_key: "# -\n{basicResponseQuery{field2 field1}}".into(),
+ referenced_fields_by_type: source.result.referenced_fields_by_type.clone(),
+ }),
+ UsageReportingComparisonResult::StatsReportKeyNotEqual
+ ));
+
+ // Different signature should not match
+ assert!(matches!(
+ source.compare(&UsageReporting {
+ stats_report_key: "# NamedQuery\nquery NamedQuery {basicResponseQuery{field1 field2}}"
+ .into(),
+ referenced_fields_by_type: source.result.referenced_fields_by_type.clone(),
+ }),
+ UsageReportingComparisonResult::StatsReportKeyNotEqual
+ ));
+
+ // Reordered parent type should match
+ assert!(matches!(
+ source.compare(&UsageReporting {
+ stats_report_key: source.result.stats_report_key.clone(),
+ referenced_fields_by_type: HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["basicResponseQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "SomeResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["field1".into(), "field2".into()],
+ is_interface: false,
+ },
+ ),
+ ])
+ }),
+ UsageReportingComparisonResult::Equal
+ ));
+
+ // Reordered fields should match
+ assert!(matches!(
+ source.compare(&UsageReporting {
+ stats_report_key: source.result.stats_report_key.clone(),
+ referenced_fields_by_type: HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["basicResponseQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "SomeResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["field2".into(), "field1".into()],
+ is_interface: false,
+ },
+ ),
+ ])
+ }),
+ UsageReportingComparisonResult::Equal
+ ));
+
+ // Added parent type should not match
+ assert!(matches!(
+ source.compare(&UsageReporting {
+ stats_report_key: source.result.stats_report_key.clone(),
+ referenced_fields_by_type: HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["basicResponseQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "SomeResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["field1".into(), "field2".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "OtherType".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["otherField".into()],
+ is_interface: false,
+ },
+ ),
+ ])
+ }),
+ UsageReportingComparisonResult::ReferencedFieldsNotEqual
+ ));
+
+ // Added field should not match
+ assert!(matches!(
+ source.compare(&UsageReporting {
+ stats_report_key: source.result.stats_report_key.clone(),
+ referenced_fields_by_type: HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["basicResponseQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "SomeResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["field1".into(), "field2".into(), "field3".into()],
+ is_interface: false,
+ },
+ ),
+ ])
+ }),
+ UsageReportingComparisonResult::ReferencedFieldsNotEqual
+ ));
+
+ // Missing parent type should not match
+ assert!(matches!(
+ source.compare(&UsageReporting {
+ stats_report_key: source.result.stats_report_key.clone(),
+ referenced_fields_by_type: HashMap::from([(
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["basicResponseQuery".into()],
+ is_interface: false,
+ },
+ ),])
+ }),
+ UsageReportingComparisonResult::ReferencedFieldsNotEqual
+ ));
+
+ // Missing field should not match
+ assert!(matches!(
+ source.compare(&UsageReporting {
+ stats_report_key: source.result.stats_report_key.clone(),
+ referenced_fields_by_type: HashMap::from([
+ (
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["basicResponseQuery".into()],
+ is_interface: false,
+ },
+ ),
+ (
+ "SomeResponse".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["field1".into()],
+ is_interface: false,
+ },
+ ),
+ ])
+ }),
+ UsageReportingComparisonResult::ReferencedFieldsNotEqual
+ ));
+
+ // Both different should not match
+ assert!(matches!(
+ source.compare(&UsageReporting {
+ stats_report_key: "# -\n{basicResponseQuery{field2 field1}}".into(),
+ referenced_fields_by_type: HashMap::from([(
+ "Query".into(),
+ ReferencedFieldsForType {
+ field_names: vec!["basicResponseQuery".into()],
+ is_interface: false,
+ },
+ ),])
+ }),
+ UsageReportingComparisonResult::BothNotEqual
+ ));
+}
diff --git a/apollo-router/src/axum_factory/tests.rs b/apollo-router/src/axum_factory/tests.rs
index 1ad362a2e2..e4140ce2dc 100644
--- a/apollo-router/src/axum_factory/tests.rs
+++ b/apollo-router/src/axum_factory/tests.rs
@@ -1,6 +1,7 @@
use std::collections::HashMap;
use std::io;
use std::net::SocketAddr;
+use std::num::NonZeroUsize;
use std::pin::Pin;
use std::str::FromStr;
use std::sync::atomic::AtomicU32;
@@ -64,10 +65,11 @@ use crate::http_server_factory::HttpServerFactory;
use crate::http_server_factory::HttpServerHandle;
use crate::json_ext::Path;
use crate::plugin::test::MockSubgraph;
-use crate::query_planner::BridgeQueryPlanner;
+use crate::query_planner::BridgeQueryPlannerPool;
use crate::router_factory::create_plugins;
use crate::router_factory::Endpoint;
use crate::router_factory::RouterFactory;
+use crate::services::execution;
use crate::services::layers::persisted_queries::PersistedQueryLayer;
use crate::services::layers::query_analysis::QueryAnalysisLayer;
use crate::services::layers::static_page::home_page_content;
@@ -459,7 +461,7 @@ async fn it_compress_response_body() -> Result<(), ApolloRouterError> {
let response = client
.post(url.as_str())
.header(ACCEPT_ENCODING, HeaderValue::from_static("gzip"))
- .body(json!({ "query": "query" }).to_string())
+ .body(json!({ "query": "query { me { name } }" }).to_string())
.send()
.await
.unwrap()
@@ -483,7 +485,7 @@ async fn it_compress_response_body() -> Result<(), ApolloRouterError> {
let response = client
.get(url.as_str())
.header(ACCEPT_ENCODING, HeaderValue::from_static("gzip"))
- .query(&json!({ "query": "query" }))
+ .query(&json!({ "query": "query { me { name } }" }))
.send()
.await
.unwrap()
@@ -514,7 +516,7 @@ async fn it_compress_response_body() -> Result<(), ApolloRouterError> {
#[tokio::test]
async fn it_decompress_request_body() -> Result<(), ApolloRouterError> {
- let original_body = json!({ "query": "query" });
+ let original_body = json!({ "query": "query { me { name } }" });
let mut encoder = GzipEncoder::new(Vec::new());
encoder
.write_all(original_body.to_string().as_bytes())
@@ -528,7 +530,10 @@ async fn it_decompress_request_body() -> Result<(), ApolloRouterError> {
let example_response = expected_response.clone();
let router_service = router::service::from_supergraph_mock_callback(move |req| {
let example_response = example_response.clone();
- assert_eq!(req.supergraph_request.into_body().query.unwrap(), "query");
+ assert_eq!(
+ req.supergraph_request.into_body().query.unwrap(),
+ "query { me { name } }"
+ );
Ok(SupergraphResponse::new_from_graphql_response(
example_response,
req.context,
@@ -614,7 +619,7 @@ async fn response() -> Result<(), ApolloRouterError> {
// Post query
let response = client
.post(url.as_str())
- .body(json!({ "query": "query" }).to_string())
+ .body(json!({ "query": "query { me { name } }" }).to_string())
.send()
.await
.unwrap()
@@ -629,7 +634,7 @@ async fn response() -> Result<(), ApolloRouterError> {
// Get query
let response = client
.get(url.as_str())
- .query(&json!({ "query": "query" }))
+ .query(&json!({ "query": "query { me { name } }" }))
.send()
.await
.unwrap()
@@ -658,7 +663,7 @@ async fn bad_response() -> Result<(), ApolloRouterError> {
// Post query
let err = client
.post(url.as_str())
- .body(json!({ "query": "query" }).to_string())
+ .body(json!({ "query": "query { me { name } }" }).to_string())
.send()
.await
.unwrap()
@@ -671,7 +676,7 @@ async fn bad_response() -> Result<(), ApolloRouterError> {
// Get query
let err = client
.get(url.as_str())
- .query(&json!({ "query": "query" }))
+ .query(&json!({ "query": "query { me { name } }" }))
.send()
.await
.unwrap()
@@ -719,7 +724,7 @@ async fn response_with_root_wildcard() -> Result<(), ApolloRouterError> {
// Post query
let response = client
.post(url.as_str())
- .body(json!({ "query": "query" }).to_string())
+ .body(json!({ "query": "query { me { name } }" }).to_string())
.send()
.await
.unwrap()
@@ -740,7 +745,7 @@ async fn response_with_root_wildcard() -> Result<(), ApolloRouterError> {
.unwrap()
.to_string(),
)
- .body(json!({ "query": "query" }).to_string())
+ .body(json!({ "query": "query { me { name } }" }).to_string())
.send()
.await
.unwrap()
@@ -755,7 +760,7 @@ async fn response_with_root_wildcard() -> Result<(), ApolloRouterError> {
// Get query
let response = client
.get(url.as_str())
- .query(&json!({ "query": "query" }))
+ .query(&json!({ "query": "query { me { name } }" }))
.send()
.await
.unwrap()
@@ -805,7 +810,7 @@ async fn response_with_custom_endpoint() -> Result<(), ApolloRouterError> {
// Post query
let response = client
.post(url.as_str())
- .body(json!({ "query": "query" }).to_string())
+ .body(json!({ "query": "query { me { name } }" }).to_string())
.send()
.await
.unwrap()
@@ -820,7 +825,7 @@ async fn response_with_custom_endpoint() -> Result<(), ApolloRouterError> {
// Get query
let response = client
.get(url.as_str())
- .query(&json!({ "query": "query" }))
+ .query(&json!({ "query": "query { me { name } }" }))
.send()
.await
.unwrap()
@@ -869,7 +874,7 @@ async fn response_with_custom_prefix_endpoint() -> Result<(), ApolloRouterError>
// Post query
let response = client
.post(url.as_str())
- .body(json!({ "query": "query" }).to_string())
+ .body(json!({ "query": "query { me { name } }" }).to_string())
.send()
.await
.unwrap()
@@ -884,7 +889,7 @@ async fn response_with_custom_prefix_endpoint() -> Result<(), ApolloRouterError>
// Get query
let response = client
.get(url.as_str())
- .query(&json!({ "query": "query" }))
+ .query(&json!({ "query": "query { me { name } }" }))
.send()
.await
.unwrap()
@@ -939,7 +944,7 @@ async fn response_with_custom_endpoint_wildcard() -> Result<(), ApolloRouterErro
// Post query
let response = client
.post(url.as_str())
- .body(json!({ "query": "query" }).to_string())
+ .body(json!({ "query": "query { me { name } }" }).to_string())
.send()
.await
.unwrap()
@@ -954,7 +959,7 @@ async fn response_with_custom_endpoint_wildcard() -> Result<(), ApolloRouterErro
// Get query
let response = client
.get(url.as_str())
- .query(&json!({ "query": "query" }))
+ .query(&json!({ "query": "query { me { name } }" }))
.send()
.await
.unwrap()
@@ -997,7 +1002,7 @@ async fn response_failure() -> Result<(), ApolloRouterError> {
.body(
json!(
{
- "query": "query",
+ "query": "query { me { name } }",
})
.to_string(),
)
@@ -1602,7 +1607,7 @@ async fn response_shape() -> Result<(), ApolloRouterError> {
let (server, client) = init(router_service).await;
let query = json!(
{
- "query": "query { test }",
+ "query": "query { me { name } }",
});
let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap());
let response = client
@@ -1638,16 +1643,16 @@ async fn deferred_response_shape() -> Result<(), ApolloRouterError> {
let body = stream::iter(vec![
graphql::Response::builder()
.data(json!({
- "test": "hello",
+ "me": "id",
}))
.has_next(true)
.build(),
graphql::Response::builder()
.incremental(vec![graphql::IncrementalResponse::builder()
.data(json!({
- "other": "world"
+ "name": "Ada"
}))
- .path(Path::default())
+ .path(Path::from("me"))
.build()])
.has_next(true)
.build(),
@@ -1663,7 +1668,7 @@ async fn deferred_response_shape() -> Result<(), ApolloRouterError> {
let (server, client) = init(router_service).await;
let query = json!(
{
- "query": "query { test ... @defer { other } }",
+ "query": "query { me { id ... @defer { name } } }",
});
let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap());
let mut response = client
@@ -1683,13 +1688,13 @@ async fn deferred_response_shape() -> Result<(), ApolloRouterError> {
let first = response.chunk().await.unwrap().unwrap();
assert_eq!(
std::str::from_utf8(&first).unwrap(),
- "\r\n--graphql\r\ncontent-type: application/json\r\n\r\n{\"data\":{\"test\":\"hello\"},\"hasNext\":true}\r\n--graphql"
+ "\r\n--graphql\r\ncontent-type: application/json\r\n\r\n{\"data\":{\"me\":\"id\"},\"hasNext\":true}\r\n--graphql"
);
let second = response.chunk().await.unwrap().unwrap();
assert_eq!(
std::str::from_utf8(&second).unwrap(),
- "\r\ncontent-type: application/json\r\n\r\n{\"hasNext\":true,\"incremental\":[{\"data\":{\"other\":\"world\"},\"path\":[]}]}\r\n--graphql"
+ "\r\ncontent-type: application/json\r\n\r\n{\"hasNext\":true,\"incremental\":[{\"data\":{\"name\":\"Ada\"},\"path\":[\"me\"]}]}\r\n--graphql"
);
let third = response.chunk().await.unwrap().unwrap();
@@ -1706,7 +1711,7 @@ async fn multipart_response_shape_with_one_chunk() -> Result<(), ApolloRouterErr
let router_service = router::service::from_supergraph_mock_callback(move |req| {
let body = stream::iter(vec![graphql::Response::builder()
.data(json!({
- "test": "hello",
+ "me": "name",
}))
.has_next(false)
.build()])
@@ -1721,7 +1726,7 @@ async fn multipart_response_shape_with_one_chunk() -> Result<(), ApolloRouterErr
let (server, client) = init(router_service).await;
let query = json!(
{
- "query": "query { test }",
+ "query": "query { me { name } }",
});
let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap());
let mut response = client
@@ -1741,7 +1746,7 @@ async fn multipart_response_shape_with_one_chunk() -> Result<(), ApolloRouterErr
let first = response.chunk().await.unwrap().unwrap();
assert_eq!(
std::str::from_utf8(&first).unwrap(),
- "\r\n--graphql\r\ncontent-type: application/json\r\n\r\n{\"data\":{\"test\":\"hello\"},\"hasNext\":false}\r\n--graphql--\r\n"
+ "\r\n--graphql\r\ncontent-type: application/json\r\n\r\n{\"data\":{\"me\":\"name\"},\"hasNext\":false}\r\n--graphql--\r\n"
);
server.shutdown().await
@@ -2066,7 +2071,7 @@ async fn listening_to_unix_socket() {
let output = send_to_unix_socket(
server.graphql_listen_address().as_ref().unwrap(),
Method::POST,
- r#"{"query":"query"}"#,
+ r#"{"query":"query { me { name } }"}"#,
)
.await;
@@ -2079,7 +2084,7 @@ async fn listening_to_unix_socket() {
let output = send_to_unix_socket(
server.graphql_listen_address().as_ref().unwrap(),
Method::GET,
- r#"query=query"#,
+ r#"query=query%7Bme%7Bname%7D%7D"#,
)
.await;
@@ -2291,6 +2296,7 @@ async fn test_supergraph_and_health_check_same_port_different_listener() {
async fn test_supergraph_timeout() {
let config = serde_json::json!({
"supergraph": {
+ "listen": "127.0.0.1:0",
"defer_support": false,
},
"traffic_shaping": {
@@ -2303,17 +2309,43 @@ async fn test_supergraph_timeout() {
let conf: Arc<Configuration> = Arc::new(serde_json::from_value(config).unwrap());
let schema = include_str!("../../testdata/minimal_supergraph.graphql");
- let planner = BridgeQueryPlanner::new(schema.to_string(), conf.clone())
- .await
- .unwrap();
+ let planner = BridgeQueryPlannerPool::new(
+ schema.to_string(),
+ conf.clone(),
+ NonZeroUsize::new(1).unwrap(),
+ )
+ .await
+ .unwrap();
let schema = planner.schema();
// we do the entire supergraph rebuilding instead of using `from_supergraph_mock_callback_and_configuration`
// because we need the plugins to apply on the supergraph
- let plugins = create_plugins(&conf, &schema, planner.subgraph_schemas(), None, None)
+ let mut plugins = create_plugins(&conf, &schema, planner.subgraph_schemas(), None, None)
.await
.unwrap();
+ plugins.insert("delay".into(), Box::new(Delay));
+
+ struct Delay;
+
+ #[async_trait::async_trait]
+ impl crate::plugin::Plugin for Delay {
+ type Config = ();
+
+ async fn new(_: crate::plugin::PluginInit<()>) -> Result<Self, BoxError> {
+ Ok(Self)
+ }
+
+ fn execution_service(&self, service: execution::BoxService) -> execution::BoxService {
+ service
+ .map_future(|fut| async {
+ tokio::time::sleep(Duration::from_millis(10)).await;
+ fut.await
+ })
+ .boxed()
+ }
+ }
+
let builder = PluggableSupergraphServiceBuilder::new(planner)
.with_configuration(conf.clone())
.with_subgraph_service("accounts", MockSubgraph::new(HashMap::new()));
@@ -2335,10 +2367,14 @@ async fn test_supergraph_timeout() {
.make();
// keep the server handle around otherwise it will immediately shutdown
- let (_server, client) = init_with_config(service, conf.clone(), MultiMap::new())
+ let (server, client) = init_with_config(service, conf.clone(), MultiMap::new())
.await
.unwrap();
- let url = "http://localhost:4000/";
+ let url = server
+ .graphql_listen_address()
+ .as_ref()
+ .unwrap()
+ .to_string();
let response = client
.post(url)
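
The timeout test above switches from a hard-coded `http://localhost:4000/` to `listen: 127.0.0.1:0` and then reads the assigned address back through `graphql_listen_address()`. A minimal standard-library sketch of the same pattern (illustrative only, not router code) shows why ephemeral ports avoid collisions between parallel test runs:

```rust
use std::net::TcpListener;

fn main() -> std::io::Result<()> {
    // Port 0 asks the OS to pick any free ephemeral port, so concurrent
    // tests never race each other for a fixed port like 4000.
    let listener = TcpListener::bind("127.0.0.1:0")?;

    // The chosen port is only known after binding, which is why the test
    // must query the server handle for its listen address afterwards.
    let addr = listener.local_addr()?;
    println!("bound to http://{addr}/");
    Ok(())
}
```
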
diff --git a/apollo-router/src/batching.rs b/apollo-router/src/batching.rs
new file mode 100644
index 0000000000..79a7e29f83
--- /dev/null
+++ b/apollo-router/src/batching.rs
@@ -0,0 +1,712 @@
+//! Various utility functions and core structures used to implement batching support within
+//! the router.
+
+use std::collections::HashMap;
+use std::collections::HashSet;
+use std::fmt;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering;
+use std::sync::Arc;
+
+use hyper::Body;
+use opentelemetry::trace::TraceContextExt;
+use opentelemetry::Context as otelContext;
+use parking_lot::Mutex as PMutex;
+use tokio::sync::mpsc;
+use tokio::sync::oneshot;
+use tokio::sync::Mutex;
+use tokio::task::JoinHandle;
+use tower::BoxError;
+use tracing::Instrument;
+use tracing::Span;
+use tracing_opentelemetry::OpenTelemetrySpanExt;
+
+use crate::error::FetchError;
+use crate::error::SubgraphBatchingError;
+use crate::graphql;
+use crate::query_planner::fetch::QueryHash;
+use crate::services::http::HttpClientServiceFactory;
+use crate::services::process_batches;
+use crate::services::SubgraphRequest;
+use crate::services::SubgraphResponse;
+use crate::Context;
+
+/// A query that is part of a batch.
+/// Note: It's ok to make transient clones of this struct, but *do not* store clones anywhere apart
+/// from the single copy in the extensions. The batching co-ordinator relies on the fact that all
+/// senders are dropped to know when to finish processing.
+#[derive(Clone, Debug)]
+pub(crate) struct BatchQuery {
+ /// The index of this query relative to the entire batch
+ index: usize,
+
+ /// A channel sender for sending updates to the entire batch
+ sender: Arc<Mutex<Option<mpsc::Sender<BatchHandlerMessage>>>>,
+
+ /// How many more progress updates are we expecting to send?
+ remaining: Arc<AtomicUsize>,
+
+ /// Batch to which this BatchQuery belongs
+ batch: Arc<Batch>,
+}
+
+impl fmt::Display for BatchQuery {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "index: {}, ", self.index)?;
+ write!(f, "remaining: {}, ", self.remaining.load(Ordering::Acquire))?;
+ write!(f, "sender: {:?}, ", self.sender)?;
+ write!(f, "batch: {:?}, ", self.batch)?;
+ Ok(())
+ }
+}
+
+impl BatchQuery {
+ /// Is this BatchQuery finished?
+ pub(crate) fn finished(&self) -> bool {
+ self.remaining.load(Ordering::Acquire) == 0
+ }
+
+ /// Inform the batch of query hashes representing fetches needed by this element of the batch query
+ pub(crate) async fn set_query_hashes(
+ &self,
+ query_hashes: Vec<Arc<QueryHash>>,
+ ) -> Result<(), BoxError> {
+ self.remaining.store(query_hashes.len(), Ordering::Release);
+
+ self.sender
+ .lock()
+ .await
+ .as_ref()
+ .ok_or(SubgraphBatchingError::SenderUnavailable)?
+ .send(BatchHandlerMessage::Begin {
+ index: self.index,
+ query_hashes,
+ })
+ .await?;
+ Ok(())
+ }
+
+ /// Signal to the batch handler that this specific batch query has made some progress.
+ ///
+ /// The returned channel can be awaited to receive the GraphQL response, when ready.
+ pub(crate) async fn signal_progress(
+ &self,
+ client_factory: HttpClientServiceFactory,
+ request: SubgraphRequest,
+ gql_request: graphql::Request,
+ ) -> Result<oneshot::Receiver<Result<SubgraphResponse, BoxError>>, BoxError> {
+ // Create a receiver for this query so that it can eventually get the request meant for it
+ let (tx, rx) = oneshot::channel();
+
+ tracing::debug!(
+ "index: {}, REMAINING: {}",
+ self.index,
+ self.remaining.load(Ordering::Acquire)
+ );
+ self.sender
+ .lock()
+ .await
+ .as_ref()
+ .ok_or(SubgraphBatchingError::SenderUnavailable)?
+ .send(BatchHandlerMessage::Progress {
+ index: self.index,
+ client_factory,
+ request,
+ gql_request,
+ response_sender: tx,
+ span_context: Span::current().context(),
+ })
+ .await?;
+
+ if !self.finished() {
+ self.remaining.fetch_sub(1, Ordering::AcqRel);
+ }
+
+ // May now be finished
+ if self.finished() {
+ let mut sender = self.sender.lock().await;
+ *sender = None;
+ }
+
+ Ok(rx)
+ }
+
+ /// Signal to the batch handler that this specific batch query is cancelled
+ pub(crate) async fn signal_cancelled(&self, reason: String) -> Result<(), BoxError> {
+ self.sender
+ .lock()
+ .await
+ .as_ref()
+ .ok_or(SubgraphBatchingError::SenderUnavailable)?
+ .send(BatchHandlerMessage::Cancel {
+ index: self.index,
+ reason,
+ })
+ .await?;
+
+ if !self.finished() {
+ self.remaining.fetch_sub(1, Ordering::AcqRel);
+ }
+
+ // May now be finished
+ if self.finished() {
+ let mut sender = self.sender.lock().await;
+ *sender = None;
+ }
+
+ Ok(())
+ }
+}
+
+// #[derive(Debug)]
+enum BatchHandlerMessage {
+ /// Cancel one of the batch items
+ Cancel { index: usize, reason: String },
+
+ /// A query has reached the subgraph service and we should update its state
+ Progress {
+ index: usize,
+ client_factory: HttpClientServiceFactory,
+ request: SubgraphRequest,
+ gql_request: graphql::Request,
+ response_sender: oneshot::Sender<Result<SubgraphResponse, BoxError>>,
+ span_context: otelContext,
+ },
+
+ /// A query has passed query planning and knows how many fetches are needed
+ /// to complete.
+ Begin {
+ index: usize,
+ query_hashes: Vec<Arc<QueryHash>>,
+ },
+}
+
+/// Collection of info needed to resolve a batch query
+pub(crate) struct BatchQueryInfo {
+ /// The owning subgraph request
+ request: SubgraphRequest,
+
+ /// The GraphQL request tied to this subgraph request
+ gql_request: graphql::Request,
+
+ /// Notifier for the subgraph service handler
+ ///
+ /// Note: This must be used or else the subgraph request will time out
+ sender: oneshot::Sender<Result<SubgraphResponse, BoxError>>,
+}
+
+// TODO: Do we want to generate a UUID for a batch for observability reasons?
+// TODO: Do we want to track the size of a batch?
+#[derive(Debug)]
+pub(crate) struct Batch {
+ /// A sender channel to communicate with the batching handler
+ senders: PMutex<Vec<Option<mpsc::Sender<BatchHandlerMessage>>>>,
+
+ /// The spawned batching handler task handle
+ ///
+ /// Note: We keep this as a failsafe. If the task doesn't terminate _before_ the batch is
+ /// dropped, then we will abort() the task on drop.
+ spawn_handle: JoinHandle<Result<(), BoxError>>,
+
+ /// What is the size (number of input operations) of the batch?
+ #[allow(dead_code)]
+ size: usize,
+}
+
+impl Batch {
+ /// Creates a new batch, spawning an async task for handling updates to the
+ /// batch lifecycle.
+ pub(crate) fn spawn_handler(size: usize) -> Self {
+ tracing::debug!("New batch created with size {size}");
+
+ // Create the message channel pair for sending update events to the spawned task
+ let (spawn_tx, mut rx) = mpsc::channel(size);
+
+ // Populate Senders
+ let mut senders = vec![];
+
+ for _ in 0..size {
+ senders.push(Some(spawn_tx.clone()));
+ }
+
+ let spawn_handle = tokio::spawn(async move {
+ /// Helper struct for keeping track of the state of each individual BatchQuery
+ ///
+ #[derive(Debug)]
+ struct BatchQueryState {
+ registered: HashSet<Arc<QueryHash>>,
+ committed: HashSet<Arc<QueryHash>>,
+ cancelled: HashSet<Arc<QueryHash>>,
+ }
+
+ impl BatchQueryState {
+ // We are ready when everything we registered is in either cancelled or
+ // committed.
+ fn is_ready(&self) -> bool {
+ self.registered.difference(&self.committed.union(&self.cancelled).cloned().collect()).collect::<Vec<_>>().is_empty()
+ }
+ }
+
+ // Progressively track the state of the various batch fetches that we expect to see. Keys are batch
+ // indices.
+ let mut batch_state: HashMap<usize, BatchQueryState> = HashMap::with_capacity(size);
+
+ // We also need to keep track of all requests we need to make and their send handles
+ let mut requests: Vec<Vec<BatchQueryInfo>> =
+ Vec::from_iter((0..size).map(|_| Vec::new()));
+
+ let mut master_client_factory = None;
+ tracing::debug!("Batch about to await messages...");
+ // Start handling messages from various portions of the request lifecycle
+ // When recv() returns None, we want to stop processing messages
+ while let Some(msg) = rx.recv().await {
+ match msg {
+ BatchHandlerMessage::Cancel { index, reason } => {
+ // Log the reason for cancelling, update the state
+ tracing::debug!("Cancelling index: {index}, {reason}");
+
+ if let Some(state) = batch_state.get_mut(&index) {
+ // Short-circuit any requests that are waiting for this cancelled request to complete.
+ let cancelled_requests = std::mem::take(&mut requests[index]);
+ for BatchQueryInfo {
+ request, sender, ..
+ } in cancelled_requests
+ {
+ let subgraph_name = request.subgraph_name.ok_or(SubgraphBatchingError::MissingSubgraphName)?;
+ if let Err(log_error) = sender.send(Err(Box::new(FetchError::SubrequestBatchingError {
+ service: subgraph_name.clone(),
+ reason: format!("request cancelled: {reason}"),
+ }))) {
+ tracing::error!(service=subgraph_name, error=?log_error, "failed to notify waiter that request is cancelled");
+ }
+ }
+
+ // Clear out everything that has committed, now that they are cancelled, and
+ // mark everything as having been cancelled.
+ state.committed.clear();
+ state.cancelled = state.registered.clone();
+ }
+ }
+
+ BatchHandlerMessage::Begin {
+ index,
+ query_hashes,
+ } => {
+ tracing::debug!("Beginning batch for index {index} with {query_hashes:?}");
+
+ batch_state.insert(
+ index,
+ BatchQueryState {
+ cancelled: HashSet::with_capacity(query_hashes.len()),
+ committed: HashSet::with_capacity(query_hashes.len()),
+ registered: HashSet::from_iter(query_hashes),
+ },
+ );
+ }
+
+ BatchHandlerMessage::Progress {
+ index,
+ client_factory,
+ request,
+ gql_request,
+ response_sender,
+ span_context,
+ } => {
+ // Progress the index
+
+ tracing::debug!("Progress index: {index}");
+
+ if let Some(state) = batch_state.get_mut(&index) {
+ state.committed.insert(request.query_hash.clone());
+ }
+
+ if master_client_factory.is_none() {
+ master_client_factory = Some(client_factory);
+ }
+ Span::current().add_link(span_context.span().span_context().clone());
+ requests[index].push(BatchQueryInfo {
+ request,
+ gql_request,
+ sender: response_sender,
+ })
+ }
+ }
+ }
+
+ // Make sure that we are actually ready and haven't forgotten to update something somewhere
+ if batch_state.values().any(|f| !f.is_ready()) {
+ tracing::error!("All senders for the batch have dropped before reaching the ready state: {batch_state:#?}");
+ // There's not much else we can do, so perform an early return
+ return Err(SubgraphBatchingError::ProcessingFailed("batch senders not ready when required".to_string()).into());
+ }
+
+ tracing::debug!("Assembling {size} requests into batches");
+
+ // We now have a bunch of requests which are organised by index and we would like to
+ // convert them into a bunch of requests organised by service...
+
+ let all_in_one: Vec<_> = requests.into_iter().flatten().collect();
+
+ // Now build up a Service oriented view to use in constructing our batches
+ let mut svc_map: HashMap<String, Vec<BatchQueryInfo>> = HashMap::new();
+ for BatchQueryInfo {
+ request: sg_request,
+ gql_request,
+ sender: tx,
+ } in all_in_one
+ {
+ let subgraph_name = sg_request.subgraph_name.clone().ok_or(SubgraphBatchingError::MissingSubgraphName)?;
+ let value = svc_map
+ .entry(
+ subgraph_name,
+ )
+ .or_default();
+ value.push(BatchQueryInfo {
+ request: sg_request,
+ gql_request,
+ sender: tx,
+ });
+ }
+
+ // If we don't have a master_client_factory, we can't do anything.
+ if let Some(client_factory) = master_client_factory {
+ process_batches(client_factory, svc_map).await?;
+ }
+ Ok(())
+ }.instrument(tracing::info_span!("batch_request", size)));
+
+ Self {
+ senders: PMutex::new(senders),
+ spawn_handle,
+ size,
+ }
+ }
+
+ /// Create a batch query for a specific index in this batch
+ ///
+ /// This function may fail if the index doesn't exist or has already been taken
+ pub(crate) fn query_for_index(
+ batch: Arc,
+ index: usize,
+ ) -> Result<BatchQuery, SubgraphBatchingError> {
+ let mut guard = batch.senders.lock();
+ // It's a serious error if we try to get a query at an index which doesn't exist or which has already been taken
+ if index >= guard.len() {
+ return Err(SubgraphBatchingError::ProcessingFailed(format!(
+ "tried to retrieve sender for index: {index} which does not exist"
+ )));
+ }
+ let opt_sender = std::mem::take(&mut guard[index]);
+ if opt_sender.is_none() {
+ return Err(SubgraphBatchingError::ProcessingFailed(format!(
+ "tried to retrieve sender for index: {index} which has already been taken"
+ )));
+ }
+ drop(guard);
+ Ok(BatchQuery {
+ index,
+ sender: Arc::new(Mutex::new(opt_sender)),
+ remaining: Arc::new(AtomicUsize::new(0)),
+ batch,
+ })
+ }
+}
+
+impl Drop for Batch {
+ fn drop(&mut self) {
+ // Failsafe: make sure that we kill the background task if the batch itself is dropped
+ self.spawn_handle.abort();
+ }
+}
+
+// Assemble a single batch request to a subgraph
+pub(crate) async fn assemble_batch(
+ requests: Vec<BatchQueryInfo>,
+) -> Result<
+ (
+ String,
+ Context,
+ http::Request<Body>,
+ Vec<oneshot::Sender<Result<SubgraphResponse, BoxError>>>,
+ ),
+ BoxError,
+> {
+ // Extract the collection of parts from the requests
+ let (txs, request_pairs): (Vec<_>, Vec<_>) = requests
+ .into_iter()
+ .map(|r| (r.sender, (r.request, r.gql_request)))
+ .unzip();
+ let (requests, gql_requests): (Vec<_>, Vec<_>) = request_pairs.into_iter().unzip();
+
+ // Construct the actual byte body of the batched request
+ let bytes = hyper::body::to_bytes(serde_json::to_string(&gql_requests)?).await?;
+
+ // Grab the common info from the first request
+ let context = requests
+ .first()
+ .ok_or(SubgraphBatchingError::RequestsIsEmpty)?
+ .context
+ .clone();
+ let first_request = requests
+ .into_iter()
+ .next()
+ .ok_or(SubgraphBatchingError::RequestsIsEmpty)?
+ .subgraph_request;
+ let operation_name = first_request
+ .body()
+ .operation_name
+ .clone()
+ .unwrap_or_default();
+ let (parts, _) = first_request.into_parts();
+
+ // Generate the final request and pass it up
+ let request = http::Request::from_parts(parts, Body::from(bytes));
+ Ok((operation_name, context, request, txs))
+}
+
+#[cfg(test)]
+mod tests {
+ use std::sync::Arc;
+ use std::time::Duration;
+
+ use hyper::body::to_bytes;
+ use tokio::sync::oneshot;
+
+ use super::assemble_batch;
+ use super::Batch;
+ use super::BatchQueryInfo;
+ use crate::graphql;
+ use crate::plugins::traffic_shaping::Http2Config;
+ use crate::query_planner::fetch::QueryHash;
+ use crate::services::http::HttpClientServiceFactory;
+ use crate::services::SubgraphRequest;
+ use crate::services::SubgraphResponse;
+ use crate::Configuration;
+ use crate::Context;
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn it_assembles_batch() {
+ // Assemble a list of requests for testing
+ let (receivers, requests): (Vec<_>, Vec<_>) = (0..2)
+ .map(|index| {
+ let (tx, rx) = oneshot::channel();
+ let gql_request = graphql::Request::fake_builder()
+ .operation_name(format!("batch_test_{index}"))
+ .query(format!("query batch_test {{ slot{index} }}"))
+ .build();
+
+ (
+ rx,
+ BatchQueryInfo {
+ request: SubgraphRequest::fake_builder()
+ .subgraph_request(
+ http::Request::builder().body(gql_request.clone()).unwrap(),
+ )
+ .subgraph_name(format!("slot{index}"))
+ .build(),
+ gql_request,
+ sender: tx,
+ },
+ )
+ })
+ .unzip();
+
+ // Assemble them
+ let (op_name, _context, request, txs) = assemble_batch(requests)
+ .await
+ .expect("it can assemble a batch");
+
+ // Make sure that the name of the entire batch is that of the first
+ assert_eq!(op_name, "batch_test_0");
+
+ // We should see the aggregation of all of the requests
+ let actual: Vec<graphql::Request> = serde_json::from_str(
+ &String::from_utf8(to_bytes(request.into_body()).await.unwrap().to_vec()).unwrap(),
+ )
+ .unwrap();
+
+ let expected: Vec<_> = (0..2)
+ .map(|index| {
+ graphql::Request::fake_builder()
+ .operation_name(format!("batch_test_{index}"))
+ .query(format!("query batch_test {{ slot{index} }}"))
+ .build()
+ })
+ .collect();
+ assert_eq!(actual, expected);
+
+ // We should also have all of the correct senders and they should be linked to the correct waiter
+ // Note: The senders should pair up with the receivers in the original request order
+ assert_eq!(txs.len(), receivers.len());
+ for (index, (tx, rx)) in Iterator::zip(txs.into_iter(), receivers).enumerate() {
+ let data = serde_json_bytes::json!({
+ "data": {
+ format!("slot{index}"): "valid"
+ }
+ });
+ let response = SubgraphResponse {
+ response: http::Response::builder()
+ .body(graphql::Response::builder().data(data.clone()).build())
+ .unwrap(),
+ context: Context::new(),
+ };
+
+ tx.send(Ok(response)).unwrap();
+
+ // We want to make sure that we don't hang the test if we don't get the correct message
+ let received = tokio::time::timeout(Duration::from_millis(10), rx)
+ .await
+ .unwrap()
+ .unwrap()
+ .unwrap();
+
+ assert_eq!(received.response.into_body().data, Some(data));
+ }
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn it_rejects_index_out_of_bounds() {
+ let batch = Arc::new(Batch::spawn_handler(2));
+
+ assert!(Batch::query_for_index(batch.clone(), 2).is_err());
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn it_rejects_duplicated_index_get() {
+ let batch = Arc::new(Batch::spawn_handler(2));
+
+ assert!(Batch::query_for_index(batch.clone(), 0).is_ok());
+ assert!(Batch::query_for_index(batch.clone(), 0).is_err());
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn it_limits_the_number_of_cancelled_sends() {
+ let batch = Arc::new(Batch::spawn_handler(2));
+
+ let bq = Batch::query_for_index(batch.clone(), 0).expect("it's a valid index");
+
+ assert!(bq
+ .set_query_hashes(vec![Arc::new(QueryHash::default())])
+ .await
+ .is_ok());
+ assert!(!bq.finished());
+ assert!(bq.signal_cancelled("why not?".to_string()).await.is_ok());
+ assert!(bq.finished());
+ assert!(bq
+ .signal_cancelled("only once though".to_string())
+ .await
+ .is_err());
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn it_limits_the_number_of_progressed_sends() {
+ let batch = Arc::new(Batch::spawn_handler(2));
+
+ let bq = Batch::query_for_index(batch.clone(), 0).expect("it's a valid index");
+
+ let factory = HttpClientServiceFactory::from_config(
+ "testbatch",
+ &Configuration::default(),
+ Http2Config::Disable,
+ );
+ let request = SubgraphRequest::fake_builder()
+ .subgraph_request(
+ http::Request::builder()
+ .body(graphql::Request::default())
+ .unwrap(),
+ )
+ .subgraph_name("whatever".to_string())
+ .build();
+ assert!(bq
+ .set_query_hashes(vec![Arc::new(QueryHash::default())])
+ .await
+ .is_ok());
+ assert!(!bq.finished());
+ assert!(bq
+ .signal_progress(
+ factory.clone(),
+ request.clone(),
+ graphql::Request::default()
+ )
+ .await
+ .is_ok());
+ assert!(bq.finished());
+ assert!(bq
+ .signal_progress(factory, request, graphql::Request::default())
+ .await
+ .is_err());
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn it_limits_the_number_of_mixed_sends() {
+ let batch = Arc::new(Batch::spawn_handler(2));
+
+ let bq = Batch::query_for_index(batch.clone(), 0).expect("it's a valid index");
+
+ let factory = HttpClientServiceFactory::from_config(
+ "testbatch",
+ &Configuration::default(),
+ Http2Config::Disable,
+ );
+ let request = SubgraphRequest::fake_builder()
+ .subgraph_request(
+ http::Request::builder()
+ .body(graphql::Request::default())
+ .unwrap(),
+ )
+ .subgraph_name("whatever".to_string())
+ .build();
+ assert!(bq
+ .set_query_hashes(vec![Arc::new(QueryHash::default())])
+ .await
+ .is_ok());
+ assert!(!bq.finished());
+ assert!(bq
+ .signal_progress(factory, request, graphql::Request::default())
+ .await
+ .is_ok());
+ assert!(bq.finished());
+ assert!(bq
+ .signal_cancelled("only once though".to_string())
+ .await
+ .is_err());
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn it_limits_the_number_of_mixed_sends_two_query_hashes() {
+ let batch = Arc::new(Batch::spawn_handler(2));
+
+ let bq = Batch::query_for_index(batch.clone(), 0).expect("it's a valid index");
+
+ let factory = HttpClientServiceFactory::from_config(
+ "testbatch",
+ &Configuration::default(),
+ Http2Config::Disable,
+ );
+ let request = SubgraphRequest::fake_builder()
+ .subgraph_request(
+ http::Request::builder()
+ .body(graphql::Request::default())
+ .unwrap(),
+ )
+ .subgraph_name("whatever".to_string())
+ .build();
+ let qh = Arc::new(QueryHash::default());
+ assert!(bq.set_query_hashes(vec![qh.clone(), qh]).await.is_ok());
+ assert!(!bq.finished());
+ assert!(bq
+ .signal_progress(factory, request, graphql::Request::default())
+ .await
+ .is_ok());
+ assert!(!bq.finished());
+ assert!(bq
+ .signal_cancelled("only twice though".to_string())
+ .await
+ .is_ok());
+ assert!(bq.finished());
+ assert!(bq
+ .signal_cancelled("only twice though".to_string())
+ .await
+ .is_err());
+ }
+}
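
The coordinator task above shuts down through tokio channel semantics: `rx.recv().await` returns `None` only once every clone of the sender has been dropped, which is why `BatchQuery` clears its sender slot when `remaining` reaches zero. The following self-contained sketch demonstrates that pattern in isolation, with a hypothetical `Msg` type standing in for `BatchHandlerMessage`:

```rust
use tokio::sync::{mpsc, oneshot};

// Hypothetical stand-in for BatchHandlerMessage::Progress.
enum Msg {
    Progress {
        index: usize,
        done: oneshot::Sender<String>,
    },
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<Msg>(4);

    // Coordinator: loops until every sender clone has been dropped.
    let coordinator = tokio::spawn(async move {
        while let Some(Msg::Progress { index, done }) = rx.recv().await {
            let _ = done.send(format!("response for {index}"));
        }
        // recv() returned None: all senders are gone, the batch is complete.
    });

    for index in 0..2 {
        let (done_tx, done_rx) = oneshot::channel();
        tx.send(Msg::Progress { index, done: done_tx })
            .await
            .unwrap();
        println!("{}", done_rx.await.unwrap());
    }

    // Dropping the last sender is what lets the coordinator exit.
    drop(tx);
    coordinator.await.unwrap();
}
```
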
diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs
index 093675a010..67b405b318 100644
--- a/apollo-router/src/configuration/metrics.rs
+++ b/apollo-router/src/configuration/metrics.rs
@@ -8,11 +8,13 @@ use opentelemetry_api::KeyValue;
use paste::paste;
use serde_json::Value;
+use super::AvailableParallelism;
use crate::metrics::meter_provider;
use crate::uplink::license_enforcement::LicenseState;
use crate::Configuration;
type InstrumentMap = HashMap<String, (u64, HashMap<String, AttributeValue>)>;
+
pub(crate) struct Metrics {
_instruments: Vec<ObservableGauge<u64>>,
}
@@ -44,7 +46,8 @@ impl Metrics {
.unwrap_or(&serde_json::Value::Null),
);
data.populate_license_instrument(license_state);
-
+ data.populate_user_plugins_instrument(configuration);
+ data.populate_query_planner_experimental_parallelism(configuration);
data.into()
}
}
@@ -333,7 +336,7 @@ impl InstrumentData {
populate_config_instrument!(
apollo.router.config.batching,
- "$.experimental_batching[?(@.enabled == true)]",
+ "$.batching[?(@.enabled == true)]",
opt.mode,
"$.mode"
);
@@ -406,7 +409,76 @@ impl InstrumentData {
),
);
}
+
+ pub(crate) fn populate_user_plugins_instrument(&mut self, configuration: &Configuration) {
+ self.data.insert(
+ "apollo.router.config.custom_plugins".to_string(),
+ (
+ configuration
+ .plugins
+ .plugins
+ .as_ref()
+ .map(|configuration| {
+ configuration
+ .keys()
+ .filter(|k| !k.starts_with("cloud_router."))
+ .count()
+ })
+ .unwrap_or_default() as u64,
+ [].into(),
+ ),
+ );
+ }
+
+ pub(crate) fn populate_query_planner_experimental_parallelism(
+ &mut self,
+ configuration: &Configuration,
+ ) {
+ let query_planner_parallelism_config = configuration
+ .supergraph
+ .query_planning
+ .experimental_parallelism;
+
+ if query_planner_parallelism_config != Default::default() {
+ let mut attributes = HashMap::new();
+ attributes.insert(
+ "mode".to_string(),
+ if let AvailableParallelism::Auto(_) = query_planner_parallelism_config {
+ "auto"
+ } else {
+ "static"
+ }
+ .into(),
+ );
+ self.data.insert(
+ "apollo.router.config.query_planning.parallelism".to_string(),
+ (
+ configuration
+ .supergraph
+ .query_planning
+ .experimental_query_planner_parallelism()
+ .map(|n| {
+ #[cfg(test)]
+ {
+ // Set to a fixed number for snapshot tests
+ if let AvailableParallelism::Auto(_) =
+ query_planner_parallelism_config
+ {
+ return 8;
+ }
+ }
+ let as_usize: usize = n.into();
+ let as_u64: u64 = as_usize.try_into().unwrap_or_default();
+ as_u64
+ })
+ .unwrap_or_default(),
+ attributes,
+ ),
+ );
+ }
+ }
}
+
impl From<InstrumentData> for Metrics {
fn from(data: InstrumentData) -> Self {
Metrics {
@@ -433,10 +505,12 @@ impl From<InstrumentData> for Metrics {
#[cfg(test)]
mod test {
use rust_embed::RustEmbed;
+ use serde_json::json;
use crate::configuration::metrics::InstrumentData;
use crate::configuration::metrics::Metrics;
use crate::uplink::license_enforcement::LicenseState;
+ use crate::Configuration;
#[derive(RustEmbed)]
#[folder = "src/configuration/testdata/metrics"]
@@ -454,6 +528,8 @@ mod test {
let mut data = InstrumentData::default();
data.populate_config_instruments(yaml);
+ let configuration: Configuration = input.parse().unwrap();
+ data.populate_query_planner_experimental_parallelism(&configuration);
let _metrics: Metrics = data.into();
assert_non_zero_metrics_snapshot!(file_name);
}
@@ -482,4 +558,29 @@ mod test {
let _metrics: Metrics = data.into();
assert_non_zero_metrics_snapshot!();
}
+
+ #[test]
+ fn test_custom_plugin() {
+ let mut configuration = crate::Configuration::default();
+ let mut custom_plugins = serde_json::Map::new();
+ custom_plugins.insert("name".to_string(), json!("test"));
+ configuration.plugins.plugins = Some(custom_plugins);
+ let mut data = InstrumentData::default();
+ data.populate_user_plugins_instrument(&configuration);
+ let _metrics: Metrics = data.into();
+ assert_non_zero_metrics_snapshot!();
+ }
+
+ #[test]
+ fn test_ignore_cloud_router_plugins() {
+ let mut configuration = crate::Configuration::default();
+ let mut custom_plugins = serde_json::Map::new();
+ custom_plugins.insert("name".to_string(), json!("test"));
+ custom_plugins.insert("cloud_router.".to_string(), json!("test"));
+ configuration.plugins.plugins = Some(custom_plugins);
+ let mut data = InstrumentData::default();
+ data.populate_user_plugins_instrument(&configuration);
+ let _metrics: Metrics = data.into();
+ assert_non_zero_metrics_snapshot!();
+ }
}
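
`populate_query_planner_experimental_parallelism` labels the gauge with a `mode` of `auto` or `static` and resolves `auto` via `std::thread::available_parallelism()` (pinned to 8 under `cfg(test)` so the snapshots stay stable). A sketch of that resolution logic in isolation, mirroring (not copied from) the helper added to configuration/mod.rs further below:

```rust
use std::num::NonZeroUsize;

// Resolve the configured parallelism the way the router's helper does:
// "auto" defers to the OS-reported core count, a fixed value passes through.
fn resolve(auto: bool, fixed: NonZeroUsize) -> std::io::Result<NonZeroUsize> {
    if auto {
        // Returns io::Result<NonZeroUsize>, hence the fallible signature.
        std::thread::available_parallelism()
    } else {
        Ok(fixed)
    }
}

fn main() -> std::io::Result<()> {
    let auto = resolve(true, NonZeroUsize::new(1).unwrap())?;
    let fixed = resolve(false, NonZeroUsize::new(10).unwrap())?;
    println!("mode=auto -> {auto}, mode=static -> {fixed}");
    Ok(())
}
```
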
diff --git a/apollo-router/src/configuration/migrations/0023-batching.yaml b/apollo-router/src/configuration/migrations/0023-batching.yaml
new file mode 100644
index 0000000000..7457467524
--- /dev/null
+++ b/apollo-router/src/configuration/migrations/0023-batching.yaml
@@ -0,0 +1,5 @@
+description: Batching is no longer experimental
+actions:
+ - type: move
+ from: experimental_batching
+ to: batching
diff --git a/apollo-router/src/configuration/migrations/0024-graphql_validation.yaml b/apollo-router/src/configuration/migrations/0024-graphql_validation.yaml
new file mode 100644
index 0000000000..f830067a53
--- /dev/null
+++ b/apollo-router/src/configuration/migrations/0024-graphql_validation.yaml
@@ -0,0 +1,4 @@
+description: experimental_graphql_validation_mode is no longer supported
+actions:
+ - type: delete
+ path: experimental_graphql_validation_mode
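
The 0023 migration's `move` action renames a top-level key while preserving its value, and 0024's `delete` simply drops one. A standalone sketch of the `move` semantics over a YAML mapping, assuming serde_yaml and an illustrative helper name (the router's actual migration engine is not shown here):

```rust
use serde_yaml::{Mapping, Value};

// Hypothetical helper showing what a `move` action does: rename one
// top-level key, keep its value, and leave all other keys untouched.
fn apply_move(config: Mapping, from: &str, to: &str) -> Mapping {
    config
        .into_iter()
        .map(|(key, value)| {
            if key.as_str() == Some(from) {
                (Value::from(to), value)
            } else {
                (key, value)
            }
        })
        .collect()
}

fn main() {
    let old = "experimental_batching:\n  enabled: true\n  mode: batch_http_link\n";
    let config: Mapping = serde_yaml::from_str(old).unwrap();
    let migrated = apply_move(config, "experimental_batching", "batching");
    // Prints the upgraded config under the `batching` key, as in the snapshot.
    print!("{}", serde_yaml::to_string(&migrated).unwrap());
}
```
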
diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs
index 7ac7061968..2b8eb42115 100644
--- a/apollo-router/src/configuration/mod.rs
+++ b/apollo-router/src/configuration/mod.rs
@@ -1,16 +1,4 @@
//! Logic for loading configuration in to an object model
-pub(crate) mod cors;
-pub(crate) mod expansion;
-mod experimental;
-pub(crate) mod metrics;
-mod persisted_queries;
-mod schema;
-pub(crate) mod subgraph;
-#[cfg(test)]
-mod tests;
-mod upgrade;
-mod yaml;
-
use std::fmt;
use std::io;
use std::io::BufReader;
@@ -70,6 +58,18 @@ use crate::plugins::subscription::APOLLO_SUBSCRIPTION_PLUGIN_NAME;
use crate::uplink::UplinkConfig;
use crate::ApolloRouterError;
+pub(crate) mod cors;
+pub(crate) mod expansion;
+mod experimental;
+pub(crate) mod metrics;
+mod persisted_queries;
+mod schema;
+pub(crate) mod subgraph;
+#[cfg(test)]
+mod tests;
+mod upgrade;
+mod yaml;
+
// TODO: Talk it through with the teams
#[cfg(not(test))]
static HEARTBEAT_TIMEOUT_DURATION_SECONDS: u64 = 15;
@@ -164,14 +164,14 @@ pub struct Configuration {
#[serde(default)]
pub(crate) experimental_chaos: Chaos,
- /// Set the GraphQL validation implementation to use.
- #[serde(default)]
- pub(crate) experimental_graphql_validation_mode: GraphQLValidationMode,
-
/// Set the API schema generation implementation to use.
#[serde(default)]
pub(crate) experimental_api_schema_generation_mode: ApiSchemaMode,
+ /// Set the Apollo usage report signature and referenced field generation implementation to use.
+ #[serde(default)]
+ pub(crate) experimental_apollo_metrics_generation_mode: ApolloMetricsGenerationMode,
+
/// Plugin configuration
#[serde(default)]
pub(crate) plugins: UserPlugins,
@@ -190,7 +190,7 @@ pub struct Configuration {
/// Batching configuration.
#[serde(default)]
- pub(crate) experimental_batching: Batching,
+ pub(crate) batching: Batching,
}
impl PartialEq for Configuration {
@@ -199,11 +199,11 @@ impl PartialEq for Configuration {
}
}
-/// GraphQL validation modes.
+/// API schema generation modes.
#[derive(Clone, PartialEq, Eq, Default, Derivative, Serialize, Deserialize, JsonSchema)]
#[derivative(Debug)]
#[serde(rename_all = "lowercase")]
-pub(crate) enum GraphQLValidationMode {
+pub(crate) enum ApiSchemaMode {
/// Use the new Rust-based implementation.
New,
/// Use the old JavaScript-based implementation.
@@ -214,18 +214,18 @@ pub(crate) enum GraphQLValidationMode {
Both,
}
-/// API schema generation modes.
+/// Apollo usage report signature and referenced field generation modes.
#[derive(Clone, PartialEq, Eq, Default, Derivative, Serialize, Deserialize, JsonSchema)]
#[derivative(Debug)]
#[serde(rename_all = "lowercase")]
-pub(crate) enum ApiSchemaMode {
+pub(crate) enum ApolloMetricsGenerationMode {
/// Use the new Rust-based implementation.
New,
/// Use the old JavaScript-based implementation.
- #[default]
Legacy,
/// Use Rust-based and Javascript-based implementations side by side, logging warnings if the
/// implementations disagree.
+ #[default]
Both,
}
@@ -254,8 +254,8 @@ impl<'de> serde::Deserialize<'de> for Configuration {
uplink: UplinkConfig,
limits: Limits,
experimental_chaos: Chaos,
- experimental_graphql_validation_mode: GraphQLValidationMode,
- experimental_batching: Batching,
+ batching: Batching,
+ experimental_apollo_metrics_generation_mode: ApolloMetricsGenerationMode,
}
let ad_hoc: AdHocConfiguration = serde::Deserialize::deserialize(deserializer)?;
@@ -273,8 +273,10 @@ impl<'de> serde::Deserialize<'de> for Configuration {
.operation_limits(ad_hoc.limits)
.chaos(ad_hoc.experimental_chaos)
.uplink(ad_hoc.uplink)
- .graphql_validation_mode(ad_hoc.experimental_graphql_validation_mode)
- .experimental_batching(ad_hoc.experimental_batching)
+ .batching(ad_hoc.batching)
+ .experimental_apollo_metrics_generation_mode(
+ ad_hoc.experimental_apollo_metrics_generation_mode,
+ )
.build()
.map_err(|e| serde::de::Error::custom(e.to_string()))
}
@@ -286,8 +288,7 @@ fn default_graphql_listen() -> ListenAddr {
SocketAddr::from_str("127.0.0.1:4000").unwrap().into()
}
-// This isn't dead code! we use it in buildstructor's fake_new
-#[allow(dead_code)]
+#[cfg(test)]
fn test_listen() -> ListenAddr {
SocketAddr::from_str("127.0.0.1:0").unwrap().into()
}
@@ -310,9 +311,9 @@ impl Configuration {
operation_limits: Option<Limits>,
chaos: Option<Chaos>,
uplink: Option<UplinkConfig>,
- graphql_validation_mode: Option<GraphQLValidationMode>,
experimental_api_schema_generation_mode: Option<ApiSchemaMode>,
- experimental_batching: Option<Batching>,
+ batching: Option<Batching>,
+ experimental_apollo_metrics_generation_mode: Option<ApolloMetricsGenerationMode>,
) -> Result<Self, ConfigurationError> {
#[cfg(not(test))]
let notify_queue_cap = match apollo_plugins.get(APOLLO_SUBSCRIPTION_PLUGIN_NAME) {
@@ -338,8 +339,8 @@ impl Configuration {
persisted_queries: persisted_query.unwrap_or_default(),
limits: operation_limits.unwrap_or_default(),
experimental_chaos: chaos.unwrap_or_default(),
- experimental_graphql_validation_mode: graphql_validation_mode.unwrap_or_default(),
experimental_api_schema_generation_mode: experimental_api_schema_generation_mode.unwrap_or_default(),
+ experimental_apollo_metrics_generation_mode: experimental_apollo_metrics_generation_mode.unwrap_or_default(),
plugins: UserPlugins {
plugins: Some(plugins),
},
@@ -348,7 +349,7 @@ impl Configuration {
},
tls: tls.unwrap_or_default(),
uplink,
- experimental_batching: experimental_batching.unwrap_or_default(),
+ batching: batching.unwrap_or_default(),
#[cfg(test)]
notify: notify.unwrap_or_default(),
#[cfg(not(test))]
@@ -386,9 +387,9 @@ impl Configuration {
operation_limits: Option<Limits>,
chaos: Option<Chaos>,
uplink: Option<UplinkConfig>,
- graphql_validation_mode: Option<GraphQLValidationMode>,
- experimental_batching: Option<Batching>,
+ batching: Option<Batching>,
experimental_api_schema_generation_mode: Option<ApiSchemaMode>,
+ experimental_apollo_metrics_generation_mode: Option<ApolloMetricsGenerationMode>,
) -> Result<Self, ConfigurationError> {
let configuration = Self {
validated_yaml: Default::default(),
@@ -399,9 +400,10 @@ impl Configuration {
cors: cors.unwrap_or_default(),
limits: operation_limits.unwrap_or_default(),
experimental_chaos: chaos.unwrap_or_default(),
- experimental_graphql_validation_mode: graphql_validation_mode.unwrap_or_default(),
experimental_api_schema_generation_mode: experimental_api_schema_generation_mode
.unwrap_or_default(),
+ experimental_apollo_metrics_generation_mode:
+ experimental_apollo_metrics_generation_mode.unwrap_or_default(),
plugins: UserPlugins {
plugins: Some(plugins),
},
@@ -413,7 +415,7 @@ impl Configuration {
apq: apq.unwrap_or_default(),
persisted_queries: persisted_query.unwrap_or_default(),
uplink,
- experimental_batching: experimental_batching.unwrap_or_default(),
+ batching: batching.unwrap_or_default(),
};
configuration.validate()
@@ -631,6 +633,25 @@ pub(crate) struct Supergraph {
pub(crate) experimental_log_on_broken_pipe: bool,
}
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)]
+#[serde(rename_all = "snake_case", untagged)]
+pub(crate) enum AvailableParallelism {
+ Auto(Auto),
+ Fixed(NonZeroUsize),
+}
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)]
+#[serde(rename_all = "snake_case")]
+pub(crate) enum Auto {
+ Auto,
+}
+
+impl Default for AvailableParallelism {
+ fn default() -> Self {
+ Self::Fixed(NonZeroUsize::new(1).expect("cannot fail"))
+ }
+}
+
fn default_defer_support() -> bool {
true
}
@@ -924,6 +945,19 @@ pub(crate) struct QueryPlanning {
/// If cache warm up is configured, this will allow the router to keep a query plan created with
/// the old schema, if it determines that the schema update does not affect the corresponding query
pub(crate) experimental_reuse_query_plans: bool,
+
+ /// Set the size of a pool of workers to enable query planning parallelism.
+ /// Default: 1.
+ pub(crate) experimental_parallelism: AvailableParallelism,
+}
+
+impl QueryPlanning {
+ pub(crate) fn experimental_query_planner_parallelism(&self) -> io::Result<NonZeroUsize> {
+ match self.experimental_parallelism {
+ AvailableParallelism::Auto(Auto::Auto) => std::thread::available_parallelism(),
+ AvailableParallelism::Fixed(n) => Ok(n),
+ }
+ }
}
/// Cache configuration
@@ -1526,4 +1560,42 @@ pub(crate) struct Batching {
/// Batching mode
pub(crate) mode: BatchingMode,
+
+ /// Subgraph options for batching
+ pub(crate) subgraph: Option<SubgraphConfiguration<CommonBatchingConfig>>,
+}
+
+/// Common options for configuring subgraph batching
+#[derive(Debug, Clone, Default, Deserialize, Serialize, JsonSchema)]
+pub(crate) struct CommonBatchingConfig {
+ /// Whether this batching config should be enabled
+ pub(crate) enabled: bool,
+}
+
+impl Batching {
+ // Check if we should enable batching for a particular subgraph (service_name)
+ pub(crate) fn batch_include(&self, service_name: &str) -> bool {
+ match &self.subgraph {
+ Some(subgraph_batching_config) => {
+ // Override by checking if all is enabled
+ if subgraph_batching_config.all.enabled {
+ // If it is, require:
+ // - no subgraph entry OR
+ // - an enabled subgraph entry
+ subgraph_batching_config
+ .subgraphs
+ .get(service_name)
+ .map_or(true, |x| x.enabled)
+ } else {
+ // If it isn't, require:
+ // - an enabled subgraph entry
+ subgraph_batching_config
+ .subgraphs
+ .get(service_name)
+ .is_some_and(|x| x.enabled)
+ }
+ }
+ None => false,
+ }
+ }
}
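
`AvailableParallelism` leans on serde's `untagged` representation: variants are tried in declaration order, so the string `auto` matches `Auto` and a bare positive integer matches `Fixed(NonZeroUsize)`, which is exactly the `anyOf` pair that shows up in the generated JSON schema below. A minimal standalone mirror of that behaviour (serde/serde_json only, not the router's own types):

```rust
use std::num::NonZeroUsize;

use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum AvailableParallelism {
    Auto(Auto),
    Fixed(NonZeroUsize),
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
enum Auto {
    Auto,
}

fn main() {
    // `untagged` tries each variant in order until one deserializes.
    let auto: AvailableParallelism =
        serde_json::from_value(serde_json::json!("auto")).unwrap();
    let fixed: AvailableParallelism =
        serde_json::from_value(serde_json::json!(10)).unwrap();
    println!("{auto:?} / {fixed:?}"); // Auto(Auto) / Fixed(10)

    // Zero is rejected by NonZeroUsize, matching the schema's `minimum: 1`.
    assert!(serde_json::from_value::<AvailableParallelism>(serde_json::json!(0)).is_err());
}
```
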
diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__custom_plugin.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__custom_plugin.snap
new file mode 100644
index 0000000000..7ede11d1b7
--- /dev/null
+++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__custom_plugin.snap
@@ -0,0 +1,9 @@
+---
+source: apollo-router/src/configuration/metrics.rs
+expression: "&metrics.non_zero()"
+---
+- name: apollo.router.config.custom_plugins
+ data:
+ datapoints:
+ - value: 1
+ attributes: {}
diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__ignore_cloud_router_plugins.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__ignore_cloud_router_plugins.snap
new file mode 100644
index 0000000000..7ede11d1b7
--- /dev/null
+++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__ignore_cloud_router_plugins.snap
@@ -0,0 +1,9 @@
+---
+source: apollo-router/src/configuration/metrics.rs
+expression: "&metrics.non_zero()"
+---
+- name: apollo.router.config.custom_plugins
+ data:
+ datapoints:
+ - value: 1
+ attributes: {}
diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_auto.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_auto.router.yaml.snap
new file mode 100644
index 0000000000..b54b336914
--- /dev/null
+++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_auto.router.yaml.snap
@@ -0,0 +1,10 @@
+---
+source: apollo-router/src/configuration/metrics.rs
+expression: "&metrics.non_zero()"
+---
+- name: apollo.router.config.query_planning.parallelism
+ data:
+ datapoints:
+ - value: 8
+ attributes:
+ mode: auto
diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_static.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_static.router.yaml.snap
new file mode 100644
index 0000000000..07bb2c2ea4
--- /dev/null
+++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_static.router.yaml.snap
@@ -0,0 +1,10 @@
+---
+source: apollo-router/src/configuration/metrics.rs
+expression: "&metrics.non_zero()"
+---
+- name: apollo.router.config.query_planning.parallelism
+ data:
+ datapoints:
+ - value: 10
+ attributes:
+ mode: static
diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap
index e267f4efb6..504605271e 100644
--- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap
+++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap
@@ -1,6 +1,5 @@
---
source: apollo-router/src/configuration/tests.rs
-assertion_line: 31
expression: "&schema"
---
{
@@ -743,6 +742,79 @@ expression: "&schema"
}
}
},
+ "batching": {
+ "description": "Batching configuration.",
+ "default": {
+ "enabled": false,
+ "mode": "batch_http_link",
+ "subgraph": null
+ },
+ "type": "object",
+ "required": [
+ "mode"
+ ],
+ "properties": {
+ "enabled": {
+ "description": "Activates Batching (disabled by default)",
+ "default": false,
+ "type": "boolean"
+ },
+ "mode": {
+ "description": "Batching mode",
+ "oneOf": [
+ {
+ "description": "batch_http_link",
+ "type": "string",
+ "enum": [
+ "batch_http_link"
+ ]
+ }
+ ]
+ },
+ "subgraph": {
+ "description": "Subgraph options for batching",
+ "type": "object",
+ "properties": {
+ "all": {
+ "description": "options applying to all subgraphs",
+ "default": {
+ "enabled": false
+ },
+ "type": "object",
+ "required": [
+ "enabled"
+ ],
+ "properties": {
+ "enabled": {
+ "description": "Whether this batching config should be enabled",
+ "type": "boolean"
+ }
+ }
+ },
+ "subgraphs": {
+ "description": "per subgraph options",
+ "default": {},
+ "type": "object",
+ "additionalProperties": {
+ "description": "Common options for configuring subgraph batching",
+ "type": "object",
+ "required": [
+ "enabled"
+ ],
+ "properties": {
+ "enabled": {
+ "description": "Whether this batching config should be enabled",
+ "type": "boolean"
+ }
+ }
+ }
+ }
+ },
+ "nullable": true
+ }
+ },
+ "additionalProperties": false
+ },
"coprocessor": {
"description": "Configures the externalization plugin",
"type": "object",
@@ -1324,7 +1396,7 @@ expression: "&schema"
},
"experimental_api_schema_generation_mode": {
"description": "Set the API schema generation implementation to use.",
- "default": "legacy",
+ "default": "both",
"oneOf": [
{
"description": "Use the new Rust-based implementation.",
@@ -1349,36 +1421,32 @@ expression: "&schema"
}
]
},
- "experimental_batching": {
- "description": "Batching configuration.",
- "default": {
- "enabled": false,
- "mode": "batch_http_link"
- },
- "type": "object",
- "required": [
- "mode"
- ],
- "properties": {
- "enabled": {
- "description": "Activates Batching (disabled by default)",
- "default": false,
- "type": "boolean"
+ "experimental_apollo_metrics_generation_mode": {
+ "description": "Set the Apollo usage report signature and referenced field generation implementation to use.",
+ "default": "both",
+ "oneOf": [
+ {
+ "description": "Use the new Rust-based implementation.",
+ "type": "string",
+ "enum": [
+ "new"
+ ]
},
- "mode": {
- "description": "Batching mode",
- "oneOf": [
- {
- "description": "batch_http_link",
- "type": "string",
- "enum": [
- "batch_http_link"
- ]
- }
+ {
+ "description": "Use the old JavaScript-based implementation.",
+ "type": "string",
+ "enum": [
+ "legacy"
+ ]
+ },
+ {
+ "description": "Use Rust-based and Javascript-based implementations side by side, logging warnings if the implementations disagree.",
+ "type": "string",
+ "enum": [
+ "both"
]
}
- },
- "additionalProperties": false
+ ]
},
"experimental_chaos": {
"description": "Configuration for chaos testing, trying to reproduce bugs that require uncommon conditions. You probably don’t want this in production!",
@@ -1423,33 +1491,6 @@ expression: "&schema"
},
"additionalProperties": false
},
- "experimental_graphql_validation_mode": {
- "description": "Set the GraphQL validation implementation to use.",
- "default": "both",
- "oneOf": [
- {
- "description": "Use the new Rust-based implementation.",
- "type": "string",
- "enum": [
- "new"
- ]
- },
- {
- "description": "Use the old JavaScript-based implementation.",
- "type": "string",
- "enum": [
- "legacy"
- ]
- },
- {
- "description": "Use Rust-based and Javascript-based implementations side by side, logging warnings if the implementations disagree.",
- "type": "string",
- "enum": [
- "both"
- ]
- }
- ]
- },
"forbid_mutations": {
"description": "Forbid mutations configuration",
"type": "boolean"
@@ -2632,7 +2673,8 @@ expression: "&schema"
"warmed_up_queries": null,
"experimental_plans_limit": null,
"experimental_paths_limit": null,
- "experimental_reuse_query_plans": false
+ "experimental_reuse_query_plans": false,
+ "experimental_parallelism": 1
},
"early_cancel": false,
"experimental_log_on_broken_pipe": false
@@ -2701,7 +2743,8 @@ expression: "&schema"
"warmed_up_queries": null,
"experimental_plans_limit": null,
"experimental_paths_limit": null,
- "experimental_reuse_query_plans": false
+ "experimental_reuse_query_plans": false,
+ "experimental_parallelism": 1
},
"type": "object",
"properties": {
@@ -2835,6 +2878,23 @@ expression: "&schema"
},
"additionalProperties": false
},
+ "experimental_parallelism": {
+ "description": "Set the size of a pool of workers to enable query planning parallelism. Default: 1.",
+ "default": 1,
+ "anyOf": [
+ {
+ "type": "string",
+ "enum": [
+ "auto"
+ ]
+ },
+ {
+ "type": "integer",
+ "format": "uint",
+ "minimum": 1.0
+ }
+ ]
+ },
"experimental_paths_limit": {
"description": "Before creating query plans, for each path of fields in the query we compute all the possible options to traverse that path via the subgraphs. Multiple options can arise because fields in the path can be provided by multiple subgraphs, and abstract types (i.e. unions and interfaces) returned by fields sometimes require the query planner to traverse through each constituent object type. The number of options generated in this computation can grow large if the schema or query are sufficiently complex, and that will increase the time spent planning.\n\nThis config allows specifying a per-path limit to the number of options considered. If any path's options exceeds this limit, query planning will abort and the operation will fail.\n\nThe default value is None, which specifies no limit.",
"default": null,
diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@batching.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@batching.yaml.snap
new file mode 100644
index 0000000000..daec7b3f14
--- /dev/null
+++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@batching.yaml.snap
@@ -0,0 +1,8 @@
+---
+source: apollo-router/src/configuration/tests.rs
+expression: new_config
+---
+---
+batching:
+ enabled: true
+ mode: batch_http_link
diff --git a/apollo-router/src/configuration/testdata/metrics/batching.router.yaml b/apollo-router/src/configuration/testdata/metrics/batching.router.yaml
index c177d3f45e..169f3824a9 100644
--- a/apollo-router/src/configuration/testdata/metrics/batching.router.yaml
+++ b/apollo-router/src/configuration/testdata/metrics/batching.router.yaml
@@ -1,3 +1,3 @@
-experimental_batching:
+batching:
enabled: true
mode: batch_http_link
diff --git a/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_auto.router.yaml b/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_auto.router.yaml
new file mode 100644
index 0000000000..e29357f06d
--- /dev/null
+++ b/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_auto.router.yaml
@@ -0,0 +1,3 @@
+supergraph:
+ query_planning:
+ experimental_parallelism: auto
diff --git a/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_static.router.yaml b/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_static.router.yaml
new file mode 100644
index 0000000000..8861ab2777
--- /dev/null
+++ b/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_static.router.yaml
@@ -0,0 +1,3 @@
+supergraph:
+ query_planning:
+ experimental_parallelism: 10
diff --git a/apollo-router/src/configuration/testdata/migrations/batching.yaml b/apollo-router/src/configuration/testdata/migrations/batching.yaml
new file mode 100644
index 0000000000..c177d3f45e
--- /dev/null
+++ b/apollo-router/src/configuration/testdata/migrations/batching.yaml
@@ -0,0 +1,3 @@
+experimental_batching:
+ enabled: true
+ mode: batch_http_link
diff --git a/apollo-router/src/configuration/tests.rs b/apollo-router/src/configuration/tests.rs
index e8e985dec1..ffefd4ad54 100644
--- a/apollo-router/src/configuration/tests.rs
+++ b/apollo-router/src/configuration/tests.rs
@@ -55,7 +55,7 @@ fn routing_url_in_schema() {
REVIEWS @join__graph(name: "reviews" url: "http://localhost:4004/graphql")
}
"#;
- let schema = crate::spec::Schema::parse(schema, &Default::default()).unwrap();
+ let schema = crate::spec::Schema::parse(schema).unwrap();
let subgraphs: HashMap<&String, &Uri> = schema.subgraphs().collect();
@@ -107,7 +107,7 @@ fn missing_subgraph_url() {
PRODUCTS @join__graph(name: "products" url: "http://localhost:4003/graphql")
REVIEWS @join__graph(name: "reviews" url: "")
}"#;
- let schema_error = crate::spec::Schema::parse(schema_error, &Default::default())
+ let schema_error = crate::spec::Schema::parse(schema_error)
.expect_err("Must have an error because we have one missing subgraph routing url");
if let SchemaError::MissingSubgraphUrl(subgraph) = schema_error {
@@ -968,6 +968,132 @@ fn it_adds_slash_to_custom_health_check_path_if_missing() {
assert_eq!(&conf.health_check.path, "/healthz");
}
+#[test]
+fn it_processes_batching_subgraph_all_enabled_correctly() {
+ let json_config = json!({
+ "enabled": true,
+ "mode": "batch_http_link",
+ "subgraph": {
+ "all": {
+ "enabled": true
+ }
+ }
+ });
+
+ let config: Batching = serde_json::from_value(json_config).unwrap();
+
+ assert!(config.batch_include("anything"));
+}
+
+#[test]
+fn it_processes_batching_subgraph_all_disabled_correctly() {
+ let json_config = json!({
+ "enabled": true,
+ "mode": "batch_http_link",
+ "subgraph": {
+ "all": {
+ "enabled": false
+ }
+ }
+ });
+
+ let config: Batching = serde_json::from_value(json_config).unwrap();
+
+ assert!(!config.batch_include("anything"));
+}
+
+#[test]
+fn it_processes_batching_subgraph_accounts_enabled_correctly() {
+ let json_config = json!({
+ "enabled": true,
+ "mode": "batch_http_link",
+ "subgraph": {
+ "all": {
+ "enabled": false
+ },
+ "subgraphs": {
+ "accounts": {
+ "enabled": true
+ }
+ }
+ }
+ });
+
+ let config: Batching = serde_json::from_value(json_config).unwrap();
+
+ assert!(!config.batch_include("anything"));
+ assert!(config.batch_include("accounts"));
+}
+
+#[test]
+fn it_processes_batching_subgraph_accounts_disabled_correctly() {
+ let json_config = json!({
+ "enabled": true,
+ "mode": "batch_http_link",
+ "subgraph": {
+ "all": {
+ "enabled": false
+ },
+ "subgraphs": {
+ "accounts": {
+ "enabled": false
+ }
+ }
+ }
+ });
+
+ let config: Batching = serde_json::from_value(json_config).unwrap();
+
+ assert!(!config.batch_include("anything"));
+ assert!(!config.batch_include("accounts"));
+}
+
+#[test]
+fn it_processes_batching_subgraph_accounts_override_disabled_correctly() {
+ let json_config = json!({
+ "enabled": true,
+ "mode": "batch_http_link",
+ "subgraph": {
+ "all": {
+ "enabled": true
+ },
+ "subgraphs": {
+ "accounts": {
+ "enabled": false
+ }
+ }
+ }
+ });
+
+ let config: Batching = serde_json::from_value(json_config).unwrap();
+
+ assert!(config.batch_include("anything"));
+ assert!(!config.batch_include("accounts"));
+}
+
+#[test]
+fn it_processes_batching_subgraph_accounts_override_enabled_correctly() {
+ let json_config = json!({
+ "enabled": true,
+ "mode": "batch_http_link",
+ "subgraph": {
+ "all": {
+ "enabled": false
+ },
+ "subgraphs": {
+ "accounts": {
+ "enabled": true
+ }
+ }
+ }
+ });
+
+ let config: Batching = serde_json::from_value(json_config).unwrap();
+
+ assert!(!config.batch_include("anything"));
+ assert!(config.batch_include("accounts"));
+}
+
fn has_field_level_serde_defaults(lines: &[&str], line_number: usize) -> bool {
let serde_field_default = Regex::new(
r#"^\s*#[\s\n]*\[serde\s*\((.*,)?\s*default\s*=\s*"[a-zA-Z0-9_:]+"\s*(,.*)?\)\s*\]\s*$"#,
diff --git a/apollo-router/src/context/mod.rs b/apollo-router/src/context/mod.rs
index 1c8a19ff3c..224ffc69d2 100644
--- a/apollo-router/src/context/mod.rs
+++ b/apollo-router/src/context/mod.rs
@@ -7,6 +7,7 @@ use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
+use apollo_compiler::validation::Valid;
use apollo_compiler::ExecutableDocument;
use dashmap::mapref::multiple::RefMulti;
use dashmap::mapref::multiple::RefMutMulti;
@@ -253,7 +254,7 @@ impl Context {
/// Read only access to the executable document. This is UNSTABLE and may be changed or removed in future router releases.
/// In addition, ExecutableDocument is UNSTABLE, and may be changed or removed in future apollo-rs releases.
#[doc(hidden)]
- pub fn unsupported_executable_document(&self) -> Option<Arc<ExecutableDocument>> {
+ pub fn unsupported_executable_document(&self) -> Option<Arc<Valid<ExecutableDocument>>> {
self.extensions()
.lock()
.get::<ParsedDocument>()
@@ -334,8 +335,9 @@ impl Default for BusyTimer {
#[cfg(test)]
mod test {
- use std::sync::Arc;
-
+ use crate::spec::Query;
+ use crate::spec::Schema;
+ use crate::Configuration;
use crate::Context;
#[test]
@@ -413,16 +415,32 @@ mod test {
#[test]
fn test_executable_document_access() {
let c = Context::new();
+ let schema = r#"
+ schema
+ @core(feature: "https://specs.apollo.dev/core/v0.1"),
+ @core(feature: "https://specs.apollo.dev/join/v0.1")
+ {
+ query: Query
+ }
+ type Query {
+ me: String
+ }
+ directive @core(feature: String!) repeatable on SCHEMA
+ directive @join__graph(name: String!, url: String!) on ENUM_VALUE
+
+ enum join__Graph {
+ ACCOUNTS @join__graph(name:"accounts" url: "http://localhost:4001/graphql")
+ INVENTORY
+ @join__graph(name: "inventory", url: "http://localhost:4004/graphql")
+ PRODUCTS
+ @join__graph(name: "products" url: "http://localhost:4003/graphql")
+ REVIEWS @join__graph(name: "reviews" url: "http://localhost:4002/graphql")
+ }"#;
+ let schema = Schema::parse_test(schema, &Default::default()).unwrap();
+ let document =
+ Query::parse_document("{ me }", None, &schema, &Configuration::default()).unwrap();
assert!(c.unsupported_executable_document().is_none());
- c.extensions().lock().insert(Arc::new(
- crate::services::layers::query_analysis::ParsedDocumentInner {
- ast: Default::default(),
- executable: Default::default(),
- hash: Default::default(),
- parse_errors: Default::default(),
- validation_errors: Default::default(),
- },
- ));
+ c.extensions().lock().insert(document);
assert!(c.unsupported_executable_document().is_some());
}
}
diff --git a/apollo-router/src/error.rs b/apollo-router/src/error.rs
index 9162ec0f0e..7fd226c12c 100644
--- a/apollo-router/src/error.rs
+++ b/apollo-router/src/error.rs
@@ -1,6 +1,8 @@
//! Router errors.
use std::sync::Arc;
+use apollo_compiler::validation::DiagnosticList;
+use apollo_compiler::validation::WithErrors;
use apollo_federation::error::FederationError;
use displaydoc::Display;
use lazy_static::__Deref;
@@ -98,6 +100,15 @@ pub(crate) enum FetchError {
/// could not find path: {reason}
ExecutionPathNotFound { reason: String },
+
+ /// Batching error for '{service}': {reason}
+ SubrequestBatchingError {
+ /// The service for which batch processing failed.
+ service: String,
+
+ /// The reason batch processing failed.
+ reason: String,
+ },
}
impl FetchError {
@@ -171,6 +182,7 @@ impl ErrorExtension for FetchError {
FetchError::ExecutionPathNotFound { .. } => "EXECUTION_PATH_NOT_FOUND",
FetchError::MalformedRequest { .. } => "MALFORMED_REQUEST",
FetchError::MalformedResponse { .. } => "MALFORMED_RESPONSE",
+ FetchError::SubrequestBatchingError { .. } => "SUBREQUEST_BATCHING_ERROR",
}
.to_string()
}
@@ -189,16 +201,23 @@ impl From<QueryPlannerError> for FetchError {
pub(crate) enum CacheResolverError {
/// value retrieval failed: {0}
RetrievalError(Arc<QueryPlannerError>),
+ /// batch processing failed: {0}
+ BatchingError(String),
}
impl IntoGraphQLErrors for CacheResolverError {
fn into_graphql_errors(self) -> Result<Vec<Error>, Self> {
- let CacheResolverError::RetrievalError(retrieval_error) = self;
- retrieval_error
- .deref()
- .clone()
- .into_graphql_errors()
- .map_err(|_err| CacheResolverError::RetrievalError(retrieval_error))
+ match self {
+ CacheResolverError::RetrievalError(retrieval_error) => retrieval_error
+ .deref()
+ .clone()
+ .into_graphql_errors()
+ .map_err(|_err| CacheResolverError::RetrievalError(retrieval_error)),
+ CacheResolverError::BatchingError(msg) => Ok(vec![Error::builder()
+ .message(msg)
+ .extension_code("BATCH_PROCESSING_FAILED")
+ .build()]),
+ }
}
}
@@ -260,8 +279,8 @@ pub(crate) enum QueryPlannerError {
/// couldn't instantiate query planner; invalid schema: {0}
SchemaValidationErrors(PlannerErrors),
- /// invalid query
- OperationValidationErrors(Vec<apollo_compiler::execution::GraphQLError>),
+ /// invalid query: {0}
+ OperationValidationErrors(ValidationErrors),
/// couldn't plan query: {0}
PlanningErrors(PlanErrors),
@@ -292,6 +311,9 @@ pub(crate) enum QueryPlannerError {
/// Unauthorized field or type
Unauthorized(Vec),
+
+ /// Query planner pool error: {0}
+ PoolProcessing(String),
}
impl IntoGraphQLErrors for Vec<apollo_compiler::execution::GraphQLError> {
@@ -320,21 +342,9 @@ impl IntoGraphQLErrors for Vec<apollo_compiler::execution::GraphQLError> {
impl IntoGraphQLErrors for QueryPlannerError {
fn into_graphql_errors(self) -> Result<Vec<Error>, Self> {
match self {
- QueryPlannerError::SpecError(err) => {
- let gql_err = match err.custom_extension_details() {
- Some(extension_details) => Error::builder()
- .message(err.to_string())
- .extension_code(err.extension_code())
- .extensions(extension_details)
- .build(),
- None => Error::builder()
- .message(err.to_string())
- .extension_code(err.extension_code())
- .build(),
- };
-
- Ok(vec![gql_err])
- }
+ QueryPlannerError::SpecError(err) => err
+ .into_graphql_errors()
+ .map_err(QueryPlannerError::SpecError),
QueryPlannerError::SchemaValidationErrors(errs) => errs
.into_graphql_errors()
.map_err(QueryPlannerError::SchemaValidationErrors),
@@ -466,9 +476,7 @@ impl From<SpecError> for QueryPlannerError {
impl From<ValidationErrors> for QueryPlannerError {
fn from(err: ValidationErrors) -> Self {
- QueryPlannerError::OperationValidationErrors(
- err.errors.iter().map(|e| e.to_json()).collect(),
- )
+ QueryPlannerError::OperationValidationErrors(ValidationErrors { errors: err.errors })
}
}
@@ -548,7 +556,7 @@ pub(crate) enum SchemaError {
/// Collection of schema validation errors.
#[derive(Debug)]
pub(crate) struct ParseErrors {
- pub(crate) errors: apollo_compiler::validation::DiagnosticList,
+ pub(crate) errors: DiagnosticList,
}
impl std::fmt::Display for ParseErrors {
@@ -568,13 +576,7 @@ impl std::fmt::Display for ParseErrors {
}
}
-/// Collection of schema validation errors.
-#[derive(Debug)]
-pub(crate) struct ValidationErrors {
- pub(crate) errors: apollo_compiler::validation::DiagnosticList,
-}
-
-impl IntoGraphQLErrors for ValidationErrors {
+impl IntoGraphQLErrors for ParseErrors {
fn into_graphql_errors(self) -> Result<Vec<Error>, Self> {
Ok(self
.errors
@@ -593,6 +595,37 @@ impl IntoGraphQLErrors for ValidationErrors {
})
.unwrap_or_default(),
)
+ .extension_code("GRAPHQL_PARSING_FAILED")
+ .build()
+ })
+ .collect())
+ }
+}
+
+/// Collection of schema validation errors.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub(crate) struct ValidationErrors {
+ pub(crate) errors: Vec<apollo_compiler::execution::GraphQLError>,
+}
+
+impl IntoGraphQLErrors for ValidationErrors {
+ fn into_graphql_errors(self) -> Result<Vec<Error>, Self> {
+ Ok(self
+ .errors
+ .iter()
+ .map(|diagnostic| {
+ Error::builder()
+ .message(diagnostic.message.to_string())
+ .locations(
+ diagnostic
+ .locations
+ .iter()
+ .map(|loc| ErrorLocation {
+ line: loc.line as u32,
+ column: loc.column as u32,
+ })
+ .collect(),
+ )
.extension_code("GRAPHQL_VALIDATION_FAILED")
.build()
})
@@ -600,22 +633,53 @@ impl IntoGraphQLErrors for ValidationErrors {
}
}
+impl From<DiagnosticList> for ValidationErrors {
+ fn from(errors: DiagnosticList) -> Self {
+ Self {
+ errors: errors.iter().map(|e| e.unstable_to_json_compat()).collect(),
+ }
+ }
+}
+
+impl<T> From<WithErrors<T>> for ValidationErrors {
+ fn from(WithErrors { errors, .. }: WithErrors<T>) -> Self {
+ errors.into()
+ }
+}
+
impl std::fmt::Display for ValidationErrors {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for (index, error) in self.errors.iter().enumerate() {
if index > 0 {
f.write_str("\n")?;
}
- if let Some(location) = error.get_line_column() {
- write!(f, "[{}:{}] {}", location.line, location.column, error.error)?;
+ if let Some(location) = error.locations.first() {
+ write!(
+ f,
+ "[{}:{}] {}",
+ location.line, location.column, error.message
+ )?;
} else {
- write!(f, "{}", error.error)?;
+ write!(f, "{}", error.message)?;
}
}
Ok(())
}
}
+/// Error during subgraph batch processing
+#[derive(Debug, Error, Display)]
+pub(crate) enum SubgraphBatchingError {
+ /// Sender unavailable
+ SenderUnavailable,
+ /// Request does not have a subgraph name
+ MissingSubgraphName,
+ /// Requests is empty
+ RequestsIsEmpty,
+ /// Batch processing failed: {0}
+ ProcessingFailed(String),
+}
+
#[cfg(test)]
mod tests {
use super::*;
diff --git a/apollo-router/src/json_ext.rs b/apollo-router/src/json_ext.rs
index 9967114741..b8cf588187 100644
--- a/apollo-router/src/json_ext.rs
+++ b/apollo-router/src/json_ext.rs
@@ -37,6 +37,15 @@ macro_rules! extract_key_value_from_object {
}};
}
+macro_rules! ensure_array {
+ ($value:expr) => {{
+ match $value {
+ crate::json_ext::Value::Array(a) => Ok(a),
+ _ => Err("invalid type, expected an array"),
+ }
+ }};
+}
+
macro_rules! ensure_object {
($value:expr) => {{
match $value {
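The new `ensure_array!` mirrors the existing `ensure_object!`: it matches a JSON `Value` and returns `Err` for any non-array type. A self-contained usage sketch against plain `serde_json::Value` (the router uses its own `Value` alias, so this is an approximation):

use serde_json::Value;

macro_rules! ensure_array {
    ($value:expr) => {{
        match $value {
            Value::Array(a) => Ok(a),
            _ => Err("invalid type, expected an array"),
        }
    }};
}

fn main() {
    let list = Value::Array(vec![Value::from(1), Value::from(2)]);
    // Ok: yields a reference to the inner Vec<Value>.
    assert!(ensure_array!(&list).is_ok());
    // Err: a string is not an array.
    assert!(ensure_array!(&Value::from("nope")).is_err());
}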
diff --git a/apollo-router/src/lib.rs b/apollo-router/src/lib.rs
index cef6ac055f..4dc56b4ea9 100644
--- a/apollo-router/src/lib.rs
+++ b/apollo-router/src/lib.rs
@@ -50,7 +50,9 @@ pub mod plugin;
#[macro_use]
pub(crate) mod metrics;
+mod apollo_studio_interop;
pub(crate) mod axum_factory;
+mod batching;
mod cache;
mod configuration;
mod context;
diff --git a/apollo-router/src/logging/mod.rs b/apollo-router/src/logging/mod.rs
index 6948bb63d3..5d0b75879d 100644
--- a/apollo-router/src/logging/mod.rs
+++ b/apollo-router/src/logging/mod.rs
@@ -39,6 +39,9 @@ pub(crate) mod test {
use serde_json::Value;
use tracing_core::LevelFilter;
use tracing_core::Subscriber;
+ use tracing_subscriber::layer::SubscriberExt;
+
+ use crate::plugins::telemetry::dynamic_attribute::DynAttributeLayer;
pub(crate) struct SnapshotSubscriber {
buffer: Arc<Mutex<Vec<Value>>>,
@@ -99,15 +102,18 @@ pub(crate) mod test {
assertion,
};
- tracing_subscriber::fmt()
- .json()
- .with_max_level(level)
- .without_time()
- .with_target(false)
- .with_file(false)
- .with_line_number(false)
- .with_writer(Mutex::new(collector))
- .finish()
+ tracing_subscriber::registry::Registry::default()
+ .with(level)
+ .with(DynAttributeLayer::new())
+ .with(
+ tracing_subscriber::fmt::Layer::default()
+ .json()
+ .without_time()
+ .with_target(false)
+ .with_file(false)
+ .with_line_number(false)
+ .with_writer(Mutex::new(collector)),
+ )
}
}
}
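The test subscriber is now assembled by stacking layers on a `Registry` instead of using the `fmt()` builder, which is what lets the telemetry plugin's `DynAttributeLayer` slot into the stack. The same composition pattern in isolation (a sketch; level and formatting choices are placeholders, and the JSON formatter assumes tracing-subscriber's `json` feature):

use tracing_subscriber::filter::LevelFilter;
use tracing_subscriber::layer::SubscriberExt;

fn main() {
    // A Registry composed with a level filter and a JSON fmt layer;
    // the router additionally inserts its DynAttributeLayer between the two.
    let subscriber = tracing_subscriber::registry()
        .with(LevelFilter::INFO)
        .with(tracing_subscriber::fmt::layer().json().without_time());

    tracing::subscriber::with_default(subscriber, || {
        tracing::info!("structured log line");
    });
}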
diff --git a/apollo-router/src/metrics/mod.rs b/apollo-router/src/metrics/mod.rs
index db3487f7d1..ab270b2f1e 100644
--- a/apollo-router/src/metrics/mod.rs
+++ b/apollo-router/src/metrics/mod.rs
@@ -1065,7 +1065,6 @@ macro_rules! assert_non_zero_metrics_snapshot {
let metrics = crate::metrics::collect_metrics();
insta::assert_yaml_snapshot!(&metrics.non_zero());
});
-
};
() => {
insta::with_settings!({sort_maps => true}, {
diff --git a/apollo-router/src/orbiter/mod.rs b/apollo-router/src/orbiter/mod.rs
index 9a788099f7..975e914691 100644
--- a/apollo-router/src/orbiter/mod.rs
+++ b/apollo-router/src/orbiter/mod.rs
@@ -383,7 +383,7 @@ mod test {
let config = Configuration::from_str(include_str!("testdata/redaction.router.yaml"))
.expect("config must be valid");
let schema_string = include_str!("../testdata/minimal_supergraph.graphql");
- let schema = crate::spec::Schema::parse(schema_string, &config).unwrap();
+ let schema = crate::spec::Schema::parse(schema_string).unwrap();
let report = create_report(Arc::new(config), Arc::new(schema));
insta::with_settings!({sort_maps => true}, {
assert_yaml_snapshot!(report, {
@@ -401,7 +401,7 @@ mod test {
.expect("config must be valid");
config.validated_yaml = Some(Value::Null);
let schema_string = include_str!("../testdata/minimal_supergraph.graphql");
- let schema = crate::spec::Schema::parse(schema_string, &config).unwrap();
+ let schema = crate::spec::Schema::parse(schema_string).unwrap();
let report = create_report(Arc::new(config), Arc::new(schema));
insta::with_settings!({sort_maps => true}, {
assert_yaml_snapshot!(report, {
@@ -419,7 +419,7 @@ mod test {
.expect("config must be valid");
config.validated_yaml = Some(json!({"garbage": "garbage"}));
let schema_string = include_str!("../testdata/minimal_supergraph.graphql");
- let schema = crate::spec::Schema::parse(schema_string, &config).unwrap();
+ let schema = crate::spec::Schema::parse(schema_string).unwrap();
let report = create_report(Arc::new(config), Arc::new(schema));
insta::with_settings!({sort_maps => true}, {
assert_yaml_snapshot!(report, {
diff --git a/apollo-router/src/plugins/authorization/mod.rs b/apollo-router/src/plugins/authorization/mod.rs
index 85445a652c..355ffd4b5c 100644
--- a/apollo-router/src/plugins/authorization/mod.rs
+++ b/apollo-router/src/plugins/authorization/mod.rs
@@ -39,10 +39,10 @@ use crate::query_planner::FilteredQuery;
use crate::query_planner::QueryKey;
use crate::register_plugin;
use crate::services::execution;
+use crate::services::layers::query_analysis::ParsedDocumentInner;
use crate::services::supergraph;
use crate::spec::query::transform;
use crate::spec::query::traverse;
-use crate::spec::Query;
use crate::spec::Schema;
use crate::spec::SpecError;
use crate::Configuration;
@@ -175,14 +175,11 @@ impl AuthorizationPlugin {
}
pub(crate) fn query_analysis(
- query: &str,
+ doc: &ParsedDocumentInner,
operation_name: Option<&str>,
schema: &Schema,
- configuration: &Configuration,
context: &Context,
- ) -> Result<(), SpecError> {
- let doc = Query::parse_document(query, operation_name, schema, configuration)?;
-
+ ) {
let CacheKeyMetadata {
is_authenticated,
scopes,
@@ -206,8 +203,6 @@ impl AuthorizationPlugin {
policies.into_iter().map(|policy| (policy, None)).collect();
context.insert(REQUIRED_POLICIES_KEY, policies).unwrap();
}
-
- Ok(())
}
pub(crate) fn generate_cache_metadata(
@@ -442,7 +437,7 @@ impl AuthorizationPlugin {
AuthenticatedVisitor::new(&schema.definitions, doc, &schema.implementers_map, dry_run)
{
let modified_query = transform::document(&mut visitor, doc)
- .map_err(|e| SpecError::ParsingError(e.to_string()))?;
+ .map_err(|e| SpecError::TransformError(e.to_string()))?;
if visitor.query_requires_authentication {
if is_authenticated {
@@ -481,7 +476,7 @@ impl AuthorizationPlugin {
dry_run,
) {
let modified_query = transform::document(&mut visitor, doc)
- .map_err(|e| SpecError::ParsingError(e.to_string()))?;
+ .map_err(|e| SpecError::TransformError(e.to_string()))?;
if visitor.query_requires_scopes {
tracing::debug!("the query required scopes, the requests present scopes: {scopes:?}, modified query:\n{modified_query}\nunauthorized paths: {:?}",
visitor
@@ -516,7 +511,7 @@ impl AuthorizationPlugin {
dry_run,
) {
let modified_query = transform::document(&mut visitor, doc)
- .map_err(|e| SpecError::ParsingError(e.to_string()))?;
+ .map_err(|e| SpecError::TransformError(e.to_string()))?;
if visitor.query_requires_policies {
tracing::debug!("the query required policies, the requests present policies: {policies:?}, modified query:\n{modified_query}\nunauthorized paths: {:?}",
diff --git a/apollo-router/src/plugins/cache/entity.rs b/apollo-router/src/plugins/cache/entity.rs
index c030b4d62b..0ed337c6d5 100644
--- a/apollo-router/src/plugins/cache/entity.rs
+++ b/apollo-router/src/plugins/cache/entity.rs
@@ -153,9 +153,11 @@ impl Plugin for EntityCache {
fn supergraph_service(&self, service: supergraph::BoxService) -> supergraph::BoxService {
ServiceBuilder::new()
.map_response(|mut response: supergraph::Response| {
- if let Some(cache_control) =
- response.context.extensions().lock().get::<CacheControl>()
- {
+ if let Some(cache_control) = {
+ let lock = response.context.extensions().lock();
+ let cache_control = lock.get::<CacheControl>().cloned();
+ cache_control
+ } {
let _ = cache_control.to_headers(response.response.headers_mut());
}
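The rewrite above narrows the lock scope: the `CacheControl` value is cloned out inside an inner block so the extensions mutex is released before the response headers are mutated, rather than holding the guard across the whole `if let` body. The same shape in miniature, with a `String` standing in for `CacheControl`:

use std::sync::{Arc, Mutex};

fn read_then_release(shared: &Arc<Mutex<Option<String>>>) -> Option<String> {
    // The guard lives only inside this block and is dropped at the
    // closing brace, before the caller does anything with `value`.
    let value = {
        let guard = shared.lock().unwrap();
        guard.clone()
    };
    value
}

fn main() {
    let shared = Arc::new(Mutex::new(Some("max-age=60".to_string())));
    assert_eq!(read_then_release(&shared).as_deref(), Some("max-age=60"));
}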
diff --git a/apollo-router/src/plugins/coprocessor/mod.rs b/apollo-router/src/plugins/coprocessor/mod.rs
index a79ff4da2b..2914f20ec5 100644
--- a/apollo-router/src/plugins/coprocessor/mod.rs
+++ b/apollo-router/src/plugins/coprocessor/mod.rs
@@ -359,11 +359,12 @@ impl RouterStage {
);
error
});
- tracing::info!(
- monotonic_counter.apollo.router.operations.coprocessor = 1u64,
- coprocessor.stage = %PipelineStep::RouterRequest,
- coprocessor.succeeded = succeeded,
- "Total operations with co-processors enabled"
+ u64_counter!(
+ "apollo.router.operations.coprocessor",
+ "Total operations with co-processors enabled",
+ 1,
+ "coprocessor.stage" = PipelineStep::RouterRequest,
+ "coprocessor.succeeded" = succeeded
);
result
}
@@ -397,11 +398,12 @@ impl RouterStage {
);
error
});
- tracing::info!(
- monotonic_counter.apollo.router.operations.coprocessor = 1u64,
- coprocessor.stage = %PipelineStep::RouterResponse,
- coprocessor.succeeded = succeeded,
- "Total operations with co-processors enabled"
+ u64_counter!(
+ "apollo.router.operations.coprocessor",
+ "Total operations with co-processors enabled",
+ 1,
+ "coprocessor.stage" = PipelineStep::RouterResponse,
+ "coprocessor.succeeded" = succeeded
);
result
}
@@ -491,11 +493,12 @@ impl SubgraphStage {
);
error
});
- tracing::info!(
- monotonic_counter.apollo.router.operations.coprocessor = 1u64,
- coprocessor.stage = %PipelineStep::SubgraphRequest,
- coprocessor.succeeded = succeeded,
- "Total operations with co-processors enabled"
+ u64_counter!(
+ "apollo.router.operations.coprocessor",
+ "Total operations with co-processors enabled",
+ 1,
+ "coprocessor.stage" = PipelineStep::SubgraphRequest,
+ "coprocessor.succeeded" = succeeded
);
result
}
@@ -530,11 +533,12 @@ impl SubgraphStage {
);
error
});
- tracing::info!(
- monotonic_counter.apollo.router.operations.coprocessor = 1u64,
- coprocessor.stage = %PipelineStep::SubgraphResponse,
- coprocessor.succeeded = succeeded,
- "Total operations with co-processors enabled"
+ u64_counter!(
+ "apollo.router.operations.coprocessor",
+ "Total operations with co-processors enabled",
+ 1,
+ "coprocessor.stage" = PipelineStep::SubgraphResponse,
+ "coprocessor.succeeded" = succeeded
);
result
}
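All four coprocessor stages move from a `tracing::info!` event carrying a `monotonic_counter.`-prefixed field to the router's `u64_counter!` metrics macro. Conceptually the macro records an OpenTelemetry counter; a rough equivalent in the plain OpenTelemetry API (a sketch assuming 0.21-era signatures, with an arbitrary meter name):

use opentelemetry::global;
use opentelemetry::KeyValue;

fn main() {
    // A named u64 counter incremented by one, with per-stage attributes.
    let meter = global::meter("apollo/router");
    let counter = meter
        .u64_counter("apollo.router.operations.coprocessor")
        .with_description("Total operations with co-processors enabled")
        .init();
    counter.add(
        1,
        &[
            KeyValue::new("coprocessor.stage", "RouterRequest"),
            KeyValue::new("coprocessor.succeeded", true),
        ],
    );
}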
diff --git a/apollo-router/src/plugins/coprocessor/supergraph.rs b/apollo-router/src/plugins/coprocessor/supergraph.rs
index 7e6e313f42..79202a0eb0 100644
--- a/apollo-router/src/plugins/coprocessor/supergraph.rs
+++ b/apollo-router/src/plugins/coprocessor/supergraph.rs
@@ -105,11 +105,12 @@ impl SupergraphStage {
);
error
});
- tracing::info!(
- monotonic_counter.apollo.router.operations.coprocessor = 1u64,
- coprocessor.stage = %PipelineStep::SupergraphRequest,
- coprocessor.succeeded = succeeded,
- "Total operations with co-processors enabled"
+ u64_counter!(
+ "apollo.router.operations.coprocessor",
+ "Total operations with co-processors enabled",
+ 1,
+ "coprocessor.stage" = PipelineStep::SupergraphRequest,
+ "coprocessor.succeeded" = succeeded
);
result
}
@@ -144,11 +145,12 @@ impl SupergraphStage {
);
error
});
- tracing::info!(
- monotonic_counter.apollo.router.operations.coprocessor = 1u64,
- coprocessor.stage = %PipelineStep::SupergraphResponse,
- coprocessor.succeeded = succeeded,
- "Total operations with co-processors enabled"
+ u64_counter!(
+ "apollo.router.operations.coprocessor",
+ "Total operations with co-processors enabled",
+ 1,
+ "coprocessor.stage" = PipelineStep::SupergraphResponse,
+ "coprocessor.succeeded" = succeeded
);
result
}
diff --git a/apollo-router/src/plugins/demand_control/basic_cost_calculator.rs b/apollo-router/src/plugins/demand_control/basic_cost_calculator.rs
index 78315277d3..1614ec6550 100644
--- a/apollo-router/src/plugins/demand_control/basic_cost_calculator.rs
+++ b/apollo-router/src/plugins/demand_control/basic_cost_calculator.rs
@@ -50,7 +50,7 @@ impl BasicCostCalculator {
/// bound for cost anyway.
fn score_field(
field: &Field,
- parent_type_name: Option<&NamedType>,
+ parent_type_name: &NamedType,
schema: &Valid<Schema>,
) -> Result<f64, DemandControlError> {
if BasicCostCalculator::skipped_by_directives(field) {
@@ -77,7 +77,7 @@ impl BasicCostCalculator {
};
type_cost += BasicCostCalculator::score_selection_set(
&field.selection_set,
- Some(field.ty().inner_named_type()),
+ field.ty().inner_named_type(),
schema,
)?;
@@ -112,7 +112,7 @@ impl BasicCostCalculator {
fn score_inline_fragment(
inline_fragment: &InlineFragment,
- parent_type: Option<&NamedType>,
+ parent_type: &NamedType,
schema: &Valid<Schema>,
) -> Result<f64, DemandControlError> {
BasicCostCalculator::score_selection_set(
@@ -127,9 +127,17 @@ impl BasicCostCalculator {
schema: &Valid<Schema>,
) -> Result<f64, DemandControlError> {
let mut cost = if operation.is_mutation() { 10.0 } else { 0.0 };
+
+ let Some(root_type_name) = schema.root_operation(operation.operation_type) else {
+ return Err(DemandControlError::QueryParseFailure(format!(
+ "Cannot cost {} operation because the schema does not support this root type",
+ operation.operation_type
+ )));
+ };
+
cost += BasicCostCalculator::score_selection_set(
&operation.selection_set,
- operation.name.as_ref(),
+ root_type_name,
schema,
)?;
@@ -138,21 +146,23 @@ impl BasicCostCalculator {
fn score_selection(
selection: &Selection,
- parent_type: Option<&NamedType>,
+ parent_type: &NamedType,
schema: &Valid<Schema>,
) -> Result<f64, DemandControlError> {
match selection {
Selection::Field(f) => BasicCostCalculator::score_field(f, parent_type, schema),
Selection::FragmentSpread(s) => BasicCostCalculator::score_fragment_spread(s),
- Selection::InlineFragment(i) => {
- BasicCostCalculator::score_inline_fragment(i, parent_type, schema)
- }
+ Selection::InlineFragment(i) => BasicCostCalculator::score_inline_fragment(
+ i,
+ i.type_condition.as_ref().unwrap_or(parent_type),
+ schema,
+ ),
}
}
fn score_selection_set(
selection_set: &SelectionSet,
- parent_type_name: Option<&NamedType>,
+ parent_type_name: &NamedType,
schema: &Valid<Schema>,
) -> Result<f64, DemandControlError> {
let mut cost = 0.0;
@@ -331,21 +341,44 @@ mod tests {
use crate::Configuration;
use crate::Context;
+ fn parse_schema_and_operation(
+ schema_str: &str,
+ query_str: &str,
+ config: &Configuration,
+ ) -> (spec::Schema, ParsedDocument) {
+ let schema = spec::Schema::parse_test(schema_str, config).unwrap();
+ let query = Query::parse_document(query_str, None, &schema, config).unwrap();
+ (schema, query)
+ }
+
+ /// Estimate cost of an operation executed on a supergraph.
fn estimated_cost(schema_str: &str, query_str: &str) -> f64 {
- let schema = Schema::parse_and_validate(schema_str, "").unwrap();
- let query = ExecutableDocument::parse(&schema, query_str, "").unwrap();
+ let (schema, query) =
+ parse_schema_and_operation(schema_str, query_str, &Default::default());
+ BasicCostCalculator::estimated(&query.executable, &schema.definitions).unwrap()
+ }
+
+ /// Estimate cost of an operation on a plain, non-federated schema.
+ fn basic_estimated_cost(schema_str: &str, query_str: &str) -> f64 {
+ let schema =
+ apollo_compiler::Schema::parse_and_validate(schema_str, "schema.graphqls").unwrap();
+ let query = apollo_compiler::ExecutableDocument::parse_and_validate(
+ &schema,
+ query_str,
+ "query.graphql",
+ )
+ .unwrap();
BasicCostCalculator::estimated(&query, &schema).unwrap()
}
async fn planned_cost(schema_str: &str, query_str: &str) -> f64 {
let config: Arc<Configuration> = Arc::new(Default::default());
+ let (_schema, query) = parse_schema_and_operation(schema_str, query_str, &config);
+
let mut planner = BridgeQueryPlanner::new(schema_str.to_string(), config.clone())
.await
.unwrap();
- let schema = spec::Schema::parse(schema_str, &config).unwrap();
- let query = Query::parse_document(query_str, None, &schema, &config).unwrap();
-
let ctx = Context::new();
ctx.extensions().lock().insert::<ParsedDocument>(query);
@@ -366,10 +399,10 @@ mod tests {
}
fn actual_cost(schema_str: &str, query_str: &str, response_bytes: &'static [u8]) -> f64 {
- let schema = Schema::parse_and_validate(schema_str, "").unwrap();
- let query = ExecutableDocument::parse(&schema, query_str, "").unwrap();
+ let (_schema, query) =
+ parse_schema_and_operation(schema_str, query_str, &Default::default());
let response = Response::from_bytes("test", Bytes::from(response_bytes)).unwrap();
- BasicCostCalculator::actual(&query, &response).unwrap()
+ BasicCostCalculator::actual(&query.executable, &response).unwrap()
}
#[test]
@@ -377,7 +410,7 @@ mod tests {
let schema = include_str!("./fixtures/basic_schema.graphql");
let query = include_str!("./fixtures/basic_query.graphql");
- assert_eq!(estimated_cost(schema, query), 0.0)
+ assert_eq!(basic_estimated_cost(schema, query), 0.0)
}
#[test]
@@ -385,7 +418,7 @@ mod tests {
let schema = include_str!("./fixtures/basic_schema.graphql");
let query = include_str!("./fixtures/basic_mutation.graphql");
- assert_eq!(estimated_cost(schema, query), 10.0)
+ assert_eq!(basic_estimated_cost(schema, query), 10.0)
}
#[test]
@@ -393,7 +426,7 @@ mod tests {
let schema = include_str!("./fixtures/basic_schema.graphql");
let query = include_str!("./fixtures/basic_object_query.graphql");
- assert_eq!(estimated_cost(schema, query), 1.0)
+ assert_eq!(basic_estimated_cost(schema, query), 1.0)
}
#[test]
@@ -401,7 +434,7 @@ mod tests {
let schema = include_str!("./fixtures/basic_schema.graphql");
let query = include_str!("./fixtures/basic_interface_query.graphql");
- assert_eq!(estimated_cost(schema, query), 1.0)
+ assert_eq!(basic_estimated_cost(schema, query), 1.0)
}
#[test]
@@ -409,7 +442,7 @@ mod tests {
let schema = include_str!("./fixtures/basic_schema.graphql");
let query = include_str!("./fixtures/basic_union_query.graphql");
- assert_eq!(estimated_cost(schema, query), 1.0)
+ assert_eq!(basic_estimated_cost(schema, query), 1.0)
}
#[test]
@@ -417,7 +450,7 @@ mod tests {
let schema = include_str!("./fixtures/basic_schema.graphql");
let query = include_str!("./fixtures/basic_object_list_query.graphql");
- assert_eq!(estimated_cost(schema, query), 100.0)
+ assert_eq!(basic_estimated_cost(schema, query), 100.0)
}
#[test]
@@ -425,7 +458,7 @@ mod tests {
let schema = include_str!("./fixtures/basic_schema.graphql");
let query = include_str!("./fixtures/basic_scalar_list_query.graphql");
- assert_eq!(estimated_cost(schema, query), 0.0)
+ assert_eq!(basic_estimated_cost(schema, query), 0.0)
}
#[test]
@@ -433,7 +466,7 @@ mod tests {
let schema = include_str!("./fixtures/basic_schema.graphql");
let query = include_str!("./fixtures/basic_nested_list_query.graphql");
- assert_eq!(estimated_cost(schema, query), 10100.0)
+ assert_eq!(basic_estimated_cost(schema, query), 10100.0)
}
#[test]
@@ -441,7 +474,7 @@ mod tests {
let schema = include_str!("./fixtures/basic_schema.graphql");
let query = include_str!("./fixtures/basic_skipped_query.graphql");
- assert_eq!(estimated_cost(schema, query), 0.0)
+ assert_eq!(basic_estimated_cost(schema, query), 0.0)
}
#[test]
@@ -449,7 +482,7 @@ mod tests {
let schema = include_str!("./fixtures/basic_schema.graphql");
let query = include_str!("./fixtures/basic_excluded_query.graphql");
- assert_eq!(estimated_cost(schema, query), 0.0)
+ assert_eq!(basic_estimated_cost(schema, query), 0.0)
}
#[test(tokio::test)]
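Taken together, the fixture expectations above are consistent with a simple cost model: scalars cost 0, objects cost 1, a mutation adds a flat 10, and every list is assumed to hold 100 elements, so a flat object list costs 100 and a nested object list costs 100 + 100 × 100 = 10,100.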
diff --git a/apollo-router/src/plugins/demand_control/directives.rs b/apollo-router/src/plugins/demand_control/directives.rs
index 2fb9a83c99..73528107e9 100644
--- a/apollo-router/src/plugins/demand_control/directives.rs
+++ b/apollo-router/src/plugins/demand_control/directives.rs
@@ -32,29 +32,39 @@ pub(super) struct RequiresDirective {
impl RequiresDirective {
pub(super) fn from_field(
field: &Field,
- parent_type_name: Option<&NamedType>,
+ parent_type_name: &NamedType,
schema: &Valid<Schema>,
) -> Result