From bccbf97abb8da3dd1452d88ac8f81961fcae1af6 Mon Sep 17 00:00:00 2001
From: Bryn Cooke
Date: Mon, 8 Apr 2024 12:42:30 +0100
Subject: [PATCH 01/46] Add metric for use of custom plugins (#4923)

Adds a metric for configuration of custom plugins. This will help inform us of how many users are writing custom Rust-based plugins.


---

**Checklist**

Complete the checklist (and note appropriate exceptions) before the PR is marked ready-for-review.

- [ ] Changes are compatible[^1]
- [ ] Documentation[^2] completed
- [ ] Performance impact assessed and acceptable
- Tests added and passing[^3]
  - [ ] Unit Tests
  - [ ] Integration Tests
  - [ ] Manual Tests

**Exceptions**

*Note any exceptions here*

**Notes**

[^1]: It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this.
[^2]: Configuration is an important part of many changes. Where applicable please try to document configuration examples.
[^3]: Tick whichever testing boxes are applicable. If you are adding Manual Tests, please document the manual testing (extensively) in the Exceptions.

Co-authored-by: bryn
---
 apollo-router/src/configuration/metrics.rs | 59 ++++++++++++++++++-
 ...uration__metrics__test__custom_plugin.snap | 9 +++
 ...cs__test__ignore_cloud_router_plugins.snap | 9 +++
 3 files changed, 76 insertions(+), 1 deletion(-)
 create mode 100644 apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__custom_plugin.snap
 create mode 100644 apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__ignore_cloud_router_plugins.snap

diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs
index 093675a010..a80826de60 100644
--- a/apollo-router/src/configuration/metrics.rs
+++ b/apollo-router/src/configuration/metrics.rs
@@ -13,6 +13,7 @@ use crate::uplink::license_enforcement::LicenseState;
 use crate::Configuration;
 
 type InstrumentMap = HashMap)>;
+
 pub(crate) struct Metrics {
     _instruments: Vec>,
 }
@@ -44,7 +45,7 @@ impl Metrics {
             .unwrap_or(&serde_json::Value::Null),
         );
         data.populate_license_instrument(license_state);
-
+        data.populate_user_plugins_instrument(configuration);
         data.into()
     }
 }
@@ -406,7 +407,37 @@ impl InstrumentData {
             ),
         );
     }
+
+    pub(crate) fn populate_user_plugins_instrument(&mut self, configuration: &Configuration) {
+        println!(
+            "custom plugins: {}",
+            configuration
+                .plugins
+                .plugins
+                .as_ref()
+                .map(|configuration| configuration.len())
+                .unwrap_or_default() as u64
+        );
+        self.data.insert(
+            "apollo.router.config.custom_plugins".to_string(),
+            (
+                configuration
+                    .plugins
+                    .plugins
+                    .as_ref()
+                    .map(|configuration| {
+                        configuration
+                            .keys()
+                            .filter(|k| !k.starts_with("cloud_router."))
+                            .count()
+                    })
+                    .unwrap_or_default() as u64,
+                [].into(),
+            ),
+        );
+    }
 }
+
 impl From for Metrics {
     fn from(data: InstrumentData) -> Self {
         Metrics {
@@ -433,6 +464,7 @@ impl From for Metrics {
 #[cfg(test)]
 mod test {
     use rust_embed::RustEmbed;
+    use serde_json::json;
 
     use crate::configuration::metrics::InstrumentData;
     use crate::configuration::metrics::Metrics;
@@ -482,4 +514,29 @@ mod test {
         let _metrics: Metrics = data.into();
         assert_non_zero_metrics_snapshot!();
     }
+
+    #[test]
+    fn test_custom_plugin() {
+        let mut configuration = crate::Configuration::default();
+        let mut custom_plugins = serde_json::Map::new();
+
custom_plugins.insert("name".to_string(), json!("test")); + configuration.plugins.plugins = Some(custom_plugins); + let mut data = InstrumentData::default(); + data.populate_user_plugins_instrument(&configuration); + let _metrics: Metrics = data.into(); + assert_non_zero_metrics_snapshot!(); + } + + #[test] + fn test_ignore_cloud_router_plugins() { + let mut configuration = crate::Configuration::default(); + let mut custom_plugins = serde_json::Map::new(); + custom_plugins.insert("name".to_string(), json!("test")); + custom_plugins.insert("cloud_router.".to_string(), json!("test")); + configuration.plugins.plugins = Some(custom_plugins); + let mut data = InstrumentData::default(); + data.populate_user_plugins_instrument(&configuration); + let _metrics: Metrics = data.into(); + assert_non_zero_metrics_snapshot!(); + } } diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__custom_plugin.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__custom_plugin.snap new file mode 100644 index 0000000000..7ede11d1b7 --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__custom_plugin.snap @@ -0,0 +1,9 @@ +--- +source: apollo-router/src/configuration/metrics.rs +expression: "&metrics.non_zero()" +--- +- name: apollo.router.config.custom_plugins + data: + datapoints: + - value: 1 + attributes: {} diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__ignore_cloud_router_plugins.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__ignore_cloud_router_plugins.snap new file mode 100644 index 0000000000..7ede11d1b7 --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__ignore_cloud_router_plugins.snap @@ -0,0 +1,9 @@ +--- +source: apollo-router/src/configuration/metrics.rs +expression: "&metrics.non_zero()" +--- +- name: apollo.router.config.custom_plugins + data: + datapoints: + - value: 1 + attributes: {} From 38f84e7cb088cacb18932c24025dfdddcc1f4fe9 Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Mon, 8 Apr 2024 18:59:06 +0100 Subject: [PATCH 02/46] Remove println accidentally added in 4923 (#4927) #4923 added a println that should not have been there. --- **Checklist** Complete the checklist (and note appropriate exceptions) before the PR is marked ready-for-review. - [ ] Changes are compatible[^1] - [ ] Documentation[^2] completed - [ ] Performance impact assessed and acceptable - Tests added and passing[^3] - [ ] Unit Tests - [ ] Integration Tests - [ ] Manual Tests **Exceptions** *Note any exceptions here* **Notes** [^1]: It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this. [^2]: Configuration is an important part of many changes. Where applicable please try to document configuration examples. [^3]: Tick whichever testing boxes are applicable. If you are adding Manual Tests, please document the manual testing (extensively) in the Exceptions. 
Co-authored-by: bryn
---
 apollo-router/src/configuration/metrics.rs | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs
index a80826de60..54cdfac600 100644
--- a/apollo-router/src/configuration/metrics.rs
+++ b/apollo-router/src/configuration/metrics.rs
@@ -409,15 +409,6 @@ impl InstrumentData {
     }
 
     pub(crate) fn populate_user_plugins_instrument(&mut self, configuration: &Configuration) {
-        println!(
-            "custom plugins: {}",
-            configuration
-                .plugins
-                .plugins
-                .as_ref()
-                .map(|configuration| configuration.len())
-                .unwrap_or_default() as u64
-        );
         self.data.insert(
             "apollo.router.config.custom_plugins".to_string(),
             (

From 181e8f72a29a4742e65c62b8175e7264ddafd304 Mon Sep 17 00:00:00 2001
From: Bryn Cooke
Date: Tue, 9 Apr 2024 09:54:11 +0100
Subject: [PATCH 03/46] Enable instruments documentation (#4929)

Now that custom instruments have been added, we need to show the docs.


---

**Checklist**

Complete the checklist (and note appropriate exceptions) before the PR is marked ready-for-review.

- [ ] Changes are compatible[^1]
- [ ] Documentation[^2] completed
- [ ] Performance impact assessed and acceptable
- Tests added and passing[^3]
  - [ ] Unit Tests
  - [ ] Integration Tests
  - [ ] Manual Tests

**Exceptions**

*Note any exceptions here*

**Notes**

[^1]: It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this.
[^2]: Configuration is an important part of many changes. Where applicable please try to document configuration examples.
[^3]: Tick whichever testing boxes are applicable. If you are adding Manual Tests, please document the manual testing (extensively) in the Exceptions.
Co-authored-by: bryn
---
 docs/source/config.json | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/docs/source/config.json b/docs/source/config.json
index e841d24609..1b247ec085 100644
--- a/docs/source/config.json
+++ b/docs/source/config.json
@@ -55,7 +55,7 @@
         [
           "enterprise"
         ]
-      ],
+      ],
       "Subgraph Authentication": "/configuration/authn-subgraph",
       "Operation limits": [
         "/configuration/operation-limits",
@@ -77,10 +77,10 @@
       "@defer support": "/executing-operations/defer-support",
       "Request format": "/executing-operations/requests",
       "Query batching": [
-          "/executing-operations/query-batching",
-          [
-            "experimental"
-          ]
+        "/executing-operations/query-batching",
+        [
+          "experimental"
+        ]
       ],
       "GraphQL Subscriptions": {
         "Subscriptions setup": [
@@ -128,8 +128,9 @@
         "Zipkin": "/configuration/telemetry/exporters/tracing/zipkin"
       },
       "Instrumentation": {
-        "Spans" : "/configuration/telemetry/instrumentation/spans",
-        "Selectors" : "/configuration/telemetry/instrumentation/selectors",
+        "Instruments": "/configuration/telemetry/instrumentation/instruments",
+        "Spans": "/configuration/telemetry/instrumentation/spans",
+        "Selectors": "/configuration/telemetry/instrumentation/selectors",
         "Standard attributes": "/configuration/telemetry/instrumentation/standard-attributes",
         "Standard instruments": "/configuration/telemetry/instrumentation/standard-instruments"
       }

From 99824bf8deefc817e6d705aba9ad839952df18a9 Mon Sep 17 00:00:00 2001
From: Jeremy Lempereur
Date: Tue, 9 Apr 2024 15:42:47 +0200
Subject: [PATCH 04/46] Experimental: Introduce a pool of query planners (#4897)

### Experimental: Introduce a pool of query planners ([PR #4897](https://github.com/apollographql/router/pull/4897))

The router supports a new experimental feature: a pool of query planners to parallelize query planning.

You can configure query planner pools with the `supergraph.query_planner.experimental_parallelism` option:

```yaml
supergraph:
  query_planner:
    experimental_parallelism: auto # number of available cpus
```

Its value is the number of query planners that run in parallel, and its default value is `1`. You can set it to the special value `auto` to automatically set it equal to the number of available CPUs.

You can discuss and comment about query planner pools in this [GitHub discussion](https://github.com/apollographql/router/discussions/4917).
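A fixed pool size uses the same option with a number in place of `auto`; for example, the metrics test configuration added in this PR pins the pool to ten planners:

```yaml
supergraph:
  query_planner:
    experimental_parallelism: 10 # a static pool of ten planners
```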
By [@xuorig](https://github.com/xuorig) and [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/4897 --------- Co-authored-by: Marc-Andre Giroux Co-authored-by: Edward Huang --- .../exp_carton_ginger_magnet_beacon.md | 17 ++ Cargo.lock | 7 +- apollo-router/Cargo.toml | 4 + apollo-router/examples/.skipconfigvalidation | 0 apollo-router/examples/planner.rs | 66 ++++++ apollo-router/examples/router.yaml | 13 ++ apollo-router/src/axum_factory/tests.rs | 13 +- apollo-router/src/configuration/metrics.rs | 53 +++++ apollo-router/src/configuration/mod.rs | 44 ++++ ..._planner_parallelism_auto.router.yaml.snap | 10 + ...lanner_parallelism_static.router.yaml.snap | 10 + ...nfiguration__tests__schema_generation.snap | 35 ++- ...query_planner_parallelism_auto.router.yaml | 3 + ...ery_planner_parallelism_static.router.yaml | 3 + apollo-router/src/error.rs | 3 + apollo-router/src/metrics/mod.rs | 1 - apollo-router/src/plugins/cache/entity.rs | 8 +- .../src/plugins/include_subgraph_errors.rs | 13 +- .../src/plugins/record_replay/record.rs | 2 +- apollo-router/src/plugins/telemetry/mod.rs | 15 +- .../src/plugins/traffic_shaping/mod.rs | 13 +- .../src/query_planner/bridge_query_planner.rs | 9 +- .../bridge_query_planner_pool.rs | 203 ++++++++++++++++++ .../query_planner/caching_query_planner.rs | 30 ++- apollo-router/src/query_planner/mod.rs | 2 + apollo-router/src/router_factory.rs | 35 ++- .../layers/allow_only_http_post_mutations.rs | 10 +- .../services/layers/content_negotiation.rs | 11 +- .../src/services/supergraph/service.rs | 31 +-- 29 files changed, 584 insertions(+), 80 deletions(-) create mode 100644 .changesets/exp_carton_ginger_magnet_beacon.md create mode 100644 apollo-router/examples/.skipconfigvalidation create mode 100644 apollo-router/examples/planner.rs create mode 100644 apollo-router/examples/router.yaml create mode 100644 apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_auto.router.yaml.snap create mode 100644 apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_static.router.yaml.snap create mode 100644 apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_auto.router.yaml create mode 100644 apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_static.router.yaml create mode 100644 apollo-router/src/query_planner/bridge_query_planner_pool.rs diff --git a/.changesets/exp_carton_ginger_magnet_beacon.md b/.changesets/exp_carton_ginger_magnet_beacon.md new file mode 100644 index 0000000000..1d8ad7aef7 --- /dev/null +++ b/.changesets/exp_carton_ginger_magnet_beacon.md @@ -0,0 +1,17 @@ +### Experimental: Introduce a pool of query planners ([PR #4897](https://github.com/apollographql/router/pull/4897)) + +The router supports a new experimental feature: a pool of query planners to parallelize query planning. + +You can configure query planner pools with the `supergraph.query_planner.experimental_parallelism` option: + +```yaml +supergraph: + query_planner: + experimental_parallelism: auto # number of available cpus +``` + +Its value is the number of query planners that run in parallel, and its default value is `1`. You can set it to the special value `auto` to automatically set it equal to the number of available CPUs. + +You can discuss and comment about query planner pools in this [GitHub discussion](https://github.com/apollographql/router/discussions/4917). 
+ +By [@xuorig](https://github.com/xuorig) and [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/4897 diff --git a/Cargo.lock b/Cargo.lock index fba4a4f6f8..352d10ddfc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -231,7 +231,7 @@ dependencies = [ "petgraph", "salsa", "serde_json", - "strum 0.26.1", + "strum 0.26.2", "strum_macros 0.26.1", "thiserror", "url", @@ -258,6 +258,7 @@ dependencies = [ "apollo-federation", "arc-swap", "askama", + "async-channel 1.9.0", "async-compression", "async-trait", "aws-config", @@ -6536,9 +6537,9 @@ dependencies = [ [[package]] name = "strum" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "723b93e8addf9aa965ebe2d11da6d7540fa2283fcea14b3371ff055f7ba13f5f" +checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" [[package]] name = "strum_macros" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 2eb77333f8..c4ae37d890 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -67,6 +67,7 @@ anyhow = "1.0.80" apollo-compiler = "=1.0.0-beta.14" apollo-federation = "=0.0.9" arc-swap = "1.6.0" +async-channel = "1.9.0" async-compression = { version = "0.4.6", features = [ "tokio", "brotli", @@ -350,3 +351,6 @@ harness = false [[bench]] name = "deeply_nested" harness = false + +[[example]] +name = "planner" diff --git a/apollo-router/examples/.skipconfigvalidation b/apollo-router/examples/.skipconfigvalidation new file mode 100644 index 0000000000..e69de29bb2 diff --git a/apollo-router/examples/planner.rs b/apollo-router/examples/planner.rs new file mode 100644 index 0000000000..43e53e9261 --- /dev/null +++ b/apollo-router/examples/planner.rs @@ -0,0 +1,66 @@ +use std::ops::ControlFlow; + +use anyhow::Result; +use apollo_router::layers::ServiceBuilderExt; +use apollo_router::plugin::Plugin; +use apollo_router::plugin::PluginInit; +use apollo_router::register_plugin; +use apollo_router::services::execution; +use apollo_router::services::supergraph; +use tower::BoxError; +use tower::ServiceBuilder; +use tower::ServiceExt; + +#[derive(Debug)] +struct DoNotExecute { + #[allow(dead_code)] + configuration: bool, +} + +#[async_trait::async_trait] +impl Plugin for DoNotExecute { + type Config = bool; + + async fn new(init: PluginInit) -> Result { + Ok(Self { + configuration: init.config, + }) + } + + fn supergraph_service(&self, service: supergraph::BoxService) -> supergraph::BoxService { + ServiceBuilder::new() + .map_request(|mut req: supergraph::Request| { + let body = req.supergraph_request.body_mut(); + body.query = body.query.as_ref().map(|query| { + let query_name = format!("query Query{} ", rand::random::()); + query.replacen("query ", query_name.as_str(), 1) + }); + req + }) + .service(service) + .boxed() + } + + fn execution_service(&self, service: execution::BoxService) -> execution::BoxService { + ServiceBuilder::new() + .checkpoint(|req: execution::Request| { + Ok(ControlFlow::Break( + execution::Response::fake_builder() + .context(req.context) + .build() + .unwrap(), + )) + }) + .service(service) + .boxed() + } +} + +register_plugin!("apollo-test", "do_not_execute", DoNotExecute); + +// Run this benchmark with cargo run --release --example planner -- --hot-reload -s -c ./apollo-router/examples/router.yaml +// You can then send operations to it with `ab` or `hey` or any tool you like: +// hey -n 1000 -c 10 -m POST -H 'Content-Type: application/json' -D 'path/to/an/anonymous/operation' 
http://localhost:4100 +fn main() -> Result<()> { + apollo_router::main() +} diff --git a/apollo-router/examples/router.yaml b/apollo-router/examples/router.yaml new file mode 100644 index 0000000000..1981c5da3b --- /dev/null +++ b/apollo-router/examples/router.yaml @@ -0,0 +1,13 @@ +supergraph: + listen: 0.0.0.0:4100 + introspection: true + query_planner: + experimental_parallelism: auto # or any number +plugins: + experimental.expose_query_plan: true + apollo-test.do_not_execute: true +experimental_graphql_validation_mode: both +sandbox: + enabled: true +homepage: + enabled: false diff --git a/apollo-router/src/axum_factory/tests.rs b/apollo-router/src/axum_factory/tests.rs index 1ad362a2e2..aafad48e8e 100644 --- a/apollo-router/src/axum_factory/tests.rs +++ b/apollo-router/src/axum_factory/tests.rs @@ -1,6 +1,7 @@ use std::collections::HashMap; use std::io; use std::net::SocketAddr; +use std::num::NonZeroUsize; use std::pin::Pin; use std::str::FromStr; use std::sync::atomic::AtomicU32; @@ -64,7 +65,7 @@ use crate::http_server_factory::HttpServerFactory; use crate::http_server_factory::HttpServerHandle; use crate::json_ext::Path; use crate::plugin::test::MockSubgraph; -use crate::query_planner::BridgeQueryPlanner; +use crate::query_planner::BridgeQueryPlannerPool; use crate::router_factory::create_plugins; use crate::router_factory::Endpoint; use crate::router_factory::RouterFactory; @@ -2303,9 +2304,13 @@ async fn test_supergraph_timeout() { let conf: Arc = Arc::new(serde_json::from_value(config).unwrap()); let schema = include_str!("..//testdata/minimal_supergraph.graphql"); - let planner = BridgeQueryPlanner::new(schema.to_string(), conf.clone()) - .await - .unwrap(); + let planner = BridgeQueryPlannerPool::new( + schema.to_string(), + conf.clone(), + NonZeroUsize::new(1).unwrap(), + ) + .await + .unwrap(); let schema = planner.schema(); // we do the entire supergraph rebuilding instead of using `from_supergraph_mock_callback_and_configuration` diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs index 54cdfac600..207cf755ef 100644 --- a/apollo-router/src/configuration/metrics.rs +++ b/apollo-router/src/configuration/metrics.rs @@ -8,6 +8,7 @@ use opentelemetry_api::KeyValue; use paste::paste; use serde_json::Value; +use super::AvailableParallelism; use crate::metrics::meter_provider; use crate::uplink::license_enforcement::LicenseState; use crate::Configuration; @@ -46,6 +47,7 @@ impl Metrics { ); data.populate_license_instrument(license_state); data.populate_user_plugins_instrument(configuration); + data.populate_query_planner_experimental_parallelism(configuration); data.into() } } @@ -427,6 +429,54 @@ impl InstrumentData { ), ); } + + pub(crate) fn populate_query_planner_experimental_parallelism( + &mut self, + configuration: &Configuration, + ) { + let query_planner_parallelism_config = configuration + .supergraph + .query_planner + .experimental_parallelism; + + if query_planner_parallelism_config != Default::default() { + let mut attributes = HashMap::new(); + attributes.insert( + "mode".to_string(), + if let AvailableParallelism::Auto(_) = query_planner_parallelism_config { + "auto" + } else { + "static" + } + .into(), + ); + self.data.insert( + "apollo.router.config.query_panner.parallelism".to_string(), + ( + configuration + .supergraph + .query_planner + .experimental_query_planner_parallelism() + .map(|n| { + #[cfg(test)] + { + // Set to a fixed number for snapshot tests + if let AvailableParallelism::Auto(_) = + 
query_planner_parallelism_config + { + return 8; + } + } + let as_usize: usize = n.into(); + let as_u64: u64 = as_usize.try_into().unwrap_or_default(); + as_u64 + }) + .unwrap_or_default(), + attributes, + ), + ); + } + } } impl From for Metrics { @@ -460,6 +510,7 @@ mod test { use crate::configuration::metrics::InstrumentData; use crate::configuration::metrics::Metrics; use crate::uplink::license_enforcement::LicenseState; + use crate::Configuration; #[derive(RustEmbed)] #[folder = "src/configuration/testdata/metrics"] @@ -477,6 +528,8 @@ mod test { let mut data = InstrumentData::default(); data.populate_config_instruments(yaml); + let configuration: Configuration = input.parse().unwrap(); + data.populate_query_planner_experimental_parallelism(&configuration); let _metrics: Metrics = data.into(); assert_non_zero_metrics_snapshot!(file_name); } diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index 7ac7061968..f448ca772b 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -629,6 +629,46 @@ pub(crate) struct Supergraph { /// Log a message if the client closes the connection before the response is sent. /// Default: false. pub(crate) experimental_log_on_broken_pipe: bool, + + /// Configuration options pertaining to the query planner component. + pub(crate) query_planner: QueryPlanner, +} + +/// Configuration options pertaining to the query planner component. +#[derive(Debug, Clone, Default, Deserialize, Serialize, JsonSchema)] +#[serde(deny_unknown_fields)] +pub(crate) struct QueryPlanner { + /// Set the size of a pool of workers to enable query planning parallelism. + /// Default: 1. + pub(crate) experimental_parallelism: AvailableParallelism, +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] +#[serde(rename_all = "snake_case", untagged)] +pub(crate) enum AvailableParallelism { + Auto(Auto), + Fixed(NonZeroUsize), +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] +#[serde(rename_all = "snake_case")] +pub(crate) enum Auto { + Auto, +} + +impl Default for AvailableParallelism { + fn default() -> Self { + Self::Fixed(NonZeroUsize::new(1).expect("cannot fail")) + } +} + +impl QueryPlanner { + pub(crate) fn experimental_query_planner_parallelism(&self) -> io::Result { + match self.experimental_parallelism { + AvailableParallelism::Auto(Auto::Auto) => std::thread::available_parallelism(), + AvailableParallelism::Fixed(n) => Ok(n), + } + } } fn default_defer_support() -> bool { @@ -648,6 +688,7 @@ impl Supergraph { generate_query_fragments: Option, early_cancel: Option, experimental_log_on_broken_pipe: Option, + query_planner: Option, ) -> Self { Self { listen: listen.unwrap_or_else(default_graphql_listen), @@ -667,6 +708,7 @@ impl Supergraph { generate_query_fragments: generate_query_fragments.unwrap_or_default(), early_cancel: early_cancel.unwrap_or_default(), experimental_log_on_broken_pipe: experimental_log_on_broken_pipe.unwrap_or_default(), + query_planner: query_planner.unwrap_or_default(), } } } @@ -685,6 +727,7 @@ impl Supergraph { generate_query_fragments: Option, early_cancel: Option, experimental_log_on_broken_pipe: Option, + query_planner: Option, ) -> Self { Self { listen: listen.unwrap_or_else(test_listen), @@ -704,6 +747,7 @@ impl Supergraph { generate_query_fragments: generate_query_fragments.unwrap_or_default(), early_cancel: early_cancel.unwrap_or_default(), experimental_log_on_broken_pipe: 
experimental_log_on_broken_pipe.unwrap_or_default(), + query_planner: query_planner.unwrap_or_default(), } } } diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_auto.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_auto.router.yaml.snap new file mode 100644 index 0000000000..19b8a87df3 --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_auto.router.yaml.snap @@ -0,0 +1,10 @@ +--- +source: apollo-router/src/configuration/metrics.rs +expression: "&metrics.non_zero()" +--- +- name: apollo.router.config.query_panner.parallelism + data: + datapoints: + - value: 8 + attributes: + mode: auto diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_static.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_static.router.yaml.snap new file mode 100644 index 0000000000..1523d59ca1 --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_static.router.yaml.snap @@ -0,0 +1,10 @@ +--- +source: apollo-router/src/configuration/metrics.rs +expression: "&metrics.non_zero()" +--- +- name: apollo.router.config.query_panner.parallelism + data: + datapoints: + - value: 10 + attributes: + mode: static diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index e267f4efb6..bdee448830 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -1,6 +1,5 @@ --- source: apollo-router/src/configuration/tests.rs -assertion_line: 31 expression: "&schema" --- { @@ -2635,7 +2634,10 @@ expression: "&schema" "experimental_reuse_query_plans": false }, "early_cancel": false, - "experimental_log_on_broken_pipe": false + "experimental_log_on_broken_pipe": false, + "query_planner": { + "experimental_parallelism": 1 + } }, "type": "object", "properties": { @@ -2689,6 +2691,35 @@ expression: "&schema" "default": "/", "type": "string" }, + "query_planner": { + "description": "Configuration options pertaining to the query planner component.", + "default": { + "experimental_parallelism": 1 + }, + "type": "object", + "required": [ + "experimental_parallelism" + ], + "properties": { + "experimental_parallelism": { + "description": "Set the size of a pool of workers to enable query planning parallelism. 
Default: 1.", + "anyOf": [ + { + "type": "string", + "enum": [ + "auto" + ] + }, + { + "type": "integer", + "format": "uint", + "minimum": 1.0 + } + ] + } + }, + "additionalProperties": false + }, "query_planning": { "description": "Query planning options", "default": { diff --git a/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_auto.router.yaml b/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_auto.router.yaml new file mode 100644 index 0000000000..d915dcdadc --- /dev/null +++ b/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_auto.router.yaml @@ -0,0 +1,3 @@ +supergraph: + query_planner: + experimental_parallelism: auto diff --git a/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_static.router.yaml b/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_static.router.yaml new file mode 100644 index 0000000000..586eb5abb8 --- /dev/null +++ b/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_static.router.yaml @@ -0,0 +1,3 @@ +supergraph: + query_planner: + experimental_parallelism: 10 diff --git a/apollo-router/src/error.rs b/apollo-router/src/error.rs index 9162ec0f0e..4e26aa0f7d 100644 --- a/apollo-router/src/error.rs +++ b/apollo-router/src/error.rs @@ -292,6 +292,9 @@ pub(crate) enum QueryPlannerError { /// Unauthorized field or type Unauthorized(Vec), + + /// Query planner pool error: {0} + PoolProcessing(String), } impl IntoGraphQLErrors for Vec { diff --git a/apollo-router/src/metrics/mod.rs b/apollo-router/src/metrics/mod.rs index db3487f7d1..ab270b2f1e 100644 --- a/apollo-router/src/metrics/mod.rs +++ b/apollo-router/src/metrics/mod.rs @@ -1065,7 +1065,6 @@ macro_rules! assert_non_zero_metrics_snapshot { let metrics = crate::metrics::collect_metrics(); insta::assert_yaml_snapshot!(&metrics.non_zero()); }); - }; () => { insta::with_settings!({sort_maps => true}, { diff --git a/apollo-router/src/plugins/cache/entity.rs b/apollo-router/src/plugins/cache/entity.rs index c030b4d62b..0ed337c6d5 100644 --- a/apollo-router/src/plugins/cache/entity.rs +++ b/apollo-router/src/plugins/cache/entity.rs @@ -153,9 +153,11 @@ impl Plugin for EntityCache { fn supergraph_service(&self, service: supergraph::BoxService) -> supergraph::BoxService { ServiceBuilder::new() .map_response(|mut response: supergraph::Response| { - if let Some(cache_control) = - response.context.extensions().lock().get::() - { + if let Some(cache_control) = { + let lock = response.context.extensions().lock(); + let cache_control = lock.get::().cloned(); + cache_control + } { let _ = cache_control.to_headers(response.response.headers_mut()); } diff --git a/apollo-router/src/plugins/include_subgraph_errors.rs b/apollo-router/src/plugins/include_subgraph_errors.rs index 8886049c24..66ffa53917 100644 --- a/apollo-router/src/plugins/include_subgraph_errors.rs +++ b/apollo-router/src/plugins/include_subgraph_errors.rs @@ -77,6 +77,7 @@ impl Plugin for IncludeSubgraphErrors { #[cfg(test)] mod test { + use std::num::NonZeroUsize; use std::sync::Arc; use bytes::Bytes; @@ -90,7 +91,7 @@ mod test { use crate::json_ext::Object; use crate::plugin::test::MockSubgraph; use crate::plugin::DynPlugin; - use crate::query_planner::BridgeQueryPlanner; + use crate::query_planner::BridgeQueryPlannerPool; use crate::router_factory::create_plugins; use crate::services::layers::persisted_queries::PersistedQueryLayer; use crate::services::layers::query_analysis::QueryAnalysisLayer; @@ -190,9 +191,13 
@@ mod test { let schema = include_str!("../../../apollo-router-benchmarks/benches/fixtures/supergraph.graphql"); - let planner = BridgeQueryPlanner::new(schema.to_string(), Default::default()) - .await - .unwrap(); + let planner = BridgeQueryPlannerPool::new( + schema.to_string(), + Default::default(), + NonZeroUsize::new(1).unwrap(), + ) + .await + .unwrap(); let schema = planner.schema(); let subgraph_schemas = planner.subgraph_schemas(); diff --git a/apollo-router/src/plugins/record_replay/record.rs b/apollo-router/src/plugins/record_replay/record.rs index 7179c8db50..1ceb5f85cc 100644 --- a/apollo-router/src/plugins/record_replay/record.rs +++ b/apollo-router/src/plugins/record_replay/record.rs @@ -103,7 +103,7 @@ impl Plugin for Record { let context = res.context.clone(); let after_complete = once(async move { - let recording = context.extensions().lock().get_mut::().cloned(); + let recording = context.extensions().lock().remove::(); if let Some(mut recording) = recording { let res_headers = externalize_header_map(&headers)?; diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index cd82bb0c17..97755df1f7 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -470,8 +470,11 @@ impl Plugin for Telemetry { )) .map_response(move |mut resp: SupergraphResponse| { let config = config_map_res_first.clone(); - if let Some(usage_reporting) = - resp.context.extensions().lock().get::>() + if let Some(usage_reporting) = { + let extensions = resp.context.extensions().lock(); + let urp = extensions.get::>(); + urp.cloned() + } { // Record the operation signature on the router span Span::current().record( @@ -1282,9 +1285,11 @@ impl Telemetry { operation_kind: OperationKind, operation_subtype: Option, ) { - let metrics = if let Some(usage_reporting) = - context.extensions().lock().get::>() - { + let metrics = if let Some(usage_reporting) = { + let lock = context.extensions().lock(); + let urp = lock.get::>(); + urp.cloned() + } { let licensed_operation_count = licensed_operation_count(&usage_reporting.stats_report_key); let persisted_query_hit = context diff --git a/apollo-router/src/plugins/traffic_shaping/mod.rs b/apollo-router/src/plugins/traffic_shaping/mod.rs index 51a60e7c41..6c4dd43a0b 100644 --- a/apollo-router/src/plugins/traffic_shaping/mod.rs +++ b/apollo-router/src/plugins/traffic_shaping/mod.rs @@ -415,6 +415,7 @@ register_plugin!("apollo", "traffic_shaping", TrafficShaping); #[cfg(test)] mod test { + use std::num::NonZeroUsize; use std::sync::Arc; use bytes::Bytes; @@ -429,7 +430,7 @@ mod test { use crate::plugin::test::MockSubgraph; use crate::plugin::test::MockSupergraphService; use crate::plugin::DynPlugin; - use crate::query_planner::BridgeQueryPlanner; + use crate::query_planner::BridgeQueryPlannerPool; use crate::router_factory::create_plugins; use crate::services::layers::persisted_queries::PersistedQueryLayer; use crate::services::layers::query_analysis::QueryAnalysisLayer; @@ -523,9 +524,13 @@ mod test { .unwrap(); let config = Arc::new(config); - let planner = BridgeQueryPlanner::new(schema.to_string(), config.clone()) - .await - .unwrap(); + let planner = BridgeQueryPlannerPool::new( + schema.to_string(), + config.clone(), + NonZeroUsize::new(1).unwrap(), + ) + .await + .unwrap(); let schema = planner.schema(); let subgraph_schemas = planner.subgraph_schemas(); diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs 
b/apollo-router/src/query_planner/bridge_query_planner.rs index be1850ff29..bad73ffbe8 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -4,7 +4,6 @@ use std::collections::HashMap; use std::fmt::Debug; use std::fmt::Write; use std::sync::Arc; -use std::time::Instant; use apollo_compiler::ast; use apollo_compiler::validation::Valid; @@ -605,11 +604,9 @@ impl Service for BridgeQueryPlanner { .unwrap_or_default(); let this = self.clone(); let fut = async move { - let start = Instant::now(); - - let mut doc = match context.extensions().lock().get::() { + let mut doc = match context.extensions().lock().get::().cloned() { None => return Err(QueryPlannerError::SpecError(SpecError::UnknownFileId)), - Some(d) => d.clone(), + Some(d) => d, }; let schema = &this.schema.api_schema().definitions; @@ -666,8 +663,6 @@ impl Service for BridgeQueryPlanner { doc, ) .await; - let duration = start.elapsed().as_secs_f64(); - tracing::info!(histogram.apollo_router_query_planning_time = duration); match res { Ok(query_planner_content) => Ok(QueryPlannerResponse::builder() diff --git a/apollo-router/src/query_planner/bridge_query_planner_pool.rs b/apollo-router/src/query_planner/bridge_query_planner_pool.rs new file mode 100644 index 0000000000..cad419cf8f --- /dev/null +++ b/apollo-router/src/query_planner/bridge_query_planner_pool.rs @@ -0,0 +1,203 @@ +use std::collections::HashMap; +use std::num::NonZeroUsize; +use std::sync::Arc; +use std::time::Instant; + +use apollo_compiler::validation::Valid; +use async_channel::bounded; +use async_channel::Sender; +use futures::future::BoxFuture; +use opentelemetry::KeyValue; +use router_bridge::planner::Planner; +use tokio::sync::oneshot; +use tokio::task::JoinSet; +use tower::Service; +use tower::ServiceExt; + +use super::bridge_query_planner::BridgeQueryPlanner; +use super::QueryPlanResult; +use crate::error::QueryPlannerError; +use crate::error::ServiceBuildError; +use crate::services::QueryPlannerRequest; +use crate::services::QueryPlannerResponse; +use crate::spec::Schema; +use crate::Configuration; + +static CHANNEL_SIZE: usize = 1_000; + +#[derive(Clone)] +pub(crate) struct BridgeQueryPlannerPool { + planners: Vec>>, + sender: Sender<( + QueryPlannerRequest, + oneshot::Sender>, + )>, + schema: Arc, + subgraph_schemas: Arc>>>, +} + +impl BridgeQueryPlannerPool { + pub(crate) async fn new( + sdl: String, + configuration: Arc, + size: NonZeroUsize, + ) -> Result { + Self::new_from_planners(Default::default(), sdl, configuration, size).await + } + + pub(crate) async fn new_from_planners( + old_planners: Vec>>, + schema: String, + configuration: Arc, + size: NonZeroUsize, + ) -> Result { + let mut join_set = JoinSet::new(); + + let (sender, receiver) = bounded::<( + QueryPlannerRequest, + oneshot::Sender>, + )>(CHANNEL_SIZE); + + let mut old_planners_iterator = old_planners.into_iter(); + + (0..size.into()).for_each(|_| { + let sdl = schema.clone(); + let configuration = configuration.clone(); + + if let Some(old_planner) = old_planners_iterator.next() { + join_set.spawn(async move { + BridgeQueryPlanner::new_from_planner(old_planner, sdl, configuration).await + }); + } else { + join_set.spawn(async move { BridgeQueryPlanner::new(sdl, configuration).await }); + } + }); + + let mut bridge_query_planners = Vec::new(); + + while let Some(task_result) = join_set.join_next().await { + let bridge_query_planner = + task_result.map_err(|e| ServiceBuildError::ServiceError(Box::new(e)))??; + 
bridge_query_planners.push(bridge_query_planner); + } + + let schema = bridge_query_planners + .first() + .ok_or_else(|| { + ServiceBuildError::QueryPlannerError(QueryPlannerError::PoolProcessing( + "There should be at least 1 Query Planner service in pool".to_string(), + )) + })? + .schema(); + + let subgraph_schemas = bridge_query_planners + .first() + .ok_or_else(|| { + ServiceBuildError::QueryPlannerError(QueryPlannerError::PoolProcessing( + "There should be at least 1 Query Planner service in pool".to_string(), + )) + })? + .subgraph_schemas(); + + let planners = bridge_query_planners + .iter() + .map(|p| p.planner().clone()) + .collect(); + + for (worker_id, mut planner) in bridge_query_planners.into_iter().enumerate() { + let receiver = receiver.clone(); + + tokio::spawn(async move { + while let Ok((request, res_sender)) = receiver.recv().await { + let svc = match planner.ready().await { + Ok(svc) => svc, + Err(e) => { + let _ = res_sender.send(Err(e)); + + continue; + } + }; + let start = Instant::now(); + + let res = svc.call(request).await; + + f64_histogram!( + "apollo.router.query_planner.duration", + "Duration of the query planning.", + start.elapsed().as_secs_f64(), + [KeyValue::new("workerId", worker_id.to_string())] + ); + + let _ = res_sender.send(res); + } + }); + } + + Ok(Self { + planners, + sender, + schema, + subgraph_schemas, + }) + } + + pub(crate) fn planners(&self) -> Vec>> { + self.planners.clone() + } + + pub(crate) fn schema(&self) -> Arc { + self.schema.clone() + } + + pub(crate) fn subgraph_schemas( + &self, + ) -> Arc>>> { + self.subgraph_schemas.clone() + } +} + +impl tower::Service for BridgeQueryPlannerPool { + type Response = QueryPlannerResponse; + + type Error = QueryPlannerError; + + type Future = BoxFuture<'static, Result>; + + fn poll_ready( + &mut self, + _cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + if self.sender.is_full() { + std::task::Poll::Ready(Err(QueryPlannerError::PoolProcessing( + "query plan queue is full".into(), + ))) + } else { + std::task::Poll::Ready(Ok(())) + } + } + + fn call(&mut self, req: QueryPlannerRequest) -> Self::Future { + let (response_sender, response_receiver) = oneshot::channel(); + let sender = self.sender.clone(); + + Box::pin(async move { + let start = Instant::now(); + let _ = sender.send((req, response_sender)).await; + + tracing::info!(value.apollo_router_query_planner_queue_size = sender.len()); + let res = response_receiver + .await + .map_err(|_| QueryPlannerError::UnhandledPlannerResult)?; + tracing::info!(value.apollo_router_query_planner_queue_size = sender.len()); + + f64_histogram!( + "apollo_router_query_planning_time", + "Duration of the time the router waited for a query plan, including both the queue time and planning time.", + start.elapsed().as_secs_f64(), + [] + ); + + res + }) + } +} diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs index 39e742c0e0..74d72d9f0d 100644 --- a/apollo-router/src/query_planner/caching_query_planner.rs +++ b/apollo-router/src/query_planner/caching_query_planner.rs @@ -31,7 +31,7 @@ use crate::plugins::authorization::CacheKeyMetadata; use crate::plugins::progressive_override::LABELS_TO_OVERRIDE_KEY; use crate::plugins::telemetry::utils::Timer; use crate::query_planner::labeler::add_defer_labels; -use crate::query_planner::BridgeQueryPlanner; +use crate::query_planner::BridgeQueryPlannerPool; use crate::query_planner::QueryPlanResult; use 
crate::services::layers::persisted_queries::PersistedQueryLayer; use crate::services::layers::query_analysis::ParsedDocument; @@ -281,9 +281,9 @@ where } } -impl CachingQueryPlanner { - pub(crate) fn planner(&self) -> Arc> { - self.delegate.planner() +impl CachingQueryPlanner { + pub(crate) fn planners(&self) -> Vec>> { + self.delegate.planners() } pub(crate) fn subgraph_schemas( @@ -317,11 +317,9 @@ where let context = request.context.clone(); qp.plan(request).await.map(|response| { if let Some(usage_reporting) = { - context - .extensions() - .lock() - .get::>() - .cloned() + let lock = context.extensions().lock(); + let urp = lock.get::>(); + urp.cloned() } { let _ = response.context.insert( "apollo_operation_id", @@ -376,17 +374,17 @@ where Some(d) => d.clone(), }; + let metadata = { + let lock = request.context.extensions().lock(); + let ckm = lock.get::().cloned(); + ckm.unwrap_or_default() + }; + let caching_key = CachingQueryKey { query: request.query.clone(), operation: request.operation_name.to_owned(), hash: doc.hash.clone(), - metadata: request - .context - .extensions() - .lock() - .get::() - .cloned() - .unwrap_or_default(), + metadata, plan_options, }; diff --git a/apollo-router/src/query_planner/mod.rs b/apollo-router/src/query_planner/mod.rs index 5115bdd0c1..25d409c711 100644 --- a/apollo-router/src/query_planner/mod.rs +++ b/apollo-router/src/query_planner/mod.rs @@ -3,11 +3,13 @@ #![allow(missing_docs)] // FIXME pub(crate) use bridge_query_planner::*; +pub(crate) use bridge_query_planner_pool::*; pub(crate) use caching_query_planner::*; pub use self::fetch::OperationKind; mod bridge_query_planner; +mod bridge_query_planner_pool; mod caching_query_planner; mod execution; pub(crate) mod fetch; diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index 5fa305e38f..58f31f9236 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -30,7 +30,7 @@ use crate::plugins::subscription::APOLLO_SUBSCRIPTION_PLUGIN; use crate::plugins::telemetry::reload::apollo_opentelemetry_initialized; use crate::plugins::traffic_shaping::TrafficShaping; use crate::plugins::traffic_shaping::APOLLO_TRAFFIC_SHAPING; -use crate::query_planner::BridgeQueryPlanner; +use crate::query_planner::BridgeQueryPlannerPool; use crate::services::apollo_graph_reference; use crate::services::apollo_key; use crate::services::http::HttpClientServiceFactory; @@ -278,19 +278,34 @@ impl YamlRouterFactory { ) -> Result { let query_planner_span = tracing::info_span!("query_planner_creation"); // QueryPlannerService takes an UnplannedRequest and outputs PlannedRequest - let bridge_query_planner = match previous_supergraph.as_ref().map(|router| router.planner()) - { - None => { - BridgeQueryPlanner::new(schema.clone(), configuration.clone()) + let bridge_query_planner = + match previous_supergraph.as_ref().map(|router| router.planners()) { + None => { + BridgeQueryPlannerPool::new( + schema.clone(), + configuration.clone(), + configuration + .supergraph + .query_planner + .experimental_query_planner_parallelism()?, + ) .instrument(query_planner_span) .await? - } - Some(planner) => { - BridgeQueryPlanner::new_from_planner(planner, schema.clone(), configuration.clone()) + } + Some(planners) => { + BridgeQueryPlannerPool::new_from_planners( + planners, + schema.clone(), + configuration.clone(), + configuration + .supergraph + .query_planner + .experimental_query_planner_parallelism()?, + ) .instrument(query_planner_span) .await? 
- } - }; + } + }; let schema_changed = previous_supergraph .map(|supergraph_creator| supergraph_creator.schema().raw_sdl.as_ref() == &schema) diff --git a/apollo-router/src/services/layers/allow_only_http_post_mutations.rs b/apollo-router/src/services/layers/allow_only_http_post_mutations.rs index 619685c5f4..22cb282121 100644 --- a/apollo-router/src/services/layers/allow_only_http_post_mutations.rs +++ b/apollo-router/src/services/layers/allow_only_http_post_mutations.rs @@ -48,7 +48,13 @@ where return Ok(ControlFlow::Continue(req)); } - let doc = match req.context.extensions().lock().get::() { + let doc = match req + .context + .extensions() + .lock() + .get::() + .cloned() + { None => { let errors = vec![Error::builder() .message("Cannot find executable document".to_string()) @@ -63,7 +69,7 @@ where return Ok(ControlFlow::Break(res)); } - Some(c) => c.clone(), + Some(c) => c, }; let op = doc diff --git a/apollo-router/src/services/layers/content_negotiation.rs b/apollo-router/src/services/layers/content_negotiation.rs index 59ccc5498e..f07cb38972 100644 --- a/apollo-router/src/services/layers/content_negotiation.rs +++ b/apollo-router/src/services/layers/content_negotiation.rs @@ -143,12 +143,11 @@ where json: accepts_json, multipart_defer: accepts_multipart_defer, multipart_subscription: accepts_multipart_subscription, - } = context - .extensions() - .lock() - .get() - .cloned() - .unwrap_or_default(); + } = { + let lock = context.extensions().lock(); + let cra = lock.get::(); + cra.cloned().unwrap_or_default() + }; if !res.has_next.unwrap_or_default() && (accepts_json || accepts_wildcard) { parts diff --git a/apollo-router/src/services/supergraph/service.rs b/apollo-router/src/services/supergraph/service.rs index 4a87809820..11121b24bc 100644 --- a/apollo-router/src/services/supergraph/service.rs +++ b/apollo-router/src/services/supergraph/service.rs @@ -41,7 +41,7 @@ use crate::plugins::traffic_shaping::APOLLO_TRAFFIC_SHAPING; use crate::query_planner::subscription::SubscriptionHandle; use crate::query_planner::subscription::OPENED_SUBSCRIPTIONS; use crate::query_planner::subscription::SUBSCRIPTION_EVENT_SPAN_NAME; -use crate::query_planner::BridgeQueryPlanner; +use crate::query_planner::BridgeQueryPlannerPool; use crate::query_planner::CachingQueryPlanner; use crate::query_planner::InMemoryCachePlanner; use crate::query_planner::QueryPlanResult; @@ -82,7 +82,7 @@ pub(crate) type Plugins = IndexMap>; #[derive(Clone)] pub(crate) struct SupergraphService { execution_service_factory: ExecutionServiceFactory, - query_planner_service: CachingQueryPlanner, + query_planner_service: CachingQueryPlanner, schema: Arc, notify: Notify, } @@ -91,7 +91,7 @@ pub(crate) struct SupergraphService { impl SupergraphService { #[builder] pub(crate) fn new( - query_planner_service: CachingQueryPlanner, + query_planner_service: CachingQueryPlanner, execution_service_factory: ExecutionServiceFactory, schema: Arc, notify: Notify, @@ -156,7 +156,7 @@ impl Service for SupergraphService { } async fn service_call( - planning: CachingQueryPlanner, + planning: CachingQueryPlanner, execution_service_factory: ExecutionServiceFactory, schema: Arc, req: SupergraphRequest, @@ -227,7 +227,11 @@ async fn service_call( let is_deferred = plan.is_deferred(operation_name.as_deref(), &variables); let is_subscription = plan.is_subscription(operation_name.as_deref()); - if let Some(batching) = context.extensions().lock().get::() { + if let Some(batching) = { + let lock = context.extensions().lock(); + let batching = 
lock.get::(); + batching.cloned() + } { if batching.enabled && (is_deferred || is_subscription) { let message = if is_deferred { "BATCHING_DEFER_UNSUPPORTED" @@ -586,7 +590,7 @@ async fn dispatch_event( } async fn plan_query( - mut planning: CachingQueryPlanner, + mut planning: CachingQueryPlanner, operation_name: Option, context: Context, schema: Arc, @@ -599,7 +603,10 @@ async fn plan_query( // tests will pass. // During a regular request, `ParsedDocument` is already populated during query analysis. // Some tests do populate the document, so we only do it if it's not already there. - if !context.extensions().lock().contains_key::() { + if !{ + let lock = context.extensions().lock(); + lock.contains_key::() + } { let doc = Query::parse_document( &query_str, operation_name.as_deref(), @@ -661,11 +668,11 @@ pub(crate) struct PluggableSupergraphServiceBuilder { plugins: Arc, subgraph_services: Vec<(String, Box)>, configuration: Option>, - planner: BridgeQueryPlanner, + planner: BridgeQueryPlannerPool, } impl PluggableSupergraphServiceBuilder { - pub(crate) fn new(planner: BridgeQueryPlanner) -> Self { + pub(crate) fn new(planner: BridgeQueryPlannerPool) -> Self { Self { plugins: Arc::new(Default::default()), subgraph_services: Default::default(), @@ -752,7 +759,7 @@ impl PluggableSupergraphServiceBuilder { /// A collection of services and data which may be used to create a "router". #[derive(Clone)] pub(crate) struct SupergraphCreator { - query_planner_service: CachingQueryPlanner, + query_planner_service: CachingQueryPlanner, subgraph_service_factory: Arc, schema: Arc, config: Arc, @@ -843,8 +850,8 @@ impl SupergraphCreator { self.query_planner_service.previous_cache() } - pub(crate) fn planner(&self) -> Arc> { - self.query_planner_service.planner() + pub(crate) fn planners(&self) -> Vec>> { + self.query_planner_service.planners() } pub(crate) async fn warm_up_query_planner( From 40eaed5832ac4fb1d9f0f3fbcbe7f7a8c15c2c08 Mon Sep 17 00:00:00 2001 From: Jeremy Lempereur Date: Wed, 10 Apr 2024 12:02:54 +0200 Subject: [PATCH 05/46] query_panner -> query_planner (#4937) Fix typo `query_panner` -> `query_planner` --- apollo-router/src/configuration/metrics.rs | 2 +- ...est__metrics@query_planner_parallelism_auto.router.yaml.snap | 2 +- ...t__metrics@query_planner_parallelism_static.router.yaml.snap | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs index 207cf755ef..fa9a4d4a67 100644 --- a/apollo-router/src/configuration/metrics.rs +++ b/apollo-router/src/configuration/metrics.rs @@ -451,7 +451,7 @@ impl InstrumentData { .into(), ); self.data.insert( - "apollo.router.config.query_panner.parallelism".to_string(), + "apollo.router.config.query_planner.parallelism".to_string(), ( configuration .supergraph diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_auto.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_auto.router.yaml.snap index 19b8a87df3..36eeed98cf 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_auto.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_auto.router.yaml.snap @@ -2,7 +2,7 @@ source: apollo-router/src/configuration/metrics.rs 
 expression: "&metrics.non_zero()"
 ---
-- name: apollo.router.config.query_panner.parallelism
+- name: apollo.router.config.query_planner.parallelism
   data:
     datapoints:
       - value: 8
diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_static.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_static.router.yaml.snap
index 1523d59ca1..d9970a38fd 100644
--- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_static.router.yaml.snap
+++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_static.router.yaml.snap
@@ -2,7 +2,7 @@
 source: apollo-router/src/configuration/metrics.rs
 expression: "&metrics.non_zero()"
 ---
-- name: apollo.router.config.query_panner.parallelism
+- name: apollo.router.config.query_planner.parallelism
   data:
     datapoints:
       - value: 10

From c1161cc3b63a3abdbd6bf5da18ff26fee4767b29 Mon Sep 17 00:00:00 2001
From: Renée
Date: Thu, 11 Apr 2024 13:29:43 +0200
Subject: [PATCH 06/46] Enable Rust-based API schema generation by default (#4931)

Fixes #4649

Requires an apollo-federation update to feign support for join spec v0.4. The new join spec doesn't affect how the API schema is generated, so the only thing the update does is *not error*. The new release also includes a query plan Display implementation for #4815!

I ran it through our whole testing harness again, which is quite reliable for API schema as the schema data is complete. The only observable differences are in the sorting of keys in input objects in default values, which is both very limited and doesn't make sense in the JS version, so that's good. The *meaning* of the schemas did not change in any case.
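For anyone who wants to compare implementations or temporarily keep the old behavior, the `ApiSchemaMode` enum touched in the diff below still carries the Rust-based, JavaScript-based, and side-by-side variants. As a hedged sketch, assuming the mode is surfaced through a top-level `experimental_api_schema_generation_mode` key (the field name itself is not visible in this diff), opting out would look like:

```yaml
# Sketch only: the configuration key below is an assumption; this diff
# only shows the ApiSchemaMode enum and its default moving off `legacy`.
experimental_api_schema_generation_mode: legacy # or `new` (the new default) / `both`
```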
--- Cargo.lock | 134 +++++------------- apollo-router/Cargo.toml | 4 +- apollo-router/src/configuration/mod.rs | 2 +- ...nfiguration__tests__schema_generation.snap | 2 +- examples/supergraph-sdl/rust/Cargo.toml | 2 +- 5 files changed, 43 insertions(+), 101 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 352d10ddfc..b20449ff4b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -192,9 +192,9 @@ checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" [[package]] name = "apollo-compiler" -version = "1.0.0-beta.14" +version = "1.0.0-beta.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00ed7af048c0beb66c9201c032b25a81b1b450397ddb2cb277ca57bcae2d9e13" +checksum = "79df4ab329753d36476653850519fe92d1b34854dd4337f6abf42a64963ac0ce" dependencies = [ "apollo-parser", "ariadne", @@ -220,28 +220,28 @@ dependencies = [ [[package]] name = "apollo-federation" -version = "0.0.9" +version = "0.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45c675747dd20db0f124d07b9764265b3ae67afbdd1044345673c184888cd018" +checksum = "94e3b0774618a4febe307d2ace6714583e13cd7948cdadb2b4a937ccdc166333" dependencies = [ "apollo-compiler", "derive_more", "indexmap 2.2.3", "lazy_static", "petgraph", - "salsa", - "serde_json", + "serde_json_bytes", "strum 0.26.2", "strum_macros 0.26.1", "thiserror", + "time", "url", ] [[package]] name = "apollo-parser" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e8111fa921e363466724e8cc80ef703ffbdfc5db64f826c604f7378641b12da" +checksum = "6bb7c8a9776825e5524b5ab3a7f478bf091a054180f244dff85814452cb87d90" dependencies = [ "memchr", "rowan", @@ -292,7 +292,7 @@ dependencies = [ "futures", "futures-test", "graphql_client", - "heck 0.4.1", + "heck", "hex", "hmac", "http 0.2.11", @@ -338,7 +338,7 @@ dependencies = [ "opentelemetry-zipkin", "opentelemetry_api", "p256 0.13.2", - "parking_lot 0.12.1", + "parking_lot", "paste", "pin-project-lite", "prometheus", @@ -1506,7 +1506,7 @@ version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2 1.0.76", "quote 1.0.35", "syn 2.0.48", @@ -1955,7 +1955,7 @@ dependencies = [ "hashbrown 0.14.1", "lock_api", "once_cell", - "parking_lot_core 0.9.8", + "parking_lot_core", "serde", ] @@ -2040,7 +2040,7 @@ dependencies = [ "libc", "log", "once_cell", - "parking_lot 0.12.1", + "parking_lot", "pin-project", "serde", "serde_json", @@ -2222,7 +2222,7 @@ dependencies = [ "backtrace", "lazy_static", "mintex", - "parking_lot 0.12.1", + "parking_lot", "rustc-hash", "serde", "serde_json", @@ -2450,7 +2450,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2 1.0.76", "quote 1.0.35", "syn 2.0.48", @@ -2748,7 +2748,7 @@ dependencies = [ "futures", "lazy_static", "log", - "parking_lot 0.12.1", + "parking_lot", "rand 0.8.5", "redis-protocol", "rustls", @@ -3081,7 +3081,7 @@ checksum = "a40f793251171991c4eb75bd84bc640afa8b68ff6907bc89d3b712a22f700506" dependencies = [ "graphql-introspection-query", "graphql-parser", - "heck 0.4.1", + "heck", "lazy_static", "proc-macro2 1.0.76", "quote 1.0.35", @@ -3216,15 +3216,6 @@ dependencies = [ "http 0.2.11", ] -[[package]] -name = "heck" -version = "0.3.3" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "heck" version = "0.4.1" @@ -3423,7 +3414,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -3750,7 +3741,7 @@ dependencies = [ "memchr", "num-cmp", "once_cell", - "parking_lot 0.12.1", + "parking_lot", "percent-encoding", "regex", "serde", @@ -4354,6 +4345,15 @@ dependencies = [ "libc", ] +[[package]] +name = "num_threads" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" +dependencies = [ + "libc", +] + [[package]] name = "number_prefix" version = "0.4.0" @@ -4762,17 +4762,6 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" -[[package]] -name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", -] - [[package]] name = "parking_lot" version = "0.12.1" @@ -4780,21 +4769,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.8", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" -dependencies = [ - "cfg-if", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", + "parking_lot_core", ] [[package]] @@ -5185,7 +5160,7 @@ dependencies = [ "fnv", "lazy_static", "memchr", - "parking_lot 0.12.1", + "parking_lot", "protobuf", "thiserror", ] @@ -5232,7 +5207,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", - "heck 0.4.1", + "heck", "itertools 0.10.5", "lazy_static", "log", @@ -5964,35 +5939,6 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" -[[package]] -name = "salsa" -version = "0.16.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b84d9f96071f3f3be0dc818eae3327625d8ebc95b58da37d6850724f31d3403" -dependencies = [ - "crossbeam-utils", - "indexmap 1.9.3", - "lock_api", - "log", - "oorandom", - "parking_lot 0.11.2", - "rustc-hash", - "salsa-macros", - "smallvec", -] - -[[package]] -name = "salsa-macros" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3904a4ba0a9d0211816177fd34b04c7095443f8cdacd11175064fe541c8fe2" -dependencies = [ - "heck 0.3.3", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 1.0.109", -] - [[package]] name = "same-file" version = "1.0.6" @@ -6280,7 +6226,7 @@ dependencies = [ "futures", "lazy_static", "log", - "parking_lot 0.12.1", + "parking_lot", "serial_test_derive", ] @@ -6547,7 +6493,7 @@ version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2 1.0.76", "quote 1.0.35", "rustversion", @@ -6560,7 +6506,7 @@ version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a3417fc93d76740d974a01654a09777cb500428cc874ca9f45edfe0c4d4cd18" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2 1.0.76", "quote 1.0.35", "rustversion", @@ -6859,7 +6805,9 @@ checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" dependencies = [ "deranged", "itoa", + "libc", "num-conv", + "num_threads", "powerfmt", "serde", "time-core", @@ -6927,7 +6875,7 @@ dependencies = [ "libc", "mio", "num_cpus", - "parking_lot 0.12.1", + "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2 0.5.5", @@ -7414,7 +7362,7 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.1", + "parking_lot", "rand 0.8.5", "resolv-conf", "smallvec", @@ -7603,12 +7551,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" - [[package]] name = "unicode-width" version = "0.1.10" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index c4ae37d890..a03422dd21 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -64,8 +64,8 @@ features = ["docs_rs"] askama = "0.12.1" access-json = "0.1.0" anyhow = "1.0.80" -apollo-compiler = "=1.0.0-beta.14" -apollo-federation = "=0.0.9" +apollo-compiler = "=1.0.0-beta.15" +apollo-federation = "=0.0.10" arc-swap = "1.6.0" async-channel = "1.9.0" async-compression = { version = "0.4.6", features = [ diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index f448ca772b..e963fbdb41 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -222,10 +222,10 @@ pub(crate) enum ApiSchemaMode { /// Use the new Rust-based implementation. New, /// Use the old JavaScript-based implementation. - #[default] Legacy, /// Use Rust-based and Javascript-based implementations side by side, logging warnings if the /// implementations disagree. 
+ #[default] Both, } diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index bdee448830..81184e7452 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -1323,7 +1323,7 @@ expression: "&schema" }, "experimental_api_schema_generation_mode": { "description": "Set the API schema generation implementation to use.", - "default": "legacy", + "default": "both", "oneOf": [ { "description": "Use the new Rust-based implementation.", diff --git a/examples/supergraph-sdl/rust/Cargo.toml b/examples/supergraph-sdl/rust/Cargo.toml index 3297706183..627d062fc9 100644 --- a/examples/supergraph-sdl/rust/Cargo.toml +++ b/examples/supergraph-sdl/rust/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" [dependencies] anyhow = "1" -apollo-compiler = "=1.0.0-beta.14" +apollo-compiler = "=1.0.0-beta.15" apollo-router = { path = "../../../apollo-router" } async-trait = "0.1" tower = { version = "0.4", features = ["full"] } From a501e49ac0cf32d6eebb48641130a532ee8981d9 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Thu, 11 Apr 2024 15:30:39 +0200 Subject: [PATCH 07/46] Remove legacy validation (#4551) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix #4407 Fix #4409 This removes GraphQL validation from the query planner and uses the Rust implementation instead. Validation has now moved to the query analysis layer, which means we can remove a lot of code that was there to accommodate parsing and validating the query but not rejecting it outright, because we needed to compare the validation result with the planner's. This will greatly reduce the load on the planner, which will now only be used for planning queries, not validating them. This new validation process has been running in production for months concurrently with the JavaScript version, allowing us to detect and fix any discrepancies in the new implementation. We now have enough confidence in the new Rust-based validation to entirely switch off the less performant JavaScript validation. Remaining issues: - [x] usage reporting: since invalid queries are now rejected during query analysis, inside the router service, usage for invalid queries is no longer reported, because the telemetry plugin handles usage reporting at the supergraph response level - [x] persisted queries: persisted queries apparently use the query analysis transformation of supergraph requests; one test now fails on an invalid query, and I have not yet looked into why - [x] `@defer` directive: a test is failing because the directive is used in the query but not found by the schema. @lrlna what is apollo-compiler expecting here? Is `@defer` considered builtin, or does it need to be declared in the schema? --- **Checklist** Complete the checklist (and note appropriate exceptions) before the PR is marked ready-for-review. - [ ] Changes are compatible[^1] - [ ] Documentation[^2] completed - [ ] Performance impact assessed and acceptable - Tests added and passing[^3] - [ ] Unit Tests - [ ] Integration Tests - [ ] Manual Tests **Exceptions** *Note any exceptions here* **Notes** [^1]: It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval.
The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this. [^2]: Configuration is an important part of many changes. Where applicable please try to document configuration examples. [^3]: Tick whichever testing boxes are applicable. If you are adding Manual Tests, please document the manual testing (extensively) in the Exceptions. --------- Co-authored-by: Iryna Shestak Co-authored-by: Bryn Cooke Co-authored-by: Simon Sapin Co-authored-by: Renée Kooi --- .../feat_geal_remove_legacy_validation.md | 7 + apollo-router/src/axum_factory/tests.rs | 63 +++--- apollo-router/src/configuration/mod.rs | 25 --- ...nfiguration__tests__schema_generation.snap | 27 --- apollo-router/src/configuration/tests.rs | 4 +- apollo-router/src/context/mod.rs | 42 ++-- apollo-router/src/error.rs | 91 +++++--- apollo-router/src/orbiter/mod.rs | 6 +- .../src/plugins/authorization/mod.rs | 11 +- .../demand_control/basic_cost_calculator.rs | 87 +++++--- .../src/plugins/demand_control/directives.rs | 36 ++-- .../fixtures/basic_interface_query.graphql | 6 +- .../fixtures/basic_object_list_query.graphql | 6 +- .../fixtures/basic_object_query.graphql | 6 +- .../src/plugins/progressive_override/tests.rs | 55 +++-- .../src/plugins/record_replay/record.rs | 5 +- ...t__apollo_metrics_multiple_operations.snap | 41 +--- ...o__test__apollo_metrics_parse_failure.snap | 41 +--- ...st__apollo_metrics_validation_failure.snap | 41 +--- apollo-router/src/plugins/telemetry/mod.rs | 64 +++--- .../src/query_planner/bridge_query_planner.rs | 198 ++---------------- .../query_planner/caching_query_planner.rs | 58 ++--- .../testdata/defer_clause.graphql | 2 +- apollo-router/src/query_planner/tests.rs | 8 +- apollo-router/src/router/mod.rs | 8 +- .../layers/allow_only_http_post_mutations.rs | 3 - .../services/layers/persisted_queries/mod.rs | 16 +- .../src/services/layers/query_analysis.rs | 158 ++++++++------ .../src/services/supergraph/service.rs | 16 +- .../src/services/supergraph/tests.rs | 2 +- apollo-router/src/spec/mod.rs | 31 ++- apollo-router/src/spec/query.rs | 57 +---- apollo-router/src/spec/query/tests.rs | 76 ++++--- apollo-router/src/spec/schema.rs | 40 +--- .../testdata/supergraph_config.router.yaml | 1 - apollo-router/tests/integration/validation.rs | 4 +- apollo-router/tests/integration_tests.rs | 13 +- ...ests__defer_path_with_disabled_config.snap | 8 +- docs/source/configuration/overview.mdx | 12 -- 39 files changed, 541 insertions(+), 834 deletions(-) create mode 100644 .changesets/feat_geal_remove_legacy_validation.md diff --git a/.changesets/feat_geal_remove_legacy_validation.md b/.changesets/feat_geal_remove_legacy_validation.md new file mode 100644 index 0000000000..ff52e7ee0a --- /dev/null +++ b/.changesets/feat_geal_remove_legacy_validation.md @@ -0,0 +1,7 @@ +### Remove legacy validation ([PR #4551](https://github.com/apollographql/router/pull/4551)) + +GraphQL query validation was initially performed by the query planner in JavaScript, which caused some performance issues. Here, we are introducing a new Rust-based validation process using `apollo-compiler` from the `apollo-rs` project. This validation is also happening much earlier in the process, inside the "router service" instead of the query planner, which will reduce the load on the query planner and give back some room in the query planner cache.
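For illustration, here is a minimal sketch of what this earlier validation step looks like against the public `apollo-compiler` API that this patch adopts; the schema, operation, and file names below are made up for the example:

```rust
use apollo_compiler::{ExecutableDocument, Schema};

fn main() {
    // Validate the schema once, up front; success yields a `Valid<Schema>` wrapper.
    let schema = Schema::parse_and_validate("type Query { me: String }", "schema.graphqls")
        .expect("schema is valid");

    // `me` takes no arguments, so this operation fails validation immediately,
    // long before a query planner would be invoked.
    match ExecutableDocument::parse_and_validate(&schema, "{ me(id: 1) }", "query.graphql") {
        Ok(_valid_doc) => println!("operation is valid"),
        Err(with_errors) => {
            for diagnostic in with_errors.errors.iter() {
                // Each diagnostic carries a message and source locations, which the
                // router maps to GraphQL errors tagged GRAPHQL_VALIDATION_FAILED.
                println!("{}", diagnostic.to_json().message);
            }
        }
    }
}
```

The same `parse_and_validate` calls appear throughout the tests in this patch: an invalid operation is rejected at analysis time and never reaches planning.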
+ +This new validation process has been running in production for months concurrently with the JavaScript version, allowing us to detect and fix any discrepancies in the new implementation. We now have enough confidence in the new Rust-based validation to entirely switch off the less performant, JavaScript validation. + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4551 \ No newline at end of file diff --git a/apollo-router/src/axum_factory/tests.rs b/apollo-router/src/axum_factory/tests.rs index aafad48e8e..c3ac8eb2cc 100644 --- a/apollo-router/src/axum_factory/tests.rs +++ b/apollo-router/src/axum_factory/tests.rs @@ -460,7 +460,7 @@ async fn it_compress_response_body() -> Result<(), ApolloRouterError> { let response = client .post(url.as_str()) .header(ACCEPT_ENCODING, HeaderValue::from_static("gzip")) - .body(json!({ "query": "query" }).to_string()) + .body(json!({ "query": "query { me { name } }" }).to_string()) .send() .await .unwrap() @@ -484,7 +484,7 @@ async fn it_compress_response_body() -> Result<(), ApolloRouterError> { let response = client .get(url.as_str()) .header(ACCEPT_ENCODING, HeaderValue::from_static("gzip")) - .query(&json!({ "query": "query" })) + .query(&json!({ "query": "query { me { name } }" })) .send() .await .unwrap() @@ -515,7 +515,7 @@ async fn it_compress_response_body() -> Result<(), ApolloRouterError> { #[tokio::test] async fn it_decompress_request_body() -> Result<(), ApolloRouterError> { - let original_body = json!({ "query": "query" }); + let original_body = json!({ "query": "query { me { name } }" }); let mut encoder = GzipEncoder::new(Vec::new()); encoder .write_all(original_body.to_string().as_bytes()) @@ -529,7 +529,10 @@ async fn it_decompress_request_body() -> Result<(), ApolloRouterError> { let example_response = expected_response.clone(); let router_service = router::service::from_supergraph_mock_callback(move |req| { let example_response = example_response.clone(); - assert_eq!(req.supergraph_request.into_body().query.unwrap(), "query"); + assert_eq!( + req.supergraph_request.into_body().query.unwrap(), + "query { me { name } }" + ); Ok(SupergraphResponse::new_from_graphql_response( example_response, req.context, @@ -615,7 +618,7 @@ async fn response() -> Result<(), ApolloRouterError> { // Post query let response = client .post(url.as_str()) - .body(json!({ "query": "query" }).to_string()) + .body(json!({ "query": "query { me { name } }" }).to_string()) .send() .await .unwrap() @@ -630,7 +633,7 @@ async fn response() -> Result<(), ApolloRouterError> { // Get query let response = client .get(url.as_str()) - .query(&json!({ "query": "query" })) + .query(&json!({ "query": "query { me { name } }" })) .send() .await .unwrap() @@ -659,7 +662,7 @@ async fn bad_response() -> Result<(), ApolloRouterError> { // Post query let err = client .post(url.as_str()) - .body(json!({ "query": "query" }).to_string()) + .body(json!({ "query": "query { me { name } }" }).to_string()) .send() .await .unwrap() @@ -672,7 +675,7 @@ async fn bad_response() -> Result<(), ApolloRouterError> { // Get query let err = client .get(url.as_str()) - .query(&json!({ "query": "query" })) + .query(&json!({ "query": "query { me { name } }" })) .send() .await .unwrap() @@ -720,7 +723,7 @@ async fn response_with_root_wildcard() -> Result<(), ApolloRouterError> { // Post query let response = client .post(url.as_str()) - .body(json!({ "query": "query" }).to_string()) + .body(json!({ "query": "query { me { name } }" }).to_string()) .send() .await 
.unwrap() @@ -741,7 +744,7 @@ async fn response_with_root_wildcard() -> Result<(), ApolloRouterError> { .unwrap() .to_string(), ) - .body(json!({ "query": "query" }).to_string()) + .body(json!({ "query": "query { me { name } }" }).to_string()) .send() .await .unwrap() @@ -756,7 +759,7 @@ async fn response_with_root_wildcard() -> Result<(), ApolloRouterError> { // Get query let response = client .get(url.as_str()) - .query(&json!({ "query": "query" })) + .query(&json!({ "query": "query { me { name } }" })) .send() .await .unwrap() @@ -806,7 +809,7 @@ async fn response_with_custom_endpoint() -> Result<(), ApolloRouterError> { // Post query let response = client .post(url.as_str()) - .body(json!({ "query": "query" }).to_string()) + .body(json!({ "query": "query { me { name } }" }).to_string()) .send() .await .unwrap() @@ -821,7 +824,7 @@ async fn response_with_custom_endpoint() -> Result<(), ApolloRouterError> { // Get query let response = client .get(url.as_str()) - .query(&json!({ "query": "query" })) + .query(&json!({ "query": "query { me { name } }" })) .send() .await .unwrap() @@ -870,7 +873,7 @@ async fn response_with_custom_prefix_endpoint() -> Result<(), ApolloRouterError> // Post query let response = client .post(url.as_str()) - .body(json!({ "query": "query" }).to_string()) + .body(json!({ "query": "query { me { name } }" }).to_string()) .send() .await .unwrap() @@ -885,7 +888,7 @@ async fn response_with_custom_prefix_endpoint() -> Result<(), ApolloRouterError> // Get query let response = client .get(url.as_str()) - .query(&json!({ "query": "query" })) + .query(&json!({ "query": "query { me { name } }" })) .send() .await .unwrap() @@ -940,7 +943,7 @@ async fn response_with_custom_endpoint_wildcard() -> Result<(), ApolloRouterErro // Post query let response = client .post(url.as_str()) - .body(json!({ "query": "query" }).to_string()) + .body(json!({ "query": "query { me { name } }" }).to_string()) .send() .await .unwrap() @@ -955,7 +958,7 @@ async fn response_with_custom_endpoint_wildcard() -> Result<(), ApolloRouterErro // Get query let response = client .get(url.as_str()) - .query(&json!({ "query": "query" })) + .query(&json!({ "query": "query { me { name } }" })) .send() .await .unwrap() @@ -998,7 +1001,7 @@ async fn response_failure() -> Result<(), ApolloRouterError> { .body( json!( { - "query": "query", + "query": "query { me { name } }", }) .to_string(), ) @@ -1603,7 +1606,7 @@ async fn response_shape() -> Result<(), ApolloRouterError> { let (server, client) = init(router_service).await; let query = json!( { - "query": "query { test }", + "query": "query { me { name } }", }); let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap()); let response = client @@ -1639,16 +1642,16 @@ async fn deferred_response_shape() -> Result<(), ApolloRouterError> { let body = stream::iter(vec![ graphql::Response::builder() .data(json!({ - "test": "hello", + "me": "id", })) .has_next(true) .build(), graphql::Response::builder() .incremental(vec![graphql::IncrementalResponse::builder() .data(json!({ - "other": "world" + "name": "Ada" })) - .path(Path::default()) + .path(Path::from("me")) .build()]) .has_next(true) .build(), @@ -1664,7 +1667,7 @@ async fn deferred_response_shape() -> Result<(), ApolloRouterError> { let (server, client) = init(router_service).await; let query = json!( { - "query": "query { test ... @defer { other } }", + "query": "query { me { id ... 
@defer { name } } }", }); let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap()); let mut response = client @@ -1684,13 +1687,13 @@ async fn deferred_response_shape() -> Result<(), ApolloRouterError> { let first = response.chunk().await.unwrap().unwrap(); assert_eq!( std::str::from_utf8(&first).unwrap(), - "\r\n--graphql\r\ncontent-type: application/json\r\n\r\n{\"data\":{\"test\":\"hello\"},\"hasNext\":true}\r\n--graphql" + "\r\n--graphql\r\ncontent-type: application/json\r\n\r\n{\"data\":{\"me\":\"id\"},\"hasNext\":true}\r\n--graphql" ); let second = response.chunk().await.unwrap().unwrap(); assert_eq!( std::str::from_utf8(&second).unwrap(), - "\r\ncontent-type: application/json\r\n\r\n{\"hasNext\":true,\"incremental\":[{\"data\":{\"other\":\"world\"},\"path\":[]}]}\r\n--graphql" + "\r\ncontent-type: application/json\r\n\r\n{\"hasNext\":true,\"incremental\":[{\"data\":{\"name\":\"Ada\"},\"path\":[\"me\"]}]}\r\n--graphql" ); let third = response.chunk().await.unwrap().unwrap(); @@ -1707,7 +1710,7 @@ async fn multipart_response_shape_with_one_chunk() -> Result<(), ApolloRouterErr let router_service = router::service::from_supergraph_mock_callback(move |req| { let body = stream::iter(vec![graphql::Response::builder() .data(json!({ - "test": "hello", + "me": "name", })) .has_next(false) .build()]) @@ -1722,7 +1725,7 @@ async fn multipart_response_shape_with_one_chunk() -> Result<(), ApolloRouterErr let (server, client) = init(router_service).await; let query = json!( { - "query": "query { test }", + "query": "query { me { name } }", }); let url = format!("{}/", server.graphql_listen_address().as_ref().unwrap()); let mut response = client @@ -1742,7 +1745,7 @@ async fn multipart_response_shape_with_one_chunk() -> Result<(), ApolloRouterErr let first = response.chunk().await.unwrap().unwrap(); assert_eq!( std::str::from_utf8(&first).unwrap(), - "\r\n--graphql\r\ncontent-type: application/json\r\n\r\n{\"data\":{\"test\":\"hello\"},\"hasNext\":false}\r\n--graphql--\r\n" + "\r\n--graphql\r\ncontent-type: application/json\r\n\r\n{\"data\":{\"me\":\"name\"},\"hasNext\":false}\r\n--graphql--\r\n" ); server.shutdown().await @@ -2067,7 +2070,7 @@ async fn listening_to_unix_socket() { let output = send_to_unix_socket( server.graphql_listen_address().as_ref().unwrap(), Method::POST, - r#"{"query":"query"}"#, + r#"{"query":"query { me { name } }"}"#, ) .await; @@ -2080,7 +2083,7 @@ async fn listening_to_unix_socket() { let output = send_to_unix_socket( server.graphql_listen_address().as_ref().unwrap(), Method::GET, - r#"query=query"#, + r#"query=query%7Bme%7Bname%7D%7D"#, ) .await; diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index e963fbdb41..6e49389d22 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -164,10 +164,6 @@ pub struct Configuration { #[serde(default)] pub(crate) experimental_chaos: Chaos, - /// Set the GraphQL validation implementation to use. - #[serde(default)] - pub(crate) experimental_graphql_validation_mode: GraphQLValidationMode, - /// Set the API schema generation implementation to use. #[serde(default)] pub(crate) experimental_api_schema_generation_mode: ApiSchemaMode, @@ -199,21 +195,6 @@ impl PartialEq for Configuration { } } -/// GraphQL validation modes. 
-#[derive(Clone, PartialEq, Eq, Default, Derivative, Serialize, Deserialize, JsonSchema)] -#[derivative(Debug)] -#[serde(rename_all = "lowercase")] -pub(crate) enum GraphQLValidationMode { - /// Use the new Rust-based implementation. - New, - /// Use the old JavaScript-based implementation. - Legacy, - /// Use Rust-based and Javascript-based implementations side by side, logging warnings if the - /// implementations disagree. - #[default] - Both, -} - /// API schema generation modes. #[derive(Clone, PartialEq, Eq, Default, Derivative, Serialize, Deserialize, JsonSchema)] #[derivative(Debug)] @@ -254,7 +235,6 @@ impl<'de> serde::Deserialize<'de> for Configuration { uplink: UplinkConfig, limits: Limits, experimental_chaos: Chaos, - experimental_graphql_validation_mode: GraphQLValidationMode, experimental_batching: Batching, } let ad_hoc: AdHocConfiguration = serde::Deserialize::deserialize(deserializer)?; @@ -273,7 +253,6 @@ impl<'de> serde::Deserialize<'de> for Configuration { .operation_limits(ad_hoc.limits) .chaos(ad_hoc.experimental_chaos) .uplink(ad_hoc.uplink) - .graphql_validation_mode(ad_hoc.experimental_graphql_validation_mode) .experimental_batching(ad_hoc.experimental_batching) .build() .map_err(|e| serde::de::Error::custom(e.to_string())) @@ -310,7 +289,6 @@ impl Configuration { operation_limits: Option, chaos: Option, uplink: Option, - graphql_validation_mode: Option, experimental_api_schema_generation_mode: Option, experimental_batching: Option, ) -> Result { @@ -338,7 +316,6 @@ impl Configuration { persisted_queries: persisted_query.unwrap_or_default(), limits: operation_limits.unwrap_or_default(), experimental_chaos: chaos.unwrap_or_default(), - experimental_graphql_validation_mode: graphql_validation_mode.unwrap_or_default(), experimental_api_schema_generation_mode: experimental_api_schema_generation_mode.unwrap_or_default(), plugins: UserPlugins { plugins: Some(plugins), @@ -386,7 +363,6 @@ impl Configuration { operation_limits: Option, chaos: Option, uplink: Option, - graphql_validation_mode: Option, experimental_batching: Option, experimental_api_schema_generation_mode: Option, ) -> Result { @@ -399,7 +375,6 @@ impl Configuration { cors: cors.unwrap_or_default(), limits: operation_limits.unwrap_or_default(), experimental_chaos: chaos.unwrap_or_default(), - experimental_graphql_validation_mode: graphql_validation_mode.unwrap_or_default(), experimental_api_schema_generation_mode: experimental_api_schema_generation_mode .unwrap_or_default(), plugins: UserPlugins { diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 81184e7452..21648e4a98 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -1422,33 +1422,6 @@ expression: "&schema" }, "additionalProperties": false }, - "experimental_graphql_validation_mode": { - "description": "Set the GraphQL validation implementation to use.", - "default": "both", - "oneOf": [ - { - "description": "Use the new Rust-based implementation.", - "type": "string", - "enum": [ - "new" - ] - }, - { - "description": "Use the old JavaScript-based implementation.", - "type": "string", - "enum": [ - "legacy" - ] - }, - { - "description": "Use Rust-based and Javascript-based implementations side by side, logging 
warnings if the implementations disagree.", - "type": "string", - "enum": [ - "both" - ] - } - ] - }, "forbid_mutations": { "description": "Forbid mutations configuration", "type": "boolean" diff --git a/apollo-router/src/configuration/tests.rs b/apollo-router/src/configuration/tests.rs index e8e985dec1..f619a5018c 100644 --- a/apollo-router/src/configuration/tests.rs +++ b/apollo-router/src/configuration/tests.rs @@ -55,7 +55,7 @@ fn routing_url_in_schema() { REVIEWS @join__graph(name: "reviews" url: "http://localhost:4004/graphql") } "#; - let schema = crate::spec::Schema::parse(schema, &Default::default()).unwrap(); + let schema = crate::spec::Schema::parse(schema).unwrap(); let subgraphs: HashMap<&String, &Uri> = schema.subgraphs().collect(); @@ -107,7 +107,7 @@ fn missing_subgraph_url() { PRODUCTS @join__graph(name: "products" url: "http://localhost:4003/graphql") REVIEWS @join__graph(name: "reviews" url: "") }"#; - let schema_error = crate::spec::Schema::parse(schema_error, &Default::default()) + let schema_error = crate::spec::Schema::parse(schema_error) .expect_err("Must have an error because we have one missing subgraph routing url"); if let SchemaError::MissingSubgraphUrl(subgraph) = schema_error { diff --git a/apollo-router/src/context/mod.rs b/apollo-router/src/context/mod.rs index 1c8a19ff3c..224ffc69d2 100644 --- a/apollo-router/src/context/mod.rs +++ b/apollo-router/src/context/mod.rs @@ -7,6 +7,7 @@ use std::sync::Arc; use std::time::Duration; use std::time::Instant; +use apollo_compiler::validation::Valid; use apollo_compiler::ExecutableDocument; use dashmap::mapref::multiple::RefMulti; use dashmap::mapref::multiple::RefMutMulti; @@ -253,7 +254,7 @@ impl Context { /// Read only access to the executable document. This is UNSTABLE and may be changed or removed in future router releases. /// In addition, ExecutableDocument is UNSTABLE, and may be changed or removed in future apollo-rs releases. #[doc(hidden)] - pub fn unsupported_executable_document(&self) -> Option> { + pub fn unsupported_executable_document(&self) -> Option>> { self.extensions() .lock() .get::() @@ -334,8 +335,9 @@ impl Default for BusyTimer { #[cfg(test)] mod test { - use std::sync::Arc; - + use crate::spec::Query; + use crate::spec::Schema; + use crate::Configuration; use crate::Context; #[test] @@ -413,16 +415,32 @@ mod test { #[test] fn test_executable_document_access() { let c = Context::new(); + let schema = r#" + schema + @core(feature: "https://specs.apollo.dev/core/v0.1"), + @core(feature: "https://specs.apollo.dev/join/v0.1") + { + query: Query + } + type Query { + me: String + } + directive @core(feature: String!) repeatable on SCHEMA + directive @join__graph(name: String!, url: String!) 
on ENUM_VALUE + + enum join__Graph { + ACCOUNTS @join__graph(name:"accounts" url: "http://localhost:4001/graphql") + INVENTORY + @join__graph(name: "inventory", url: "http://localhost:4004/graphql") + PRODUCTS + @join__graph(name: "products" url: "http://localhost:4003/graphql") + REVIEWS @join__graph(name: "reviews" url: "http://localhost:4002/graphql") + }"#; + let schema = Schema::parse_test(schema, &Default::default()).unwrap(); + let document = + Query::parse_document("{ me }", None, &schema, &Configuration::default()).unwrap(); assert!(c.unsupported_executable_document().is_none()); - c.extensions().lock().insert(Arc::new( - crate::services::layers::query_analysis::ParsedDocumentInner { - ast: Default::default(), - executable: Default::default(), - hash: Default::default(), - parse_errors: Default::default(), - validation_errors: Default::default(), - }, - )); + c.extensions().lock().insert(document); assert!(c.unsupported_executable_document().is_some()); } } diff --git a/apollo-router/src/error.rs b/apollo-router/src/error.rs index 4e26aa0f7d..fc95798a0d 100644 --- a/apollo-router/src/error.rs +++ b/apollo-router/src/error.rs @@ -1,6 +1,8 @@ //! Router errors. use std::sync::Arc; +use apollo_compiler::validation::DiagnosticList; +use apollo_compiler::validation::WithErrors; use apollo_federation::error::FederationError; use displaydoc::Display; use lazy_static::__Deref; @@ -260,8 +262,8 @@ pub(crate) enum QueryPlannerError { /// couldn't instantiate query planner; invalid schema: {0} SchemaValidationErrors(PlannerErrors), - /// invalid query - OperationValidationErrors(Vec), + /// invalid query: {0} + OperationValidationErrors(ValidationErrors), /// couldn't plan query: {0} PlanningErrors(PlanErrors), @@ -323,21 +325,9 @@ impl IntoGraphQLErrors for Vec { impl IntoGraphQLErrors for QueryPlannerError { fn into_graphql_errors(self) -> Result, Self> { match self { - QueryPlannerError::SpecError(err) => { - let gql_err = match err.custom_extension_details() { - Some(extension_details) => Error::builder() - .message(err.to_string()) - .extension_code(err.extension_code()) - .extensions(extension_details) - .build(), - None => Error::builder() - .message(err.to_string()) - .extension_code(err.extension_code()) - .build(), - }; - - Ok(vec![gql_err]) - } + QueryPlannerError::SpecError(err) => err + .into_graphql_errors() + .map_err(QueryPlannerError::SpecError), QueryPlannerError::SchemaValidationErrors(errs) => errs .into_graphql_errors() .map_err(QueryPlannerError::SchemaValidationErrors), @@ -469,9 +459,7 @@ impl From for QueryPlannerError { impl From for QueryPlannerError { fn from(err: ValidationErrors) -> Self { - QueryPlannerError::OperationValidationErrors( - err.errors.iter().map(|e| e.to_json()).collect(), - ) + QueryPlannerError::OperationValidationErrors(ValidationErrors { errors: err.errors }) } } @@ -571,13 +559,7 @@ impl std::fmt::Display for ParseErrors { } } -/// Collection of schema validation errors. -#[derive(Debug)] -pub(crate) struct ValidationErrors { - pub(crate) errors: apollo_compiler::validation::DiagnosticList, -} - -impl IntoGraphQLErrors for ValidationErrors { +impl IntoGraphQLErrors for ParseErrors { fn into_graphql_errors(self) -> Result, Self> { Ok(self .errors @@ -596,6 +578,37 @@ impl IntoGraphQLErrors for ValidationErrors { }) .unwrap_or_default(), ) + .extension_code("GRAPHQL_PARSING_FAILED") + .build() + }) + .collect()) + } +} + +/// Collection of schema validation errors. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub(crate) struct ValidationErrors { + pub(crate) errors: Vec, +} + +impl IntoGraphQLErrors for ValidationErrors { + fn into_graphql_errors(self) -> Result, Self> { + Ok(self + .errors + .iter() + .map(|diagnostic| { + Error::builder() + .message(diagnostic.message.to_string()) + .locations( + diagnostic + .locations + .iter() + .map(|loc| ErrorLocation { + line: loc.line as u32, + column: loc.column as u32, + }) + .collect(), + ) .extension_code("GRAPHQL_VALIDATION_FAILED") .build() }) @@ -603,16 +616,34 @@ impl IntoGraphQLErrors for ValidationErrors { } } +impl From for ValidationErrors { + fn from(errors: DiagnosticList) -> Self { + Self { + errors: errors.iter().map(|e| e.to_json()).collect(), + } + } +} + +impl From> for ValidationErrors { + fn from(WithErrors { errors, .. }: WithErrors) -> Self { + errors.into() + } +} + impl std::fmt::Display for ValidationErrors { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { for (index, error) in self.errors.iter().enumerate() { if index > 0 { f.write_str("\n")?; } - if let Some(location) = error.get_line_column() { - write!(f, "[{}:{}] {}", location.line, location.column, error.error)?; + if let Some(location) = error.locations.first() { + write!( + f, + "[{}:{}] {}", + location.line, location.column, error.message + )?; } else { - write!(f, "{}", error.error)?; + write!(f, "{}", error.message)?; } } Ok(()) diff --git a/apollo-router/src/orbiter/mod.rs b/apollo-router/src/orbiter/mod.rs index 9a788099f7..975e914691 100644 --- a/apollo-router/src/orbiter/mod.rs +++ b/apollo-router/src/orbiter/mod.rs @@ -383,7 +383,7 @@ mod test { let config = Configuration::from_str(include_str!("testdata/redaction.router.yaml")) .expect("config must be valid"); let schema_string = include_str!("../testdata/minimal_supergraph.graphql"); - let schema = crate::spec::Schema::parse(schema_string, &config).unwrap(); + let schema = crate::spec::Schema::parse(schema_string).unwrap(); let report = create_report(Arc::new(config), Arc::new(schema)); insta::with_settings!({sort_maps => true}, { assert_yaml_snapshot!(report, { @@ -401,7 +401,7 @@ mod test { .expect("config must be valid"); config.validated_yaml = Some(Value::Null); let schema_string = include_str!("../testdata/minimal_supergraph.graphql"); - let schema = crate::spec::Schema::parse(schema_string, &config).unwrap(); + let schema = crate::spec::Schema::parse(schema_string).unwrap(); let report = create_report(Arc::new(config), Arc::new(schema)); insta::with_settings!({sort_maps => true}, { assert_yaml_snapshot!(report, { @@ -419,7 +419,7 @@ mod test { .expect("config must be valid"); config.validated_yaml = Some(json!({"garbage": "garbage"})); let schema_string = include_str!("../testdata/minimal_supergraph.graphql"); - let schema = crate::spec::Schema::parse(schema_string, &config).unwrap(); + let schema = crate::spec::Schema::parse(schema_string).unwrap(); let report = create_report(Arc::new(config), Arc::new(schema)); insta::with_settings!({sort_maps => true}, { assert_yaml_snapshot!(report, { diff --git a/apollo-router/src/plugins/authorization/mod.rs b/apollo-router/src/plugins/authorization/mod.rs index 85445a652c..67c235936b 100644 --- a/apollo-router/src/plugins/authorization/mod.rs +++ b/apollo-router/src/plugins/authorization/mod.rs @@ -39,10 +39,10 @@ use crate::query_planner::FilteredQuery; use crate::query_planner::QueryKey; use crate::register_plugin; use crate::services::execution; +use 
crate::services::layers::query_analysis::ParsedDocumentInner; use crate::services::supergraph; use crate::spec::query::transform; use crate::spec::query::traverse; -use crate::spec::Query; use crate::spec::Schema; use crate::spec::SpecError; use crate::Configuration; @@ -175,14 +175,11 @@ impl AuthorizationPlugin { } pub(crate) fn query_analysis( - query: &str, + doc: &ParsedDocumentInner, operation_name: Option<&str>, schema: &Schema, - configuration: &Configuration, context: &Context, - ) -> Result<(), SpecError> { - let doc = Query::parse_document(query, operation_name, schema, configuration)?; - + ) { let CacheKeyMetadata { is_authenticated, scopes, @@ -206,8 +203,6 @@ impl AuthorizationPlugin { policies.into_iter().map(|policy| (policy, None)).collect(); context.insert(REQUIRED_POLICIES_KEY, policies).unwrap(); } - - Ok(()) } pub(crate) fn generate_cache_metadata( diff --git a/apollo-router/src/plugins/demand_control/basic_cost_calculator.rs b/apollo-router/src/plugins/demand_control/basic_cost_calculator.rs index 78315277d3..1614ec6550 100644 --- a/apollo-router/src/plugins/demand_control/basic_cost_calculator.rs +++ b/apollo-router/src/plugins/demand_control/basic_cost_calculator.rs @@ -50,7 +50,7 @@ impl BasicCostCalculator { /// bound for cost anyway. fn score_field( field: &Field, - parent_type_name: Option<&NamedType>, + parent_type_name: &NamedType, schema: &Valid, ) -> Result { if BasicCostCalculator::skipped_by_directives(field) { @@ -77,7 +77,7 @@ impl BasicCostCalculator { }; type_cost += BasicCostCalculator::score_selection_set( &field.selection_set, - Some(field.ty().inner_named_type()), + field.ty().inner_named_type(), schema, )?; @@ -112,7 +112,7 @@ impl BasicCostCalculator { fn score_inline_fragment( inline_fragment: &InlineFragment, - parent_type: Option<&NamedType>, + parent_type: &NamedType, schema: &Valid, ) -> Result { BasicCostCalculator::score_selection_set( @@ -127,9 +127,17 @@ impl BasicCostCalculator { schema: &Valid, ) -> Result { let mut cost = if operation.is_mutation() { 10.0 } else { 0.0 }; + + let Some(root_type_name) = schema.root_operation(operation.operation_type) else { + return Err(DemandControlError::QueryParseFailure(format!( + "Cannot cost {} operation because the schema does not support this root type", + operation.operation_type + ))); + }; + cost += BasicCostCalculator::score_selection_set( &operation.selection_set, - operation.name.as_ref(), + root_type_name, schema, )?; @@ -138,21 +146,23 @@ impl BasicCostCalculator { fn score_selection( selection: &Selection, - parent_type: Option<&NamedType>, + parent_type: &NamedType, schema: &Valid, ) -> Result { match selection { Selection::Field(f) => BasicCostCalculator::score_field(f, parent_type, schema), Selection::FragmentSpread(s) => BasicCostCalculator::score_fragment_spread(s), - Selection::InlineFragment(i) => { - BasicCostCalculator::score_inline_fragment(i, parent_type, schema) - } + Selection::InlineFragment(i) => BasicCostCalculator::score_inline_fragment( + i, + i.type_condition.as_ref().unwrap_or(parent_type), + schema, + ), } } fn score_selection_set( selection_set: &SelectionSet, - parent_type_name: Option<&NamedType>, + parent_type_name: &NamedType, schema: &Valid, ) -> Result { let mut cost = 0.0; @@ -331,21 +341,44 @@ mod tests { use crate::Configuration; use crate::Context; + fn parse_schema_and_operation( + schema_str: &str, + query_str: &str, + config: &Configuration, + ) -> (spec::Schema, ParsedDocument) { + let schema = spec::Schema::parse_test(schema_str, config).unwrap(); + 
let query = Query::parse_document(query_str, None, &schema, config).unwrap(); + (schema, query) + } + + /// Estimate cost of an operation executed on a supergraph. fn estimated_cost(schema_str: &str, query_str: &str) -> f64 { - let schema = Schema::parse_and_validate(schema_str, "").unwrap(); - let query = ExecutableDocument::parse(&schema, query_str, "").unwrap(); + let (schema, query) = + parse_schema_and_operation(schema_str, query_str, &Default::default()); + BasicCostCalculator::estimated(&query.executable, &schema.definitions).unwrap() + } + + /// Estimate cost of an operation on a plain, non-federated schema. + fn basic_estimated_cost(schema_str: &str, query_str: &str) -> f64 { + let schema = + apollo_compiler::Schema::parse_and_validate(schema_str, "schema.graphqls").unwrap(); + let query = apollo_compiler::ExecutableDocument::parse_and_validate( + &schema, + query_str, + "query.graphql", + ) + .unwrap(); BasicCostCalculator::estimated(&query, &schema).unwrap() } async fn planned_cost(schema_str: &str, query_str: &str) -> f64 { let config: Arc = Arc::new(Default::default()); + let (_schema, query) = parse_schema_and_operation(schema_str, query_str, &config); + let mut planner = BridgeQueryPlanner::new(schema_str.to_string(), config.clone()) .await .unwrap(); - let schema = spec::Schema::parse(schema_str, &config).unwrap(); - let query = Query::parse_document(query_str, None, &schema, &config).unwrap(); - let ctx = Context::new(); ctx.extensions().lock().insert::(query); @@ -366,10 +399,10 @@ mod tests { } fn actual_cost(schema_str: &str, query_str: &str, response_bytes: &'static [u8]) -> f64 { - let schema = Schema::parse_and_validate(schema_str, "").unwrap(); - let query = ExecutableDocument::parse(&schema, query_str, "").unwrap(); + let (_schema, query) = + parse_schema_and_operation(schema_str, query_str, &Default::default()); let response = Response::from_bytes("test", Bytes::from(response_bytes)).unwrap(); - BasicCostCalculator::actual(&query, &response).unwrap() + BasicCostCalculator::actual(&query.executable, &response).unwrap() } #[test] @@ -377,7 +410,7 @@ mod tests { let schema = include_str!("./fixtures/basic_schema.graphql"); let query = include_str!("./fixtures/basic_query.graphql"); - assert_eq!(estimated_cost(schema, query), 0.0) + assert_eq!(basic_estimated_cost(schema, query), 0.0) } #[test] @@ -385,7 +418,7 @@ mod tests { let schema = include_str!("./fixtures/basic_schema.graphql"); let query = include_str!("./fixtures/basic_mutation.graphql"); - assert_eq!(estimated_cost(schema, query), 10.0) + assert_eq!(basic_estimated_cost(schema, query), 10.0) } #[test] @@ -393,7 +426,7 @@ mod tests { let schema = include_str!("./fixtures/basic_schema.graphql"); let query = include_str!("./fixtures/basic_object_query.graphql"); - assert_eq!(estimated_cost(schema, query), 1.0) + assert_eq!(basic_estimated_cost(schema, query), 1.0) } #[test] @@ -401,7 +434,7 @@ mod tests { let schema = include_str!("./fixtures/basic_schema.graphql"); let query = include_str!("./fixtures/basic_interface_query.graphql"); - assert_eq!(estimated_cost(schema, query), 1.0) + assert_eq!(basic_estimated_cost(schema, query), 1.0) } #[test] @@ -409,7 +442,7 @@ mod tests { let schema = include_str!("./fixtures/basic_schema.graphql"); let query = include_str!("./fixtures/basic_union_query.graphql"); - assert_eq!(estimated_cost(schema, query), 1.0) + assert_eq!(basic_estimated_cost(schema, query), 1.0) } #[test] @@ -417,7 +450,7 @@ mod tests { let schema = include_str!("./fixtures/basic_schema.graphql"); 
let query = include_str!("./fixtures/basic_object_list_query.graphql"); - assert_eq!(estimated_cost(schema, query), 100.0) + assert_eq!(basic_estimated_cost(schema, query), 100.0) } #[test] @@ -425,7 +458,7 @@ mod tests { let schema = include_str!("./fixtures/basic_schema.graphql"); let query = include_str!("./fixtures/basic_scalar_list_query.graphql"); - assert_eq!(estimated_cost(schema, query), 0.0) + assert_eq!(basic_estimated_cost(schema, query), 0.0) } #[test] @@ -433,7 +466,7 @@ mod tests { let schema = include_str!("./fixtures/basic_schema.graphql"); let query = include_str!("./fixtures/basic_nested_list_query.graphql"); - assert_eq!(estimated_cost(schema, query), 10100.0) + assert_eq!(basic_estimated_cost(schema, query), 10100.0) } #[test] @@ -441,7 +474,7 @@ mod tests { let schema = include_str!("./fixtures/basic_schema.graphql"); let query = include_str!("./fixtures/basic_skipped_query.graphql"); - assert_eq!(estimated_cost(schema, query), 0.0) + assert_eq!(basic_estimated_cost(schema, query), 0.0) } #[test] @@ -449,7 +482,7 @@ mod tests { let schema = include_str!("./fixtures/basic_schema.graphql"); let query = include_str!("./fixtures/basic_excluded_query.graphql"); - assert_eq!(estimated_cost(schema, query), 0.0) + assert_eq!(basic_estimated_cost(schema, query), 0.0) } #[test(tokio::test)] diff --git a/apollo-router/src/plugins/demand_control/directives.rs b/apollo-router/src/plugins/demand_control/directives.rs index 2fb9a83c99..73528107e9 100644 --- a/apollo-router/src/plugins/demand_control/directives.rs +++ b/apollo-router/src/plugins/demand_control/directives.rs @@ -32,29 +32,39 @@ pub(super) struct RequiresDirective { impl RequiresDirective { pub(super) fn from_field( field: &Field, - parent_type_name: Option<&NamedType>, + parent_type_name: &NamedType, schema: &Valid, ) -> Result, DemandControlError> { // When a user marks a subgraph schema field with `@requires`, the composition process // replaces `@requires(field: "")` with `@join__field(requires: "")`. - let requires_arg = field - .definition + // + // Note we cannot use `field.definition` in this case: The operation executes against the + // API schema, so its definition pointers point into the API schema. To find the + // `@join__field()` directive, we must instead look up the field on the type with the same + // name in the supergraph. + let definition = schema + .type_field(parent_type_name, &field.name) + .map_err(|_err| { + DemandControlError::QueryParseFailure(format!( + "Could not find the API schema type {}.{} in the supergraph. This looks like a bug", + parent_type_name, &field.name + )) + })?; + let requires_arg = definition .directives .get("join__field") .and_then(|requires| requires.argument_by_name("requires")) .and_then(|arg| arg.as_str()); - match (requires_arg, parent_type_name) { - (Some(arg), Some(type_name)) => { - let field_set = Parser::new() - .parse_field_set(schema, type_name.clone(), arg, "")?; + if let Some(arg) = requires_arg { + let field_set = + Parser::new().parse_field_set(schema, parent_type_name.clone(), arg, "")?; - Ok(Some(RequiresDirective { - fields: field_set.selection_set.clone(), - })) - } - (Some(_), None) => Err(DemandControlError::QueryParseFailure("Parent type name is required to parse fields argument of @requires but none was provided. 
This is likely because @requires was placed on an anonymous query.".to_string())), - (None, _) => Ok(None) + Ok(Some(RequiresDirective { + fields: field_set.selection_set.clone(), + })) + } else { + Ok(None) } } } diff --git a/apollo-router/src/plugins/demand_control/fixtures/basic_interface_query.graphql b/apollo-router/src/plugins/demand_control/fixtures/basic_interface_query.graphql index 30ed2d7d21..19b1b2962a 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/basic_interface_query.graphql +++ b/apollo-router/src/plugins/demand_control/fixtures/basic_interface_query.graphql @@ -1,3 +1,5 @@ { - interfaceInstance1 -} \ No newline at end of file + interfaceInstance1 { + field2 + } +} diff --git a/apollo-router/src/plugins/demand_control/fixtures/basic_object_list_query.graphql b/apollo-router/src/plugins/demand_control/fixtures/basic_object_list_query.graphql index 758c1b4d78..bd31a82e29 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/basic_object_list_query.graphql +++ b/apollo-router/src/plugins/demand_control/fixtures/basic_object_list_query.graphql @@ -1,3 +1,5 @@ { - someObjects -} \ No newline at end of file + someObjects { + field1 + } +} diff --git a/apollo-router/src/plugins/demand_control/fixtures/basic_object_query.graphql b/apollo-router/src/plugins/demand_control/fixtures/basic_object_query.graphql index 3fac69ac63..c8a165d6d8 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/basic_object_query.graphql +++ b/apollo-router/src/plugins/demand_control/fixtures/basic_object_query.graphql @@ -1,3 +1,5 @@ { - object1 -} \ No newline at end of file + object1 { + field1 + } +} diff --git a/apollo-router/src/plugins/progressive_override/tests.rs b/apollo-router/src/plugins/progressive_override/tests.rs index 303157ca7a..b2d795483f 100644 --- a/apollo-router/src/plugins/progressive_override/tests.rs +++ b/apollo-router/src/plugins/progressive_override/tests.rs @@ -1,7 +1,5 @@ use std::sync::Arc; -use apollo_compiler::ast::Document; -use apollo_compiler::Schema; use tower::ServiceExt; use crate::metrics::FutureMetricsExt; @@ -14,7 +12,6 @@ use crate::plugins::progressive_override::ProgressiveOverridePlugin; use crate::plugins::progressive_override::LABELS_TO_OVERRIDE_KEY; use crate::plugins::progressive_override::UNRESOLVED_LABELS_KEY; use crate::services::layers::query_analysis::ParsedDocument; -use crate::services::layers::query_analysis::ParsedDocumentInner; use crate::services::router; use crate::services::supergraph; use crate::services::RouterResponse; @@ -136,16 +133,14 @@ async fn assert_expected_and_absent_labels_for_supergraph_service( .unwrap() .supergraph_service(mock_service.boxed()); - // plugin depends on the parsed document being in the context so we'll add - // it ourselves for testing purposes - let schema = Schema::parse_and_validate(SCHEMA, "").unwrap(); - let document = Document::parse(query, "query.graphql").unwrap(); - let executable = document.to_executable(&schema).unwrap(); - let parsed_doc: ParsedDocument = Arc::from(ParsedDocumentInner { - ast: document, - executable: Arc::new(executable), - ..Default::default() - }); + let schema = crate::spec::Schema::parse_test( + include_str!("./testdata/supergraph.graphql"), + &Default::default(), + ) + .unwrap(); + let parsed_doc = + crate::spec::Query::parse_document(query, None, &schema, &crate::Configuration::default()) + .unwrap(); let context = Context::new(); context @@ -211,14 +206,14 @@ async fn plugin_supergraph_service_trims_0pc_label() { } async fn 
get_json_query_plan(query: &str) -> serde_json::Value { - let schema = Schema::parse_and_validate(SCHEMA, "").unwrap(); - let document = Document::parse(query, "query.graphql").unwrap(); - let executable = document.to_executable(&schema).unwrap(); - let parsed_doc: ParsedDocument = Arc::from(ParsedDocumentInner { - ast: document, - executable: Arc::new(executable), - ..Default::default() - }); + let schema = crate::spec::Schema::parse_test( + include_str!("./testdata/supergraph.graphql"), + &Default::default(), + ) + .unwrap(); + let parsed_doc = + crate::spec::Query::parse_document(query, None, &schema, &crate::Configuration::default()) + .unwrap(); let context: Context = Context::new(); context @@ -286,16 +281,14 @@ async fn query_with_labels(query: &str, labels_from_coprocessors: Vec<&str>) { .unwrap() .supergraph_service(mock_service.boxed()); - // plugin depends on the parsed document being in the context so we'll add - // it ourselves for testing purposes - let schema = Schema::parse_and_validate(SCHEMA, "").unwrap(); - let document = Document::parse(query, "query.graphql").unwrap(); - let executable = document.to_executable(&schema).unwrap(); - let parsed_doc: ParsedDocument = Arc::from(ParsedDocumentInner { - ast: document, - executable: Arc::new(executable), - ..Default::default() - }); + let schema = crate::spec::Schema::parse_test( + include_str!("./testdata/supergraph.graphql"), + &Default::default(), + ) + .unwrap(); + let parsed_doc = + crate::spec::Query::parse_document(query, None, &schema, &crate::Configuration::default()) + .unwrap(); let context = Context::new(); context diff --git a/apollo-router/src/plugins/record_replay/record.rs b/apollo-router/src/plugins/record_replay/record.rs index 1ceb5f85cc..134a431524 100644 --- a/apollo-router/src/plugins/record_replay/record.rs +++ b/apollo-router/src/plugins/record_replay/record.rs @@ -66,10 +66,7 @@ impl Plugin for Record { enabled: init.config.enabled, supergraph_sdl: init.supergraph_sdl.clone(), storage_path: storage_path.clone().into(), - schema: Arc::new(Schema::parse( - init.supergraph_sdl.clone().as_str(), - &Configuration::default(), - )?), + schema: Arc::new(Schema::parse(init.supergraph_sdl.clone().as_str())?), }; if init.config.enabled { diff --git a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_multiple_operations.snap b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_multiple_operations.snap index edc8cf10c7..a4ce331e08 100644 --- a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_multiple_operations.snap +++ b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_multiple_operations.snap @@ -2,43 +2,4 @@ source: apollo-router/src/plugins/telemetry/metrics/apollo.rs expression: results --- -[ - { - "request_id": "[REDACTED]", - "stats": { - "## GraphQLValidationFailure\n": { - "stats_with_context": { - "context": { - "client_name": "test_client", - "client_version": "1.0-test", - "operation_type": "query", - "operation_subtype": "" - }, - "query_latency_stats": { - "latency": { - "secs": 0, - "nanos": 100000000 - }, - "cache_hit": false, - "persisted_query_hit": null, - "cache_latency": null, - "root_error_stats": { - "children": {}, - "errors_count": 0, - "requests_with_errors_count": 0 - }, - 
"has_errors": true, - "public_cache_ttl_latency": null, - "private_cache_ttl_latency": null, - "registered_operation": false, - "forbidden_operation": false, - "without_field_instrumentation": false - }, - "per_type_stat": {} - }, - "referenced_fields_by_type": {} - } - }, - "licensed_operation_count_by_type": null - } -] +[] diff --git a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_parse_failure.snap b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_parse_failure.snap index c4a2ff94ef..a4ce331e08 100644 --- a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_parse_failure.snap +++ b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_parse_failure.snap @@ -2,43 +2,4 @@ source: apollo-router/src/plugins/telemetry/metrics/apollo.rs expression: results --- -[ - { - "request_id": "[REDACTED]", - "stats": { - "## GraphQLParseFailure\n": { - "stats_with_context": { - "context": { - "client_name": "test_client", - "client_version": "1.0-test", - "operation_type": "query", - "operation_subtype": "" - }, - "query_latency_stats": { - "latency": { - "secs": 0, - "nanos": 100000000 - }, - "cache_hit": false, - "persisted_query_hit": null, - "cache_latency": null, - "root_error_stats": { - "children": {}, - "errors_count": 0, - "requests_with_errors_count": 0 - }, - "has_errors": true, - "public_cache_ttl_latency": null, - "private_cache_ttl_latency": null, - "registered_operation": false, - "forbidden_operation": false, - "without_field_instrumentation": false - }, - "per_type_stat": {} - }, - "referenced_fields_by_type": {} - } - }, - "licensed_operation_count_by_type": null - } -] +[] diff --git a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_validation_failure.snap b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_validation_failure.snap index edc8cf10c7..a4ce331e08 100644 --- a/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_validation_failure.snap +++ b/apollo-router/src/plugins/telemetry/metrics/snapshots/apollo_router__plugins__telemetry__metrics__apollo__test__apollo_metrics_validation_failure.snap @@ -2,43 +2,4 @@ source: apollo-router/src/plugins/telemetry/metrics/apollo.rs expression: results --- -[ - { - "request_id": "[REDACTED]", - "stats": { - "## GraphQLValidationFailure\n": { - "stats_with_context": { - "context": { - "client_name": "test_client", - "client_version": "1.0-test", - "operation_type": "query", - "operation_subtype": "" - }, - "query_latency_stats": { - "latency": { - "secs": 0, - "nanos": 100000000 - }, - "cache_hit": false, - "persisted_query_hit": null, - "cache_latency": null, - "root_error_stats": { - "children": {}, - "errors_count": 0, - "requests_with_errors_count": 0 - }, - "has_errors": true, - "public_cache_ttl_latency": null, - "private_cache_ttl_latency": null, - "registered_operation": false, - "forbidden_operation": false, - "without_field_instrumentation": false - }, - "per_type_stat": {} - }, - "referenced_fields_by_type": {} - } - }, - "licensed_operation_count_by_type": null - } -] +[] diff --git 
a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index 97755df1f7..8c4e4da8c7 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -288,6 +288,8 @@ impl Plugin for Telemetry { let span_mode = config.instrumentation.spans.mode; let use_legacy_request_span = matches!(config.instrumentation.spans.mode, SpanMode::Deprecated); + let field_level_instrumentation_ratio = self.field_level_instrumentation_ratio; + let metrics_sender = self.apollo_metrics_sender.clone(); ServiceBuilder::new() .map_response(move |response: router::Response| { @@ -335,18 +337,24 @@ impl Plugin for Telemetry { ); } - let client_name: &str = request + let client_name = request .router_request .headers() .get(&config_request.apollo.client_name_header) - .and_then(|h| h.to_str().ok()) - .unwrap_or(""); + .and_then(|h| h.to_str().ok()); let client_version = request .router_request .headers() .get(&config_request.apollo.client_version_header) - .and_then(|h| h.to_str().ok()) - .unwrap_or(""); + .and_then(|h| h.to_str().ok()); + + if let Some(name) = client_name { + let _ = request.context.insert(CLIENT_NAME, name.to_owned()); + } + + if let Some(version) = client_version { + let _ = request.context.insert(CLIENT_VERSION, version.to_owned()); + } let mut custom_attributes = config_request .instrumentation @@ -356,8 +364,8 @@ impl Plugin for Telemetry { .on_request(request); custom_attributes.extend([ - KeyValue::new(CLIENT_NAME_KEY, client_name.to_string()), - KeyValue::new(CLIENT_VERSION_KEY, client_version.to_string()), + KeyValue::new(CLIENT_NAME_KEY, client_name.unwrap_or("").to_string()), + KeyValue::new(CLIENT_VERSION_KEY, client_version.unwrap_or("").to_string()), KeyValue::new( Key::from_static_str("apollo_private.http.request_headers"), filter_headers( @@ -387,6 +395,7 @@ impl Plugin for Telemetry { fut| { let start = Instant::now(); let config = config_later.clone(); + let sender = metrics_sender.clone(); Self::plugin_metrics(&config); @@ -433,6 +442,29 @@ impl Plugin for Telemetry { } } + if response + .context + .extensions() + .lock() + .get::>() + .map(|u| { + u.stats_report_key == "## GraphQLValidationFailure\n" + || u.stats_report_key == "## GraphQLParseFailure\n" + }) + .unwrap_or(false) + { + Self::update_apollo_metrics( + &response.context, + field_level_instrumentation_ratio, + sender, + true, + start.elapsed(), + // the query is invalid, we did not parse the operation kind + OperationKind::Query, + None, + ); + } + if response.response.status() >= StatusCode::BAD_REQUEST { span.record(OTEL_STATUS_CODE, OTEL_STATUS_CODE_ERROR); } else { @@ -940,27 +972,9 @@ impl Telemetry { field_level_instrumentation_ratio: f64, req: &SupergraphRequest, ) { - let apollo_config = &config.apollo; let context = &req.context; let http_request = &req.supergraph_request; let headers = http_request.headers(); - let client_name_header = &apollo_config.client_name_header; - let client_version_header = &apollo_config.client_version_header; - if let Some(name) = headers - .get(client_name_header) - .and_then(|h| h.to_str().ok()) - .map(|s| s.to_owned()) - { - let _ = context.insert(CLIENT_NAME, name); - } - - if let Some(version) = headers - .get(client_version_header) - .and_then(|h| h.to_str().ok()) - .map(|s| s.to_owned()) - { - let _ = context.insert(CLIENT_VERSION, version); - } let (should_log_headers, should_log_body) = config.exporters.logging.should_log(req); if should_log_headers { diff --git 
a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index bad73ffbe8..f36bea7467 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -25,12 +25,10 @@ use tower::Service; use super::PlanNode; use super::QueryKey; -use crate::configuration::GraphQLValidationMode; use crate::error::PlanErrors; use crate::error::QueryPlannerError; use crate::error::SchemaError; use crate::error::ServiceBuildError; -use crate::error::ValidationErrors; use crate::graphql; use crate::introspection::Introspection; use crate::json_ext::Object; @@ -53,13 +51,6 @@ use crate::spec::Schema; use crate::spec::SpecError; use crate::Configuration; -// For reporting validation results with `experimental_graphql_validation_mode: both`. -const VALIDATION_SOURCE_SCHEMA: &str = "schema"; -const VALIDATION_SOURCE_OPERATION: &str = "operation"; -const VALIDATION_FALSE_NEGATIVE: &str = "false_negative"; -const VALIDATION_FALSE_POSITIVE: &str = "false_positive"; -const VALIDATION_MATCH: &str = "match"; - #[derive(Clone)] /// A query planner that calls out to the nodejs router-bridge query planner. /// @@ -95,7 +86,7 @@ impl BridgeQueryPlanner { sdl: String, configuration: Arc, ) -> Result { - let schema = Schema::parse(&sdl, &configuration)?; + let schema = Schema::parse(&sdl)?; let planner = Planner::new( sdl, @@ -105,10 +96,7 @@ impl BridgeQueryPlanner { incremental_delivery: Some(IncrementalDeliverySupport { enable_defer: Some(configuration.supergraph.defer_support), }), - graphql_validation: matches!( - configuration.experimental_graphql_validation_mode, - GraphQLValidationMode::Legacy | GraphQLValidationMode::Both - ), + graphql_validation: false, debug: Some(QueryPlannerDebugConfig { bypass_planner_for_single_subgraph: None, max_evaluated_plans: configuration @@ -123,46 +111,7 @@ impl BridgeQueryPlanner { }), }, ) - .await; - - let planner = match planner { - Ok(planner) => planner, - Err(err) => { - if configuration.experimental_graphql_validation_mode == GraphQLValidationMode::Both - { - let has_validation_errors = err.iter().any(|err| err.is_validation_error()); - - if has_validation_errors && !schema.has_errors() { - tracing::warn!( - monotonic_counter.apollo.router.operations.validation = 1u64, - validation.source = VALIDATION_SOURCE_SCHEMA, - validation.result = VALIDATION_FALSE_NEGATIVE, - "validation mismatch: JS query planner reported a schema validation error, but apollo-rs did not" - ); - } - } - - return Err(err.into()); - } - }; - - if configuration.experimental_graphql_validation_mode == GraphQLValidationMode::Both { - if schema.has_errors() { - tracing::warn!( - monotonic_counter.apollo.router.operations.validation = 1u64, - validation.source = VALIDATION_SOURCE_SCHEMA, - validation.result = VALIDATION_FALSE_POSITIVE, - "validation mismatch: apollo-rs reported a schema validation error, but JS query planner did not" - ); - } else { - // false_negative was an early return so we know it was correct here - tracing::info!( - monotonic_counter.apollo.router.operations.validation = 1u64, - validation.source = VALIDATION_SOURCE_SCHEMA, - validation.result = VALIDATION_MATCH - ); - } - } + .await?; let planner = Arc::new(planner); @@ -238,7 +187,7 @@ impl BridgeQueryPlanner { api_schema? 
} }; - let api_schema = Schema::parse(&api_schema_string, &configuration)?; + let api_schema = Schema::parse(&api_schema_string)?; let schema = Arc::new(schema.with_api_schema(api_schema)); @@ -246,7 +195,7 @@ impl BridgeQueryPlanner { HashMap::new(); for (name, schema_str) in planner.subgraphs().await? { let schema = apollo_compiler::Schema::parse_and_validate(schema_str, "") - .map_err(|e| SchemaError::Validate(ValidationErrors { errors: e.errors }))?; + .map_err(|errors| SchemaError::Validate(errors.into()))?; subgraph_schemas.insert(name, Arc::new(schema)); } let subgraph_schemas = Arc::new(subgraph_schemas); @@ -284,10 +233,7 @@ impl BridgeQueryPlanner { incremental_delivery: Some(IncrementalDeliverySupport { enable_defer: Some(configuration.supergraph.defer_support), }), - graphql_validation: matches!( - configuration.experimental_graphql_validation_mode, - GraphQLValidationMode::Legacy | GraphQLValidationMode::Both - ), + graphql_validation: false, reuse_query_fragments: configuration.supergraph.reuse_query_fragments, generate_query_fragments: Some( configuration.supergraph.generate_query_fragments, @@ -310,14 +256,14 @@ impl BridgeQueryPlanner { ); let api_schema = planner.api_schema().await?; - let api_schema = Schema::parse(&api_schema.schema, &configuration)?; - let schema = Arc::new(Schema::parse(&schema, &configuration)?.with_api_schema(api_schema)); + let api_schema = Schema::parse(&api_schema.schema)?; + let schema = Arc::new(Schema::parse(&schema)?.with_api_schema(api_schema)); let mut subgraph_schemas: HashMap>> = HashMap::new(); for (name, schema_str) in planner.subgraphs().await? { let schema = apollo_compiler::Schema::parse_and_validate(schema_str, "") - .map_err(|e| SchemaError::Validate(ValidationErrors { errors: e.errors }))?; + .map_err(|errors| SchemaError::Validate(errors.into()))?; subgraph_schemas.insert(name, Arc::new(schema)); } let subgraph_schemas = Arc::new(subgraph_schemas); @@ -362,7 +308,6 @@ impl BridgeQueryPlanner { operation_name: Option<&str>, doc: &ParsedDocument, ) -> Result { - Query::check_errors(doc)?; let executable = &doc.executable; crate::spec::operation_limits::check( &self.configuration, @@ -370,14 +315,6 @@ impl BridgeQueryPlanner { executable, operation_name, )?; - let validation_error = match self.configuration.experimental_graphql_validation_mode { - GraphQLValidationMode::Legacy => None, - GraphQLValidationMode::New => { - Query::validate_query(doc)?; - None - } - GraphQLValidationMode::Both => Query::validate_query(doc).err(), - }; let (fragments, operations, defer_stats, schema_aware_hash) = Query::extract_query_information(&self.schema, executable, operation_name)?; @@ -400,7 +337,6 @@ impl BridgeQueryPlanner { subselections, defer_stats, is_original: true, - validation_error, schema_aware_hash, }) } @@ -430,61 +366,6 @@ impl BridgeQueryPlanner { selections: Query, plan_options: PlanOptions, ) -> Result { - fn is_validation_error(errors: &PlanErrors) -> bool { - errors.errors.iter().all(|err| err.validation_error) - } - - /// Compare errors from graphql-js and apollo-rs validation, and produce metrics on - /// whether they had the same result. - /// - /// The result isn't inspected deeply: it only checks validation success/failure. 
- fn compare_validation_errors( - js_validation_error: Option<&PlanErrors>, - rs_validation_error: Option<&crate::error::ValidationErrors>, - ) { - match ( - js_validation_error.map_or(false, is_validation_error), - rs_validation_error, - ) { - (false, Some(validation_error)) => { - let error_code = validation_error - .errors - .iter() - .next() - .and_then(|err| err.error.unstable_error_name()); - tracing::warn!( - monotonic_counter.apollo.router.operations.validation = 1u64, - validation.source = VALIDATION_SOURCE_OPERATION, - validation.result = VALIDATION_FALSE_POSITIVE, - validation.code = error_code, - "validation mismatch: JS query planner did not report query validation error, but apollo-rs did" - ); - tracing::warn!( - "validation mismatch: Rust validation reported: {validation_error}" - ); - } - (true, None) => { - tracing::warn!( - monotonic_counter.apollo.router.operations.validation = 1u64, - validation.source = VALIDATION_SOURCE_OPERATION, - validation.result = VALIDATION_FALSE_NEGATIVE, - "validation mismatch: apollo-rs did not report query validation error, but JS query planner did" - ); - tracing::warn!( - "validation mismatch: JS validation reported: {}", - // Unwrapping is safe because `is_validation_error` is true - js_validation_error.unwrap(), - ); - } - // if JS and Rust implementations agree, we return the JS result for now. - _ => tracing::info!( - monotonic_counter.apollo.router.operations.validation = 1u64, - validation.source = VALIDATION_SOURCE_OPERATION, - validation.result = VALIDATION_MATCH, - ), - } - } - let planner_result = match self .planner .plan(filtered_query.clone(), operation.clone(), plan_options) @@ -501,26 +382,10 @@ impl BridgeQueryPlanner { } Err(err) => { let plan_errors: PlanErrors = err.into(); - if matches!( - self.configuration.experimental_graphql_validation_mode, - GraphQLValidationMode::Both - ) { - compare_validation_errors( - Some(&plan_errors), - selections.validation_error.as_ref(), - ); - } return Err(QueryPlannerError::from(plan_errors)); } }; - if matches!( - self.configuration.experimental_graphql_validation_mode, - GraphQLValidationMode::Both - ) { - compare_validation_errors(None, selections.validation_error.as_ref()); - } - // the `statsReportKey` field should match the original query instead of the filtered query, to index them all under the same query let operation_signature = if original_query != filtered_query { Some( @@ -618,9 +483,8 @@ impl Service for BridgeQueryPlanner { } Ok(modified_query) => { let executable_document = modified_query - .to_executable(schema) - // Assume transformation creates a valid document: ignore conversion errors - .unwrap_or_else(|invalid| invalid.partial); + .to_executable_validate(schema) + .map_err(|e| SpecError::ValidationError(e.into()))?; let hash = QueryHashVisitor::hash_query( schema, &executable_document, @@ -631,11 +495,6 @@ impl Service for BridgeQueryPlanner { executable: Arc::new(executable_document), ast: modified_query, hash: Arc::new(QueryHash(hash)), - // Carry errors from previous ParsedDocument - // and assume transformation doesnโ€™t introduce new errors. - // TODO: check the latter? 
- parse_errors: doc.parse_errors.clone(), - validation_errors: doc.validation_errors.clone(), }); context .extensions() @@ -743,9 +602,8 @@ impl BridgeQueryPlanner { if let Some((unauthorized_paths, new_doc)) = filter_res { key.filtered_query = new_doc.to_string(); let executable_document = new_doc - .to_executable(&self.schema.api_schema().definitions) - // Assume transformation creates a valid document: ignore conversion errors - .unwrap_or_else(|invalid| invalid.partial); + .to_executable_validate(&self.schema.api_schema().definitions) + .map_err(|e| SpecError::ValidationError(e.into()))?; let hash = QueryHashVisitor::hash_query( &self.schema.definitions, &executable_document, @@ -756,11 +614,6 @@ impl BridgeQueryPlanner { executable: Arc::new(executable_document), ast: new_doc, hash: Arc::new(QueryHash(hash)), - // Carry errors from previous ParsedDocument - // and assume transformation doesnโ€™t introduce new errors. - // TODO: check the latter? - parse_errors: doc.parse_errors.clone(), - validation_errors: doc.validation_errors.clone(), }); selections.unauthorized.paths = unauthorized_paths; } @@ -1055,28 +908,6 @@ mod tests { } } - #[test(tokio::test)] - async fn test_plan_invalid_query() { - let err = plan( - EXAMPLE_SCHEMA, - "fragment UnusedTestFragment on User { id } query { me { id } }", - "fragment UnusedTestFragment on User { id } query { me { id } }", - None, - PlanOptions::default(), - ) - .await - .unwrap_err(); - - match err { - QueryPlannerError::PlanningErrors(errors) => { - insta::assert_debug_snapshot!("plan_invalid_query_errors", errors); - } - e => { - panic!("invalid query planning should have failed: {e:?}"); - } - } - } - #[test] fn empty_query_plan() { serde_json::from_value::(json!({ "plan": { "kind": "QueryPlan"} } )).expect( @@ -1124,7 +955,7 @@ mod tests { #[test(tokio::test)] async fn empty_query_plan_should_be_a_planner_error() { - let schema = Schema::parse(EXAMPLE_SCHEMA, &Default::default()).unwrap(); + let schema = Schema::parse(EXAMPLE_SCHEMA).unwrap(); let query = include_str!("testdata/unknown_introspection_query.graphql"); let planner = BridgeQueryPlanner::new(EXAMPLE_SCHEMA.to_string(), Default::default()) @@ -1505,7 +1336,6 @@ mod tests { ) -> Result { let mut configuration: Configuration = Default::default(); configuration.supergraph.introspection = true; - configuration.experimental_graphql_validation_mode = GraphQLValidationMode::Both; let configuration = Arc::new(configuration); let planner = BridgeQueryPlanner::new(schema.to_string(), configuration.clone()) diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs index 74d72d9f0d..ed2cb0d396 100644 --- a/apollo-router/src/query_planner/caching_query_planner.rs +++ b/apollo-router/src/query_planner/caching_query_planner.rs @@ -40,7 +40,6 @@ use crate::services::query_planner; use crate::services::QueryPlannerContent; use crate::services::QueryPlannerRequest; use crate::services::QueryPlannerResponse; -use crate::spec::Query; use crate::spec::Schema; use crate::spec::SpecError; use crate::Configuration; @@ -228,14 +227,16 @@ where let entry = self.cache.get(&caching_key).await; if entry.is_first() { - let err_res = Query::check_errors(&doc); - if let Err(error) = err_res { - let e = Arc::new(QueryPlannerError::SpecError(error)); - tokio::spawn(async move { - entry.insert(Err(e)).await; - }); - continue; - } + let doc = match query_analysis.parse_document(&query, operation.as_deref()) { + Ok(doc) => doc, + Err(error) => { + 
let e = Arc::new(QueryPlannerError::SpecError(error)); + tokio::spawn(async move { + entry.insert(Err(e)).await; + }); + continue; + } + }; let schema = &self.schema.api_schema().definitions; if let Ok(modified_query) = add_defer_labels(schema, &doc.ast) { @@ -414,25 +415,6 @@ where // of restarting the query planner until another timeout tokio::task::spawn( async move { - let err_res = Query::check_errors(&doc); - - if let Err(error) = err_res { - request - .context - .extensions() - .lock() - .insert(Arc::new(UsageReporting { - stats_report_key: error.get_error_key().to_string(), - referenced_fields_by_type: HashMap::new(), - })); - let e = Arc::new(QueryPlannerError::SpecError(error)); - let err = e.clone(); - tokio::spawn(async move { - entry.insert(Err(err)).await; - }); - return Err(CacheResolverError::RetrievalError(e)); - } - let res = self.delegate.ready().await?.call(request).await; match res { @@ -451,7 +433,7 @@ where context .extensions() .lock() - .insert(plan.usage_reporting.clone()); + .insert::>(plan.usage_reporting.clone()); } Ok(QueryPlannerResponse { content, @@ -489,7 +471,7 @@ where context .extensions() .lock() - .insert(plan.usage_reporting.clone()); + .insert::>(plan.usage_reporting.clone()); } Ok(QueryPlannerResponse::builder() @@ -504,14 +486,16 @@ where .context .extensions() .lock() - .insert(pe.usage_reporting.clone()); + .insert::>(Arc::new( + pe.usage_reporting.clone(), + )); } QueryPlannerError::SpecError(e) => { request .context .extensions() .lock() - .insert(Arc::new(UsageReporting { + .insert::>(Arc::new(UsageReporting { stats_report_key: e.get_error_key().to_string(), referenced_fields_by_type: HashMap::new(), })); @@ -641,9 +625,7 @@ mod tests { }); let configuration = Arc::new(crate::Configuration::default()); - let schema = Arc::new( - Schema::parse(include_str!("testdata/schema.graphql"), &configuration).unwrap(), - ); + let schema = Arc::new(Schema::parse(include_str!("testdata/schema.graphql")).unwrap()); let mut planner = CachingQueryPlanner::new(delegate, schema, &configuration, IndexMap::new()) @@ -652,8 +634,7 @@ mod tests { let configuration = Configuration::default(); - let schema = - Schema::parse(include_str!("testdata/schema.graphql"), &configuration).unwrap(); + let schema = Schema::parse(include_str!("testdata/schema.graphql")).unwrap(); let doc1 = Query::parse_document( "query Me { me { username } }", @@ -733,8 +714,7 @@ mod tests { let configuration = Configuration::default(); - let schema = - Schema::parse(include_str!("testdata/schema.graphql"), &configuration).unwrap(); + let schema = Schema::parse(include_str!("testdata/schema.graphql")).unwrap(); let doc = Query::parse_document( "query Me { me { username } }", diff --git a/apollo-router/src/query_planner/testdata/defer_clause.graphql b/apollo-router/src/query_planner/testdata/defer_clause.graphql index dac7093d22..6f50928fe9 100644 --- a/apollo-router/src/query_planner/testdata/defer_clause.graphql +++ b/apollo-router/src/query_planner/testdata/defer_clause.graphql @@ -1,6 +1,6 @@ schema @link(url: "https://specs.apollo.dev/link/v1.0") - @link(url: "https://specs.apollo.dev/join/v0.2", for: EXECUTION) { + @link(url: "https://specs.apollo.dev/join/v0.1", for: EXECUTION) { query: Query } diff --git a/apollo-router/src/query_planner/tests.rs b/apollo-router/src/query_planner/tests.rs index 21ff529b05..c67a3b55d3 100644 --- a/apollo-router/src/query_planner/tests.rs +++ b/apollo-router/src/query_planner/tests.rs @@ -23,6 +23,7 @@ use crate::plugin; use 
crate::plugin::test::MockSubgraph; use crate::query_planner; use crate::query_planner::fetch::FetchNode; +use crate::query_planner::BridgeQueryPlanner; use crate::request; use crate::services::subgraph_service::MakeSubgraphService; use crate::services::supergraph; @@ -420,7 +421,12 @@ async fn defer_if_condition() { }"#; let schema = include_str!("testdata/defer_clause.graphql"); - let schema = Arc::new(Schema::parse_test(schema, &Default::default()).unwrap()); + // we need to use the planner here instead of Schema::parse_test because that one uses the router bridge's api_schema function + // does not keep the defer directive definition + let planner = BridgeQueryPlanner::new(schema.to_string(), Arc::new(Configuration::default())) + .await + .unwrap(); + let schema = planner.schema(); let root: PlanNode = serde_json::from_str(include_str!("testdata/defer_clause_plan.json")).unwrap(); diff --git a/apollo-router/src/router/mod.rs b/apollo-router/src/router/mod.rs index 8424a89aa1..86fbc922c3 100644 --- a/apollo-router/src/router/mod.rs +++ b/apollo-router/src/router/mod.rs @@ -492,11 +492,11 @@ mod tests { let response = router_handle.request(request).await.unwrap(); assert_eq!( - "parsing error: no field `name` in type `User`", response.errors[0].message, + "type `User` does not have a field `name`", response.errors[0].message, "{response:?}" ); assert_eq!( - "PARSING_ERROR", + "GRAPHQL_VALIDATION_FAILED", response.errors[0].extensions.get("code").unwrap() ); @@ -554,11 +554,11 @@ mod tests { let response = router_handle.request(request).await.unwrap(); assert_eq!( - "parsing error: no field `name` in type `User`", + "type `User` does not have a field `name`", response.errors[0].message ); assert_eq!( - "PARSING_ERROR", + "GRAPHQL_VALIDATION_FAILED", response.errors[0].extensions.get("code").unwrap() ); router_handle.shutdown().await.unwrap(); diff --git a/apollo-router/src/services/layers/allow_only_http_post_mutations.rs b/apollo-router/src/services/layers/allow_only_http_post_mutations.rs index 22cb282121..c44f20ad81 100644 --- a/apollo-router/src/services/layers/allow_only_http_post_mutations.rs +++ b/apollo-router/src/services/layers/allow_only_http_post_mutations.rs @@ -284,7 +284,6 @@ mod forbid_http_get_mutations_tests { let ast = ast::Document::parse(query, "").unwrap(); let (_schema, executable) = ast.to_mixed_validate().unwrap(); - let executable = executable.into_inner(); let context = Context::new(); context @@ -294,8 +293,6 @@ mod forbid_http_get_mutations_tests { ast, executable: Arc::new(executable), hash: Default::default(), - parse_errors: None, - validation_errors: None, })); SupergraphRequest::fake_builder() diff --git a/apollo-router/src/services/layers/persisted_queries/mod.rs b/apollo-router/src/services/layers/persisted_queries/mod.rs index d87ab6a23b..16f8d7b04c 100644 --- a/apollo-router/src/services/layers/persisted_queries/mod.rs +++ b/apollo-router/src/services/layers/persisted_queries/mod.rs @@ -196,7 +196,6 @@ impl PersistedQueryLayer { // __type/__schema/__typename.) We do want to make sure the document // parsed properly before poking around at it, though. 
if self.introspection_enabled - && doc.parse_errors.is_none() && doc .executable .all_operations() @@ -205,12 +204,7 @@ impl PersistedQueryLayer { return Ok(request); } - let ast_result = if doc.parse_errors.is_none() { - Ok(&doc.ast) - } else { - Err(operation_body.as_str()) - }; - match manifest_poller.action_for_freeform_graphql(ast_result) { + match manifest_poller.action_for_freeform_graphql(Ok(&doc.ast)) { FreeformGraphQLAction::Allow => { tracing::info!(monotonic_counter.apollo.router.operations.persisted_queries = 1u64,); Ok(request) @@ -705,7 +699,7 @@ mod tests { let schema = Arc::new( Schema::parse_test( include_str!("../../../testdata/supergraph.graphql"), - &config, + &Default::default(), ) .unwrap(), ); @@ -742,12 +736,6 @@ mod tests { "fragment A on Query { me { id } } query SomeOp { ...A ...B } fragment,,, B on Query{me{username,name} } # yeah" ).await; - // Documents with invalid syntax don't match... - denied_by_safelist(&pq_layer, &query_analysis_layer, "}}}}").await; - - // ... unless they precisely match a safelisted document that also has invalid syntax. - allowed_by_safelist(&pq_layer, &query_analysis_layer, "}}}").await; - // Introspection queries are allowed (even using fragments and aliases), because // introspection is enabled. allowed_by_safelist( diff --git a/apollo-router/src/services/layers/query_analysis.rs b/apollo-router/src/services/layers/query_analysis.rs index 07630df06b..06794d9f69 100644 --- a/apollo-router/src/services/layers/query_analysis.rs +++ b/apollo-router/src/services/layers/query_analysis.rs @@ -1,19 +1,22 @@ +use std::collections::HashMap; use std::fmt::Display; use std::fmt::Formatter; use std::hash::Hash; use std::sync::Arc; use apollo_compiler::ast; -use apollo_compiler::validation::DiagnosticList; +use apollo_compiler::validation::Valid; use apollo_compiler::ExecutableDocument; use http::StatusCode; use lru::LruCache; +use router_bridge::planner::UsageReporting; use tokio::sync::Mutex; use crate::context::OPERATION_KIND; use crate::context::OPERATION_NAME; use crate::graphql::Error; use crate::graphql::ErrorExtension; +use crate::graphql::IntoGraphQLErrors; use crate::plugins::authorization::AuthorizationPlugin; use crate::query_planner::fetch::QueryHash; use crate::query_planner::OperationKind; @@ -31,7 +34,7 @@ use crate::Context; pub(crate) struct QueryAnalysisLayer { pub(crate) schema: Arc, configuration: Arc, - cache: Arc>>, + cache: Arc>>>, enable_authorization_directives: bool, } @@ -117,93 +120,114 @@ impl QueryAnalysisLayer { }) .cloned(); - let (context, doc) = match entry { + let res = match entry { None => { let span = tracing::info_span!("parse_query", "otel.kind" = "INTERNAL"); - let doc = match span.in_scope(|| self.parse_document(&query, op_name.as_deref())) { - Ok(doc) => doc, - Err(err) => { + match span.in_scope(|| self.parse_document(&query, op_name.as_deref())) { + Err(errors) => { + (*self.cache.lock().await).put( + QueryAnalysisKey { + query, + operation_name: op_name, + }, + Err(errors.clone()), + ); + let errors = match errors.into_graphql_errors() { + Ok(v) => v, + Err(errors) => vec![Error::builder() + .message(errors.to_string()) + .extension_code(errors.extension_code()) + .build()], + }; + return Err(SupergraphResponse::builder() - .errors(vec![Error::builder() - .message(err.to_string()) - .extension_code(err.extension_code()) - .build()]) + .errors(errors) .status_code(StatusCode::BAD_REQUEST) .context(request.context) .build() .expect("response is valid")); } - }; - - let context = Context::new(); - - 
let operation = doc.executable.get_operation(op_name.as_deref()).ok(); - let operation_name = operation - .as_ref() - .and_then(|operation| operation.name.as_ref().map(|s| s.as_str().to_owned())); - - context.insert(OPERATION_NAME, operation_name).unwrap(); - let operation_kind = operation.map(|op| OperationKind::from(op.operation_type)); - context - .insert(OPERATION_KIND, operation_kind.unwrap_or_default()) - .expect("cannot insert operation kind in the context; this is a bug"); - - if self.enable_authorization_directives { - if let Err(err) = AuthorizationPlugin::query_analysis( - &query, - op_name.as_deref(), - &self.schema, - &self.configuration, - &context, - ) { - return Err(SupergraphResponse::builder() - .errors(vec![Error::builder() - .message(err.to_string()) - .extension_code(err.extension_code()) - .build()]) - .status_code(StatusCode::BAD_REQUEST) - .context(request.context) - .build() - .expect("response is valid")); + Ok(doc) => { + let context = Context::new(); + + let operation = doc.executable.get_operation(op_name.as_deref()).ok(); + let operation_name = operation.as_ref().and_then(|operation| { + operation.name.as_ref().map(|s| s.as_str().to_owned()) + }); + + if self.enable_authorization_directives { + AuthorizationPlugin::query_analysis( + &doc, + operation_name.as_deref(), + &self.schema, + &context, + ); + } + + context + .insert(OPERATION_NAME, operation_name) + .expect("cannot insert operation name into context; this is a bug"); + let operation_kind = + operation.map(|op| OperationKind::from(op.operation_type)); + context + .insert(OPERATION_KIND, operation_kind.unwrap_or_default()) + .expect("cannot insert operation kind in the context; this is a bug"); + + (*self.cache.lock().await).put( + QueryAnalysisKey { + query, + operation_name: op_name, + }, + Ok((context.clone(), doc.clone())), + ); + + Ok((context, doc)) } } - - (*self.cache.lock().await).put( - QueryAnalysisKey { - query, - operation_name: op_name, - }, - (context.clone(), doc.clone()), - ); - - (context, doc) } Some(c) => c, }; - request.context.extend(&context); - request - .context - .extensions() - .lock() - .insert::(doc); - - Ok(SupergraphRequest { - supergraph_request: request.supergraph_request, - context: request.context, - }) + match res { + Ok((context, doc)) => { + request.context.extend(&context); + request + .context + .extensions() + .lock() + .insert::(doc); + Ok(SupergraphRequest { + supergraph_request: request.supergraph_request, + context: request.context, + }) + } + Err(errors) => { + request + .context + .extensions() + .lock() + .insert(Arc::new(UsageReporting { + stats_report_key: errors.get_error_key().to_string(), + referenced_fields_by_type: HashMap::new(), + })); + Err(SupergraphResponse::builder() + .errors(errors.into_graphql_errors().unwrap_or_default()) + .status_code(StatusCode::BAD_REQUEST) + .context(request.context) + .build() + .expect("response is valid")) + } + } } } pub(crate) type ParsedDocument = Arc; -#[derive(Debug, Default)] +#[derive(Debug)] pub(crate) struct ParsedDocumentInner { pub(crate) ast: ast::Document, - pub(crate) executable: Arc, + pub(crate) executable: Arc>, pub(crate) hash: Arc, - pub(crate) parse_errors: Option, - pub(crate) validation_errors: Option, } impl Display for ParsedDocumentInner { diff --git a/apollo-router/src/services/supergraph/service.rs b/apollo-router/src/services/supergraph/service.rs index 11121b24bc..3019a1f986 100644 --- a/apollo-router/src/services/supergraph/service.rs +++ 
b/apollo-router/src/services/supergraph/service.rs @@ -27,7 +27,6 @@ use tracing_futures::Instrument; use crate::configuration::Batching; use crate::context::OPERATION_NAME; use crate::error::CacheResolverError; -use crate::error::QueryPlannerError; use crate::graphql; use crate::graphql::IntoGraphQLErrors; use crate::graphql::Response; @@ -51,7 +50,6 @@ use crate::services::execution::QueryPlan; use crate::services::layers::allow_only_http_post_mutations::AllowOnlyHttpPostMutationsLayer; use crate::services::layers::content_negotiation; use crate::services::layers::persisted_queries::PersistedQueryLayer; -use crate::services::layers::query_analysis::ParsedDocument; use crate::services::layers::query_analysis::QueryAnalysisLayer; use crate::services::new_service::ServiceFactory; use crate::services::query_planner; @@ -67,7 +65,6 @@ use crate::services::QueryPlannerContent; use crate::services::QueryPlannerResponse; use crate::services::SupergraphRequest; use crate::services::SupergraphResponse; -use crate::spec::Query; use crate::spec::Schema; use crate::Configuration; use crate::Context; @@ -605,18 +602,19 @@ async fn plan_query( // Some tests do populate the document, so we only do it if it's not already there. if !{ let lock = context.extensions().lock(); - lock.contains_key::() + lock.contains_key::() } { - let doc = Query::parse_document( + let doc = crate::spec::Query::parse_document( &query_str, operation_name.as_deref(), &schema, &Configuration::default(), ) - .map_err(QueryPlannerError::SpecError)?; - Query::check_errors(&doc).map_err(crate::error::QueryPlannerError::from)?; - Query::validate_query(&doc).map_err(crate::error::QueryPlannerError::from)?; - context.extensions().lock().insert::(doc); + .map_err(crate::error::QueryPlannerError::from)?; + context + .extensions() + .lock() + .insert::(doc); } planning diff --git a/apollo-router/src/services/supergraph/tests.rs b/apollo-router/src/services/supergraph/tests.rs index dc598d927d..df88bc123e 100644 --- a/apollo-router/src/services/supergraph/tests.rs +++ b/apollo-router/src/services/supergraph/tests.rs @@ -1057,7 +1057,7 @@ async fn subscription_callback_schema_reload() { let new_schema = format!("{SCHEMA} "); // reload schema - let schema = Schema::parse(&new_schema, &configuration).unwrap(); + let schema = Schema::parse_test(&new_schema, &configuration).unwrap(); notify.broadcast_schema(Arc::new(schema)); insta::assert_json_snapshot!(tokio::time::timeout( Duration::from_secs(1), diff --git a/apollo-router/src/spec/mod.rs b/apollo-router/src/spec/mod.rs index add1927b27..1c1a8667e8 100644 --- a/apollo-router/src/spec/mod.rs +++ b/apollo-router/src/spec/mod.rs @@ -20,7 +20,9 @@ use serde::Deserialize; use serde::Serialize; use thiserror::Error; +use crate::error::ValidationErrors; use crate::graphql::ErrorExtension; +use crate::graphql::IntoGraphQLErrors; use crate::json_ext::Object; pub(crate) const LINK_DIRECTIVE_NAME: &str = "link"; @@ -41,8 +43,8 @@ pub(crate) enum SpecError { InvalidField(String, String), /// parsing error: {0} ParsingError(String), - /// validation error - ValidationError(Vec), + /// validation error: {0} + ValidationError(ValidationErrors), /// Unknown operation named "{0}" UnknownOperation(String), /// subscription operation is not supported @@ -98,3 +100,28 @@ impl ErrorExtension for SpecError { (!obj.is_empty()).then_some(obj) } } + +impl IntoGraphQLErrors for SpecError { + fn into_graphql_errors(self) -> Result, Self> { + match self { + SpecError::ValidationError(e) => { + 
e.into_graphql_errors().map_err(SpecError::ValidationError) + } + _ => { + let gql_err = match self.custom_extension_details() { + Some(extension_details) => crate::graphql::Error::builder() + .message(self.to_string()) + .extension_code(self.extension_code()) + .extensions(extension_details) + .build(), + None => crate::graphql::Error::builder() + .message(self.to_string()) + .extension_code(self.extension_code()) + .build(), + }; + + Ok(vec![gql_err]) + } + } + } +} diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs index 6ef3333211..4bee87adbd 100644 --- a/apollo-router/src/spec/query.rs +++ b/apollo-router/src/spec/query.rs @@ -9,7 +9,6 @@ use std::sync::Arc; use apollo_compiler::executable; use apollo_compiler::schema::ExtendedType; -use apollo_compiler::validation::WithErrors; use apollo_compiler::ExecutableDocument; use derivative::Derivative; use indexmap::IndexSet; @@ -23,9 +22,7 @@ use self::change::QueryHashVisitor; use self::subselections::BooleanValues; use self::subselections::SubSelectionKey; use self::subselections::SubSelectionValue; -use crate::configuration::GraphQLValidationMode; use crate::error::FetchError; -use crate::error::ValidationErrors; use crate::graphql::Error; use crate::graphql::Request; use crate::graphql::Response; @@ -72,14 +69,6 @@ pub(crate) struct Query { pub(crate) defer_stats: DeferStats, #[derivative(PartialEq = "ignore", Hash = "ignore")] pub(crate) is_original: bool, - /// Validation errors, used for comparison with the JS implementation. - /// - /// `ValidationErrors` is not serde-serializable. If this comes from cache, - /// the plan ought also to be cached, so we should not need this value anyways. - /// XXX(@goto-bus-stop): Remove when only Rust validation is used - #[derivative(PartialEq = "ignore", Hash = "ignore")] - #[serde(skip)] - pub(crate) validation_error: Option, /// This is a hash that depends on: /// - the query itself @@ -120,7 +109,6 @@ impl Query { conditional_defer_variable_names: IndexSet::new(), }, is_original: true, - validation_error: None, schema_aware_hash: vec![], } } @@ -286,23 +274,17 @@ impl Query { let parser = &mut apollo_compiler::Parser::new() .recursion_limit(configuration.limits.parser_max_recursion) .token_limit(configuration.limits.parser_max_tokens); - let (ast, parse_errors) = match parser.parse_ast(query, "query.graphql") { - Ok(ast) => (ast, None), - Err(WithErrors { partial, errors }) => (partial, Some(errors)), + let ast = match parser.parse_ast(query, "query.graphql") { + Ok(ast) => ast, + Err(errors) => { + return Err(SpecError::ValidationError(errors.into())); + } }; let schema = &schema.api_schema().definitions; - let validate = - configuration.experimental_graphql_validation_mode != GraphQLValidationMode::Legacy; - // Stretch the meaning of "assume valid" to "weโ€™ll check later" - let (executable_document, validation_errors) = if validate { - match ast.to_executable_validate(schema) { - Ok(doc) => (doc.into_inner(), None), - Err(WithErrors { partial, errors }) => (partial, Some(errors)), - } - } else { - match ast.to_executable(schema) { - Ok(doc) => (doc, None), - Err(WithErrors { partial, .. 
}) => (partial, None), + let executable_document = match ast.to_executable_validate(schema) { + Ok(doc) => doc, + Err(errors) => { + return Err(SpecError::ValidationError(errors.into())); } }; @@ -312,13 +294,10 @@ impl Query { let hash = QueryHashVisitor::hash_query(schema, &executable_document, operation_name) .map_err(|e| SpecError::QueryHashing(e.to_string()))?; - Ok(Arc::new(ParsedDocumentInner { ast, executable: Arc::new(executable_document), hash: Arc::new(QueryHash(hash)), - parse_errors, - validation_errors, })) } @@ -331,7 +310,6 @@ impl Query { let query = query.into(); let doc = Self::parse_document(&query, operation_name, schema, configuration)?; - Self::check_errors(&doc)?; let (fragments, operations, defer_stats, schema_aware_hash) = Self::extract_query_information(schema, &doc.executable, operation_name)?; @@ -344,27 +322,10 @@ impl Query { filtered_query: None, defer_stats, is_original: true, - validation_error: None, schema_aware_hash, }) } - /// Check for parse errors in a query in the compiler. - pub(crate) fn check_errors(document: &ParsedDocument) -> Result<(), SpecError> { - match document.parse_errors.clone() { - Some(errors) => Err(SpecError::ParsingError(errors.to_string())), - None => Ok(()), - } - } - - /// Check for validation errors in a query in the compiler. - pub(crate) fn validate_query(document: &ParsedDocument) -> Result<(), ValidationErrors> { - match document.validation_errors.clone() { - Some(errors) => Err(ValidationErrors { errors }), - None => Ok(()), - } - } - /// Extract serializable data structures from the apollo-compiler HIR. pub(crate) fn extract_query_information( schema: &Schema, diff --git a/apollo-router/src/spec/query/tests.rs b/apollo-router/src/spec/query/tests.rs index 65c6d56c92..4869910d46 100644 --- a/apollo-router/src/spec/query/tests.rs +++ b/apollo-router/src/spec/query/tests.rs @@ -1692,55 +1692,56 @@ fn variable_validation() { // https://spec.graphql.org/June2018/#sec-Input-Objects assert_validation!( - "input Foo{ y: String } type Query { x: String }", - "query($foo:Foo){x}", + "input Foo{ y: String } type Query { x(foo: Foo): String }", + "query($foo:Foo){x(foo: $foo)}", json!({}) ); assert_validation!( - "input Foo{ y: String } type Query { x: String }", - "query($foo:Foo){x}", + "input Foo{ y: String } type Query { x(foo: Foo): String }", + "query($foo:Foo){x(foo: $foo)}", json!({"foo":{}}) ); assert_validation_error!( - "input Foo{ y: String } type Query { x: String }", - "query($foo:Foo){x}", + "input Foo{ y: String } type Query { x(foo: Foo): String }", + "query($foo:Foo){x(foo: $foo)}", json!({"foo":1}) ); assert_validation_error!( - "input Foo{ y: String } type Query { x: String }", - "query($foo:Foo){x}", + "input Foo{ y: String } type Query { x(foo: Foo): String }", + "query($foo:Foo){x(foo: $foo)}", json!({"foo":"str"}) ); assert_validation_error!( - "input Foo{x:Int!} type Query { x: String }", - "query($foo:Foo){x}", + "input Foo{x:Int!} type Query { x(foo: Foo): String }", + "query($foo:Foo){x(foo: $foo)}", json!({"foo":{}}) ); assert_validation!( - "input Foo{x:Int!} type Query { x: String }", - "query($foo:Foo){x}", + "input Foo{x:Int!} type Query { x(foo: Foo): String }", + "query($foo:Foo){x(foo: $foo)}", json!({"foo":{"x":1}}) ); assert_validation!( - "scalar Foo type Query { x: String }", - "query($foo:Foo!){x}", + "scalar Foo type Query { x(foo: Foo): String }", + "query($foo:Foo!){x(foo: $foo)}", json!({"foo":{}}) ); assert_validation!( - "scalar Foo type Query { x: String }", - "query($foo:Foo!){x}", + 
"scalar Foo type Query { x(foo: Foo): String }", + "query($foo:Foo!){x(foo: $foo)}", json!({"foo":1}) ); assert_validation_error!( - "scalar Foo type Query { x: String }", - "query($foo:Foo!){x}", + "scalar Foo type Query { x(foo: Foo): String }", + "query($foo:Foo!){x(foo: $foo)}", json!({}) ); assert_validation!( - "input Foo{bar:Bar!} input Bar{x:Int!} type Query { x: String }", - "query($foo:Foo){x}", + "input Foo{bar:Bar!} input Bar{x:Int!} type Query { x(foo: Foo): String }", + "query($foo:Foo){x(foo: $foo)}", json!({"foo":{"bar":{"x":1}}}) ); + assert_validation!( "enum Availability{AVAILABLE} type Product{availability:Availability! name:String} type Query{products(availability: Availability!): [Product]!}", "query GetProductsByAvailability($availability: Availability!){products(availability: $availability) {name}}", @@ -4039,7 +4040,7 @@ fn skip() { FormatTest::builder() .schema(schema) .query( - "query Example($shouldSkip: Boolean) { + "query Example($shouldSkip: Boolean!) { get { id name @skip(if: $shouldSkip) @@ -4066,7 +4067,7 @@ fn skip() { FormatTest::builder() .schema(schema) .query( - "query Example($shouldSkip: Boolean) { + "query Example($shouldSkip: Boolean!) { get { id name @skip(if: $shouldSkip) @@ -4095,7 +4096,7 @@ fn skip() { FormatTest::builder() .schema(schema) .query( - "query Example($shouldSkip: Boolean) { + "query Example($shouldSkip: Boolean!) { get { id name @skip(if: $shouldSkip) @@ -4123,7 +4124,7 @@ fn skip() { FormatTest::builder() .schema(schema) .query( - "query Example($shouldSkip: Boolean = true) { + "query Example($shouldSkip: Boolean! = true) { get { id name @skip(if: $shouldSkip) @@ -4151,7 +4152,7 @@ fn skip() { FormatTest::builder() .schema(schema) .query( - "query Example($shouldSkip: Boolean = true) { + "query Example($shouldSkip: Boolean! = true) { get { id name @skip(if: $shouldSkip) @@ -4551,7 +4552,7 @@ fn include() { FormatTest::builder() .schema(schema) .query( - "query Example($shouldInclude: Boolean) { + "query Example($shouldInclude: Boolean!) { get { id name @include(if: $shouldInclude) @@ -4578,7 +4579,7 @@ fn include() { FormatTest::builder() .schema(schema) .query( - "query Example($shouldInclude: Boolean) { + "query Example($shouldInclude: Boolean!) { get { id name @include(if: $shouldInclude) @@ -4607,7 +4608,7 @@ fn include() { FormatTest::builder() .schema(schema) .query( - "query Example($shouldInclude: Boolean = false) { + "query Example($shouldInclude: Boolean! = false) { get { id name @include(if: $shouldInclude) @@ -4632,7 +4633,7 @@ fn include() { FormatTest::builder() .schema(schema) .query( - "query Example($shouldInclude: Boolean = false) { + "query Example($shouldInclude: Boolean! = false) { get { id name @include(if: $shouldInclude) @@ -4660,7 +4661,7 @@ fn include() { FormatTest::builder() .schema(schema) .query( - "query Example($shouldInclude: Boolean) { + "query Example($shouldInclude: Boolean!) { get { name } @@ -4692,7 +4693,7 @@ fn include() { FormatTest::builder() .schema(schema) .query( - "query Example($shouldInclude: Boolean) { + "query Example($shouldInclude: Boolean!) { get { name } @@ -4719,7 +4720,7 @@ fn include() { FormatTest::builder() .schema(schema) .query( - "query Example($shouldInclude: Boolean) { + "query Example($shouldInclude: Boolean!) { get { name } @@ -4749,7 +4750,7 @@ fn include() { FormatTest::builder() .schema(schema) .query( - "query Example($shouldInclude: Boolean) { + "query Example($shouldInclude: Boolean!) 
{ get { name } @@ -5704,8 +5705,7 @@ fn test_error_path_works_across_inline_fragments() { id: ID! myField: String! } -"#, - &Default::default(), +"#, &Default::default() ) .unwrap(); @@ -5769,7 +5769,7 @@ fn test_query_not_named_query() { type TheOneAndOnlyQuery { example: Boolean } "#, - &config, + &Default::default(), ) .unwrap(); let query = Query::parse("{ example }", None, &schema, &config).unwrap(); @@ -5815,7 +5815,7 @@ fn filtered_defer_fragment() { c: String! } "#, - &config, + &Default::default(), ) .unwrap(); let query = r#"{ @@ -5856,7 +5856,6 @@ fn filtered_defer_fragment() { defer_stats, is_original: true, unauthorized: UnauthorizedPaths::default(), - validation_error: None, schema_aware_hash, }; @@ -5884,7 +5883,6 @@ fn filtered_defer_fragment() { defer_stats, is_original: false, unauthorized: UnauthorizedPaths::default(), - validation_error: None, schema_aware_hash, }; diff --git a/apollo-router/src/spec/schema.rs b/apollo-router/src/spec/schema.rs index c1fffc5b98..20df362070 100644 --- a/apollo-router/src/spec/schema.rs +++ b/apollo-router/src/spec/schema.rs @@ -7,19 +7,15 @@ use std::time::Instant; use apollo_compiler::ast; use apollo_compiler::schema::Implementers; -use apollo_compiler::validation::DiagnosticList; use apollo_compiler::validation::Valid; -use apollo_compiler::validation::WithErrors; use http::Uri; use semver::Version; use semver::VersionReq; use sha2::Digest; use sha2::Sha256; -use crate::configuration::GraphQLValidationMode; use crate::error::ParseErrors; use crate::error::SchemaError; -use crate::error::ValidationErrors; use crate::query_planner::OperationKind; use crate::Configuration; @@ -28,8 +24,6 @@ use crate::Configuration; pub(crate) struct Schema { pub(crate) raw_sdl: Arc, pub(crate) definitions: Valid, - /// Stored for comparison with the validation errors from query planning. - diagnostics: Option, subgraphs: HashMap, pub(crate) implementers_map: HashMap, api_schema: Option>, @@ -38,7 +32,7 @@ pub(crate) struct Schema { impl Schema { #[cfg(test)] pub(crate) fn parse_test(s: &str, configuration: &Configuration) -> Result { - let schema = Self::parse(s, configuration)?; + let schema = Self::parse(s)?; let api_schema = Self::parse( &schema .create_api_schema(configuration) @@ -49,7 +43,6 @@ impl Schema { "The supergraph schema failed to produce a valid API schema: {err}" )) })?, - configuration, )?; Ok(schema.with_api_schema(api_schema)) } @@ -69,32 +62,16 @@ impl Schema { }) } - pub(crate) fn parse(sdl: &str, configuration: &Configuration) -> Result { + pub(crate) fn parse(sdl: &str) -> Result { let start = Instant::now(); let ast = Self::parse_ast(sdl)?; - let validate = - configuration.experimental_graphql_validation_mode != GraphQLValidationMode::Legacy; - // Stretch the meaning of "assume valid" to "weโ€™ll check later that itโ€™s valid" - let (definitions, diagnostics) = if validate { - match ast.to_schema_validate() { - Ok(schema) => (schema, None), - Err(WithErrors { partial, errors }) => (Valid::assume_valid(partial), Some(errors)), - } - } else { - match ast.to_schema() { - Ok(schema) => (Valid::assume_valid(schema), None), - Err(WithErrors { partial, .. 
}) => (Valid::assume_valid(partial), None), + let definitions = match ast.to_schema_validate() { + Ok(schema) => schema, + Err(errors) => { + return Err(SchemaError::Validate(errors.into())); } }; - // Only error out if new validation is used: with `Both`, we take the legacy - // validation as authoritative and only use the new result for comparison - if configuration.experimental_graphql_validation_mode == GraphQLValidationMode::New { - if let Some(errors) = diagnostics { - return Err(SchemaError::Validate(ValidationErrors { errors })); - } - } - let mut subgraphs = HashMap::new(); // TODO: error if not found? if let Some(join_enum) = definitions.get_enum("join__Graph") { @@ -141,7 +118,6 @@ impl Schema { Ok(Schema { raw_sdl: Arc::new(sdl.to_owned()), definitions, - diagnostics, subgraphs, implementers_map, api_schema: None, @@ -248,10 +224,6 @@ impl Schema { } } - pub(crate) fn has_errors(&self) -> bool { - self.diagnostics.is_some() - } - /// Return the federation major version based on the @link or @core directives in the schema, /// or None if there are no federation directives. pub(crate) fn federation_version(&self) -> Option { diff --git a/apollo-router/src/testdata/supergraph_config.router.yaml b/apollo-router/src/testdata/supergraph_config.router.yaml index 23d31bdd2f..c0312cea1d 100644 --- a/apollo-router/src/testdata/supergraph_config.router.yaml +++ b/apollo-router/src/testdata/supergraph_config.router.yaml @@ -2,4 +2,3 @@ supergraph: listen: 127.0.0.1:0 health_check: listen: 127.0.0.1:0 -experimental_graphql_validation_mode: both diff --git a/apollo-router/tests/integration/validation.rs b/apollo-router/tests/integration/validation.rs index ed6941965a..82a3270bc1 100644 --- a/apollo-router/tests/integration/validation.rs +++ b/apollo-router/tests/integration/validation.rs @@ -5,9 +5,7 @@ use tower::ServiceExt; async fn test_supergraph_validation_errors_are_passed_on() { create_test_service_factory_from_yaml( include_str!("../../src/testdata/invalid_supergraph.graphql"), - r#" - experimental_graphql_validation_mode: both -"#, + "supergraph:\n introspection: true\n", ) .await; } diff --git a/apollo-router/tests/integration_tests.rs b/apollo-router/tests/integration_tests.rs index 92024ae704..86746338dc 100644 --- a/apollo-router/tests/integration_tests.rs +++ b/apollo-router/tests/integration_tests.rs @@ -115,12 +115,12 @@ async fn api_schema_hides_field() { let message = &actual.errors[0].message; assert!( - message.contains("no field `inStock` in type `Product`"), + message.contains("type `Product` does not have a field `inStock`"), "{message}" ); assert_eq!( actual.errors[0].extensions["code"].as_str(), - Some("PARSING_ERROR"), + Some("GRAPHQL_VALIDATION_FAILED"), ); } @@ -138,8 +138,7 @@ async fn validation_errors_from_rust() { "apollo": { "field_level_instrumentation_sampler": "always_off" } - }, - "experimental_graphql_validation_mode": "new", + } }), ) .await; @@ -827,8 +826,7 @@ async fn defer_path_with_disabled_config() { "apollo.include_subgraph_errors": { "all": true } - }, - "experimental_graphql_validation_mode": "both", + } }); let request = supergraph::Request::fake_builder() .query( @@ -1252,8 +1250,7 @@ async fn query_rust( "apollo": { "field_level_instrumentation_sampler": "always_off" } - }, - "experimental_graphql_validation_mode": "both", + } }), ) .await diff --git a/apollo-router/tests/snapshots/integration_tests__defer_path_with_disabled_config.snap b/apollo-router/tests/snapshots/integration_tests__defer_path_with_disabled_config.snap index 
6068eed14f..c1a9d7d8c8 100644
--- a/apollo-router/tests/snapshots/integration_tests__defer_path_with_disabled_config.snap
+++ b/apollo-router/tests/snapshots/integration_tests__defer_path_with_disabled_config.snap
@@ -5,7 +5,13 @@ expression: stream.next().await.unwrap().unwrap()
 {
   "errors": [
     {
-      "message": "Unknown directive \"@defer\".",
+      "message": "cannot find directive `@defer` in this document",
+      "locations": [
+        {
+          "line": 4,
+          "column": 20
+        }
+      ],
       "extensions": {
         "code": "GRAPHQL_VALIDATION_FAILED"
       }
diff --git a/docs/source/configuration/overview.mdx b/docs/source/configuration/overview.mdx
index 8d9367c8df..7bba43ab2f 100644
--- a/docs/source/configuration/overview.mdx
+++ b/docs/source/configuration/overview.mdx
@@ -804,18 +804,6 @@ In versions of the Apollo Router prior to 1.17, this limit was defined via the c
 
-### GraphQL Validation Mode
-
-We are experimenting with a new GraphQL validation implementation written in Rust. The legacy implementation is part of the JavaScript query planner. This is part of a project to remove JavaScript from the Router to improve performance and memory behavior.
-
-To opt in to the new validation implementation, set:
-
-```yaml {4,8} title="router.yaml"
-experimental_graphql_validation_mode: new
-```
-
-This is an experimental option while we are still finding edge cases in the new implementation, but it will become the default in the future.
-
 ### Plugins
 
 You can customize the Apollo Router's behavior with [plugins](../customizations/overview). Each plugin can have its own section in the configuration file with arbitrary values:

From 61a615a7d6c6495ba2366e2dd8c46f69d2ab3cdf Mon Sep 17 00:00:00 2001
From: Bryn Cooke
Date: Thu, 11 Apr 2024 16:02:48 +0100
Subject: [PATCH 08/46] Move PluginTestHarness out of telemetry for use in
 other plugins (#4932)

This harness is suited to unit tests that need to inspect the request and
response both at the start of the pipeline and at the destination service.
The existing TestHarness, by contrast, fires up the entire router stack. The
alternative we have tried is mocking, which never ends well.

This test harness is deliberately simple: it lets you create a plugin, fire a
fake request through it, make assertions on the request during pipeline
execution, return a fake response, and validate the returned response (a
short usage sketch follows the notes below).

It's only used in telemetry at the moment, but it'll be useful for demand
control.

---

**Checklist**

Complete the checklist (and note appropriate exceptions) before the PR is
marked ready-for-review.

- [ ] Changes are compatible[^1]
- [ ] Documentation[^2] completed
- [ ] Performance impact assessed and acceptable
- Tests added and passing[^3]
  - [ ] Unit Tests
  - [ ] Integration Tests
  - [ ] Manual Tests

**Exceptions**

*Note any exceptions here*

**Notes**

[^1]: It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this.
[^2]: Configuration is an important part of many changes. Where applicable please try to document configuration examples.
[^3]: Tick whichever testing boxes are applicable. If you are adding Manual Tests, please document the manual testing (extensively) in the Exceptions.
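A minimal usage sketch, mirroring the rustdoc example added in
`apollo-router/src/plugins/test.rs` below (`Telemetry` stands in for
whichever registered plugin is under test; the fake request/response
builders are the existing `router` service test helpers):

```rust
#[tokio::test(flavor = "multi_thread")]
async fn test_router_service() {
    // Build a harness for a single plugin; no full router stack is started.
    let test_harness: PluginTestHarness<Telemetry> =
        PluginTestHarness::builder().build().await;

    let mut response = test_harness
        .call_router(
            router::Request::fake_builder()
                .body("query { foo }")
                .build()
                .expect("expecting valid request"),
            // This closure plays the destination service: inspect the request
            // as it arrives here, then return a canned response.
            |_r| {
                router::Response::fake_builder()
                    .header("custom-header", "val1")
                    .data(serde_json::json!({"data": "res"}))
                    .build()
                    .expect("expecting valid response")
            },
        )
        .await
        .expect("expecting successful response");

    // Validate whatever the pipeline produced on the way back out.
    response.next_response().await;
}
```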
Co-authored-by: bryn --- apollo-router/src/logging/mod.rs | 24 ++- apollo-router/src/plugins/mod.rs | 2 + .../src/plugins/telemetry/logging/mod.rs | 94 +-------- apollo-router/src/plugins/test.rs | 185 ++++++++++++++++++ 4 files changed, 204 insertions(+), 101 deletions(-) create mode 100644 apollo-router/src/plugins/test.rs diff --git a/apollo-router/src/logging/mod.rs b/apollo-router/src/logging/mod.rs index 6948bb63d3..5d0b75879d 100644 --- a/apollo-router/src/logging/mod.rs +++ b/apollo-router/src/logging/mod.rs @@ -39,6 +39,9 @@ pub(crate) mod test { use serde_json::Value; use tracing_core::LevelFilter; use tracing_core::Subscriber; + use tracing_subscriber::layer::SubscriberExt; + + use crate::plugins::telemetry::dynamic_attribute::DynAttributeLayer; pub(crate) struct SnapshotSubscriber { buffer: Arc>>, @@ -99,15 +102,18 @@ pub(crate) mod test { assertion, }; - tracing_subscriber::fmt() - .json() - .with_max_level(level) - .without_time() - .with_target(false) - .with_file(false) - .with_line_number(false) - .with_writer(Mutex::new(collector)) - .finish() + tracing_subscriber::registry::Registry::default() + .with(level) + .with(DynAttributeLayer::new()) + .with( + tracing_subscriber::fmt::Layer::default() + .json() + .without_time() + .with_target(false) + .with_file(false) + .with_line_number(false) + .with_writer(Mutex::new(collector)), + ) } } } diff --git a/apollo-router/src/plugins/mod.rs b/apollo-router/src/plugins/mod.rs index 5953aa7101..1b8c60cec5 100644 --- a/apollo-router/src/plugins/mod.rs +++ b/apollo-router/src/plugins/mod.rs @@ -37,4 +37,6 @@ mod record_replay; pub(crate) mod rhai; pub(crate) mod subscription; pub(crate) mod telemetry; +#[cfg(test)] +pub(crate) mod test; pub(crate) mod traffic_shaping; diff --git a/apollo-router/src/plugins/telemetry/logging/mod.rs b/apollo-router/src/plugins/telemetry/logging/mod.rs index 1064f28d35..eb8175cb19 100644 --- a/apollo-router/src/plugins/telemetry/logging/mod.rs +++ b/apollo-router/src/plugins/telemetry/logging/mod.rs @@ -1,18 +1,12 @@ //TODO move telemetry logging functionality to this file #[cfg(test)] mod test { - use std::any::TypeId; - - use tower::BoxError; - use tower::ServiceBuilder; - use tower_service::Service; use tracing_futures::WithSubscriber; use crate::assert_snapshot_subscriber; use crate::graphql; - use crate::plugin::DynPlugin; - use crate::plugin::Plugin; use crate::plugins::telemetry::Telemetry; + use crate::plugins::test::PluginTestHarness; use crate::services::router; use crate::services::subgraph; use crate::services::supergraph; @@ -110,7 +104,7 @@ mod test { #[tokio::test(flavor = "multi_thread")] async fn test_when_header() { let test_harness: PluginTestHarness = PluginTestHarness::builder() - .yaml(include_str!( + .config(include_str!( "testdata/experimental_when_header.router.yaml" )) .build() @@ -143,88 +137,4 @@ mod test { .with_subscriber(assert_snapshot_subscriber!()) .await } - - // Maybe factor this out after making it more usable - // The difference with this and the `TestHarness` is that this has much less of the router being wired up and is useful for testing a single plugin in isolation. - // In particular the `TestHarness` isn't good for testing things with logging. - // For now let's try and increase the coverage of the telemetry plugin using this and see how it goes. 
- - struct PluginTestHarness { - plugin: Box, - phantom: std::marker::PhantomData, - } - #[buildstructor::buildstructor] - impl PluginTestHarness { - #[builder] - async fn new(yaml: Option<&'static str>) -> Self { - let factory = crate::plugin::plugins() - .find(|factory| factory.type_id == TypeId::of::()) - .expect("plugin not registered"); - let name = &factory.name.replace("apollo.", ""); - let config = yaml - .map(|yaml| serde_yaml::from_str::(yaml).unwrap()) - .map(|mut config| { - config - .as_object_mut() - .expect("invalid yaml") - .remove(name) - .expect("no config for plugin") - }) - .unwrap_or_else(|| serde_json::Value::Object(Default::default())); - - let plugin = factory - .create_instance_without_schema(&config) - .await - .expect("failed to create plugin"); - - Self { - plugin, - phantom: Default::default(), - } - } - - #[allow(dead_code)] - async fn call_router( - &self, - request: router::Request, - response_fn: fn(router::Request) -> router::Response, - ) -> Result { - let service: router::BoxService = router::BoxService::new( - ServiceBuilder::new() - .service_fn(move |req: router::Request| async move { Ok((response_fn)(req)) }), - ); - - self.plugin.router_service(service).call(request).await - } - - async fn call_supergraph( - &self, - request: supergraph::Request, - response_fn: fn(supergraph::Request) -> supergraph::Response, - ) -> Result { - let service: supergraph::BoxService = - supergraph::BoxService::new(ServiceBuilder::new().service_fn( - move |req: supergraph::Request| async move { Ok((response_fn)(req)) }, - )); - - self.plugin.supergraph_service(service).call(request).await - } - - async fn call_subgraph( - &self, - request: subgraph::Request, - response_fn: fn(subgraph::Request) -> subgraph::Response, - ) -> Result { - let name = request.subgraph_name.clone(); - let service: subgraph::BoxService = - subgraph::BoxService::new(ServiceBuilder::new().service_fn( - move |req: subgraph::Request| async move { Ok((response_fn)(req)) }, - )); - - self.plugin - .subgraph_service(&name.expect("subgraph name must be populated"), service) - .call(request) - .await - } - } } diff --git a/apollo-router/src/plugins/test.rs b/apollo-router/src/plugins/test.rs new file mode 100644 index 0000000000..25955b1cb6 --- /dev/null +++ b/apollo-router/src/plugins/test.rs @@ -0,0 +1,185 @@ +use std::any::TypeId; +use std::str::FromStr; +use std::sync::Arc; + +use apollo_compiler::validation::Valid; +use serde_json::Value; +use tower::BoxError; +use tower::ServiceBuilder; +use tower_service::Service; + +use crate::plugin::DynPlugin; +use crate::plugin::Plugin; +use crate::plugin::PluginInit; +use crate::query_planner::BridgeQueryPlanner; +use crate::services::http; +use crate::services::router; +use crate::services::subgraph; +use crate::services::supergraph; +use crate::Configuration; +use crate::Notify; + +/// Test harness for plugins +/// The difference between this and the regular TestHarness is that this is more suited for unit testing. +/// It doesn't create the entire router stack, and is mostly just a convenient way to call a plugin service given an optional config and a schema. +/// +/// Here is a basic example that calls a router service and checks that validates logs are generated for the telemetry plugin. 
+/// +/// ``` +/// #[tokio::test(flavor = "multi_thread")] +/// async fn test_router_service() { +/// let test_harness: PluginTestHarness = PluginTestHarness::builder().build().await; +/// +/// async { +/// let mut response = test_harness +/// .call_router( +/// router::Request::fake_builder() +/// .body("query { foo }") +/// .build() +/// .expect("expecting valid request"), +/// |_r| { +/// tracing::info!("response"); +/// router::Response::fake_builder() +/// .header("custom-header", "val1") +/// .data(serde_json::json!({"data": "res"})) +/// .build() +/// .expect("expecting valid response") +/// }, +/// ) +/// .await +/// .expect("expecting successful response"); +/// +/// response.next_response().await; +/// } +/// .with_subscriber(assert_snapshot_subscriber!()) +/// .await +/// } +/// ``` +/// +/// You can pass in a configuration and a schema to the test harness. If you pass in a schema, the test harness will create a query planner and use the schema to extract subgraph schemas. +/// +/// +pub(crate) struct PluginTestHarness { + plugin: Box, + phantom: std::marker::PhantomData, +} +#[buildstructor::buildstructor] +impl PluginTestHarness { + #[builder] + pub(crate) async fn new<'a, 'b>(config: Option<&'a str>, schema: Option<&'b str>) -> Self { + let factory = crate::plugin::plugins() + .find(|factory| factory.type_id == TypeId::of::()) + .expect("plugin not registered"); + + let config = Configuration::from_str(config.unwrap_or_default()) + .expect("valid config required for test"); + + let name = &factory.name.replace("apollo.", ""); + let config_for_plugin = config + .validated_yaml + .clone() + .expect("invalid yaml") + .as_object() + .expect("invalid yaml") + .get(name) + .cloned() + .unwrap_or(Value::Object(Default::default())); + + let (supergraph_sdl, parsed_schema, subgraph_schemas) = if let Some(schema) = schema { + let planner = BridgeQueryPlanner::new(schema.to_string(), Arc::new(config)) + .await + .unwrap(); + ( + schema.to_string(), + planner.schema().definitions.clone(), + planner.subgraph_schemas(), + ) + } else { + ( + "".to_string(), + Valid::assume_valid(apollo_compiler::Schema::new()), + Default::default(), + ) + }; + + let plugin_init = PluginInit::builder() + .config(config_for_plugin.clone()) + .supergraph_sdl(Arc::new(supergraph_sdl)) + .supergraph_schema(Arc::new(parsed_schema)) + .subgraph_schemas(subgraph_schemas) + .notify(Notify::default()) + .build(); + + let plugin = factory + .create_instance(plugin_init) + .await + .expect("failed to create plugin"); + + Self { + plugin, + phantom: Default::default(), + } + } + + #[allow(dead_code)] + pub(crate) async fn call_router( + &self, + request: router::Request, + response_fn: fn(router::Request) -> router::Response, + ) -> Result { + let service: router::BoxService = router::BoxService::new( + ServiceBuilder::new() + .service_fn(move |req: router::Request| async move { Ok((response_fn)(req)) }), + ); + + self.plugin.router_service(service).call(request).await + } + + pub(crate) async fn call_supergraph( + &self, + request: supergraph::Request, + response_fn: fn(supergraph::Request) -> supergraph::Response, + ) -> Result { + let service: supergraph::BoxService = supergraph::BoxService::new( + ServiceBuilder::new() + .service_fn(move |req: supergraph::Request| async move { Ok((response_fn)(req)) }), + ); + + self.plugin.supergraph_service(service).call(request).await + } + + #[allow(dead_code)] + pub(crate) async fn call_subgraph( + &self, + request: subgraph::Request, + response_fn: fn(subgraph::Request) -> 
subgraph::Response, + ) -> Result { + let name = request.subgraph_name.clone(); + let service: subgraph::BoxService = subgraph::BoxService::new( + ServiceBuilder::new() + .service_fn(move |req: subgraph::Request| async move { Ok((response_fn)(req)) }), + ); + + self.plugin + .subgraph_service(&name.expect("subgraph name must be populated"), service) + .call(request) + .await + } + #[allow(dead_code)] + pub(crate) async fn call_http_client( + &self, + subgraph_name: &str, + request: http::HttpRequest, + response_fn: fn(http::HttpRequest) -> http::HttpResponse, + ) -> Result { + let service: http::BoxService = http::BoxService::new( + ServiceBuilder::new() + .service_fn(move |req: http::HttpRequest| async move { Ok((response_fn)(req)) }), + ); + + self.plugin + .http_client_service(subgraph_name, service) + .call(request) + .await + } +} From def5092917dc3ccf1f75c4164011968531619854 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Fri, 12 Apr 2024 10:31:48 +0200 Subject: [PATCH 09/46] fix the rhai version to 1.17 until we fix the test failures in 1.18 (#4945) otherwise the test_updated CI build will fail --- apollo-router/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index a03422dd21..b6fcae8cd3 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -177,7 +177,7 @@ prost = "0.12.3" prost-types = "0.12.3" proteus = "0.5.0" rand = "0.8.5" -rhai = { version = "1.17.1", features = ["sync", "serde", "internals"] } +rhai = { version = "=1.17.1", features = ["sync", "serde", "internals"] } regex = "1.10.3" reqwest = { version = "0.11.24", default-features = false, features = [ "rustls-tls", From 2abe809da12694e8b10e490b6e7ac09657ad5cec Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Fri, 12 Apr 2024 10:42:30 +0200 Subject: [PATCH 10/46] cross compile OSX x86_64 router builds from M1 (#4933) CircleCI OSX x86_64 workers are deprecated and will be removed in June. To get ahead of that, we are now building those versions from the M1 workers. This required a change in router-bridge, to start the deno instance of the query planner without snapshots: https://github.com/apollographql/federation-rs/commit/6c23a0785d874b4787dd5025c0da8c4916cdd50d In our tests, this did not really affect the startup time, this change is safe to use. Co-authored-by: o0Ignition0o --- .circleci/config.yml | 65 ++++++++++++++++++++-------------------- Cargo.lock | 4 +-- apollo-router/Cargo.toml | 2 +- 3 files changed, 36 insertions(+), 35 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index b512d02af2..584a165dcc 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -43,20 +43,13 @@ executors: environment: CARGO_BUILD_JOBS: 8 RUST_TEST_THREADS: 8 - arm_macos_build: &arm_macos_build_executor + macos_build: &macos_build_executor macos: # See https://circleci.com/docs/xcode-policy along with the support matrix # at https://circleci.com/docs/using-macos#supported-xcode-versions. # We use the major.minor notation to bring in compatible patches. xcode: 14.2 resource_class: macos.m1.medium.gen1 - intel_macos_build: &intel_macos_build_executor - macos: - # See https://circleci.com/docs/xcode-policy along with the support matrix - # at https://circleci.com/docs/using-macos#supported-xcode-versions. - # We use the major.minor notation to bring in compatible patches. 
- xcode: 14.2 - resource_class: macos.x86.medium.gen2 macos_test: &macos_test_executor macos: # See https://circleci.com/docs/xcode-policy along with the support matrix @@ -167,7 +160,7 @@ commands: - when: condition: or: - - equal: [ *arm_macos_build_executor, << parameters.platform >> ] + - equal: [ *macos_build_executor, << parameters.platform >> ] - equal: [ *macos_test_executor, << parameters.platform >> ] steps: - run: @@ -178,20 +171,7 @@ commands: - run: name: Write arch command: | - echo 'osx-aarch64' >> ~/.arch - - when: - condition: - equal: [ *intel_macos_build_executor, << parameters.platform >> ] - steps: - - run: - name: Make link to md5 - command: | - mkdir -p ~/.local/aliases - ln -s /sbin/md5 ~/.local/aliases/md5sum - - run: - name: Write arch - command: | - echo 'osx-x86' >> ~/.arch + echo 'osx' >> ~/.arch - when: condition: or: @@ -264,8 +244,7 @@ commands: - when: condition: or: - - equal: [ *intel_macos_build_executor, << parameters.platform >> ] - - equal: [ *arm_macos_build_executor, << parameters.platform >> ] + - equal: [ *macos_build_executor, << parameters.platform >> ] - equal: [ *macos_test_executor, << parameters.platform >> ] steps: - run: @@ -306,8 +285,7 @@ commands: - when: condition: or: - - equal: [ *intel_macos_build_executor, << parameters.platform >> ] - - equal: [ *arm_macos_build_executor, << parameters.platform >> ] + - equal: [ *macos_build_executor, << parameters.platform >> ] - equal: [ *macos_test_executor, << parameters.platform >> ] steps: - run: @@ -356,6 +334,15 @@ commands: name: Special case for Windows because of ssh-agent command: | printf "[net]\ngit-fetch-with-cli = true" >> ~/.cargo/Cargo.toml + - when: + condition: + or: + - equal: [ *macos_build_executor, << parameters.platform >> ] + steps: + - run: + name: Special case for OSX x86_64 builds + command: | + rustup target add x86_64-apple-darwin install_extra_tools: steps: @@ -608,8 +595,7 @@ jobs: - when: condition: or: - - equal: [ *intel_macos_build_executor, << parameters.platform >> ] - - equal: [ *arm_macos_build_executor, << parameters.platform >> ] + - equal: [ *macos_build_executor, << parameters.platform >> ] steps: - when: @@ -619,13 +605,28 @@ jobs: - run: cargo xtask release prepare nightly - run: command: > - cargo xtask dist + cargo xtask dist --target aarch64-apple-darwin + - run: + command: > + cargo xtask dist --target x86_64-apple-darwin - run: command: > mkdir -p artifacts - run: command: > cargo xtask package + --target aarch64-apple-darwin + --apple-team-id ${APPLE_TEAM_ID} + --apple-username ${APPLE_USERNAME} + --cert-bundle-base64 ${MACOS_CERT_BUNDLE_BASE64} + --cert-bundle-password ${MACOS_CERT_BUNDLE_PASSWORD} + --keychain-password ${MACOS_KEYCHAIN_PASSWORD} + --notarization-password ${MACOS_NOTARIZATION_PASSWORD} + --output artifacts/ + - run: + command: > + cargo xtask package + --target x86_64-apple-darwin --apple-team-id ${APPLE_TEAM_ID} --apple-username ${APPLE_USERNAME} --cert-bundle-base64 ${MACOS_CERT_BUNDLE_BASE64} @@ -958,7 +959,7 @@ workflows: matrix: parameters: platform: - [ intel_macos_build, arm_macos_build, windows_build, amd_linux_build, arm_linux_build ] + [ macos_build, windows_build, amd_linux_build, arm_linux_build ] - secops/wiz-docker: context: - platform-docker-ro @@ -1055,7 +1056,7 @@ workflows: matrix: parameters: platform: - [ intel_macos_build, arm_macos_build, windows_build, amd_linux_build, arm_linux_build ] + [ macos_build, windows_build, amd_linux_build, arm_linux_build ] filters: branches: ignore: /.*/ diff --git 
a/Cargo.lock b/Cargo.lock index b20449ff4b..92e74e8e73 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5710,9 +5710,9 @@ dependencies = [ [[package]] name = "router-bridge" -version = "0.5.17+v2.7.2" +version = "0.5.18+v2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f183e217179b38a4283e76ca62e3149ebe96512e9b1bd6b3933abab863f9a2c" +checksum = "673a5f56dd761938c87c89d33affb6f53e0129457d14bf12389f0cb4ebe74cfd" dependencies = [ "anyhow", "async-channel 1.9.0", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index b6fcae8cd3..92ca61a6ce 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -187,7 +187,7 @@ reqwest = { version = "0.11.24", default-features = false, features = [ "stream", ] } # note: this dependency should _always_ be pinned, prefix the version with an `=` -router-bridge = "=0.5.17+v2.7.2" +router-bridge = "=0.5.18+v2.7.2" rust-embed = "8.2.0" rustls = "0.21.10" rustls-native-certs = "0.6.3" From 847d0552ec004cfa2e218e2c37576b3b9c6e085c Mon Sep 17 00:00:00 2001 From: Nick Marsh Date: Fri, 12 Apr 2024 18:59:24 +1000 Subject: [PATCH 11/46] Router generation of sig and refs for apollo reporting (#4796) This PR introduces experimental support for generating the stats report key (signature) and referenced fields in native rust code instead of using what is generated by router-bridge as part of query planning. There were a number of gotchas that I found in the JS code that I have faithfully ported across, but at this point I've run enough unit tests and fuzz testing that I'm confident it's generating the same as the JS code. Co-authored-by: o0Ignition0o Co-authored-by: Geoffroy Couprie --- .../exp_experimental_rust_apollo_reporting.md | 5 + Cargo.lock | 49 +- .../src/apollo_studio_interop/mod.rs | 1945 +++++++++++++++++ .../testdata/schema_interop.graphql | 230 ++ apollo-router/src/configuration/mod.rs | 28 + ...nfiguration__tests__schema_generation.snap | 27 + apollo-router/src/lib.rs | 1 + .../src/query_planner/bridge_query_planner.rs | 121 +- .../query_planner/caching_query_planner.rs | 1 + fuzz/Cargo.toml | 22 + fuzz/examples/usage_reporting_router.rs | 80 + .../apollo_router_studio_interop.rs | 162 ++ fuzz/fuzz_targets/federation.rs | 2 +- fuzz/fuzz_targets/router.rs | 2 +- fuzz/router.yaml | 14 + fuzz/src/lib.rs | 10 +- fuzz/supergraph-moretypes.graphql | 169 ++ licenses.html | 663 +++++- 18 files changed, 3449 insertions(+), 82 deletions(-) create mode 100644 .changesets/exp_experimental_rust_apollo_reporting.md create mode 100644 apollo-router/src/apollo_studio_interop/mod.rs create mode 100644 apollo-router/src/apollo_studio_interop/testdata/schema_interop.graphql create mode 100644 fuzz/examples/usage_reporting_router.rs create mode 100644 fuzz/fuzz_targets/apollo_router_studio_interop.rs create mode 100644 fuzz/router.yaml create mode 100644 fuzz/supergraph-moretypes.graphql diff --git a/.changesets/exp_experimental_rust_apollo_reporting.md b/.changesets/exp_experimental_rust_apollo_reporting.md new file mode 100644 index 0000000000..b1f94ee146 --- /dev/null +++ b/.changesets/exp_experimental_rust_apollo_reporting.md @@ -0,0 +1,5 @@ +### Experimental implementation of Apollo usage report field generation ([PR 4796](https://github.com/apollographql/router/pull/4796)) + +This adds a new and experimental Rust implementation of the generation of the stats report key and referenced fields that are sent in Apollo usage reports, as part of the effort to replace the router-bridge with native Rust code. 
For now, we recommend that the `experimental_apollo_metrics_generation_mode` setting should be left at the default value while we confirm that it generates identical payloads to router-bridge. + +By [@bonnici](https://github.com/bonnici) in https://github.com/apollographql/router/pull/4796 \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 92e74e8e73..d9b3bf9e36 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -87,9 +87,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.6" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "const-random", @@ -942,7 +942,7 @@ dependencies = [ "hex", "hmac", "http 0.2.11", - "http 1.0.0", + "http 1.1.0", "once_cell", "percent-encoding", "sha2", @@ -1035,7 +1035,7 @@ dependencies = [ "aws-smithy-types", "bytes", "http 0.2.11", - "http 1.0.0", + "http 1.1.0", "pin-project-lite", "tokio", "tracing", @@ -1633,23 +1633,21 @@ checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" [[package]] name = "const-random" -version = "0.1.15" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368a7a772ead6ce7e1de82bfb04c485f3db8ec744f72925af5735e29a22cc18e" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" dependencies = [ "const-random-macro", - "proc-macro-hack", ] [[package]] name = "const-random-macro" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d7d6ab3c3a2282db210df5f02c4dab6e0a7057af0fb7ebd4070f30fe05c0ddb" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" dependencies = [ "getrandom 0.2.10", "once_cell", - "proc-macro-hack", "tiny-keccak", ] @@ -1910,9 +1908,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.0.0" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f711ade317dd348950a9910f81c5947e3d8907ebd2b83f76203ff1807e6a2bc2" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" dependencies = [ "cfg-if", "cpufeatures", @@ -2608,9 +2606,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.1.20" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e825f6987101665dea6ec934c09ec6d721de7bc1bf92248e1d5810c8cd636b77" +checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f" [[package]] name = "filetime" @@ -3303,9 +3301,9 @@ dependencies = [ [[package]] name = "http" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -5126,12 +5124,6 @@ dependencies = [ "toml_edit 0.19.14", ] -[[package]] -name = "proc-macro-hack" -version = "0.5.20+deprecated" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" - [[package]] name = "proc-macro2" version = "0.4.30" @@ -5737,13 +5729,24 @@ dependencies = [ name = "router-fuzz" version = "0.0.0" dependencies = [ + "anyhow", + "apollo-compiler", "apollo-parser", + "apollo-router", 
"apollo-smith", + "async-trait", "env_logger", + "http 0.2.11", "libfuzzer-sys", "log", "reqwest", + "router-bridge", + "schemars", + "serde", "serde_json", + "serde_json_bytes", + "tokio", + "tower", ] [[package]] @@ -8128,7 +8131,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" dependencies = [ - "curve25519-dalek 4.0.0", + "curve25519-dalek 4.1.2", "rand_core 0.6.4", "serde", "zeroize", diff --git a/apollo-router/src/apollo_studio_interop/mod.rs b/apollo-router/src/apollo_studio_interop/mod.rs new file mode 100644 index 0000000000..a07842a7ab --- /dev/null +++ b/apollo-router/src/apollo_studio_interop/mod.rs @@ -0,0 +1,1945 @@ +//! Generation of usage reporting fields +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::collections::HashSet; +use std::fmt; + +use apollo_compiler::ast::Argument; +use apollo_compiler::ast::DirectiveList; +use apollo_compiler::ast::Name; +use apollo_compiler::ast::OperationType; +use apollo_compiler::ast::Value; +use apollo_compiler::ast::VariableDefinition; +use apollo_compiler::executable::Field; +use apollo_compiler::executable::Fragment; +use apollo_compiler::executable::FragmentSpread; +use apollo_compiler::executable::InlineFragment; +use apollo_compiler::executable::Operation; +use apollo_compiler::executable::Selection; +use apollo_compiler::executable::SelectionSet; +use apollo_compiler::validation::Valid; +use apollo_compiler::ExecutableDocument; +use apollo_compiler::Node; +use apollo_compiler::Schema; +use router_bridge::planner::ReferencedFieldsForType; +use router_bridge::planner::UsageReporting; + +/// The result of the generate_usage_reporting function which contains a UsageReporting struct and +/// functions that allow comparison with another ComparableUsageReporting or UsageReporting object. +pub(crate) struct ComparableUsageReporting { + /// The UsageReporting fields + pub(crate) result: UsageReporting, +} + +/// Enum specifying the result of a comparison. +pub(crate) enum UsageReportingComparisonResult { + /// The UsageReporting instances are the same + Equal, + /// The stats_report_key in the UsageReporting instances are different + StatsReportKeyNotEqual, + /// The referenced_fields in the UsageReporting instances are different. When comparing referenced + /// fields, we ignore the ordering of field names. + ReferencedFieldsNotEqual, + /// Both the stats_report_key and referenced_fields in the UsageReporting instances are different. + BothNotEqual, +} + +impl ComparableUsageReporting { + /// Compare this to another UsageReporting. 
+ pub(crate) fn compare(&self, other: &UsageReporting) -> UsageReportingComparisonResult { + let sig_equal = self.result.stats_report_key == other.stats_report_key; + let refs_equal = self.compare_referenced_fields(&other.referenced_fields_by_type); + match (sig_equal, refs_equal) { + (true, true) => UsageReportingComparisonResult::Equal, + (false, true) => UsageReportingComparisonResult::StatsReportKeyNotEqual, + (true, false) => UsageReportingComparisonResult::ReferencedFieldsNotEqual, + (false, false) => UsageReportingComparisonResult::BothNotEqual, + } + } + + fn compare_referenced_fields( + &self, + other_ref_fields: &HashMap, + ) -> bool { + let self_ref_fields = &self.result.referenced_fields_by_type; + if self_ref_fields.len() != other_ref_fields.len() { + return false; + } + + for (name, self_refs) in self_ref_fields.iter() { + let maybe_other_refs = other_ref_fields.get(name); + if let Some(other_refs) = maybe_other_refs { + if self_refs.is_interface != other_refs.is_interface { + return false; + } + + let self_field_names_set: HashSet<_> = + self_refs.field_names.clone().into_iter().collect(); + let other_field_names_set: HashSet<_> = + other_refs.field_names.clone().into_iter().collect(); + if self_field_names_set != other_field_names_set { + return false; + } + } else { + return false; + } + } + + true + } +} + +/// Generate a ComparableUsageReporting containing the stats_report_key (a normalized version of the operation signature) +/// and referenced fields of an operation. The document used to generate the signature and for the references can be +/// different to handle cases where the operation has been filtered, but we want to keep the same signature. +pub(crate) fn generate_usage_reporting( + signature_doc: &ExecutableDocument, + references_doc: &ExecutableDocument, + operation_name: &Option, + schema: &Valid, +) -> ComparableUsageReporting { + let mut generator = UsageReportingGenerator { + signature_doc, + references_doc, + operation_name, + schema, + fragments_map: HashMap::new(), + fields_by_type: HashMap::new(), + fields_by_interface: HashMap::new(), + fragment_spread_set: HashSet::new(), + }; + + generator.generate() +} + +struct UsageReportingGenerator<'a> { + signature_doc: &'a ExecutableDocument, + references_doc: &'a ExecutableDocument, + operation_name: &'a Option, + schema: &'a Valid, + fragments_map: HashMap>, + fields_by_type: HashMap>, + fields_by_interface: HashMap, + fragment_spread_set: HashSet, +} + +impl UsageReportingGenerator<'_> { + fn generate(&mut self) -> ComparableUsageReporting { + ComparableUsageReporting { + result: UsageReporting { + stats_report_key: self.generate_stats_report_key(), + referenced_fields_by_type: self.generate_apollo_reporting_refs(), + }, + } + } + + fn generate_stats_report_key(&mut self) -> String { + self.fragments_map.clear(); + + match self + .signature_doc + .get_operation(self.operation_name.as_deref()) + .ok() + { + None => "".to_string(), + Some(operation) => { + self.extract_signature_fragments(&operation.selection_set); + self.format_operation_for_report(operation) + } + } + } + + fn extract_signature_fragments(&mut self, selection_set: &SelectionSet) { + for selection in &selection_set.selections { + match selection { + Selection::Field(field) => { + self.extract_signature_fragments(&field.selection_set); + } + Selection::InlineFragment(fragment) => { + self.extract_signature_fragments(&fragment.selection_set); + } + Selection::FragmentSpread(fragment_node) => { + let fragment_name = 
fragment_node.fragment_name.to_string(); + if let Entry::Vacant(e) = self.fragments_map.entry(fragment_name) { + if let Some(fragment) = self + .signature_doc + .fragments + .get(&fragment_node.fragment_name) + { + e.insert(fragment.clone()); + } + } + } + } + } + } + + fn format_operation_for_report(&self, operation: &Node) -> String { + // The result in the name of the operation + let op_name = match &operation.name { + None => "-".into(), + Some(node) => node.to_string(), + }; + let mut result = format!("# {}\n", op_name); + + // Followed by a sorted list of fragments + let mut sorted_fragments: Vec<_> = self.fragments_map.iter().collect(); + sorted_fragments.sort_by_key(|&(k, _)| k); + + sorted_fragments.into_iter().for_each(|(_, f)| { + result.push_str(&ApolloReportingSignatureFormatter::Fragment(f).to_string()) + }); + + // Followed by the operation + result.push_str(&ApolloReportingSignatureFormatter::Operation(operation).to_string()); + + result + } + + fn generate_apollo_reporting_refs(&mut self) -> HashMap { + self.fragments_map.clear(); + self.fields_by_type.clear(); + self.fields_by_interface.clear(); + + match self + .references_doc + .get_operation(self.operation_name.as_deref()) + .ok() + { + None => HashMap::new(), + Some(operation) => { + let operation_type = match operation.operation_type { + OperationType::Query => "Query", + OperationType::Mutation => "Mutation", + OperationType::Subscription => "Subscription", + }; + self.extract_fields(&operation_type.into(), &operation.selection_set); + + self.fields_by_type + .iter() + .filter_map(|(type_name, field_names)| { + if field_names.is_empty() { + None + } else { + let refs = ReferencedFieldsForType { + field_names: field_names.iter().cloned().collect(), + is_interface: *self + .fields_by_interface + .get(type_name) + .unwrap_or(&false), + }; + + Some((type_name.clone(), refs)) + } + }) + .collect() + } + } + } + + fn extract_fields(&mut self, parent_type: &String, selection_set: &SelectionSet) { + if !self.fields_by_interface.contains_key(parent_type) { + let field_schema_type = self.schema.types.get(parent_type.as_str()); + let is_interface = field_schema_type.is_some_and(|t| t.is_interface()); + self.fields_by_interface + .insert(parent_type.clone(), is_interface); + } + + for selection in &selection_set.selections { + match selection { + Selection::Field(field) => { + self.fields_by_type + .entry(parent_type.clone()) + .or_default() + .insert(field.name.to_string()); + + let field_type = field.selection_set.ty.to_string(); + self.extract_fields(&field_type, &field.selection_set); + } + Selection::InlineFragment(fragment) => { + let frag_type_name = match fragment.type_condition.clone() { + Some(fragment_type) => fragment_type.to_string(), + None => parent_type.clone(), + }; + self.extract_fields(&frag_type_name, &fragment.selection_set); + } + Selection::FragmentSpread(fragment) => { + if !self.fragment_spread_set.contains(&fragment.fragment_name) { + self.fragment_spread_set + .insert(fragment.fragment_name.clone()); + + if let Some(fragment) = + self.references_doc.fragments.get(&fragment.fragment_name) + { + let fragment_type = fragment.selection_set.ty.to_string(); + self.extract_fields(&fragment_type, &fragment.selection_set); + } + } + } + } + } + } +} + +enum ApolloReportingSignatureFormatter<'a> { + Operation(&'a Node), + Fragment(&'a Node), + Argument(&'a Node), + Field(&'a Node), +} + +impl<'a> fmt::Display for ApolloReportingSignatureFormatter<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + 
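+        // Each variant delegates to the node-specific formatter used to build the normalized signature.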
match self { + ApolloReportingSignatureFormatter::Operation(operation) => { + format_operation(operation, f) + } + ApolloReportingSignatureFormatter::Fragment(fragment) => format_fragment(fragment, f), + ApolloReportingSignatureFormatter::Argument(argument) => format_argument(argument, f), + ApolloReportingSignatureFormatter::Field(field) => format_field(field, f), + } + } +} + +fn format_operation(operation: &Node, f: &mut fmt::Formatter) -> fmt::Result { + let shorthand = operation.operation_type == OperationType::Query + && operation.name.is_none() + && operation.variables.is_empty() + && operation.directives.is_empty(); + + if !shorthand { + f.write_str(operation.operation_type.name())?; + if let Some(name) = &operation.name { + write!(f, " {}", name)?; + } + + // print variables sorted by name + if !operation.variables.is_empty() { + f.write_str("(")?; + let mut sorted_variables = operation.variables.clone(); + sorted_variables.sort_by(|a, b| a.name.cmp(&b.name)); + for (index, variable) in sorted_variables.iter().enumerate() { + if index != 0 { + f.write_str(",")?; + } + format_variable(variable, f)?; + } + f.write_str(")")?; + } + + // In the JS implementation, only the fragment directives are sorted + format_directives(&operation.directives, false, f)?; + } + + format_selection_set(&operation.selection_set, f) +} + +fn format_selection_set(selection_set: &SelectionSet, f: &mut fmt::Formatter) -> fmt::Result { + // print selection set sorted by name with fields followed by named fragments followed by inline fragments + let mut fields: Vec<&Node> = Vec::new(); + let mut named_fragments: Vec<&Node> = Vec::new(); + let mut inline_fragments: Vec<&Node> = Vec::new(); + for selection in selection_set.selections.iter() { + match selection { + Selection::Field(field) => { + fields.push(field); + } + Selection::FragmentSpread(fragment_spread) => { + named_fragments.push(fragment_spread); + } + Selection::InlineFragment(inline_fragment) => { + inline_fragments.push(inline_fragment); + } + } + } + + if !fields.is_empty() || !named_fragments.is_empty() || !inline_fragments.is_empty() { + fields.sort_by(|&a, &b| a.name.cmp(&b.name)); + named_fragments.sort_by(|&a, &b| a.fragment_name.cmp(&b.fragment_name)); + // Note that inline fragments are not sorted in the JS implementation + + f.write_str("{")?; + + for (i, &field) in fields.iter().enumerate() { + let field_str = ApolloReportingSignatureFormatter::Field(field).to_string(); + f.write_str(&field_str)?; + + // We need to insert a space if this is not the last field and it ends in an alphanumeric character + if i < fields.len() - 1 + && field_str + .chars() + .last() + .map_or(false, |c| c.is_alphanumeric()) + { + f.write_str(" ")?; + } + } + + for &frag in named_fragments.iter() { + format_fragment_spread(frag, f)?; + } + + for &frag in inline_fragments.iter() { + format_inline_fragment(frag, f)?; + } + + f.write_str("}")?; + } + + Ok(()) +} + +fn format_variable(arg: &Node, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "${}:{}", arg.name, arg.ty)?; + if let Some(value) = &arg.default_value { + f.write_str("=")?; + format_value(value, f)?; + } + format_directives(&arg.directives, false, f) +} + +fn format_argument(arg: &Node, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}:", arg.name)?; + format_value(&arg.value, f) +} + +fn format_field(field: &Node, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(&field.name)?; + + let mut sorted_args = field.arguments.clone(); + if !sorted_args.is_empty() { + sorted_args.sort_by(|a, 
b| a.name.cmp(&b.name)); + + f.write_str("(")?; + + // The graphql-js implementation will use newlines and indentation instead of commas if the length of the "arg line" is + // over 80 characters. This "arg line" includes the alias followed by ": " if the field has an alias (which is never + // the case for now), followed by all argument names and values separated by ": ", surrounded with brackets. Our usage + // reporting plugin replaces all newlines + indentation with a single space, so we have to replace commas with spaces if + // the line length is too long. + let arg_strings: Vec = sorted_args + .iter() + .map(|a| ApolloReportingSignatureFormatter::Argument(a).to_string()) + .collect(); + // Adjust for incorrect spacing generated by the argument formatter - 2 extra characters for the surrounding brackets, plus + // 2 extra characters per argument for the separating space and the space between the argument name and type. + let original_line_length = + 2 + arg_strings.iter().map(|s| s.len()).sum::() + (arg_strings.len() * 2); + let separator = if original_line_length > 80 { " " } else { "," }; + + for (index, arg_string) in arg_strings.iter().enumerate() { + f.write_str(arg_string)?; + + // We only need to insert a separating space it's not the last arg and if the string ends in an alphanumeric character + if index < arg_strings.len() - 1 + && arg_string + .chars() + .last() + .map_or(true, |c| c.is_alphanumeric()) + { + f.write_str(separator)?; + } + } + f.write_str(")")?; + } + + // In the JS implementation, only the fragment directives are sorted + format_directives(&field.directives, false, f)?; + format_selection_set(&field.selection_set, f) +} + +fn format_fragment_spread( + fragment_spread: &Node, + f: &mut fmt::Formatter, +) -> fmt::Result { + write!(f, "...{}", fragment_spread.fragment_name)?; + format_directives(&fragment_spread.directives, true, f) +} + +fn format_inline_fragment( + inline_fragment: &Node, + f: &mut fmt::Formatter, +) -> fmt::Result { + if let Some(type_name) = &inline_fragment.type_condition { + write!(f, "...on {}", type_name)?; + } else { + f.write_str("...")?; + } + + format_directives(&inline_fragment.directives, true, f)?; + format_selection_set(&inline_fragment.selection_set, f) +} + +fn format_fragment(fragment: &Node, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "fragment {} on {}", + &fragment.name.to_string(), + &fragment.selection_set.ty.to_string() + )?; + format_directives(&fragment.directives, true, f)?; + format_selection_set(&fragment.selection_set, f) +} + +fn format_directives( + directives: &DirectiveList, + sorted: bool, + f: &mut fmt::Formatter, +) -> fmt::Result { + let mut sorted_directives = directives.clone(); + if sorted { + sorted_directives.sort_by(|a, b| a.name.cmp(&b.name)); + } + + for directive in sorted_directives.iter() { + write!(f, "@{}", directive.name)?; + + let mut sorted_args = directive.arguments.clone(); + if !sorted_args.is_empty() { + sorted_args.sort_by(|a, b| a.name.cmp(&b.name)); + + f.write_str("(")?; + + for (index, argument) in sorted_args.iter().enumerate() { + if index != 0 { + f.write_str(",")?; + } + f.write_str(&ApolloReportingSignatureFormatter::Argument(argument).to_string())?; + } + + f.write_str(")")?; + } + } + + Ok(()) +} + +fn format_value(value: &Value, f: &mut fmt::Formatter) -> fmt::Result { + match value { + Value::String(_) => f.write_str("\"\""), + Value::Float(_) | Value::Int(_) => f.write_str("0"), + Value::Object(_) => f.write_str("{}"), + Value::List(_) => f.write_str("[]"), 
+ rest => f.write_str(&rest.to_string()), + } +} + +#[cfg(test)] +mod tests { + use apollo_compiler::Schema; + use router_bridge::planner::PlanOptions; + use router_bridge::planner::Planner; + use router_bridge::planner::QueryPlannerConfig; + use test_log::test; + + use super::*; + + // Generate the signature and referenced fields using router-bridge to confirm that the expected value we used is correct. + // We can remove this when we no longer use the bridge but should keep the rust implementation verifications. + async fn assert_bridge_results( + schema_str: &str, + query_str: &str, + expected_sig: &str, + expected_refs: &HashMap, + ) { + let planner = Planner::::new( + schema_str.to_string(), + QueryPlannerConfig::default(), + ) + .await + .unwrap(); + let plan = planner + .plan(query_str.to_string(), None, PlanOptions::default()) + .await + .unwrap(); + let bridge_result = ComparableUsageReporting { + result: plan.usage_reporting, + }; + let expected_result = UsageReporting { + stats_report_key: expected_sig.to_string(), + referenced_fields_by_type: expected_refs.clone(), + }; + assert!(matches!( + bridge_result.compare(&expected_result), + UsageReportingComparisonResult::Equal + )); + } + + fn assert_expected_results( + actual: &ComparableUsageReporting, + expected_sig: &str, + expected_refs: &HashMap, + ) { + let expected_result = UsageReporting { + stats_report_key: expected_sig.to_string(), + referenced_fields_by_type: expected_refs.clone(), + }; + assert!(matches!( + actual.compare(&expected_result), + UsageReportingComparisonResult::Equal + )); + } + + #[test(tokio::test)] + async fn test_complex_query() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query UnusedQuery { + noInputQuery { + enumResponse + } + } + + fragment UnusedFragment on EverythingResponse { + enumResponse + } + + fragment Fragment2 on EverythingResponse { + basicTypes { + nullableFloat + } + } + + query TransformedQuery { + + + scalarInputQuery(idInput: "a1", listInput: [], boolInput: true, intInput: 1, stringInput: "x", floatInput: 1.2) @skip(if: false) @include(if: true) { + ...Fragment2, + + + objectTypeWithInputField(boolInput: true, secondInput: false) { + stringField + __typename + intField + } + + enumResponse + interfaceResponse { + sharedField + ... on InterfaceImplementation2 { + implementation2Field + } + ... 
on InterfaceImplementation1 { + implementation1Field + } + } + ...Fragment1, + } + } + + fragment Fragment1 on EverythingResponse { + basicTypes { + nonNullFloat + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = + generate_usage_reporting(&doc, &doc, &Some("TransformedQuery".into()), &schema); + + let expected_sig = "# TransformedQuery\nfragment Fragment1 on EverythingResponse{basicTypes{nonNullFloat}}fragment Fragment2 on EverythingResponse{basicTypes{nullableFloat}}query TransformedQuery{scalarInputQuery(boolInput:true floatInput:0 idInput:\"\"intInput:0 listInput:[]stringInput:\"\")@skip(if:false)@include(if:true){enumResponse interfaceResponse{sharedField...on InterfaceImplementation2{implementation2Field}...on InterfaceImplementation1{implementation1Field}}objectTypeWithInputField(boolInput:true,secondInput:false){__typename intField stringField}...Fragment1...Fragment2}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["scalarInputQuery".into()], + is_interface: false, + }, + ), + ( + "BasicTypesResponse".into(), + ReferencedFieldsForType { + field_names: vec!["nullableFloat".into(), "nonNullFloat".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec![ + "basicTypes".into(), + "objectTypeWithInputField".into(), + "enumResponse".into(), + "interfaceResponse".into(), + ], + is_interface: false, + }, + ), + ( + "AnInterface".into(), + ReferencedFieldsForType { + field_names: vec!["sharedField".into()], + is_interface: true, + }, + ), + ( + "ObjectTypeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["stringField".into(), "__typename".into(), "intField".into()], + is_interface: false, + }, + ), + ( + "InterfaceImplementation1".into(), + ReferencedFieldsForType { + field_names: vec!["implementation1Field".into()], + is_interface: false, + }, + ), + ( + "InterfaceImplementation2".into(), + ReferencedFieldsForType { + field_names: vec!["implementation2Field".into()], + is_interface: false, + }, + ), + ]); + assert_expected_results(&generated, expected_sig, &expected_refs); + + // the router-bridge planner will throw errors on unused fragments/queries so we remove them here + let sanitised_query_str = r#"fragment Fragment2 on EverythingResponse { + basicTypes { + nullableFloat + } + } + + query TransformedQuery { + + + scalarInputQuery(idInput: "a1", listInput: [], boolInput: true, intInput: 1, stringInput: "x", floatInput: 1.2) @skip(if: false) @include(if: true) { + ...Fragment2, + + + objectTypeWithInputField(boolInput: true, secondInput: false) { + stringField + __typename + intField + } + + enumResponse + interfaceResponse { + sharedField + ... on InterfaceImplementation2 { + implementation2Field + } + ... on InterfaceImplementation1 { + implementation1Field + } + } + ...Fragment1, + } + } + + fragment Fragment1 on EverythingResponse { + basicTypes { + nonNullFloat + } + }"#; + + assert_bridge_results( + schema_str, + sanitised_query_str, + expected_sig, + &expected_refs, + ) + .await; + } + + #[test(tokio::test)] + async fn test_complex_references() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query Query($secondInput: Boolean!) 
{ + scalarResponseQuery + noInputQuery { + basicTypes { + nonNullId + nonNullInt + } + enumResponse + interfaceImplementationResponse { + sharedField + implementation2Field + } + interfaceResponse { + ... on InterfaceImplementation1 { + implementation1Field + sharedField + } + ... on InterfaceImplementation2 { + implementation2Field + sharedField + } + } + listOfUnions { + ... on UnionType1 { + nullableString + } + } + objectTypeWithInputField(secondInput: $secondInput) { + intField + } + } + basicInputTypeQuery(input: { someFloat: 1 }) { + unionResponse { + ... on UnionType1 { + nullableString + } + } + unionType2Response { + unionType2Field + } + listOfObjects { + stringField + } + } + }"#; + + let schema: Valid = + Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &Some("Query".into()), &schema); + + let expected_sig = "# Query\nquery Query($secondInput:Boolean!){basicInputTypeQuery(input:{}){listOfObjects{stringField}unionResponse{...on UnionType1{nullableString}}unionType2Response{unionType2Field}}noInputQuery{basicTypes{nonNullId nonNullInt}enumResponse interfaceImplementationResponse{implementation2Field sharedField}interfaceResponse{...on InterfaceImplementation1{implementation1Field sharedField}...on InterfaceImplementation2{implementation2Field sharedField}}listOfUnions{...on UnionType1{nullableString}}objectTypeWithInputField(secondInput:$secondInput){intField}}scalarResponseQuery}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec![ + "scalarResponseQuery".into(), + "noInputQuery".into(), + "basicInputTypeQuery".into(), + ], + is_interface: false, + }, + ), + ( + "BasicTypesResponse".into(), + ReferencedFieldsForType { + field_names: vec!["nonNullId".into(), "nonNullInt".into()], + is_interface: false, + }, + ), + ( + "ObjectTypeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["intField".into(), "stringField".into()], + is_interface: false, + }, + ), + ( + "UnionType2".into(), + ReferencedFieldsForType { + field_names: vec!["unionType2Field".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec![ + "basicTypes".into(), + "enumResponse".into(), + "interfaceImplementationResponse".into(), + "interfaceResponse".into(), + "listOfUnions".into(), + "objectTypeWithInputField".into(), + "unionResponse".into(), + "unionType2Response".into(), + "listOfObjects".into(), + ], + is_interface: false, + }, + ), + ( + "InterfaceImplementation1".into(), + ReferencedFieldsForType { + field_names: vec!["implementation1Field".into(), "sharedField".into()], + is_interface: false, + }, + ), + ( + "UnionType1".into(), + ReferencedFieldsForType { + field_names: vec!["nullableString".into()], + is_interface: false, + }, + ), + ( + "InterfaceImplementation2".into(), + ReferencedFieldsForType { + field_names: vec!["sharedField".into(), "implementation2Field".into()], + is_interface: false, + }, + ), + ]); + assert_expected_results(&generated, expected_sig, &expected_refs); + + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; + } + + #[test(tokio::test)] + async fn test_basic_whitespace() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query MyQuery { + noInputQuery { + id + } + }"#; + + let schema = 
Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &Some("MyQuery".into()), &schema); + + let expected_sig = "# MyQuery\nquery MyQuery{noInputQuery{id}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["noInputQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["id".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; + } + + #[test(tokio::test)] + async fn test_anonymous_query() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query { + noInputQuery { + id + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &None, &schema); + + let expected_sig = "# -\n{noInputQuery{id}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["noInputQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["id".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; + } + + #[test(tokio::test)] + async fn test_anonymous_mutation() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"mutation { + noInputMutation { + id + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &None, &schema); + + let expected_sig = "# -\nmutation{noInputMutation{id}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Mutation".into(), + ReferencedFieldsForType { + field_names: vec!["noInputMutation".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["id".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; + } + + #[test(tokio::test)] + async fn test_anonymous_subscription() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str: &str = r#"subscription { + noInputSubscription { + id + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &None, &schema); + + let expected_sig = "# -\nsubscription{noInputSubscription{id}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Subscription".into(), + ReferencedFieldsForType { + field_names: vec!["noInputSubscription".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["id".into()], + is_interface: 
false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; + } + + #[test(tokio::test)] + async fn test_ordered_fields_and_variables() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query VariableScalarInputQuery($idInput: ID!, $boolInput: Boolean!, $floatInput: Float!, $intInput: Int!, $listInput: [String!]!, $stringInput: String!, $nullableStringInput: String) { + sortQuery( + idInput: $idInput + boolInput: $boolInput + floatInput: $floatInput + INTInput: $intInput + listInput: $listInput + stringInput: $stringInput + nullableStringInput: $nullableStringInput + ) { + zzz + CCC + nullableId + aaa + id + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting( + &doc, + &doc, + &Some("VariableScalarInputQuery".into()), + &schema, + ); + + let expected_sig = "# VariableScalarInputQuery\nquery VariableScalarInputQuery($boolInput:Boolean!,$floatInput:Float!,$idInput:ID!,$intInput:Int!,$listInput:[String!]!,$nullableStringInput:String,$stringInput:String!){sortQuery(INTInput:$intInput boolInput:$boolInput floatInput:$floatInput idInput:$idInput listInput:$listInput nullableStringInput:$nullableStringInput stringInput:$stringInput){CCC aaa id nullableId zzz}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["sortQuery".into()], + is_interface: false, + }, + ), + ( + "SortResponse".into(), + ReferencedFieldsForType { + field_names: vec![ + "aaa".into(), + "CCC".into(), + "id".into(), + "nullableId".into(), + "zzz".into(), + ], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; + } + + #[test(tokio::test)] + async fn test_fragments() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query FragmentQuery { + noInputQuery { + listOfBools + interfaceResponse { + sharedField + ... on InterfaceImplementation2 { + implementation2Field + } + ...bbbInterfaceFragment + ...aaaInterfaceFragment + ... { + ... on InterfaceImplementation1 { + implementation1Field + } + } + ... on InterfaceImplementation1 { + implementation1Field + } + } + unionResponse { + ... on UnionType2 { + unionType2Field + } + ... 
on UnionType1 { + unionType1Field + } + } + ...zzzFragment + ...aaaFragment + ...ZZZFragment + } + } + + fragment zzzFragment on EverythingResponse { + listOfInterfaces { + sharedField + } + } + + fragment ZZZFragment on EverythingResponse { + listOfInterfaces { + sharedField + } + } + + fragment aaaFragment on EverythingResponse { + listOfInterfaces { + sharedField + } + } + + fragment UnusedFragment on InterfaceImplementation2 { + sharedField + implementation2Field + } + + fragment bbbInterfaceFragment on InterfaceImplementation2 { + sharedField + implementation2Field + } + + fragment aaaInterfaceFragment on InterfaceImplementation1 { + sharedField + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = + generate_usage_reporting(&doc, &doc, &Some("FragmentQuery".into()), &schema); + + let expected_sig = "# FragmentQuery\nfragment ZZZFragment on EverythingResponse{listOfInterfaces{sharedField}}fragment aaaFragment on EverythingResponse{listOfInterfaces{sharedField}}fragment aaaInterfaceFragment on InterfaceImplementation1{sharedField}fragment bbbInterfaceFragment on InterfaceImplementation2{implementation2Field sharedField}fragment zzzFragment on EverythingResponse{listOfInterfaces{sharedField}}query FragmentQuery{noInputQuery{interfaceResponse{sharedField...aaaInterfaceFragment...bbbInterfaceFragment...on InterfaceImplementation2{implementation2Field}...{...on InterfaceImplementation1{implementation1Field}}...on InterfaceImplementation1{implementation1Field}}listOfBools unionResponse{...on UnionType2{unionType2Field}...on UnionType1{unionType1Field}}...ZZZFragment...aaaFragment...zzzFragment}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "UnionType1".into(), + ReferencedFieldsForType { + field_names: vec!["unionType1Field".into()], + is_interface: false, + }, + ), + ( + "UnionType2".into(), + ReferencedFieldsForType { + field_names: vec!["unionType2Field".into()], + is_interface: false, + }, + ), + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["noInputQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec![ + "listOfInterfaces".into(), + "listOfBools".into(), + "interfaceResponse".into(), + "unionResponse".into(), + ], + is_interface: false, + }, + ), + ( + "InterfaceImplementation1".into(), + ReferencedFieldsForType { + field_names: vec!["sharedField".into(), "implementation1Field".into()], + is_interface: false, + }, + ), + ( + "InterfaceImplementation1".into(), + ReferencedFieldsForType { + field_names: vec!["implementation1Field".into(), "sharedField".into()], + is_interface: false, + }, + ), + ( + "AnInterface".into(), + ReferencedFieldsForType { + field_names: vec!["sharedField".into()], + is_interface: true, + }, + ), + ( + "InterfaceImplementation2".into(), + ReferencedFieldsForType { + field_names: vec!["sharedField".into(), "implementation2Field".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + + // the router-bridge planner will throw errors on unused fragments/queries so we remove them here + let sanitised_query_str = r#"query FragmentQuery { + noInputQuery { + listOfBools + interfaceResponse { + sharedField + ... on InterfaceImplementation2 { + implementation2Field + } + ...bbbInterfaceFragment + ...aaaInterfaceFragment + ... { + ... 
on InterfaceImplementation1 { + implementation1Field + } + } + ... on InterfaceImplementation1 { + implementation1Field + } + } + unionResponse { + ... on UnionType2 { + unionType2Field + } + ... on UnionType1 { + unionType1Field + } + } + ...zzzFragment + ...aaaFragment + ...ZZZFragment + } + } + + fragment zzzFragment on EverythingResponse { + listOfInterfaces { + sharedField + } + } + + fragment ZZZFragment on EverythingResponse { + listOfInterfaces { + sharedField + } + } + + fragment aaaFragment on EverythingResponse { + listOfInterfaces { + sharedField + } + } + + fragment bbbInterfaceFragment on InterfaceImplementation2 { + sharedField + implementation2Field + } + + fragment aaaInterfaceFragment on InterfaceImplementation1 { + sharedField + }"#; + assert_bridge_results( + schema_str, + sanitised_query_str, + expected_sig, + &expected_refs, + ) + .await; + } + + #[test(tokio::test)] + async fn test_directives() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"fragment Fragment1 on InterfaceImplementation1 { + sharedField + implementation1Field + } + + fragment Fragment2 on InterfaceImplementation2 @withArgs(arg2: "" arg1: "test" arg3: true arg5: [1,2] arg4: 2) @noArgs { + sharedField + implementation2Field + } + + query DirectiveQuery @withArgs(arg2: "" arg1: "test") @noArgs { + noInputQuery { + enumResponse @withArgs(arg3: false arg5: [1,2] arg4: 2) @noArgs + unionResponse { + ... on UnionType1 @withArgs(arg2: "" arg1: "test") @noArgs { + unionType1Field + } + } + interfaceResponse { + ... Fragment1 @withArgs(arg1: "test") @noArgs + ... Fragment2 + } + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = + generate_usage_reporting(&doc, &doc, &Some("DirectiveQuery".into()), &schema); + + let expected_sig = "# DirectiveQuery\nfragment Fragment1 on InterfaceImplementation1{implementation1Field sharedField}fragment Fragment2 on InterfaceImplementation2@noArgs@withArgs(arg1:\"\",arg2:\"\",arg3:true,arg4:0,arg5:[]){implementation2Field sharedField}query DirectiveQuery@withArgs(arg1:\"\",arg2:\"\")@noArgs{noInputQuery{enumResponse@withArgs(arg3:false,arg4:0,arg5:[])@noArgs interfaceResponse{...Fragment1@noArgs@withArgs(arg1:\"\")...Fragment2}unionResponse{...on UnionType1@noArgs@withArgs(arg1:\"\",arg2:\"\"){unionType1Field}}}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "UnionType1".into(), + ReferencedFieldsForType { + field_names: vec!["unionType1Field".into()], + is_interface: false, + }, + ), + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["noInputQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec![ + "enumResponse".into(), + "interfaceResponse".into(), + "unionResponse".into(), + ], + is_interface: false, + }, + ), + ( + "InterfaceImplementation1".into(), + ReferencedFieldsForType { + field_names: vec!["sharedField".into(), "implementation1Field".into()], + is_interface: false, + }, + ), + ( + "InterfaceImplementation1".into(), + ReferencedFieldsForType { + field_names: vec!["implementation1Field".into(), "sharedField".into()], + is_interface: false, + }, + ), + ( + "InterfaceImplementation2".into(), + ReferencedFieldsForType { + field_names: vec!["sharedField".into(), "implementation2Field".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, 
expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; + } + + #[test(tokio::test)] + async fn test_aliases() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query AliasQuery { + xxAlias: enumInputQuery(enumInput: SOME_VALUE_1) { + aliased: enumResponse + } + aaAlias: enumInputQuery(enumInput: SOME_VALUE_2) { + aliasedAgain: enumResponse + } + ZZAlias: enumInputQuery(enumInput: SOME_VALUE_3) { + enumResponse + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &Some("AliasQuery".into()), &schema); + + let expected_sig = "# AliasQuery\nquery AliasQuery{enumInputQuery(enumInput:SOME_VALUE_1){enumResponse}enumInputQuery(enumInput:SOME_VALUE_2){enumResponse}enumInputQuery(enumInput:SOME_VALUE_3){enumResponse}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["enumResponse".into()], + is_interface: false, + }, + ), + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["enumInputQuery".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; + } + + #[test(tokio::test)] + async fn test_inline_values() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query InlineInputTypeQuery { + inputTypeQuery(input: { + inputString: "foo", + inputInt: 42, + inputBoolean: null, + nestedType: { someFloat: 4.2 }, + enumInput: SOME_VALUE_1, + nestedTypeList: [ { someFloat: 4.2, someNullableFloat: null } ], + listInput: [1, 2, 3] + }) { + enumResponse + } + }"#; + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = + generate_usage_reporting(&doc, &doc, &Some("InlineInputTypeQuery".into()), &schema); + + let expected_sig = "# InlineInputTypeQuery\nquery InlineInputTypeQuery{inputTypeQuery(input:{}){enumResponse}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["enumResponse".into()], + is_interface: false, + }, + ), + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["inputTypeQuery".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; + } + + #[test(tokio::test)] + async fn test_root_type_fragment() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query SomeQuery { + ... on Query { + ... 
{ + basicResponseQuery { + id + } + } + } + noInputQuery { + enumResponse + } + }"#; + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &None, &schema); + + let expected_sig = "# SomeQuery\nquery SomeQuery{noInputQuery{enumResponse}...on Query{...{basicResponseQuery{id}}}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "BasicResponse".into(), + ReferencedFieldsForType { + field_names: vec!["id".into()], + is_interface: false, + }, + ), + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into(), "noInputQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["enumResponse".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; + } + + #[test(tokio::test)] + async fn test_directive_arg_spacing() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query { + basicResponseQuery { + id @withArgs(arg1: "") + id + } + }"#; + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &None, &schema); + + let expected_sig = "# -\n{basicResponseQuery{id@withArgs(arg1:\"\")id}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "BasicResponse".into(), + ReferencedFieldsForType { + field_names: vec!["id".into()], + is_interface: false, + }, + ), + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; + } + + #[test(tokio::test)] + async fn test_operation_with_single_variable() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query QueryWithVar($input_enum: SomeEnum) { + enumInputQuery(enumInput: $input_enum) { + listOfBools + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &Some("QueryWithVar".into()), &schema); + + let expected_sig = "# QueryWithVar\nquery QueryWithVar($input_enum:SomeEnum){enumInputQuery(enumInput:$input_enum){listOfBools}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["enumInputQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["listOfBools".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; + } + + #[test(tokio::test)] + async fn test_operation_with_multiple_variables() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query QueryWithVars($stringInput: String!, $floatInput: Float!, $boolInput: Boolean!) 
{
+          scalarInputQuery(listInput: ["x"], stringInput: $stringInput, intInput: 6, floatInput: $floatInput, boolInput: $boolInput, idInput: "y") {
+            enumResponse
+          }
+          inputTypeQuery(input: { inputInt: 2, inputString: "z", listInput: [], nestedType: { someFloat: 5 }}) {
+            enumResponse
+          }
+        }"#;
+
+        let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+        let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+        let generated =
+            generate_usage_reporting(&doc, &doc, &Some("QueryWithVars".into()), &schema);
+
+        let expected_sig = "# QueryWithVars\nquery QueryWithVars($boolInput:Boolean!,$floatInput:Float!,$stringInput:String!){inputTypeQuery(input:{}){enumResponse}scalarInputQuery(boolInput:$boolInput floatInput:$floatInput idInput:\"\"intInput:0 listInput:[]stringInput:$stringInput){enumResponse}}";
+        let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+            (
+                "Query".into(),
+                ReferencedFieldsForType {
+                    field_names: vec!["scalarInputQuery".into(), "inputTypeQuery".into()],
+                    is_interface: false,
+                },
+            ),
+            (
+                "EverythingResponse".into(),
+                ReferencedFieldsForType {
+                    field_names: vec!["enumResponse".into()],
+                    is_interface: false,
+                },
+            ),
+        ]);
+
+        assert_expected_results(&generated, expected_sig, &expected_refs);
+        assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+    }
+
+    #[test(tokio::test)]
+    async fn test_field_arg_comma_or_space() {
+        let schema_str = include_str!("testdata/schema_interop.graphql");
+
+        let query_str = r#"query QueryArgLength($StringInputWithAVeryyyLongNameSoLineLengthIs80: String!, $inputType: AnotherInputType, $enumInputWithAVryLongNameSoLineLengthIsOver80: SomeEnum, $enumInputType: EnumInputType) {
+          enumInputQuery (enumInput:$enumInputWithAVryLongNameSoLineLengthIsOver80,inputType:$enumInputType) {
+            enumResponse
+          }
+          defaultArgQuery(stringInput:$StringInputWithAVeryyyLongNameSoLineLengthIs80,inputType:$inputType) {
+            id
+          }
+        }"#;
+
+        let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+        let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+        let generated =
+            generate_usage_reporting(&doc, &doc, &Some("QueryArgLength".into()), &schema);
+
+        // enumInputQuery has a variable line length of 81, so it should be separated by spaces (which are converted from newlines
+        // in the original implementation).
+        // defaultArgQuery has a variable line length of 80, so it should be separated by commas.
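+        // (A sketch of that rule, assuming the graphql-js printer used by the legacy
+        // implementation: when a field's rendered argument line exceeds 80 characters,
+        // the printer breaks the arguments onto separate lines, and whitespace
+        // reduction then turns those newlines into spaces; an argument line at or
+        // under 80 characters stays on one line and keeps its commas.)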
+ let expected_sig = "# QueryArgLength\nquery QueryArgLength($StringInputWithAVeryyyLongNameSoLineLengthIs80:String!,$enumInputType:EnumInputType,$enumInputWithAVryLongNameSoLineLengthIsOver80:SomeEnum,$inputType:AnotherInputType){defaultArgQuery(inputType:$inputType stringInput:$StringInputWithAVeryyyLongNameSoLineLengthIs80){id}enumInputQuery(enumInput:$enumInputWithAVryLongNameSoLineLengthIsOver80 inputType:$enumInputType){enumResponse}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["enumInputQuery".into(), "defaultArgQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["enumResponse".into()], + is_interface: false, + }, + ), + ( + "BasicResponse".into(), + ReferencedFieldsForType { + field_names: vec!["id".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; + } + + #[test(tokio::test)] + async fn test_operation_arg_always_commas() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query QueryArgLength($enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80: SomeEnum, $enumInputType: EnumInputType) { + enumInputQuery (enumInput:$enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80,inputType:$enumInputType) { + enumResponse + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = + generate_usage_reporting(&doc, &doc, &Some("QueryArgLength".into()), &schema); + + // operation variables shouldn't ever be converted to spaces, since the line length check is only on field variables + // in the original implementation + let expected_sig = "# QueryArgLength\nquery QueryArgLength($enumInputType:EnumInputType,$enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80:SomeEnum){enumInputQuery(enumInput:$enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80 inputType:$enumInputType){enumResponse}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["enumInputQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["enumResponse".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; + } + + #[test(tokio::test)] + async fn test_compare() { + let source = ComparableUsageReporting { + result: UsageReporting { + stats_report_key: "# -\n{basicResponseQuery{field1 field2}}".into(), + referenced_fields_by_type: HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ), + ( + "SomeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["field1".into(), "field2".into()], + is_interface: false, + }, + ), + ]), + }, + }; + + // Same signature and ref fields should match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: source.result.stats_report_key.clone(), + referenced_fields_by_type: source.result.referenced_fields_by_type.clone(), + }), + UsageReportingComparisonResult::Equal + )); + + // Reordered 
signature should not match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: "# -\n{basicResponseQuery{field2 field1}}".into(), + referenced_fields_by_type: source.result.referenced_fields_by_type.clone(), + }), + UsageReportingComparisonResult::StatsReportKeyNotEqual + )); + + // Different signature should not match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: + "# NamedQuery\nquery NamedQuery {basicResponseQuery{field1 field2}}".into(), + referenced_fields_by_type: source.result.referenced_fields_by_type.clone(), + }), + UsageReportingComparisonResult::StatsReportKeyNotEqual + )); + + // Reordered parent type should match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: source.result.stats_report_key.clone(), + referenced_fields_by_type: HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ), + ( + "SomeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["field1".into(), "field2".into()], + is_interface: false, + }, + ), + ]) + }), + UsageReportingComparisonResult::Equal + )); + + // Reordered fields should match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: source.result.stats_report_key.clone(), + referenced_fields_by_type: HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ), + ( + "SomeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["field2".into(), "field1".into()], + is_interface: false, + }, + ), + ]) + }), + UsageReportingComparisonResult::Equal + )); + + // Added parent type should not match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: source.result.stats_report_key.clone(), + referenced_fields_by_type: HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ), + ( + "SomeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["field1".into(), "field2".into()], + is_interface: false, + }, + ), + ( + "OtherType".into(), + ReferencedFieldsForType { + field_names: vec!["otherField".into()], + is_interface: false, + }, + ), + ]) + }), + UsageReportingComparisonResult::ReferencedFieldsNotEqual + )); + + // Added field should not match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: source.result.stats_report_key.clone(), + referenced_fields_by_type: HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ), + ( + "SomeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["field1".into(), "field2".into(), "field3".into()], + is_interface: false, + }, + ), + ]) + }), + UsageReportingComparisonResult::ReferencedFieldsNotEqual + )); + + // Missing parent type should not match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: source.result.stats_report_key.clone(), + referenced_fields_by_type: HashMap::from([( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ),]) + }), + UsageReportingComparisonResult::ReferencedFieldsNotEqual + )); + + // Missing field should not match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: source.result.stats_report_key.clone(), + referenced_fields_by_type: 
HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ), + ( + "SomeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["field1".into()], + is_interface: false, + }, + ), + ]) + }), + UsageReportingComparisonResult::ReferencedFieldsNotEqual + )); + + // Both different should not match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: "# -\n{basicResponseQuery{field2 field1}}".into(), + referenced_fields_by_type: HashMap::from([( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ),]) + }), + UsageReportingComparisonResult::BothNotEqual + )); + } +} diff --git a/apollo-router/src/apollo_studio_interop/testdata/schema_interop.graphql b/apollo-router/src/apollo_studio_interop/testdata/schema_interop.graphql new file mode 100644 index 0000000000..f97f128c10 --- /dev/null +++ b/apollo-router/src/apollo_studio_interop/testdata/schema_interop.graphql @@ -0,0 +1,230 @@ +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query + mutation: Mutation + subscription: Subscription +} + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @noArgs on FIELD | FRAGMENT_DEFINITION | FRAGMENT_SPREAD | INLINE_FRAGMENT | MUTATION | QUERY | SUBSCRIPTION + +directive @withArgs(arg1: String = "Default", arg2: String, arg3: Boolean, arg4: Int, arg5: [ID]) on FIELD | FRAGMENT_DEFINITION | FRAGMENT_SPREAD | INLINE_FRAGMENT | MUTATION | QUERY | SUBSCRIPTION + +interface AnInterface + @join__type(graph: MAIN) +{ + sharedField: String! +} + +input AnotherInputType + @join__type(graph: MAIN) +{ + anotherInput: ID! +} + +type BasicResponse + @join__type(graph: MAIN) +{ + id: Int! + nullableId: Int +} + +type BasicTypesResponse + @join__type(graph: MAIN) +{ + nullableId: ID + nonNullId: ID! + nullableInt: Int + nonNullInt: Int! + nullableString: String + nonNullString: String! + nullableFloat: Float + nonNullFloat: Float! + nullableBoolean: Boolean + nonNullBoolean: Boolean! +} + +input EnumInputType + @join__type(graph: MAIN) +{ + enumInput: SomeEnum! + enumListInput: [SomeEnum!]! + nestedEnumType: [NestedEnumInputType] +} + +type EverythingResponse + @join__type(graph: MAIN) +{ + id: Int! + nullableId: Int + basicTypes: BasicTypesResponse + enumResponse: SomeEnum + interfaceResponse: AnInterface + interfaceImplementationResponse: InterfaceImplementation2 + unionResponse: UnionType + unionType2Response: UnionType2 + listOfBools: [Boolean!]! 
+ listOfInterfaces: [AnInterface] + listOfUnions: [UnionType] + objectTypeWithInputField(boolInput: Boolean, secondInput: Boolean!): ObjectTypeResponse + listOfObjects: [ObjectTypeResponse] +} + +input InputType + @join__type(graph: MAIN) +{ + inputString: String! + inputInt: Int! + inputBoolean: Boolean + nestedType: NestedInputType! + enumInput: SomeEnum + listInput: [Int!]! + nestedTypeList: [NestedInputType] +} + +input InputTypeWithDefault + @join__type(graph: MAIN) +{ + nonNullId: ID! + nonNullIdWithDefault: ID! = "id" + nullableId: ID + nullableIdWithDefault: ID = "id" +} + +type InterfaceImplementation1 implements AnInterface + @join__implements(graph: MAIN, interface: "AnInterface") + @join__type(graph: MAIN) +{ + sharedField: String! + implementation1Field: Int! +} + +type InterfaceImplementation2 implements AnInterface + @join__implements(graph: MAIN, interface: "AnInterface") + @join__type(graph: MAIN) +{ + sharedField: String! + implementation2Field: Float! +} + +scalar join__FieldSet + +enum join__Graph { + MAIN @join__graph(name: "main", url: "http://localhost:4001/graphql") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Mutation + @join__type(graph: MAIN) +{ + noInputMutation: EverythingResponse! +} + +input NestedEnumInputType + @join__type(graph: MAIN) +{ + someEnum: SomeEnum +} + +input NestedInputType + @join__type(graph: MAIN) +{ + someFloat: Float! + someNullableFloat: Float +} + +type ObjectTypeResponse + @join__type(graph: MAIN) +{ + stringField: String! + intField: Int! + nullableField: String +} + +type Query + @join__type(graph: MAIN) +{ + inputTypeQuery(input: InputType!): EverythingResponse! + scalarInputQuery(listInput: [String!]!, stringInput: String!, nullableStringInput: String, intInput: Int!, floatInput: Float!, boolInput: Boolean!, enumInput: SomeEnum, idInput: ID!): EverythingResponse! + noInputQuery: EverythingResponse! + basicInputTypeQuery(input: NestedInputType!): EverythingResponse! + anotherInputTypeQuery(input: AnotherInputType): EverythingResponse! + enumInputQuery(enumInput: SomeEnum, inputType: EnumInputType): EverythingResponse! + basicResponseQuery: BasicResponse! + scalarResponseQuery: String + defaultArgQuery(stringInput: String! = "default", inputType: AnotherInputType = {anotherInput: "inputDefault"}): BasicResponse! + inputTypeDefaultQuery(input: InputTypeWithDefault): BasicResponse! + sortQuery(listInput: [String!]!, stringInput: String!, nullableStringInput: String, INTInput: Int!, floatInput: Float!, boolInput: Boolean!, enumInput: SomeEnum, idInput: ID!): SortResponse! +} + +enum SomeEnum + @join__type(graph: MAIN) +{ + SOME_VALUE_1 @join__enumValue(graph: MAIN) + SOME_VALUE_2 @join__enumValue(graph: MAIN) + SOME_VALUE_3 @join__enumValue(graph: MAIN) +} + +type SortResponse + @join__type(graph: MAIN) +{ + id: Int! + nullableId: Int + zzz: Int + aaa: Int + CCC: Int +} + +type Subscription + @join__type(graph: MAIN) +{ + noInputSubscription: EverythingResponse! +} + +union UnionType + @join__type(graph: MAIN) + @join__unionMember(graph: MAIN, member: "UnionType1") + @join__unionMember(graph: MAIN, member: "UnionType2") + = UnionType1 | UnionType2 + +type UnionType1 + @join__type(graph: MAIN) +{ + unionType1Field: String! + nullableString: String +} + +type UnionType2 + @join__type(graph: MAIN) +{ + unionType2Field: String! 
+ nullableString: String +} \ No newline at end of file diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index 6e49389d22..ae5674b665 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -168,6 +168,10 @@ pub struct Configuration { #[serde(default)] pub(crate) experimental_api_schema_generation_mode: ApiSchemaMode, + /// Set the Apollo usage report signature and referenced field generation implementation to use. + #[serde(default)] + pub(crate) experimental_apollo_metrics_generation_mode: ApolloMetricsGenerationMode, + /// Plugin configuration #[serde(default)] pub(crate) plugins: UserPlugins, @@ -210,6 +214,21 @@ pub(crate) enum ApiSchemaMode { Both, } +/// Apollo usage report signature and referenced field generation modes. +#[derive(Clone, PartialEq, Eq, Default, Derivative, Serialize, Deserialize, JsonSchema)] +#[derivative(Debug)] +#[serde(rename_all = "lowercase")] +pub(crate) enum ApolloMetricsGenerationMode { + /// Use the new Rust-based implementation. + New, + /// Use the old JavaScript-based implementation. + Legacy, + /// Use Rust-based and Javascript-based implementations side by side, logging warnings if the + /// implementations disagree. + #[default] + Both, +} + impl<'de> serde::Deserialize<'de> for Configuration { fn deserialize(deserializer: D) -> Result where @@ -236,6 +255,7 @@ impl<'de> serde::Deserialize<'de> for Configuration { limits: Limits, experimental_chaos: Chaos, experimental_batching: Batching, + experimental_apollo_metrics_generation_mode: ApolloMetricsGenerationMode, } let ad_hoc: AdHocConfiguration = serde::Deserialize::deserialize(deserializer)?; @@ -254,6 +274,9 @@ impl<'de> serde::Deserialize<'de> for Configuration { .chaos(ad_hoc.experimental_chaos) .uplink(ad_hoc.uplink) .experimental_batching(ad_hoc.experimental_batching) + .experimental_apollo_metrics_generation_mode( + ad_hoc.experimental_apollo_metrics_generation_mode, + ) .build() .map_err(|e| serde::de::Error::custom(e.to_string())) } @@ -290,6 +313,7 @@ impl Configuration { chaos: Option, uplink: Option, experimental_api_schema_generation_mode: Option, + experimental_apollo_metrics_generation_mode: Option, experimental_batching: Option, ) -> Result { #[cfg(not(test))] @@ -317,6 +341,7 @@ impl Configuration { limits: operation_limits.unwrap_or_default(), experimental_chaos: chaos.unwrap_or_default(), experimental_api_schema_generation_mode: experimental_api_schema_generation_mode.unwrap_or_default(), + experimental_apollo_metrics_generation_mode: experimental_apollo_metrics_generation_mode.unwrap_or_default(), plugins: UserPlugins { plugins: Some(plugins), }, @@ -365,6 +390,7 @@ impl Configuration { uplink: Option, experimental_batching: Option, experimental_api_schema_generation_mode: Option, + experimental_apollo_metrics_generation_mode: Option, ) -> Result { let configuration = Self { validated_yaml: Default::default(), @@ -377,6 +403,8 @@ impl Configuration { experimental_chaos: chaos.unwrap_or_default(), experimental_api_schema_generation_mode: experimental_api_schema_generation_mode .unwrap_or_default(), + experimental_apollo_metrics_generation_mode: + experimental_apollo_metrics_generation_mode.unwrap_or_default(), plugins: UserPlugins { plugins: Some(plugins), }, diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 
21648e4a98..687f514a2a 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -1348,6 +1348,33 @@ expression: "&schema" } ] }, + "experimental_apollo_metrics_generation_mode": { + "description": "Set the Apollo usage report signature and referenced field generation implementation to use.", + "default": "both", + "oneOf": [ + { + "description": "Use the new Rust-based implementation.", + "type": "string", + "enum": [ + "new" + ] + }, + { + "description": "Use the old JavaScript-based implementation.", + "type": "string", + "enum": [ + "legacy" + ] + }, + { + "description": "Use Rust-based and Javascript-based implementations side by side, logging warnings if the implementations disagree.", + "type": "string", + "enum": [ + "both" + ] + } + ] + }, "experimental_batching": { "description": "Batching configuration.", "default": { diff --git a/apollo-router/src/lib.rs b/apollo-router/src/lib.rs index cef6ac055f..f10afe636a 100644 --- a/apollo-router/src/lib.rs +++ b/apollo-router/src/lib.rs @@ -50,6 +50,7 @@ pub mod plugin; #[macro_use] pub(crate) mod metrics; +mod apollo_studio_interop; pub(crate) mod axum_factory; mod cache; mod configuration; diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index f36bea7467..a051548cf2 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -25,6 +25,9 @@ use tower::Service; use super::PlanNode; use super::QueryKey; +use crate::apollo_studio_interop::generate_usage_reporting; +use crate::apollo_studio_interop::UsageReportingComparisonResult; +use crate::configuration::ApolloMetricsGenerationMode; use crate::error::PlanErrors; use crate::error::QueryPlannerError; use crate::error::SchemaError; @@ -357,6 +360,7 @@ impl BridgeQueryPlanner { } } + #[allow(clippy::too_many_arguments)] async fn plan( &self, original_query: String, @@ -365,6 +369,7 @@ impl BridgeQueryPlanner { key: CacheKeyMetadata, selections: Query, plan_options: PlanOptions, + doc: &ParsedDocument, ) -> Result { let planner_result = match self .planner @@ -387,10 +392,15 @@ impl BridgeQueryPlanner { }; // the `statsReportKey` field should match the original query instead of the filtered query, to index them all under the same query - let operation_signature = if original_query != filtered_query { + let operation_signature = if matches!( + self.configuration + .experimental_apollo_metrics_generation_mode, + ApolloMetricsGenerationMode::Legacy | ApolloMetricsGenerationMode::Both + ) && original_query != filtered_query + { Some( self.planner - .operation_signature(original_query, operation) + .operation_signature(original_query.clone(), operation.clone()) .await .map_err(QueryPlannerError::RouterBridgeError)?, ) @@ -411,6 +421,111 @@ impl BridgeQueryPlanner { usage_reporting.stats_report_key = sig; } + if matches!( + self.configuration + .experimental_apollo_metrics_generation_mode, + ApolloMetricsGenerationMode::New | ApolloMetricsGenerationMode::Both + ) { + // If the query is filtered, we want to generate the signature using the original query and generate the + // reference using the filtered query. To do this, we need to re-parse the original query here. 
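+            // Note: if re-parsing fails, the `unwrap_or(doc.clone())` below falls back to
+            // the already-parsed filtered document instead of erroring, so a usage report
+            // is still generated (from the filtered query) rather than dropped.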
+ let signature_doc = if original_query != filtered_query { + Query::parse_document( + &original_query, + operation.clone().as_deref(), + &self.schema, + &self.configuration, + ) + .unwrap_or(doc.clone()) + } else { + doc.clone() + }; + + let generated_usage_reporting = generate_usage_reporting( + &signature_doc.executable, + &doc.executable, + &operation, + &self.schema.definitions, + ); + + // Ignore comparison if the operation name is an empty string since there is a known issue where + // router behaviour is incorrect in that case, and it also generates incorrect usage reports. + // https://github.com/apollographql/router/issues/4837 + let is_empty_operation_name = operation.map_or(false, |s| s.is_empty()); + let is_in_both_metrics_mode = matches!( + self.configuration + .experimental_apollo_metrics_generation_mode, + ApolloMetricsGenerationMode::Both + ); + if !is_empty_operation_name && is_in_both_metrics_mode { + let comparison_result = generated_usage_reporting.compare(&usage_reporting); + + if matches!( + comparison_result, + UsageReportingComparisonResult::StatsReportKeyNotEqual + | UsageReportingComparisonResult::BothNotEqual + ) { + tracing::warn!( + monotonic_counter.apollo.router.operations.telemetry.studio.signature = 1u64, + generation.is_matched = false, + "Mismatch between the Apollo usage reporting signature generated in router and router-bridge" + ); + tracing::debug!( + "Different signatures generated between router and router-bridge:\n{}\n{}", + generated_usage_reporting.result.stats_report_key, + usage_reporting.stats_report_key, + ); + } else { + tracing::info!( + monotonic_counter + .apollo + .router + .operations + .telemetry + .studio + .signature = 1u64, + generation.is_matched = true, + ); + } + + if matches!( + comparison_result, + UsageReportingComparisonResult::ReferencedFieldsNotEqual + | UsageReportingComparisonResult::BothNotEqual + ) { + tracing::warn!( + monotonic_counter.apollo.router.operations.telemetry.studio.references = 1u64, + generation.is_matched = false, + "Mismatch between the Apollo usage report referenced fields generated in router and router-bridge" + ); + tracing::debug!( + "Different referenced fields generated between router and router-bridge:\n{:?}\n{:?}", + generated_usage_reporting.result.referenced_fields_by_type, + usage_reporting.referenced_fields_by_type, + ); + } else { + tracing::info!( + monotonic_counter + .apollo + .router + .operations + .telemetry + .studio + .references = 1u64, + generation.is_matched = true, + ); + } + } else if matches!( + self.configuration + .experimental_apollo_metrics_generation_mode, + ApolloMetricsGenerationMode::New + ) { + usage_reporting.stats_report_key = + generated_usage_reporting.result.stats_report_key; + usage_reporting.referenced_fields_by_type = + generated_usage_reporting.result.referenced_fields_by_type; + } + } + Ok(QueryPlannerContent::Plan { plan: Arc::new(super::QueryPlan { usage_reporting: Arc::new(usage_reporting), @@ -675,6 +790,7 @@ impl BridgeQueryPlanner { key.metadata, selections, key.plan_options, + &doc, ) .await } @@ -980,6 +1096,7 @@ mod tests { CacheKeyMetadata::default(), selections, PlanOptions::default(), + &doc, ) .await .unwrap_err(); diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs index ed2cb0d396..485c85460a 100644 --- a/apollo-router/src/query_planner/caching_query_planner.rs +++ b/apollo-router/src/query_planner/caching_query_planner.rs @@ -429,6 +429,7 @@ where }); } + // This 
will be overridden when running in ApolloMetricsGenerationMode::New mode if let Some(QueryPlannerContent::Plan { plan, .. }) = &content { context .extensions() diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml index 7510f063bf..f26f53a03b 100644 --- a/fuzz/Cargo.toml +++ b/fuzz/Cargo.toml @@ -11,13 +11,28 @@ cargo-fuzz = true [dependencies] libfuzzer-sys = "0.4" +apollo-compiler = "=1.0.0-beta.15" apollo-smith = { version = "0.5.0", features = ["parser-impl"] } apollo-parser = "0.7.6" env_logger = "0.10.2" log = "0.4" reqwest = { version = "0.11", default-features = false, features = ["json", "blocking"] } +router-bridge = "=0.5.17+v2.7.2" serde_json = "1" +tokio = { version = "1.36.0", features = ["full"] } +[dev-dependencies] +anyhow = "1" +apollo-router = { path = "../apollo-router" } +async-trait = "0.1" +schemars = { version = "0.8", features = ["url"] } +serde = "1" +serde_json_bytes = "0.2" +tower = { version = "0.4", features = ["full"] } +http = "0.2" + +[[example]] +name = "usage_reporting_router" [[bin]] name = "invariant_router" @@ -42,3 +57,10 @@ name = "federation" path = "fuzz_targets/federation.rs" test = false doc = false + +[[bin]] +name = "apollo_router_studio_interop" +path = "fuzz_targets/apollo_router_studio_interop.rs" +test = false +doc = false +bench = false diff --git a/fuzz/examples/usage_reporting_router.rs b/fuzz/examples/usage_reporting_router.rs new file mode 100644 index 0000000000..a98a3435cd --- /dev/null +++ b/fuzz/examples/usage_reporting_router.rs @@ -0,0 +1,80 @@ +use std::ops::ControlFlow; + +use anyhow::Result; +use apollo_router::graphql; +use apollo_router::layers::ServiceBuilderExt; +use apollo_router::plugin::Plugin; +use apollo_router::plugin::PluginInit; +use apollo_router::register_plugin; +use apollo_router::services::execution; +use apollo_router::services::supergraph; +use tower::BoxError; +use tower::ServiceBuilder; +use tower::ServiceExt; + +#[derive(Debug)] +struct ExposeReferencedFieldsByType { + #[allow(dead_code)] + configuration: bool, +} + +#[async_trait::async_trait] +impl Plugin for ExposeReferencedFieldsByType { + type Config = bool; + + async fn new(init: PluginInit) -> Result { + Ok(Self { + configuration: init.config, + }) + } + + fn supergraph_service(&self, service: supergraph::BoxService) -> supergraph::BoxService { + ServiceBuilder::new() + .map_first_graphql_response( + |context, http_parts, mut graphql_response: graphql::Response| { + graphql_response.extensions.insert( + "usageReporting", + context.get("usageReporting").unwrap().unwrap(), + ); + (http_parts, graphql_response) + }, + ) + .service(service) + .boxed() + } + + fn execution_service(&self, service: execution::BoxService) -> execution::BoxService { + ServiceBuilder::new() + .checkpoint(|req: execution::Request| { + let as_json: serde_json_bytes::Value = + serde_json_bytes::to_value(&req.query_plan).unwrap(); + + req.context.insert_json_value( + "usageReporting", + as_json.get("usage_reporting").unwrap().clone(), + ); + // we don't need to execute the request, there's no subgraphs anyway + Ok(ControlFlow::Break( + execution::Response::fake_builder() + .context(req.context) + .build() + .unwrap(), + )) + }) + .service(service) + .boxed() + } +} + +register_plugin!( + "apollo-test", + "expose_referenced_fields_by_type", + ExposeReferencedFieldsByType +); + +// make sure you rebuild before you fuzz! 
+// in the /fuzz directory (you need to be there because fuzz is not in the workspace)
+// $ cargo build --example usage_reporting_router
+fn main() -> Result<()> {
+    apollo_router::main()
+}
diff --git a/fuzz/fuzz_targets/apollo_router_studio_interop.rs b/fuzz/fuzz_targets/apollo_router_studio_interop.rs
new file mode 100644
index 0000000000..e4e031c80f
--- /dev/null
+++ b/fuzz/fuzz_targets/apollo_router_studio_interop.rs
@@ -0,0 +1,162 @@
+#![no_main]
+
+use std::env;
+use std::process::Child;
+use std::process::Command;
+use std::process::Stdio;
+use std::sync::atomic::AtomicBool;
+use std::sync::OnceLock;
+
+use apollo_compiler::ExecutableDocument;
+use apollo_compiler::Schema;
+#[path = "../../apollo-router/src/apollo_studio_interop/mod.rs"]
+mod apollo_router_usage_reporting;
+use apollo_router_usage_reporting::generate_usage_reporting;
+use apollo_router_usage_reporting::UsageReportingComparisonResult;
+use libfuzzer_sys::fuzz_target;
+use router_bridge::planner::UsageReporting;
+use router_fuzz::generate_valid_operation;
+use serde_json::json;
+
+const ROUTER_CMD: &str = "./target/debug/examples/usage_reporting_router";
+// const SCHEMA_PATH: &str = "fuzz/supergraph.graphql";
+// const SCHEMA_PATH: &str = "fuzz/supergraph-fed2.graphql";
+// This schema contains more types, fields, and directives so we can test as much of signature and referenced field
+// generation as possible. apollo_smith doesn't support random generation of input objects, union types, etc so it's
+// still not comprehensive.
+const SCHEMA_PATH: &str = "fuzz/supergraph-moretypes.graphql";
+const ROUTER_CONFIG_PATH: &str = "fuzz/router.yaml";
+const ROUTER_URL: &str = "http://localhost:4100";
+static ROUTER_INIT: AtomicBool = AtomicBool::new(false);
+
+static mut ROUTER_PROCESS: OnceLock<ChildProcessGuard> = OnceLock::new();
+
+#[derive(Debug)]
+struct ChildProcessGuard(Child);
+impl Drop for ChildProcessGuard {
+    fn drop(&mut self) {
+        if let Err(e) = self.0.kill() {
+            eprintln!("Could not kill child process: {}", e);
+        }
+    }
+}
+
+/*
+Ideally this fuzzer would just call the router-bridge's Planner.plan function directly instead of spinning up a new
+router executable, but when we tried to do that, we ran into some very confusing serialization issues. The running
+theory is that the fuzzer runs a couple of sanitizers / custom flags, which deno was not happy with. We work around
+this by spawning a router in a separate process and sending requests to the router instead. The usage_reporting
+payload is not usually exposed from router responses, so we have to use a plugin to extract it. This was done as an
+example so we could avoid polluting the main fuzzer dependencies.
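+
+The request flow, roughly: the fuzz target POSTs each generated operation to a router started with
+fuzz/router.yaml, which enables experimental.expose_query_plan and the
+apollo-test.expose_referenced_fields_by_type plugin from fuzz/examples/usage_reporting_router.rs.
+That plugin short-circuits execution and copies the query plan's usage_reporting into the response
+extensions, which this target then deserializes and compares against the Rust-generated report.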
+ +To run this fuzzer: +* if this is the first time running it, or you've made changes to router code + * go to the /fuzz directory (you need to be there because fuzz is not in the workspace) + * run `cargo build --example usage_reporting_router` +* start the fuzzer using `cargo +nightly fuzz run apollo_router_studio_interop` from the root directory + * if you get an Address already in use error, make sure you `killall usage_reporting_router` before a new run +*/ + +fuzz_target!(|data: &[u8]| { + let _ = env_logger::try_init(); + + if !ROUTER_INIT.swap(true, std::sync::atomic::Ordering::Relaxed) { + let mut cmd = + Command::new(env::var("ROUTER_CMD").unwrap_or_else(|_| ROUTER_CMD.to_string())) + .arg("--supergraph") + .arg( + env::var("ROUTER_SCHEMA_PATH") + .unwrap_or_else(|_| SCHEMA_PATH.to_string()), + ).arg("--config") + .arg( + env::var("ROUTER_CONFIG_PATH") + .unwrap_or_else(|_| ROUTER_CONFIG_PATH.to_string()), + ) + .arg("--hot-reload") + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()) + .spawn() + .expect("cannot launch the router\nThe fuzzer cannot work unless you run `cargo build --example usage_reporting_router` in the `fuzz` directory.\nDid you forget to run cargo build before you run the fuzzer?"); + + println!("waiting for router to start up"); + std::thread::sleep(std::time::Duration::from_secs(5)); + if let Ok(Some(exit_status)) = cmd.try_wait() { + panic!("the router exited with exit code : {}", exit_status); + } + unsafe { ROUTER_PROCESS.set(ChildProcessGuard(cmd)) } + .expect("cannot set the router child process"); + } + + let (op_str, schema_str) = match generate_valid_operation(data, SCHEMA_PATH) { + Ok(d) => (d.0, d.1), + Err(_err) => { + println!("Failed to generate valid operation"); + return; + } + }; + + // If the generated schema or operation doesn't pass validation, the call to the router will + // fail, so we don't want to continue with the test. 
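+    // (Both parse_and_validate calls below come from apollo-compiler; treating their
+    // errors as "skip this input" keeps the fuzzer focused on operations the router
+    // would actually accept, at the cost of silently discarding some generated inputs.)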
+ let schema = match Schema::parse_and_validate(schema_str, "schema.graphql") { + Ok(s) => s, + Err(_err) => { + // println!("Failed to parse and validate schema"); + return; + } + }; + let doc = match ExecutableDocument::parse_and_validate(&schema, &op_str, "query.graphql") { + Ok(d) => d, + Err(_err) => { + // println!("Failed to parse and validate operation"); + return; + } + }; + + let rust_generated = generate_usage_reporting(&doc, &doc, &None, &schema); + + let http_client = reqwest::blocking::Client::new(); + let router_response = http_client + .post(ROUTER_URL) + .json(&json!({ + "query": op_str + })) + .send(); + if let Err(err) = router_response { + println!("Bad response from router: [{err}] for operation: [{op_str:?}]"); + unsafe { ROUTER_PROCESS.get_mut() } + .unwrap() + .0 + .kill() + .unwrap(); + panic!("{}", err); + } + + let response: serde_json::Value = router_response.unwrap().json().unwrap(); + + let bridge_generated: UsageReporting = serde_json::from_value( + response + .get("extensions") + .unwrap() + .as_object() + .unwrap() + .get("usageReporting") + .unwrap() + .clone(), + ) + .unwrap(); + + if !matches!( + rust_generated.compare(&bridge_generated), + UsageReportingComparisonResult::Equal + ) { + unsafe { ROUTER_PROCESS.get_mut() } + .unwrap() + .0 + .kill() + .unwrap(); + panic!( + "New rust implementation:\n{:?}\nExisting router-bridge implementation:\n{:?}", + rust_generated.result, bridge_generated + ); + } +}); diff --git a/fuzz/fuzz_targets/federation.rs b/fuzz/fuzz_targets/federation.rs index ae60a82284..7cc951692a 100644 --- a/fuzz/fuzz_targets/federation.rs +++ b/fuzz/fuzz_targets/federation.rs @@ -15,7 +15,7 @@ const GATEWAY_FED2_URL: &str = "http://localhost:4200/graphql"; fuzz_target!(|data: &[u8]| { let generated_operation = match generate_valid_operation(data, "fuzz/supergraph.graphql") { - Ok(d) => d, + Ok((d, _)) => d, Err(_err) => { return; } diff --git a/fuzz/fuzz_targets/router.rs b/fuzz/fuzz_targets/router.rs index 7a0d9dd673..b27b07a921 100644 --- a/fuzz/fuzz_targets/router.rs +++ b/fuzz/fuzz_targets/router.rs @@ -14,7 +14,7 @@ const ROUTER_URL: &str = "http://localhost:4000"; fuzz_target!(|data: &[u8]| { let generated_operation = match generate_valid_operation(data, "fuzz/supergraph-fed2.graphql") { - Ok(d) => d, + Ok((d, _)) => d, Err(_err) => { return; } diff --git a/fuzz/router.yaml b/fuzz/router.yaml new file mode 100644 index 0000000000..5267986e49 --- /dev/null +++ b/fuzz/router.yaml @@ -0,0 +1,14 @@ +supergraph: + listen: 0.0.0.0:4100 + introspection: true +plugins: + experimental.expose_query_plan: true + apollo-test.expose_referenced_fields_by_type: true +experimental_graphql_validation_mode: both +sandbox: + enabled: true +homepage: + enabled: false +traffic_shaping: + router: + timeout: 300s \ No newline at end of file diff --git a/fuzz/src/lib.rs b/fuzz/src/lib.rs index b22056214c..a5c9eb61d5 100644 --- a/fuzz/src/lib.rs +++ b/fuzz/src/lib.rs @@ -11,7 +11,10 @@ use libfuzzer_sys::arbitrary::Unstructured; use log::debug; /// This generate an arbitrary valid GraphQL operation -pub fn generate_valid_operation(input: &[u8], schema_path: &'static str) -> Result { +pub fn generate_valid_operation( + input: &[u8], + schema_path: &'static str, +) -> Result<(String, String)> { drop(env_logger::try_init()); let contents = fs::read_to_string(schema_path).expect("cannot read file"); @@ -34,7 +37,8 @@ pub fn generate_valid_operation(input: &[u8], schema_path: &'static str) -> Resu &mut u, Document::try_from(tree.document()).expect("tree should 
not have errors"), )?; - let operation_def = gql_doc.operation_definition()?.unwrap(); + let operation_def: String = gql_doc.operation_definition()?.unwrap().into(); + let doc: String = gql_doc.finish().into(); - Ok(operation_def.into()) + Ok((operation_def, doc)) } diff --git a/fuzz/supergraph-moretypes.graphql b/fuzz/supergraph-moretypes.graphql new file mode 100644 index 0000000000..eed557f97c --- /dev/null +++ b/fuzz/supergraph-moretypes.graphql @@ -0,0 +1,169 @@ +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query + mutation: Mutation + subscription: Subscription +} + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @noArgs on FIELD | FRAGMENT_DEFINITION | FRAGMENT_SPREAD | INLINE_FRAGMENT | MUTATION | QUERY | SUBSCRIPTION + +directive @withArgs(arg1: String = "Default", arg2: String, arg3: Boolean, arg4: Int, arg5: [ID]) on FIELD | FRAGMENT_DEFINITION | FRAGMENT_SPREAD | INLINE_FRAGMENT | MUTATION | QUERY | SUBSCRIPTION + +interface AnInterface + @join__type(graph: MAIN) +{ + sharedField: String! +} + +type BasicResponse + @join__type(graph: MAIN) +{ + id: Int! + nullableId: Int +} + +type BasicTypesResponse + @join__type(graph: MAIN) +{ + nullableId: ID + nonNullId: ID! + nullableInt: Int + nonNullInt: Int! + nullableString: String + nonNullString: String! + nullableFloat: Float + nonNullFloat: Float! + nullableBoolean: Boolean + nonNullBoolean: Boolean! +} + +type EverythingResponse + @join__type(graph: MAIN) +{ + id: Int! + nullableId: Int + basicTypes: BasicTypesResponse + enumResponse: SomeEnum + interfaceResponse: AnInterface + interfaceImplementationResponse: InterfaceImplementation2 + unionType2Response: UnionType2 + listOfBools: [Boolean!]! + listOfInterfaces: [AnInterface] + objectTypeWithInputField(boolInput: Boolean, secondInput: Boolean!): ObjectTypeResponse + listOfObjects: [ObjectTypeResponse] +} + +type InterfaceImplementation1 implements AnInterface + @join__implements(graph: MAIN, interface: "AnInterface") + @join__type(graph: MAIN) +{ + sharedField: String! + implementation1Field: Int! +} + +type InterfaceImplementation2 implements AnInterface + @join__implements(graph: MAIN, interface: "AnInterface") + @join__type(graph: MAIN) +{ + sharedField: String! + implementation2Field: Float! +} + +scalar join__FieldSet + +enum join__Graph { + MAIN @join__graph(name: "main", url: "http://localhost:4001/graphql") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. 
+ """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Mutation + @join__type(graph: MAIN) +{ + noInputMutation: EverythingResponse! +} + +type ObjectTypeResponse + @join__type(graph: MAIN) +{ + stringField: String! + intField: Int! + nullableField: String +} + +type Query + @join__type(graph: MAIN) +{ + scalarInputQuery(listInput: [String!]!, stringInput: String!, nullableStringInput: String, intInput: Int!, floatInput: Float!, boolInput: Boolean!, enumInput: SomeEnum, idInput: ID!): EverythingResponse! + noInputQuery: EverythingResponse! + basicResponseQuery: BasicResponse! + scalarResponseQuery: String + defaultArgQuery(stringInput: String! = "default"): BasicResponse! + sortQuery(listInput: [String!]!, stringInput: String!, nullableStringInput: String, INTInput: Int!, floatInput: Float!, boolInput: Boolean!, enumInput: SomeEnum, idInput: ID!): SortResponse! +} + +enum SomeEnum + @join__type(graph: MAIN) +{ + SOME_VALUE_1 @join__enumValue(graph: MAIN) + SOME_VALUE_2 @join__enumValue(graph: MAIN) + SOME_VALUE_3 @join__enumValue(graph: MAIN) +} + +type SortResponse + @join__type(graph: MAIN) +{ + id: Int! + nullableId: Int + zzz: Int + aaa: Int + CCC: Int +} + +type Subscription + @join__type(graph: MAIN) +{ + noInputSubscription: EverythingResponse! +} + +type UnionType1 + @join__type(graph: MAIN) +{ + unionType1Field: String! + nullableString: String +} + +type UnionType2 + @join__type(graph: MAIN) +{ + unionType2Field: String! + nullableString: String +} \ No newline at end of file diff --git a/licenses.html b/licenses.html index e5d3dab6ea..5336ced0be 100644 --- a/licenses.html +++ b/licenses.html @@ -44,13 +44,13 @@

Third Party Licenses

Overview of licenses:

-  • Apache License 2.0 (489)
-  • MIT License (155)
-  • BSD 3-Clause "New" or "Revised" License (12)
-  • ISC License (11)
-  • BSD 2-Clause "Simplified" License (3)
+  • MIT License (90)
+  • Apache License 2.0 (70)
+  • BSD 3-Clause "New" or "Revised" License (10)
+  • ISC License (7)
   • Elastic License 2.0 (3)
   • Mozilla Public License 2.0 (3)
+  • BSD 2-Clause "Simplified" License (2)
   • Creative Commons Zero v1.0 Universal (2)
   • OpenSSL License (2)
   • Unicode License Agreement - Data Files and Software (2016) (1)
   •

@@ -443,6 +443,216 @@

    Used by:

    of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS + + +
+  • Apache License 2.0
+
+    Used by:
+
    +                                 Apache License
    +                           Version 2.0, January 2004
    +                        http://www.apache.org/licenses/
    +
    +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +   1. Definitions.
    +
    +      "License" shall mean the terms and conditions for use, reproduction,
    +      and distribution as defined by Sections 1 through 9 of this document.
    +
    +      "Licensor" shall mean the copyright owner or entity authorized by
    +      the copyright owner that is granting the License.
    +
    +      "Legal Entity" shall mean the union of the acting entity and all
    +      other entities that control, are controlled by, or are under common
    +      control with that entity. For the purposes of this definition,
    +      "control" means (i) the power, direct or indirect, to cause the
    +      direction or management of such entity, whether by contract or
    +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +      outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +      "You" (or "Your") shall mean an individual or Legal Entity
    +      exercising permissions granted by this License.
    +
    +      "Source" form shall mean the preferred form for making modifications,
    +      including but not limited to software source code, documentation
    +      source, and configuration files.
    +
    +      "Object" form shall mean any form resulting from mechanical
    +      transformation or translation of a Source form, including but
    +      not limited to compiled object code, generated documentation,
    +      and conversions to other media types.
    +
    +      "Work" shall mean the work of authorship, whether in Source or
    +      Object form, made available under the License, as indicated by a
    +      copyright notice that is included in or attached to the work
    +      (an example is provided in the Appendix below).
    +
    +      "Derivative Works" shall mean any work, whether in Source or Object
    +      form, that is based on (or derived from) the Work and for which the
    +      editorial revisions, annotations, elaborations, or other modifications
    +      represent, as a whole, an original work of authorship. For the purposes
    +      of this License, Derivative Works shall not include works that remain
    +      separable from, or merely link (or bind by name) to the interfaces of,
    +      the Work and Derivative Works thereof.
    +
    +      "Contribution" shall mean any work of authorship, including
    +      the original version of the Work and any modifications or additions
    +      to that Work or Derivative Works thereof, that is intentionally
    +      submitted to Licensor for inclusion in the Work by the copyright owner
    +      or by an individual or Legal Entity authorized to submit on behalf of
    +      the copyright owner. For the purposes of this definition, "submitted"
    +      means any form of electronic, verbal, or written communication sent
    +      to the Licensor or its representatives, including but not limited to
    +      communication on electronic mailing lists, source code control systems,
    +      and issue tracking systems that are managed by, or on behalf of, the
    +      Licensor for the purpose of discussing and improving the Work, but
    +      excluding communication that is conspicuously marked or otherwise
    +      designated in writing by the copyright owner as "Not a Contribution."
    +
    +      "Contributor" shall mean Licensor and any individual or Legal Entity
    +      on behalf of whom a Contribution has been received by Licensor and
    +      subsequently incorporated within the Work.
    +
    +   2. Grant of Copyright License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      copyright license to reproduce, prepare Derivative Works of,
    +      publicly display, publicly perform, sublicense, and distribute the
    +      Work and such Derivative Works in Source or Object form.
    +
    +   3. Grant of Patent License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      (except as stated in this section) patent license to make, have made,
    +      use, offer to sell, sell, import, and otherwise transfer the Work,
    +      where such license applies only to those patent claims licensable
    +      by such Contributor that are necessarily infringed by their
    +      Contribution(s) alone or by combination of their Contribution(s)
    +      with the Work to which such Contribution(s) was submitted. If You
    +      institute patent litigation against any entity (including a
    +      cross-claim or counterclaim in a lawsuit) alleging that the Work
    +      or a Contribution incorporated within the Work constitutes direct
    +      or contributory patent infringement, then any patent licenses
    +      granted to You under this License for that Work shall terminate
    +      as of the date such litigation is filed.
    +
    +   4. Redistribution. You may reproduce and distribute copies of the
    +      Work or Derivative Works thereof in any medium, with or without
    +      modifications, and in Source or Object form, provided that You
    +      meet the following conditions:
    +
    +      (a) You must give any other recipients of the Work or
    +          Derivative Works a copy of this License; and
    +
    +      (b) You must cause any modified files to carry prominent notices
    +          stating that You changed the files; and
    +
    +      (c) You must retain, in the Source form of any Derivative Works
    +          that You distribute, all copyright, patent, trademark, and
    +          attribution notices from the Source form of the Work,
    +          excluding those notices that do not pertain to any part of
    +          the Derivative Works; and
    +
    +      (d) If the Work includes a "NOTICE" text file as part of its
    +          distribution, then any Derivative Works that You distribute must
    +          include a readable copy of the attribution notices contained
    +          within such NOTICE file, excluding those notices that do not
    +          pertain to any part of the Derivative Works, in at least one
    +          of the following places: within a NOTICE text file distributed
    +          as part of the Derivative Works; within the Source form or
    +          documentation, if provided along with the Derivative Works; or,
    +          within a display generated by the Derivative Works, if and
    +          wherever such third-party notices normally appear. The contents
    +          of the NOTICE file are for informational purposes only and
    +          do not modify the License. You may add Your own attribution
    +          notices within Derivative Works that You distribute, alongside
    +          or as an addendum to the NOTICE text from the Work, provided
    +          that such additional attribution notices cannot be construed
    +          as modifying the License.
    +
    +      You may add Your own copyright statement to Your modifications and
    +      may provide additional or different license terms and conditions
    +      for use, reproduction, or distribution of Your modifications, or
    +      for any such Derivative Works as a whole, provided Your use,
    +      reproduction, and distribution of the Work otherwise complies with
    +      the conditions stated in this License.
    +
    +   5. Submission of Contributions. Unless You explicitly state otherwise,
    +      any Contribution intentionally submitted for inclusion in the Work
    +      by You to the Licensor shall be under the terms and conditions of
    +      this License, without any additional terms or conditions.
    +      Notwithstanding the above, nothing herein shall supersede or modify
    +      the terms of any separate license agreement you may have executed
    +      with Licensor regarding such Contributions.
    +
    +   6. Trademarks. This License does not grant permission to use the trade
    +      names, trademarks, service marks, or product names of the Licensor,
    +      except as required for reasonable and customary use in describing the
    +      origin of the Work and reproducing the content of the NOTICE file.
    +
    +   7. Disclaimer of Warranty. Unless required by applicable law or
    +      agreed to in writing, Licensor provides the Work (and each
    +      Contributor provides its Contributions) on an "AS IS" BASIS,
    +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +      implied, including, without limitation, any warranties or conditions
    +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +      PARTICULAR PURPOSE. You are solely responsible for determining the
    +      appropriateness of using or redistributing the Work and assume any
    +      risks associated with Your exercise of permissions under this License.
    +
    +   8. Limitation of Liability. In no event and under no legal theory,
    +      whether in tort (including negligence), contract, or otherwise,
    +      unless required by applicable law (such as deliberate and grossly
    +      negligent acts) or agreed to in writing, shall any Contributor be
    +      liable to You for damages, including any direct, indirect, special,
    +      incidental, or consequential damages of any character arising as a
    +      result of this License or out of the use or inability to use the
    +      Work (including but not limited to damages for loss of goodwill,
    +      work stoppage, computer failure or malfunction, or any and all
    +      other commercial damages or losses), even if such Contributor
    +      has been advised of the possibility of such damages.
    +
    +   9. Accepting Warranty or Additional Liability. While redistributing
    +      the Work or Derivative Works thereof, You may choose to offer,
    +      and charge a fee for, acceptance of support, warranty, indemnity,
    +      or other liability obligations and/or rights consistent with this
    +      License. However, in accepting such obligations, You may act only
    +      on Your own behalf and on Your sole responsibility, not on behalf
    +      of any other Contributor, and only if You agree to indemnify,
    +      defend, and hold each Contributor harmless for any liability
    +      incurred by, or claims asserted against, such Contributor by reason
    +      of your accepting any such warranty or additional liability.
    +
    +   END OF TERMS AND CONDITIONS
    +
    +   APPENDIX: How to apply the Apache License to your work.
    +
    +      To apply the Apache License to your work, attach the following
    +      boilerplate notice, with the fields enclosed by brackets "[]"
    +      replaced with your own identifying information. (Don't include
    +      the brackets!)  The text should be enclosed in the appropriate
    +      comment syntax for the file format. We also recommend that a
    +      file or class name and description of purpose be included on the
    +      same "printed page" as the copyright notice for easier
    +      identification within third-party archives.
    +
    +   Copyright 2021 Jacob Pratt
    +
    +   Licensed under the Apache License, Version 2.0 (the "License");
    +   you may not use this file except in compliance with the License.
    +   You may obtain a copy of the License at
    +
    +       http://www.apache.org/licenses/LICENSE-2.0
    +
    +   Unless required by applicable law or agreed to in writing, software
    +   distributed under the License is distributed on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +   See the License for the specific language governing permissions and
    +   limitations under the License.
     
  • @@ -4264,7 +4474,191 @@

    Used by:

    Apache License 2.0

    Used by:

    +
                                     Apache License
    +                           Version 2.0, January 2004
    +                        http://www.apache.org/licenses/
    +
    +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +   1. Definitions.
    +
    +      "License" shall mean the terms and conditions for use, reproduction,
    +      and distribution as defined by Sections 1 through 9 of this document.
    +
    +      "Licensor" shall mean the copyright owner or entity authorized by
    +      the copyright owner that is granting the License.
    +
    +      "Legal Entity" shall mean the union of the acting entity and all
    +      other entities that control, are controlled by, or are under common
    +      control with that entity. For the purposes of this definition,
    +      "control" means (i) the power, direct or indirect, to cause the
    +      direction or management of such entity, whether by contract or
    +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +      outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +      "You" (or "Your") shall mean an individual or Legal Entity
    +      exercising permissions granted by this License.
    +
    +      "Source" form shall mean the preferred form for making modifications,
    +      including but not limited to software source code, documentation
    +      source, and configuration files.
    +
    +      "Object" form shall mean any form resulting from mechanical
    +      transformation or translation of a Source form, including but
    +      not limited to compiled object code, generated documentation,
    +      and conversions to other media types.
    +
    +      "Work" shall mean the work of authorship, whether in Source or
    +      Object form, made available under the License, as indicated by a
    +      copyright notice that is included in or attached to the work
    +      (an example is provided in the Appendix below).
    +
    +      "Derivative Works" shall mean any work, whether in Source or Object
    +      form, that is based on (or derived from) the Work and for which the
    +      editorial revisions, annotations, elaborations, or other modifications
    +      represent, as a whole, an original work of authorship. For the purposes
    +      of this License, Derivative Works shall not include works that remain
    +      separable from, or merely link (or bind by name) to the interfaces of,
    +      the Work and Derivative Works thereof.
    +
    +      "Contribution" shall mean any work of authorship, including
    +      the original version of the Work and any modifications or additions
    +      to that Work or Derivative Works thereof, that is intentionally
    +      submitted to Licensor for inclusion in the Work by the copyright owner
    +      or by an individual or Legal Entity authorized to submit on behalf of
    +      the copyright owner. For the purposes of this definition, "submitted"
    +      means any form of electronic, verbal, or written communication sent
    +      to the Licensor or its representatives, including but not limited to
    +      communication on electronic mailing lists, source code control systems,
    +      and issue tracking systems that are managed by, or on behalf of, the
    +      Licensor for the purpose of discussing and improving the Work, but
    +      excluding communication that is conspicuously marked or otherwise
    +      designated in writing by the copyright owner as "Not a Contribution."
    +
    +      "Contributor" shall mean Licensor and any individual or Legal Entity
    +      on behalf of whom a Contribution has been received by Licensor and
    +      subsequently incorporated within the Work.
    +
    +   2. Grant of Copyright License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      copyright license to reproduce, prepare Derivative Works of,
    +      publicly display, publicly perform, sublicense, and distribute the
    +      Work and such Derivative Works in Source or Object form.
    +
    +   3. Grant of Patent License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      (except as stated in this section) patent license to make, have made,
    +      use, offer to sell, sell, import, and otherwise transfer the Work,
    +      where such license applies only to those patent claims licensable
    +      by such Contributor that are necessarily infringed by their
    +      Contribution(s) alone or by combination of their Contribution(s)
    +      with the Work to which such Contribution(s) was submitted. If You
    +      institute patent litigation against any entity (including a
    +      cross-claim or counterclaim in a lawsuit) alleging that the Work
    +      or a Contribution incorporated within the Work constitutes direct
    +      or contributory patent infringement, then any patent licenses
    +      granted to You under this License for that Work shall terminate
    +      as of the date such litigation is filed.
    +
    +   4. Redistribution. You may reproduce and distribute copies of the
    +      Work or Derivative Works thereof in any medium, with or without
    +      modifications, and in Source or Object form, provided that You
    +      meet the following conditions:
    +
    +      (a) You must give any other recipients of the Work or
    +          Derivative Works a copy of this License; and
    +
    +      (b) You must cause any modified files to carry prominent notices
    +          stating that You changed the files; and
    +
    +      (c) You must retain, in the Source form of any Derivative Works
    +          that You distribute, all copyright, patent, trademark, and
    +          attribution notices from the Source form of the Work,
    +          excluding those notices that do not pertain to any part of
    +          the Derivative Works; and
    +
    +      (d) If the Work includes a "NOTICE" text file as part of its
    +          distribution, then any Derivative Works that You distribute must
    +          include a readable copy of the attribution notices contained
    +          within such NOTICE file, excluding those notices that do not
    +          pertain to any part of the Derivative Works, in at least one
    +          of the following places: within a NOTICE text file distributed
    +          as part of the Derivative Works; within the Source form or
    +          documentation, if provided along with the Derivative Works; or,
    +          within a display generated by the Derivative Works, if and
    +          wherever such third-party notices normally appear. The contents
    +          of the NOTICE file are for informational purposes only and
    +          do not modify the License. You may add Your own attribution
    +          notices within Derivative Works that You distribute, alongside
    +          or as an addendum to the NOTICE text from the Work, provided
    +          that such additional attribution notices cannot be construed
    +          as modifying the License.
    +
    +      You may add Your own copyright statement to Your modifications and
    +      may provide additional or different license terms and conditions
    +      for use, reproduction, or distribution of Your modifications, or
    +      for any such Derivative Works as a whole, provided Your use,
    +      reproduction, and distribution of the Work otherwise complies with
    +      the conditions stated in this License.
    +
    +   5. Submission of Contributions. Unless You explicitly state otherwise,
    +      any Contribution intentionally submitted for inclusion in the Work
    +      by You to the Licensor shall be under the terms and conditions of
    +      this License, without any additional terms or conditions.
    +      Notwithstanding the above, nothing herein shall supersede or modify
    +      the terms of any separate license agreement you may have executed
    +      with Licensor regarding such Contributions.
    +
    +   6. Trademarks. This License does not grant permission to use the trade
    +      names, trademarks, service marks, or product names of the Licensor,
    +      except as required for reasonable and customary use in describing the
    +      origin of the Work and reproducing the content of the NOTICE file.
    +
    +   7. Disclaimer of Warranty. Unless required by applicable law or
    +      agreed to in writing, Licensor provides the Work (and each
    +      Contributor provides its Contributions) on an "AS IS" BASIS,
    +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +      implied, including, without limitation, any warranties or conditions
    +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +      PARTICULAR PURPOSE. You are solely responsible for determining the
    +      appropriateness of using or redistributing the Work and assume any
    +      risks associated with Your exercise of permissions under this License.
    +
    +   8. Limitation of Liability. In no event and under no legal theory,
    +      whether in tort (including negligence), contract, or otherwise,
    +      unless required by applicable law (such as deliberate and grossly
    +      negligent acts) or agreed to in writing, shall any Contributor be
    +      liable to You for damages, including any direct, indirect, special,
    +      incidental, or consequential damages of any character arising as a
    +      result of this License or out of the use or inability to use the
    +      Work (including but not limited to damages for loss of goodwill,
    +      work stoppage, computer failure or malfunction, or any and all
    +      other commercial damages or losses), even if such Contributor
    +      has been advised of the possibility of such damages.
    +
    +   9. Accepting Warranty or Additional Liability. While redistributing
    +      the Work or Derivative Works thereof, You may choose to offer,
    +      and charge a fee for, acceptance of support, warranty, indemnity,
    +      or other liability obligations and/or rights consistent with this
    +      License. However, in accepting such obligations, You may act only
    +      on Your own behalf and on Your sole responsibility, not on behalf
    +      of any other Contributor, and only if You agree to indemnify,
    +      defend, and hold each Contributor harmless for any liability
    +      incurred by, or claims asserted against, such Contributor by reason
    +      of your accepting any such warranty or additional liability.
    +
    +   END OF TERMS AND CONDITIONS
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    +
                                     Apache License
                                Version 2.0, January 2004
    @@ -4442,6 +4836,20 @@ 

    Used by:

of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS
+
+Copyright 2019 Yoshua Wuyts
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
  • @@ -8003,13 +8411,26 @@

    Used by:

  • arbitrary
  • arc-swap
  • async-channel
  • +
  • async-channel
  • async-compression
  • +
  • async-executor
  • +
  • async-global-executor
  • +
  • async-io
  • +
  • async-io
  • +
  • async-lock
  • +
  • async-lock
  • +
  • async-process
  • +
  • async-signal
  • +
  • async-std
  • +
  • async-task
  • +
  • atomic-waker
  • autocfg
  • backtrace
  • base64
  • base64
  • bitflags
  • bitflags
  • +
  • blocking
  • bstr
  • bumpalo
  • bytes-utils
  • @@ -8032,6 +8453,9 @@

    Used by:

  • envmnt
  • equivalent
  • event-listener
  • +
  • event-listener
  • +
  • event-listener
  • +
  • event-listener-strategy
  • fastrand
  • fastrand
  • filetime
  • @@ -8042,6 +8466,7 @@

    Used by:

  • fraction
  • fsio
  • futures-lite
  • +
  • futures-lite
  • gimli
  • git2
  • group
  • @@ -8049,7 +8474,6 @@

    Used by:

  • hashbrown
  • hdrhistogram
  • heck
  • -
  • heck
  • hermit-abi
  • httparse
  • humantime-serde
  • @@ -8061,6 +8485,7 @@

    Used by:

  • indexmap
  • indexmap
  • inventory
  • +
  • io-lifetimes
  • ipconfig
  • itertools
  • itertools
  • @@ -8074,6 +8499,7 @@

    Used by:

  • libz-ng-sys
  • libz-sys
  • linux-raw-sys
  • +
  • linux-raw-sys
  • lock_api
  • log
  • maplit
  • @@ -8098,8 +8524,6 @@

    Used by:

  • openssl-src
  • parking
  • parking_lot
  • -
  • parking_lot
  • -
  • parking_lot_core
  • parking_lot_core
  • percent-encoding
  • pest
  • @@ -8107,9 +8531,10 @@

    Used by:

  • pest_generator
  • pest_meta
  • petgraph
  • +
  • piper
  • pkg-config
  • platforms
  • -
  • proc-macro-hack
  • +
  • polling
  • proc-macro2
  • prost
  • prost
  • @@ -8131,11 +8556,10 @@

    Used by:

  • rustc_version
  • rustc_version
  • rustix
  • +
  • rustix
  • rustls
  • rustls-native-certs
  • rustls-pemfile
  • -
  • salsa
  • -
  • salsa-macros
  • scopeguard
  • sct
  • security-framework
  • @@ -8171,11 +8595,11 @@

    Used by:

  • unicode-bidi
  • unicode-id
  • unicode-normalization
  • -
  • unicode-segmentation
  • unicode-width
  • unicode-xid
  • url
  • uuid
  • +
  • value-bag
  • version_check
  • waker-fn
  • wasi
  • @@ -10681,6 +11105,7 @@

    Used by:

                                  Apache License
    @@ -11333,9 +11758,7 @@ 

    Used by:

    Apache License 2.0

    Used by:

    ../../LICENSE-APACHE
    @@ -11987,11 +12410,14 @@

    Used by:

    Apache License 2.0

    Used by:

      +
    • apollo-compiler
    • +
    • apollo-parser
    • curve25519-dalek-derive
    • deadpool-runtime
    • deno-proc-macro-rules
    • deno-proc-macro-rules-macros
    • dunce
    • +
    • gloo-timers
    • graphql-introspection-query
    • graphql_client
    • graphql_client_codegen
    • @@ -12160,7 +12586,6 @@

      Used by:

      @@ -12483,7 +12908,7 @@

      Used by:

      BSD 3-Clause "New" or "Revised" License

      Used by:

      Copyright (c) 2016-2021 isis agora lovecruft. All rights reserved.
       Copyright (c) 2016-2021 Henry de Valence. All rights reserved.
      @@ -12629,6 +13054,7 @@ 

BSD 3-Clause "New" or "Revised" License

      Used by:

      Copyright (c) <year> <owner>. 
       
      @@ -12960,12 +13386,11 @@ 

      Used by:

      Elastic License 2.0

      Used by:

      -
      Elastic License 2.0
      +                
      Copyright 2021 Apollo Graph, Inc.
       
      -URL: https://www.elastic.co/licensing/elastic-license
      +Elastic License 2.0
       
       ## Acceptance
       
      @@ -13056,14 +13481,121 @@ 

      Used by:

**use** means anything you do with the software requiring one of your licenses.

**trademark** means trademarks, service marks, and similar rights.
+
+--------------------------------------------------------------------------------
+
+
    • +

      Elastic License 2.0

      +

      Used by:

+
+
      Copyright 2021 Apollo Graph, Inc.
      +
      +Source code in this repository is covered by (i) the Elastic License 2.0 or (ii) an MIT compatible license, in each case, as designated by a licensing file in a subdirectory or file header. The default throughout the repository is a license under the Elastic License 2.0, unless a file header or a licensing file in a subdirectory specifies another license.
      +
      +--------------------------------------------------------------------------------
      +
      +Elastic License 2.0
      +
      +## Acceptance
      +
      +By using the software, you agree to all of the terms and conditions below.
      +
      +## Copyright License
      +
      +The licensor grants you a non-exclusive, royalty-free, worldwide,
      +non-sublicensable, non-transferable license to use, copy, distribute, make
      +available, and prepare derivative works of the software, in each case subject to
      +the limitations and conditions below.
      +
      +## Limitations
      +
      +You may not provide the software to third parties as a hosted or managed
      +service, where the service provides users with access to any substantial set of
      +the features or functionality of the software.
      +
      +You may not move, change, disable, or circumvent the license key functionality
      +in the software, and you may not remove or obscure any functionality in the
      +software that is protected by the license key.
      +
      +You may not alter, remove, or obscure any licensing, copyright, or other notices
+of the licensor in the software. Any use of the licensor's trademarks is subject
      +to applicable law.
      +
      +## Patents
      +
      +The licensor grants you a license, under any patent claims the licensor can
      +license, or becomes able to license, to make, have made, use, sell, offer for
      +sale, import and have imported the software, in each case subject to the
      +limitations and conditions in this license. This license does not cover any
      +patent claims that you cause to be infringed by modifications or additions to
      +the software. If you or your company make any written claim that the software
      +infringes or contributes to infringement of any patent, your patent license for
      +the software granted under these terms ends immediately. If your company makes
      +such a claim, your patent license ends immediately for work on behalf of your
      +company.
      +
      +## Notices
      +
      +You must ensure that anyone who gets a copy of any part of the software from you
      +also gets a copy of these terms.
      +
      +If you modify the software, you must include in any modified copies of the
      +software prominent notices stating that you have modified the software.
      +
      +## No Other Rights
      +
      +These terms do not imply any licenses other than those expressly granted in
      +these terms.
      +
      +## Termination
      +
      +If you use the software in violation of these terms, such use is not licensed,
      +and your licenses will automatically terminate. If the licensor provides you
      +with a notice of your violation, and you cease all violation of this license no
      +later than 30 days after you receive that notice, your licenses will be
      +reinstated retroactively. However, if you violate these terms after such
      +reinstatement, any additional violation of these terms will cause your licenses
      +to terminate automatically and permanently.
      +
      +## No Liability
      +
      +*As far as the law allows, the software comes as is, without any warranty or
      +condition, and the licensor will not be liable to you for any damages arising
      +out of these terms or the use or nature of the software, under any kind of
      +legal claim.*
      +
      +## Definitions
      +
      +The **licensor** is the entity offering these terms, and the **software** is the
      +software the licensor makes available under these terms, including any portion
      +of it.
      +
      +**you** refers to the individual or entity agreeing to these terms.
      +
      +**your company** is any legal entity, sole proprietorship, or other kind of
      +organization that you work for, plus all organizations that have control over,
      +are under the control of, or are under common control with that
      +organization. **control** means ownership of substantially all the assets of an
      +entity, or the power to direct its management and policies by vote, contract, or
      +otherwise. Control can be direct or indirect.
      +
      +**your licenses** are all the licenses granted to you for the software under
      +these terms.
      +
      +**use** means anything you do with the software requiring one of your licenses.
      +
      +**trademark** means trademarks, service marks, and similar rights.
      +
      +--------------------------------------------------------------------------------
    • ISC License

      Used by:

         Copyright 2015-2016 Brian Smith.
       
      @@ -13084,7 +13616,6 @@ 

      ISC License

      Used by:

      /* Copyright (c) 2015, Google Inc.
        *
      @@ -13190,6 +13721,7 @@ 

      ISC License

      Used by:

      ISC License:
      @@ -13509,7 +14041,6 @@ 

      MIT License

      Used by:

      Copyright (c) 2015-2016 the fiat-crypto authors (see
       https://github.com/mit-plv/fiat-crypto/blob/master/AUTHORS).
      @@ -14926,6 +15457,7 @@ 

      Used by:

    • jsonschema
    • lazy-regex-proc_macros
    • number_prefix
    • +
    • ring
    • serde_v8
    • v8
    • valuable
    • @@ -15676,34 +16208,6 @@

      Used by:

      SOFTWARE.
      -
    • -
    • -

      MIT License

      -

      Used by:

-
-
      The MIT License (MIT)
      -
      -Copyright (c) 2019 Simon Heath
      -
      -Permission is hereby granted, free of charge, to any person obtaining a copy
      -of this software and associated documentation files (the "Software"), to deal
      -in the Software without restriction, including without limitation the rights
      -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
      -copies of the Software, and to permit persons to whom the Software is
      -furnished to do so, subject to the following conditions:
      -
      -The above copyright notice and this permission notice shall be included in all
      -copies or substantial portions of the Software.
      -
      -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
      -SOFTWARE.
    • MIT License

      @@ -16632,7 +17136,6 @@

      OpenSSL License

      Used by:

      /* ====================================================================
        * Copyright (c) 1998-2011 The OpenSSL Project.  All rights reserved.
      @@ -16687,6 +17190,62 @@ 

      Used by:

      * Hudson (tjh@cryptsoft.com). * */
      +
    • +
    • +

      OpenSSL License

      +

      Used by:

+
+
      OpenSSL License
      +
      +Copyright (c) 1998-2008 The OpenSSL Project. All rights reserved.
      +
      +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
      +
      +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
      +
      +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
      +
      +3. All advertising materials mentioning features or use of this software must display the following acknowledgment: "This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
      +
      +4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to endorse or promote products derived from this software without prior written permission. For written permission, please contact openssl-core@openssl.org.
      +
      +5. Products derived from this software may not be called "OpenSSL" nor may "OpenSSL" appear in their names without prior written permission of the OpenSSL Project.
      +
      +6. Redistributions of any form whatsoever must retain the following acknowledgment: "This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/)"
      +
      +THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +This product includes cryptographic software written by Eric Young (eay@cryptsoft.com). This product includes software written by Tim Hudson (tjh@cryptsoft.com).
      +
      +
      +Original SSLeay License
      +
      +Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) All rights reserved.
      +
      +This package is an SSL implementation written by Eric Young (eay@cryptsoft.com). The implementation was written so as to conform with Netscapes SSL.
      +
      +This library is free for commercial and non-commercial use as long as the following conditions are aheared to. The following conditions apply to all code found in this distribution, be it the RC4, RSA, lhash, DES, etc., code; not just the SSL code. The SSL documentation included with this distribution is covered by the same copyright terms except that the holder is Tim Hudson (tjh@cryptsoft.com).
      +
      +Copyright remains Eric Young's, and as such any Copyright notices in the code are not to be removed. If this package is used in a product, Eric Young should be given attribution as the author of the parts of the library used. This can be in the form of a textual message at program startup or in documentation (online or textual) provided with the package.
      +
      +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
      +
      +1. Redistributions of source code must retain the copyright notice, this list of conditions and the following disclaimer.
      +
      +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
      +
      +3. All advertising materials mentioning features or use of this software must display the following acknowledgement:
      +"This product includes cryptographic software written by Eric Young (eay@cryptsoft.com)"
      +The word 'cryptographic' can be left out if the rouines from the library being used are not cryptographic related :-).
      +
      +4. If you include any Windows specific code (or a derivative thereof) from the apps directory (application code) you must include an acknowledgement: "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
      +
      +THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      +
      +The licence and distribution terms for any publically available version or derivative of this code cannot be changed. i.e. this code cannot simply be copied and put under another distribution licence [including the GNU Public Licence.]
      +
    • Unicode License Agreement - Data Files and Software (2016)

From ad8e0c645bd234be184afff0ed5e81e8a1da814e Mon Sep 17 00:00:00 2001
From: Simon Sapin
Date: Fri, 12 Apr 2024 12:49:30 +0200
Subject: [PATCH 12/46] Fix error: failed to select a version for `router-bridge` (#4946)

router-bridge was added as a dependency of router-fuzz in a recent PR, but the dependency in apollo-router had been upgraded in the meantime.

This switches to specify common dependencies in the workspace, so that version numbers are de-duplicated.

Note that `Cargo.lock` is not changed by this PR.
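One concrete payoff of the shared `[workspace.dependencies]` table: anything that needs the pinned `router-bridge` version can now read it from a single place, the workspace root manifest. A minimal sketch of that lookup follows; the helper name is hypothetical, and the build-script and test hunks below do the same thing with an explicit `expect` at every step:

    use std::fs;
    use std::path::PathBuf;

    // Sketch: read the pinned `router-bridge` version from the workspace
    // manifest. `basic_toml` can deserialize straight into a
    // `serde_json::Value`, so the lookup is plain value traversal, and
    // missing keys simply yield `Value::Null`.
    fn router_bridge_version() -> String {
        let root_manifest = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
            .parent()
            .expect("crate should live inside the workspace")
            .join("Cargo.toml");
        let manifest: serde_json::Value = basic_toml::from_str(
            &fs::read_to_string(root_manifest).expect("could not read Cargo.toml"),
        )
        .expect("could not parse Cargo.toml");
        manifest["workspace"]["dependencies"]["router-bridge"]
            .as_str()
            .expect("router-bridge version should be a string")
            .to_string()
    }

    fn main() {
        println!("pinned router-bridge: {}", router_bridge_version());
    }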
---

**Checklist**

Complete the checklist (and note appropriate exceptions) before the PR is marked ready-for-review.

- [ ] Changes are compatible[^1]
- [ ] Documentation[^2] completed
- [ ] Performance impact assessed and acceptable
- Tests added and passing[^3]
  - [ ] Unit Tests
  - [ ] Integration Tests
  - [ ] Manual Tests

**Exceptions**

*Note any exceptions here*

**Notes**

[^1]: It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this.
[^2]: Configuration is an important part of many changes. Where applicable please try to document configuration examples.
[^3]: Tick whichever testing boxes are applicable. If you are adding Manual Tests, please document the manual testing (extensively) in the Exceptions.
---
 .cargo/{config => config.toml}                |  0
 Cargo.toml                                    | 30 +++++++++++
 apollo-router-benchmarks/Cargo.toml           | 14 +++--
 apollo-router/Cargo.toml                      | 52 +++++++++----------
 apollo-router/build/main.rs                   | 13 ++++-
 .../src/query_planner/bridge_query_planner.rs | 13 ++++-
 fuzz/Cargo.toml                               | 26 +++++-----
 7 files changed, 97 insertions(+), 51 deletions(-)
 rename .cargo/{config => config.toml} (100%)

diff --git a/.cargo/config b/.cargo/config.toml
similarity index 100%
rename from .cargo/config
rename to .cargo/config.toml
diff --git a/Cargo.toml b/Cargo.toml
index 4861dbc154..7a87db0ece 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -41,3 +41,33 @@ incremental = false
[profile.release-dhat]
inherits = "release"
debug = 1
+
+# Dependencies used in more than one place are specified here in order to keep versions in sync:
+# https://doc.rust-lang.org/cargo/reference/workspaces.html#the-dependencies-table
+[workspace.dependencies]
+apollo-compiler = "=1.0.0-beta.15"
+apollo-parser = "0.7.6"
+apollo-smith = { version = "0.5.0", features = ["parser-impl"] }
+async-trait = "0.1.77"
+http = "0.2.11"
+once_cell = "1.19.0"
+reqwest = { version = "0.11.24", default-features = false, features = [
+    "rustls-tls",
+    "rustls-native-certs",
+    "gzip",
+    "json",
+    "stream",
+] }
+
+# note: this dependency should _always_ be pinned, prefix the version with an `=`
+router-bridge = "=0.5.18+v2.7.2"
+
+schemars = { version = "0.8.16", features = ["url"] }
+serde = { version = "1.0.197", features = ["derive", "rc"] }
+serde_json = { version = "1.0.114", features = [
+    "preserve_order",
+    "float_roundtrip",
+] }
+serde_json_bytes = { version = "0.2.2", features = ["preserve_order"] }
+tokio = { version = "1.36.0", features = ["full"] }
+tower = { version = "0.4.13", features = ["full"] }
diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml
index 63aa39063d..c7c5e4f8cc 100644
--- a/apollo-router-benchmarks/Cargo.toml
+++ b/apollo-router-benchmarks/Cargo.toml
@@ -6,20 +6,18 @@ edition = "2021"
license = "Elastic-2.0"
publish = false

-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
[dev-dependencies]
apollo-router = { path = "../apollo-router" }
criterion = { version = "0.5", features = ["async_tokio", "async_futures"] }
memory-stats = "1.1.0"
-once_cell = "1"
-serde_json = { version = "1", features = ["preserve_order", "float_roundtrip"] }
-tokio = { version = "1", features = ["full"] }
-tower = "0.4"
+once_cell.workspace = true
+serde_json.workspace = true
+tokio.workspace = true
+tower.workspace = true

[build-dependencies]
-apollo-smith = { version = "0.5.0", features = ["parser-impl"] }
-apollo-parser = "0.7.6"
+apollo-smith.workspace = true
+apollo-parser.workspace = true
arbitrary = "1.3.2"

[[bench]]
version = "0.8.0", default-features = false, features = [ + "client", +] } [target.'cfg(target_os = "linux")'.dependencies] tikv-jemallocator = "0.5" @@ -293,10 +285,15 @@ maplit = "1.0.2" memchr = { version = "2.7.1", default-features = false } mockall = "0.11.4" num-traits = "0.2.18" -once_cell = "1.19.0" +once_cell.workspace = true opentelemetry-stdout = { version = "0.1.0", features = ["trace"] } opentelemetry = { version = "0.20.0", features = ["testing"] } -opentelemetry-proto = { version="0.5.0", features = ["metrics", "trace", "gen-tonic-messages", "with-serde"] } +opentelemetry-proto = { version = "0.5.0", features = [ + "metrics", + "trace", + "gen-tonic-messages", + "with-serde", +] } p256 = "0.13.2" rand_core = "0.6.4" reqwest = { version = "0.11.24", default-features = false, features = [ @@ -333,12 +330,15 @@ wiremock = "0.5.22" rstack = { version = "0.3.3", features = ["dw"], default-features = false } [target.'cfg(unix)'.dev-dependencies] -hyperlocal = { version = "0.8.0", default-features = false, features = ["client", "server"] } +hyperlocal = { version = "0.8.0", default-features = false, features = [ + "client", + "server", +] } [build-dependencies] tonic-build = "0.9.2" basic-toml = "0.1" -serde_json = "1.0.114" +serde_json.workspace = true [[test]] name = "integration_tests" diff --git a/apollo-router/build/main.rs b/apollo-router/build/main.rs index 763d894df0..c5b1a4e7ad 100644 --- a/apollo-router/build/main.rs +++ b/apollo-router/build/main.rs @@ -5,12 +5,21 @@ mod studio; fn main() -> Result<(), Box> { let cargo_manifest: serde_json::Value = basic_toml::from_str( - &fs::read_to_string(PathBuf::from(&env!("CARGO_MANIFEST_DIR")).join("Cargo.toml")) - .expect("could not read Cargo.toml"), + &fs::read_to_string( + PathBuf::from(&env!("CARGO_MANIFEST_DIR")) + .parent() + .unwrap() + .join("Cargo.toml"), + ) + .expect("could not read Cargo.toml"), ) .expect("could not parse Cargo.toml"); let router_bridge = cargo_manifest + .get("workspace") + .expect("Cargo.toml does not contain workspace") + .as_object() + .expect("Cargo.toml workspace key is not an object") .get("dependencies") .expect("Cargo.toml does not contain dependencies") .as_object() diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index a051548cf2..1e24f15bbd 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -1484,11 +1484,20 @@ mod tests { #[test] fn router_bridge_dependency_is_pinned() { let cargo_manifest: serde_json::Value = basic_toml::from_str( - &fs::read_to_string(PathBuf::from(&env!("CARGO_MANIFEST_DIR")).join("Cargo.toml")) - .expect("could not read Cargo.toml"), + &fs::read_to_string( + PathBuf::from(&env!("CARGO_MANIFEST_DIR")) + .parent() + .unwrap() + .join("Cargo.toml"), + ) + .expect("could not read Cargo.toml"), ) .expect("could not parse Cargo.toml"); let router_bridge_version = cargo_manifest + .get("workspace") + .expect("Cargo.toml does not contain workspace") + .as_object() + .expect("Cargo.toml workspace key is not an object") .get("dependencies") .expect("Cargo.toml does not contain dependencies") .as_object() diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml index f26f53a03b..05278801ee 100644 --- a/fuzz/Cargo.toml +++ b/fuzz/Cargo.toml @@ -11,25 +11,25 @@ cargo-fuzz = true [dependencies] libfuzzer-sys = "0.4" -apollo-compiler = "=1.0.0-beta.15" -apollo-smith = { version = "0.5.0", features = ["parser-impl"] } -apollo-parser = 
"0.7.6" +apollo-compiler.workspace = true +apollo-smith.workspace = true +apollo-parser.workspace = true env_logger = "0.10.2" log = "0.4" -reqwest = { version = "0.11", default-features = false, features = ["json", "blocking"] } -router-bridge = "=0.5.17+v2.7.2" -serde_json = "1" -tokio = { version = "1.36.0", features = ["full"] } +reqwest = { workspace = true, features = ["json", "blocking"] } +router-bridge.workspace = true +serde_json.workspace = true +tokio.workspace = true [dev-dependencies] anyhow = "1" apollo-router = { path = "../apollo-router" } -async-trait = "0.1" -schemars = { version = "0.8", features = ["url"] } -serde = "1" -serde_json_bytes = "0.2" -tower = { version = "0.4", features = ["full"] } -http = "0.2" +async-trait.workspace = true +schemars.workspace = true +serde.workspace = true +serde_json_bytes.workspace = true +tower.workspace = true +http.workspace = true [[example]] name = "usage_reporting_router" From 54d299a065bfcb82846afd453e47ec484c7e5a9d Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Fri, 12 Apr 2024 18:43:33 +0200 Subject: [PATCH 13/46] align coprocessor metric creation (#4930) There are currently 2 different ways to create metrics, with slight incompatibilities. This makes sure that the execution stage coprocessor metrics are generated in the same way as the other stages --- .changesets/fix_geal_coprocessor_metrics.md | 5 +++ apollo-router/src/plugins/coprocessor/mod.rs | 44 ++++++++++--------- .../src/plugins/coprocessor/supergraph.rs | 22 +++++----- 3 files changed, 41 insertions(+), 30 deletions(-) create mode 100644 .changesets/fix_geal_coprocessor_metrics.md diff --git a/.changesets/fix_geal_coprocessor_metrics.md b/.changesets/fix_geal_coprocessor_metrics.md new file mode 100644 index 0000000000..03248ee1ba --- /dev/null +++ b/.changesets/fix_geal_coprocessor_metrics.md @@ -0,0 +1,5 @@ +### align coprocessor metric creation ([PR #4930](https://github.com/apollographql/router/pull/4930)) + +There are currently 2 different ways to create metrics, with slight incompatibilities. 
---

**Checklist**

Complete the checklist (and note appropriate exceptions) before the PR is marked ready-for-review.

- [ ] Changes are compatible[^1]
- [ ] Documentation[^2] completed
- [ ] Performance impact assessed and acceptable
- Tests added and passing[^3]
  - [ ] Unit Tests
  - [ ] Integration Tests
  - [ ] Manual Tests

**Exceptions**

*Note any exceptions here*

**Notes**

[^1]: It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this.
[^2]: Configuration is an important part of many changes. Where applicable please try to document configuration examples.
[^3]: Tick whichever testing boxes are applicable. If you are adding Manual Tests, please document the manual testing (extensively) in the Exceptions.
---
 .changesets/fix_geal_coprocessor_metrics.md  |  5 +++
 apollo-router/src/plugins/coprocessor/mod.rs | 44 ++++++++++---------
 .../src/plugins/coprocessor/supergraph.rs    | 22 +++++-----
 3 files changed, 41 insertions(+), 30 deletions(-)
 create mode 100644 .changesets/fix_geal_coprocessor_metrics.md

diff --git a/.changesets/fix_geal_coprocessor_metrics.md b/.changesets/fix_geal_coprocessor_metrics.md
new file mode 100644
index 0000000000..03248ee1ba
--- /dev/null
+++ b/.changesets/fix_geal_coprocessor_metrics.md
@@ -0,0 +1,5 @@
+### align coprocessor metric creation ([PR #4930](https://github.com/apollographql/router/pull/4930))
+
+There are currently 2 different ways to create metrics, with slight incompatibilities. This makes sure that the execution stage coprocessor metrics are generated in the same way as the other stages
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4930
\ No newline at end of file
diff --git a/apollo-router/src/plugins/coprocessor/mod.rs b/apollo-router/src/plugins/coprocessor/mod.rs
index a79ff4da2b..2914f20ec5 100644
--- a/apollo-router/src/plugins/coprocessor/mod.rs
+++ b/apollo-router/src/plugins/coprocessor/mod.rs
@@ -359,11 +359,12 @@ impl RouterStage {
                );
                error
            });
-            tracing::info!(
-                monotonic_counter.apollo.router.operations.coprocessor = 1u64,
-                coprocessor.stage = %PipelineStep::RouterRequest,
-                coprocessor.succeeded = succeeded,
-                "Total operations with co-processors enabled"
+            u64_counter!(
+                "apollo.router.operations.coprocessor",
+                "Total operations with co-processors enabled",
+                1,
+                "coprocessor.stage" = PipelineStep::RouterRequest,
+                "coprocessor.succeeded" = succeeded
            );
            result
        }
@@ -397,11 +398,12 @@ impl RouterStage {
                );
                error
            });
-            tracing::info!(
-                monotonic_counter.apollo.router.operations.coprocessor = 1u64,
-                coprocessor.stage = %PipelineStep::RouterResponse,
-                coprocessor.succeeded = succeeded,
-                "Total operations with co-processors enabled"
+            u64_counter!(
+                "apollo.router.operations.coprocessor",
+                "Total operations with co-processors enabled",
+                1,
+                "coprocessor.stage" = PipelineStep::RouterResponse,
+                "coprocessor.succeeded" = succeeded
            );
            result
        }
@@ -491,11 +493,12 @@ impl SubgraphStage {
                );
                error
            });
-            tracing::info!(
-                monotonic_counter.apollo.router.operations.coprocessor = 1u64,
-                coprocessor.stage = %PipelineStep::SubgraphRequest,
-                coprocessor.succeeded = succeeded,
-                "Total operations with co-processors enabled"
+            u64_counter!(
+                "apollo.router.operations.coprocessor",
+                "Total operations with co-processors enabled",
+                1,
+                "coprocessor.stage" = PipelineStep::SubgraphRequest,
+                "coprocessor.succeeded" = succeeded
            );
            result
        }
@@ -530,11 +533,12 @@ impl SubgraphStage {
                );
                error
            });
-            tracing::info!(
-                monotonic_counter.apollo.router.operations.coprocessor = 1u64,
-                coprocessor.stage = %PipelineStep::SubgraphResponse,
-                coprocessor.succeeded = succeeded,
-                "Total operations with co-processors enabled"
+            u64_counter!(
+                "apollo.router.operations.coprocessor",
+                "Total operations with co-processors enabled",
+                1,
+                "coprocessor.stage" = PipelineStep::SubgraphResponse,
+                "coprocessor.succeeded" = succeeded
            );
            result
        }
diff --git a/apollo-router/src/plugins/coprocessor/supergraph.rs b/apollo-router/src/plugins/coprocessor/supergraph.rs
index 7e6e313f42..79202a0eb0 100644
--- a/apollo-router/src/plugins/coprocessor/supergraph.rs
+++ b/apollo-router/src/plugins/coprocessor/supergraph.rs
@@ -105,11 +105,12 @@ impl SupergraphStage {
                );
                error
            });
-            tracing::info!(
-                monotonic_counter.apollo.router.operations.coprocessor = 1u64,
-                coprocessor.stage = %PipelineStep::SupergraphRequest,
-                coprocessor.succeeded = succeeded,
-                "Total operations with co-processors enabled"
+            u64_counter!(
+                "apollo.router.operations.coprocessor",
+                "Total operations with co-processors enabled",
+                1,
+                "coprocessor.stage" = PipelineStep::SupergraphRequest,
+                "coprocessor.succeeded" = succeeded
            );
            result
        }
"apollo.router.operations.coprocessor", + "Total operations with co-processors enabled", + 1, + "coprocessor.stage" = PipelineStep::SupergraphResponse, + "coprocessor.succeeded" = succeeded ); result } From 303cee1f2df30c1ce3f0246d7f2de55aa097e1c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Mon, 15 Apr 2024 12:11:46 +0200 Subject: [PATCH 14/46] Rust validation follow-up: backwards compatibility fixes (#4949) Error messages are now mostly the same as the graphql-js messages from before (where possible). GraphQL syntax errors are reported as `PARSING_ERROR` again instead of as `GRAPHQL_VALIDATION_ERROR`. `SpecError::ParsingError` is replaced by `SpecError::ParseError` which contains structured error information similar to `SpecError::ValidationError`. The other uses of `ParsingError` are replaced by `TransformError`, as most of them are in query transforms/filtering. The error messages for `TransformError` remain the same. There will still be differences in error messages, especially when querying undefined fields etc, because previously those would hit an error case deep inside the router whereas now they will be rejected early on. This is an easily explainable difference with obvious benefits for users, so I think it's acceptable. --- **Checklist** Complete the checklist (and note appropriate exceptions) before the PR is marked ready-for-review. - [ ] Changes are compatible[^1] - [ ] Documentation[^2] completed - [ ] Performance impact assessed and acceptable - Tests added and passing[^3] - [ ] Unit Tests - [ ] Integration Tests - [ ] Manual Tests **Exceptions** *Note any exceptions here* **Notes** [^1]: It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this. [^2]: Configuration is an important part of many changes. Where applicable please try to document configuration examples. [^3]: Tick whichever testing boxes are applicable. If you are adding Manual Tests, please document the manual testing (extensively) in the Exceptions. --- .../feat_geal_remove_legacy_validation.md | 4 +- Cargo.lock | 8 +- Cargo.toml | 2 +- apollo-router/Cargo.toml | 2 +- apollo-router/src/error.rs | 4 +- .../src/plugins/authorization/mod.rs | 6 +- .../src/query_planner/bridge_query_planner.rs | 2 +- .../query_planner/caching_query_planner.rs | 5 +- apollo-router/src/router/mod.rs | 6 +- apollo-router/src/spec/mod.rs | 35 ++++- apollo-router/src/spec/query.rs | 4 +- apollo-router/src/spec/query/subselections.rs | 4 +- apollo-router/tests/integration/validation.rs | 122 ++++++++++++++++++ apollo-router/tests/integration_tests.rs | 2 +- ...ests__defer_path_with_disabled_config.snap | 2 +- ...on_tests__validation_errors_from_rust.snap | 4 +- examples/supergraph-sdl/rust/Cargo.toml | 2 +- 17 files changed, 184 insertions(+), 30 deletions(-) diff --git a/.changesets/feat_geal_remove_legacy_validation.md b/.changesets/feat_geal_remove_legacy_validation.md index ff52e7ee0a..7ef2bcbd24 100644 --- a/.changesets/feat_geal_remove_legacy_validation.md +++ b/.changesets/feat_geal_remove_legacy_validation.md @@ -2,6 +2,8 @@ GraphQL query validation was initially performed by the query planner in JavaScript, which caused some performance issues. Here, we are introducing a new Rust-based validation process using `apollo-compiler` from the `apollo-rs` project. 
---

**Checklist**

Complete the checklist (and note appropriate exceptions) before the PR is marked ready-for-review.

- [ ] Changes are compatible[^1]
- [ ] Documentation[^2] completed
- [ ] Performance impact assessed and acceptable
- Tests added and passing[^3]
  - [ ] Unit Tests
  - [ ] Integration Tests
  - [ ] Manual Tests

**Exceptions**

*Note any exceptions here*

**Notes**

[^1]: It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this.
[^2]: Configuration is an important part of many changes. Where applicable please try to document configuration examples.
[^3]: Tick whichever testing boxes are applicable. If you are adding Manual Tests, please document the manual testing (extensively) in the Exceptions.
---
 .../feat_geal_remove_legacy_validation.md     |   4 +-
 Cargo.lock                                    |   8 +-
 Cargo.toml                                    |   2 +-
 apollo-router/Cargo.toml                      |   2 +-
 apollo-router/src/error.rs                    |   4 +-
 .../src/plugins/authorization/mod.rs          |   6 +-
 .../src/query_planner/bridge_query_planner.rs |   2 +-
 .../query_planner/caching_query_planner.rs    |   5 +-
 apollo-router/src/router/mod.rs               |   6 +-
 apollo-router/src/spec/mod.rs                 |  35 ++++-
 apollo-router/src/spec/query.rs               |   4 +-
 apollo-router/src/spec/query/subselections.rs |   4 +-
 apollo-router/tests/integration/validation.rs | 122 ++++++++++++++++++
 apollo-router/tests/integration_tests.rs      |   2 +-
 ...ests__defer_path_with_disabled_config.snap |   2 +-
 ...on_tests__validation_errors_from_rust.snap |   4 +-
 examples/supergraph-sdl/rust/Cargo.toml       |   2 +-
 17 files changed, 184 insertions(+), 30 deletions(-)

diff --git a/.changesets/feat_geal_remove_legacy_validation.md b/.changesets/feat_geal_remove_legacy_validation.md
index ff52e7ee0a..7ef2bcbd24 100644
--- a/.changesets/feat_geal_remove_legacy_validation.md
+++ b/.changesets/feat_geal_remove_legacy_validation.md
@@ -2,6 +2,8 @@
GraphQL query validation was initially performed by the query planner in JavaScript, which caused some performance issues. Here, we are introducing a new Rust-based validation process using `apollo-compiler` from the `apollo-rs` project. This validation is also happening much earlier in the process, inside the "router service" instead of the query planner, which will reduce the load on the query planner and give back some room in the query planner cache.
+Because validation now happens early, some error paths deeper inside the router will no longer be hit, causing observable differences in error messages. The new messages should be clearer and more useful.
+
This new validation process has been running in production for months concurrently with the JavaScript version, allowing us to detect and fix any discrepancies in the new implementation. We now have enough confidence in the new Rust-based validation to entirely switch off the less performant, JavaScript validation.

-By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4551
\ No newline at end of file
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4551
diff --git a/Cargo.lock b/Cargo.lock
index f4db1f55fa..418d74cb73 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -192,9 +192,9 @@ checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1"

[[package]]
name = "apollo-compiler"
-version = "1.0.0-beta.15"
+version = "1.0.0-beta.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "79df4ab329753d36476653850519fe92d1b34854dd4337f6abf42a64963ac0ce"
+checksum = "175659cea0232b38bfacd1505aed00221cc4028d848699ce9e3422c6bf87d90a"
dependencies = [
 "apollo-parser",
 "ariadne",
@@ -220,9 +220,9 @@ dependencies = [

[[package]]
name = "apollo-federation"
-version = "0.0.10"
+version = "0.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94e3b0774618a4febe307d2ace6714583e13cd7948cdadb2b4a937ccdc166333"
+checksum = "e9fc457f3e836a60ea3d4e1a25a8b42c5c62ddf13a2131c194d94f752c7a1475"
dependencies = [
 "apollo-compiler",
 "derive_more",
diff --git a/Cargo.toml b/Cargo.toml
index 7a87db0ece..dfebd2c8b7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -45,7 +45,7 @@ debug = 1
# Dependencies used in more than one place are specified here in order to keep versions in sync:
# https://doc.rust-lang.org/cargo/reference/workspaces.html#the-dependencies-table
[workspace.dependencies]
-apollo-compiler = "=1.0.0-beta.15"
+apollo-compiler = "=1.0.0-beta.16"
apollo-parser = "0.7.6"
apollo-smith = { version = "0.5.0", features = ["parser-impl"] }
async-trait = "0.1.77"
diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml
index bac9991d7a..651ba3d4d5 100644
--- a/apollo-router/Cargo.toml
+++ b/apollo-router/Cargo.toml
@@ -65,7 +65,7 @@ askama = "0.12.1"
access-json = "0.1.0"
anyhow = "1.0.80"
apollo-compiler.workspace = true
-apollo-federation = "=0.0.10"
+apollo-federation = "=0.0.11"
arc-swap = "1.6.0"
async-channel = "1.9.0"
async-compression = { version = "0.4.6", features = [
diff --git a/apollo-router/src/error.rs b/apollo-router/src/error.rs
index fc95798a0d..c102059d8c 100644
--- a/apollo-router/src/error.rs
+++ b/apollo-router/src/error.rs
@@ -539,7 +539,7 @@ pub(crate) enum SchemaError {
/// Collection of schema validation errors.
#[derive(Debug)] pub(crate) struct ParseErrors { - pub(crate) errors: apollo_compiler::validation::DiagnosticList, + pub(crate) errors: DiagnosticList, } impl std::fmt::Display for ParseErrors { @@ -619,7 +619,7 @@ impl IntoGraphQLErrors for ValidationErrors { impl From for ValidationErrors { fn from(errors: DiagnosticList) -> Self { Self { - errors: errors.iter().map(|e| e.to_json()).collect(), + errors: errors.iter().map(|e| e.unstable_to_json_compat()).collect(), } } } diff --git a/apollo-router/src/plugins/authorization/mod.rs b/apollo-router/src/plugins/authorization/mod.rs index 67c235936b..355ffd4b5c 100644 --- a/apollo-router/src/plugins/authorization/mod.rs +++ b/apollo-router/src/plugins/authorization/mod.rs @@ -437,7 +437,7 @@ impl AuthorizationPlugin { AuthenticatedVisitor::new(&schema.definitions, doc, &schema.implementers_map, dry_run) { let modified_query = transform::document(&mut visitor, doc) - .map_err(|e| SpecError::ParsingError(e.to_string()))?; + .map_err(|e| SpecError::TransformError(e.to_string()))?; if visitor.query_requires_authentication { if is_authenticated { @@ -476,7 +476,7 @@ impl AuthorizationPlugin { dry_run, ) { let modified_query = transform::document(&mut visitor, doc) - .map_err(|e| SpecError::ParsingError(e.to_string()))?; + .map_err(|e| SpecError::TransformError(e.to_string()))?; if visitor.query_requires_scopes { tracing::debug!("the query required scopes, the requests present scopes: {scopes:?}, modified query:\n{modified_query}\nunauthorized paths: {:?}", visitor @@ -511,7 +511,7 @@ impl AuthorizationPlugin { dry_run, ) { let modified_query = transform::document(&mut visitor, doc) - .map_err(|e| SpecError::ParsingError(e.to_string()))?; + .map_err(|e| SpecError::TransformError(e.to_string()))?; if visitor.query_requires_policies { tracing::debug!("the query required policies, the requests present policies: {policies:?}, modified query:\n{modified_query}\nunauthorized paths: {:?}", diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index 1e24f15bbd..bdde60d459 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -592,7 +592,7 @@ impl Service for BridgeQueryPlanner { let schema = &this.schema.api_schema().definitions; match add_defer_labels(schema, &doc.ast) { Err(e) => { - return Err(QueryPlannerError::SpecError(SpecError::ParsingError( + return Err(QueryPlannerError::SpecError(SpecError::TransformError( e.to_string(), ))) } diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs index 3e86b6087e..96689c131a 100644 --- a/apollo-router/src/query_planner/caching_query_planner.rs +++ b/apollo-router/src/query_planner/caching_query_planner.rs @@ -367,10 +367,11 @@ where let doc = match request.context.extensions().lock().get::() { None => { return Err(CacheResolverError::RetrievalError(Arc::new( - QueryPlannerError::SpecError(SpecError::ParsingError( + // TODO: dedicated error variant? 
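+                    // (reusing TransformError here: a missing parsed document is an internal error, not a GraphQL parse failure)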
+ QueryPlannerError::SpecError(SpecError::TransformError( "missing parsed document".to_string(), )), - ))) + ))); } Some(d) => d.clone(), }; diff --git a/apollo-router/src/router/mod.rs b/apollo-router/src/router/mod.rs index 86fbc922c3..bd5461a046 100644 --- a/apollo-router/src/router/mod.rs +++ b/apollo-router/src/router/mod.rs @@ -492,7 +492,7 @@ mod tests { let response = router_handle.request(request).await.unwrap(); assert_eq!( - "type `User` does not have a field `name`", response.errors[0].message, + r#"Cannot query field "name" on type "User"."#, response.errors[0].message, "{response:?}" ); assert_eq!( @@ -554,8 +554,8 @@ mod tests { let response = router_handle.request(request).await.unwrap(); assert_eq!( - "type `User` does not have a field `name`", - response.errors[0].message + r#"Cannot query field "name" on type "User"."#, + response.errors[0].message, ); assert_eq!( "GRAPHQL_VALIDATION_FAILED", diff --git a/apollo-router/src/spec/mod.rs b/apollo-router/src/spec/mod.rs index 1c1a8667e8..06229783ff 100644 --- a/apollo-router/src/spec/mod.rs +++ b/apollo-router/src/spec/mod.rs @@ -41,8 +41,13 @@ pub(crate) enum SpecError { InvalidType(String), /// cannot query field '{0}' on type '{1}' InvalidField(String, String), + // This branch used to be used for parse errors, and is now used primarily for errors + // during the query filtering / transform stage. It's also used for a handful of other + // random string errors. /// parsing error: {0} - ParsingError(String), + TransformError(String), + /// parsing error: {0} + ParseError(ValidationErrors), /// validation error: {0} ValidationError(ValidationErrors), /// Unknown operation named "{0}" @@ -58,7 +63,7 @@ pub(crate) const GRAPHQL_VALIDATION_FAILURE_ERROR_KEY: &str = "## GraphQLValidat impl SpecError { pub(crate) const fn get_error_key(&self) -> &'static str { match self { - SpecError::ParsingError(_) => "## GraphQLParseFailure\n", + SpecError::TransformError(_) | SpecError::ParseError(_) => "## GraphQLParseFailure\n", SpecError::UnknownOperation(_) => "## GraphQLUnknownOperationName\n", _ => GRAPHQL_VALIDATION_FAILURE_ERROR_KEY, } @@ -75,7 +80,8 @@ impl ErrorExtension for SpecError { SpecError::RecursionLimitExceeded => "RECURSION_LIMIT_EXCEEDED", SpecError::InvalidType(_) => "INVALID_TYPE", SpecError::InvalidField(_, _) => "INVALID_FIELD", - SpecError::ParsingError(_) => "PARSING_ERROR", + SpecError::TransformError(_) => "PARSING_ERROR", + SpecError::ParseError(_) => "PARSING_ERROR", SpecError::ValidationError(_) => "GRAPHQL_VALIDATION_FAILED", SpecError::UnknownOperation(_) => "GRAPHQL_VALIDATION_FAILED", SpecError::SubscriptionNotSupported => "SUBSCRIPTION_NOT_SUPPORTED", @@ -104,6 +110,29 @@ impl ErrorExtension for SpecError { impl IntoGraphQLErrors for SpecError { fn into_graphql_errors(self) -> Result, Self> { match self { + SpecError::ParseError(e) => { + // Not using `ValidationErrors::into_graphql_errors` here, + // because it sets the extension code to GRAPHQL_VALIDATION_FAILED + Ok(e.errors + .into_iter() + .map(|error| { + crate::graphql::Error::builder() + .message(format!("parsing error: {}", error.message)) + .locations( + error + .locations + .into_iter() + .map(|loc| crate::graphql::Location { + line: loc.line as u32, + column: loc.column as u32, + }) + .collect(), + ) + .extension_code("PARSING_ERROR") + .build() + }) + .collect()) + } SpecError::ValidationError(e) => { e.into_graphql_errors().map_err(SpecError::ValidationError) } diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs 
index 4bee87adbd..89a80aa77d 100644
--- a/apollo-router/src/spec/query.rs
+++ b/apollo-router/src/spec/query.rs
@@ -277,7 +277,7 @@ impl Query {
         let ast = match parser.parse_ast(query, "query.graphql") {
             Ok(ast) => ast,
             Err(errors) => {
-                return Err(SpecError::ValidationError(errors.into()));
+                return Err(SpecError::ParseError(errors.into()));
             }
         };
         let schema = &schema.api_schema().definitions;
@@ -345,7 +345,7 @@ impl Query {
         let mut visitor = QueryHashVisitor::new(&schema.definitions, document);
         traverse::document(&mut visitor, document, operation_name).map_err(|e| {
-            SpecError::ParsingError(format!("could not calculate the query hash: {e}"))
+            SpecError::QueryHashing(format!("could not calculate the query hash: {e}"))
         })?;
         let hash = visitor.finish();
diff --git a/apollo-router/src/spec/query/subselections.rs b/apollo-router/src/spec/query/subselections.rs
index 3bf8d64833..d14146010e 100644
--- a/apollo-router/src/spec/query/subselections.rs
+++ b/apollo-router/src/spec/query/subselections.rs
@@ -109,7 +109,7 @@ pub(crate) fn collect_subselections(
     }
     if defer_stats.conditional_defer_variable_names.len() > MAX_DEFER_VARIABLES {
         // TODO: dedicated error variant?
-        return Err(SpecError::ParsingError(
+        return Err(SpecError::TransformError(
             "@defer conditional on too many different variables".into(),
         ));
     }
@@ -131,7 +131,7 @@ pub(crate) fn collect_subselections(
         &FieldType::new_named((&type_name).try_into().unwrap()),
         &operation.selection_set,
     )
-    .map_err(|err| SpecError::ParsingError(err.to_owned()))?;
+    .map_err(|err| SpecError::TransformError(err.to_owned()))?;
     debug_assert!(shared.path.is_empty());
     if !primary.is_empty() {
         shared.subselections.insert(
diff --git a/apollo-router/tests/integration/validation.rs b/apollo-router/tests/integration/validation.rs
index 82a3270bc1..962df9c49b 100644
--- a/apollo-router/tests/integration/validation.rs
+++ b/apollo-router/tests/integration/validation.rs
@@ -45,3 +45,125 @@ async fn test_request_extensions_is_null() {
         r#"{"data":{"__typename":"Query"}}"#
     );
 }
+
+#[tokio::test]
+async fn test_syntax_error() {
+    let request = serde_json::json!({"query": "{__typename"});
+    let request = apollo_router::services::router::Request::fake_builder()
+        .body(request.to_string())
+        .method(hyper::Method::POST)
+        .header("content-type", "application/json")
+        .build()
+        .unwrap();
+    let response = apollo_router::TestHarness::builder()
+        .schema(include_str!("../fixtures/supergraph.graphql"))
+        .build_router()
+        .await
+        .unwrap()
+        .oneshot(request)
+        .await
+        .unwrap()
+        .next_response()
+        .await
+        .unwrap()
+        .unwrap();
+
+    let v: serde_json::Value = serde_json::from_slice(&response).unwrap();
+    insta::assert_json_snapshot!(v, @r###"
+    {
+      "errors": [
+        {
+          "message": "parsing error: syntax error: expected R_CURLY, got EOF",
+          "locations": [
+            {
+              "line": 1,
+              "column": 12
+            }
+          ],
+          "extensions": {
+            "code": "PARSING_ERROR"
+          }
+        }
+      ]
+    }
+    "###);
+}
+
+#[tokio::test]
+async fn test_validation_error() {
+    let request = serde_json::json!({"query": "{...a} fragment unused on Query { me { id } } fragment a on Query{me {id} topProducts(first: 5.5) {id}}"});
+    let request = apollo_router::services::router::Request::fake_builder()
+        .body(request.to_string())
+        .method(hyper::Method::POST)
+        .header("content-type", "application/json")
+        .build()
+        .unwrap();
+    let response = apollo_router::TestHarness::builder()
+        .schema(include_str!("../fixtures/supergraph.graphql"))
+        .build_router()
+        .await
+        .unwrap()
+        .oneshot(request)
+        .await
+        .unwrap()
+
.next_response() + .await + .unwrap() + .unwrap(); + + let v: serde_json::Value = serde_json::from_slice(&response).unwrap(); + insta::assert_json_snapshot!(v, @r###" + { + "errors": [ + { + "message": "Fragment \"unused\" is never used.", + "locations": [ + { + "line": 1, + "column": 8 + } + ], + "extensions": { + "code": "GRAPHQL_VALIDATION_FAILED" + } + }, + { + "message": "Field \"topProducts\" of type \"Product\" must have a selection of subfields. Did you mean \"topProducts { ... }\"?", + "locations": [ + { + "line": 1, + "column": 75 + } + ], + "extensions": { + "code": "GRAPHQL_VALIDATION_FAILED" + } + }, + { + "message": "Int cannot represent value: 5.5", + "locations": [ + { + "line": 1, + "column": 94 + } + ], + "extensions": { + "code": "GRAPHQL_VALIDATION_FAILED" + } + }, + { + "message": "Cannot query field \"id\" on type \"Product\".", + "locations": [ + { + "line": 1, + "column": 100 + } + ], + "extensions": { + "code": "GRAPHQL_VALIDATION_FAILED" + } + } + ] + } + "###); +} diff --git a/apollo-router/tests/integration_tests.rs b/apollo-router/tests/integration_tests.rs index 86746338dc..cb2a45496d 100644 --- a/apollo-router/tests/integration_tests.rs +++ b/apollo-router/tests/integration_tests.rs @@ -115,7 +115,7 @@ async fn api_schema_hides_field() { let message = &actual.errors[0].message; assert!( - message.contains("type `Product` does not have a field `inStock`"), + message.contains(r#"Cannot query field "inStock" on type "Product"."#), "{message}" ); assert_eq!( diff --git a/apollo-router/tests/snapshots/integration_tests__defer_path_with_disabled_config.snap b/apollo-router/tests/snapshots/integration_tests__defer_path_with_disabled_config.snap index c1a9d7d8c8..0b318b15ce 100644 --- a/apollo-router/tests/snapshots/integration_tests__defer_path_with_disabled_config.snap +++ b/apollo-router/tests/snapshots/integration_tests__defer_path_with_disabled_config.snap @@ -5,7 +5,7 @@ expression: stream.next().await.unwrap().unwrap() { "errors": [ { - "message": "cannot find directive `@defer` in this document", + "message": "Unknown directive \"@defer\".", "locations": [ { "line": 4, diff --git a/apollo-router/tests/snapshots/integration_tests__validation_errors_from_rust.snap b/apollo-router/tests/snapshots/integration_tests__validation_errors_from_rust.snap index 04d045688e..dec7cacb76 100644 --- a/apollo-router/tests/snapshots/integration_tests__validation_errors_from_rust.snap +++ b/apollo-router/tests/snapshots/integration_tests__validation_errors_from_rust.snap @@ -4,7 +4,7 @@ expression: response.errors --- [ { - "message": "the argument `notAnArg` is not supported by `Product.name`", + "message": "Unknown argument \"notAnArg\" on field \"Product.name\".", "locations": [ { "line": 1, @@ -16,7 +16,7 @@ expression: response.errors } }, { - "message": "fragment `Unused` must be used in an operation", + "message": "Fragment \"Unused\" is never used.", "locations": [ { "line": 1, diff --git a/examples/supergraph-sdl/rust/Cargo.toml b/examples/supergraph-sdl/rust/Cargo.toml index 627d062fc9..09cd627dad 100644 --- a/examples/supergraph-sdl/rust/Cargo.toml +++ b/examples/supergraph-sdl/rust/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" [dependencies] anyhow = "1" -apollo-compiler = "=1.0.0-beta.15" +apollo-compiler = "1.0.0-beta.16" apollo-router = { path = "../../../apollo-router" } async-trait = "0.1" tower = { version = "0.4", features = ["full"] } From 7a65b3caaf31529b3e2c213aae7f8958238549d3 Mon Sep 17 00:00:00 2001 From: Ivan Goncharov Date: Mon, 15 Apr 2024 17:36:46 
+0300
Subject: [PATCH 15/46] file-uploads: Add tests and improve errors for rearrange_query_plan (#4920)

1. Replace the unordered HashSet/HashMap with IndexSet/IndexMap, which makes
   the order of variable names inside error messages predictable.
2. Add unit tests.
3. For a Parallel node, simplify the resulting query plan by special-casing:
   a. if only one subnode is using files, don't change anything
   b. if all subnodes are using files, replace `Parallel` with `Sequence`

---
**Checklist**

Complete the checklist (and note appropriate exceptions) before the PR is marked ready-for-review.

- [ ] Changes are compatible[^1]
- [ ] Documentation[^2] completed
- [ ] Performance impact assessed and acceptable
- Tests added and passing[^3]
  - [x] Unit Tests
  - [ ] Integration Tests
  - [ ] Manual Tests

**Exceptions**

*Note any exceptions here*

**Notes**

[^1]: It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this.
[^2]: Configuration is an important part of many changes. Where applicable please try to document configuration examples.
[^3]: Tick whichever testing boxes are applicable. If you are adding Manual Tests, please document the manual testing (extensively) in the Exceptions.
---
 .../file_uploads/rearrange_query_plan.rs | 716 ++++++++++++++++--
 1 file changed, 655 insertions(+), 61 deletions(-)

diff --git a/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs b/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs
index 5b17d7efb0..dc335c4596 100644
--- a/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs
+++ b/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs
@@ -1,8 +1,9 @@
 use std::cmp;
 use std::collections::BTreeMap;
 use std::collections::HashMap;
-use std::collections::HashSet;
 
+use indexmap::IndexMap;
+use indexmap::IndexSet;
 use itertools::Itertools;
 
 use super::error::FileUploadError;
@@ -36,7 +37,7 @@ pub(super) fn rearrange_query_plan(
         );
     }
 
-    let root = rearrange_plan_node(root, &mut HashMap::new(), &variable_ranges)?;
+    let root = rearrange_plan_node(root, &mut IndexMap::new(), &variable_ranges)?;
     Ok(QueryPlan {
         root,
         usage_reporting: query_plan.usage_reporting.clone(),
@@ -48,7 +49,7 @@ pub(super) fn rearrange_query_plan(
 // Recursive, and recursion is safe here since query plan is also executed recursively.
 fn rearrange_plan_node<'a>(
     node: &PlanNode,
-    acc_variables: &mut HashMap<&'a str, &'a (Option, Option)>,
+    acc_variables: &mut IndexMap<&'a str, &'a (Option, Option)>,
     variable_ranges: &'a HashMap<&str, (Option, Option)>,
 ) -> UploadResult {
     Ok(match node {
@@ -94,7 +95,7 @@
         // Error if 'rest' contains file variables
         if let Some(rest) = rest {
-            let mut rest_variables = HashMap::new();
+            let mut rest_variables = IndexMap::new();
             // ignore result use it just to collect variables
             drop(rearrange_plan_node(
                 rest,
@@ -127,7 +128,7 @@
             .transpose();
 
         // Error if 'deferred' contains file variables
-        let mut deferred_variables = HashMap::new();
+        let mut deferred_variables = IndexMap::new();
         for DeferredNode { node, ..
} in deferred.iter() { if let Some(node) = node { // ignore result use it just to collect variables @@ -164,9 +165,9 @@ fn rearrange_plan_node<'a>( let mut sequence_last = None; let mut has_overlap = false; - let mut duplicate_variables = HashSet::new(); + let mut duplicate_variables = IndexSet::new(); for node in nodes.iter() { - let mut node_variables = HashMap::new(); + let mut node_variables = IndexMap::new(); let node = rearrange_plan_node(node, &mut node_variables, variable_ranges)?; sequence.push(node); @@ -203,10 +204,10 @@ fn rearrange_plan_node<'a>( // Note: we don't wrap or change order of nodes that don't use "file variables". let mut parallel = Vec::new(); let mut sequence = BTreeMap::new(); - let mut duplicate_variables = HashSet::new(); + let mut duplicate_variables = IndexSet::new(); for node in nodes.iter() { - let mut node_variables = HashMap::new(); + let mut node_variables = IndexMap::new(); let node = rearrange_plan_node(node, &mut node_variables, variable_ranges)?; if node_variables.is_empty() { parallel.push(node); @@ -242,7 +243,11 @@ fn rearrange_plan_node<'a>( )); } - if !sequence.is_empty() { + if sequence.len() <= 1 { + // if there are no node competing for files, keep nodes nodes in Parallel + parallel.extend(sequence.into_values().map(|(node, _)| node)); + PlanNode::Parallel { nodes: parallel } + } else { let mut nodes = Vec::new(); let mut sequence_last_file = None; for (first_file, (node, last_file)) in sequence.into_iter() { @@ -253,62 +258,651 @@ fn rearrange_plan_node<'a>( nodes.push(node); } - parallel.push(PlanNode::Sequence { nodes }); + if parallel.is_empty() { + // if all nodes competing for files replace Parallel with Sequence + PlanNode::Sequence { nodes } + } else { + // if some of the nodes competing for files wrap them with Sequence within Parallel + parallel.push(PlanNode::Sequence { nodes }); + PlanNode::Parallel { nodes: parallel } + } } - - PlanNode::Parallel { nodes: parallel } } }) } -#[test] -fn test_rearrange_impossible_plan() { - let root = serde_json::from_str(r#"{ - "kind": "Sequence", - "nodes": [ - { - "kind": "Fetch", - "serviceName": "uploads1", - "variableUsages": [ - "file1" - ], - "operation": "mutation SomeMutation__uploads1__0($file1:Upload1){file1:singleUpload1(file:$file1){filename}}", - "operationName": "SomeMutation__uploads1__0", - "operationKind": "mutation", - "id": null, - "inputRewrites": null, - "outputRewrites": null, - "schemaAwareHash": "0239133f4bf1e52ed2d84a06563d98d61a197ec417490a38b37aaeecd98b315c", - "authorization": { - "is_authenticated": false, - "scopes": [], - "policies": [] - } - }, - { - "kind": "Fetch", - "serviceName": "uploads2", - "variableUsages": [ - "file0" - ], - "operation": "mutation SomeMutation__uploads2__1($file0:Upload2){file0:singleUpload2(file:$file0){filename}}", - "operationName": "SomeMutation__uploads2__1", - "operationKind": "mutation", - "id": null, - "inputRewrites": null, - "outputRewrites": null, - "schemaAwareHash": "41fda639a3b69227226d234fed29d63124e0a95ac9ff98c611e903d4b2adcd8c", - "authorization": { - "is_authenticated": false, - "scopes": [], - "policies": [] - } +#[cfg(test)] +mod tests { + use indexmap::indexmap; + use serde_json::json; + + use super::*; + use crate::query_planner::subscription::SubscriptionNode; + use crate::query_planner::Primary; + use crate::services::execution::QueryPlan; + + // Custom `assert_matches` due to its current nightly-only status, see + // https://github.com/rust-lang/rust/issues/82775 + macro_rules! 
assert_matches { + ($actual:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )? $(,)?) => { + let result = $actual; + assert!( + matches!(result, $( $pattern )|+ $( if $guard )?), + "got {:?} but expected {:?}", + result, + "", // stringify!($pattern) + ); + }; + } + + fn fake_query_plan(root_json: serde_json::Value) -> QueryPlan { + QueryPlan::fake_new(Some(serde_json::from_value(root_json).unwrap()), None) + } + + fn to_root_json(query_plan: QueryPlan) -> serde_json::Value { + serde_json::to_value(query_plan.root).unwrap() + } + + fn normalize_json( + json: serde_json::Value, + ) -> serde_json::Value { + serde_json::to_value(serde_json::from_value::(json).unwrap()).unwrap() + } + + fn fake_fetch(service_name: &str, variables: Vec<&str>) -> serde_json::Value { + normalize_json::(json!({ + "kind": "Fetch", + "serviceName": service_name.to_owned(), + "variableUsages": variables.to_owned(), + "operation": "", + "operationKind": "query" + })) + } + + fn fake_subscription(service_name: &str, variables: Vec<&str>) -> serde_json::Value { + normalize_json::(json!({ + "serviceName": service_name.to_owned(), + "variableUsages": variables.to_owned(), + "operation": "", + "operationKind": "subscription" + })) + } + + fn fake_primary(node: serde_json::Value) -> serde_json::Value { + normalize_json::(json!({ "node": node })) + } + + fn fake_deferred(node: serde_json::Value) -> serde_json::Value { + normalize_json::(json!({ + "depends": [], + "queryPath": [], + "node": node, + })) + } + + #[test] + fn test_valid_conditional_node() { + let root_json = json!({ + "kind": "Condition", + "condition": "", + "ifClause": fake_fetch("uploads1", vec!["file"]), + "elseClause": fake_fetch("uploads2", vec!["file"]), + }); + let query_plan = fake_query_plan(root_json.clone()); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_eq!(to_root_json(result.unwrap()), root_json); + } + + #[test] + fn test_inner_error_within_conditional_node() { + let query_plan = fake_query_plan(json!({ + "kind": "Condition", + "condition": "", + "ifClause": { + "kind": "Sequence", + "nodes": [ + fake_fetch("uploads1", vec!["file2"]), + fake_fetch("uploads2", vec!["file1"]) + ] } - ] - }"#).unwrap(); + })); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file1".to_owned()], + "1".to_owned() => vec!["variables.file2".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_matches!(result, Err(FileUploadError::MisorderedVariables)); + } + + #[test] + fn test_conditional_node_overlapping_with_external_node() { + let query_plan = fake_query_plan(json!({ + "kind": "Sequence", + "nodes": [ + { + "kind": "Condition", + "condition": "", + "ifClause": fake_fetch("uploads1", vec!["file"]), + "elseClause": fake_fetch("uploads2", vec!["file"]), + }, + fake_fetch("uploads3", vec!["file"]), + ] + })); + + let map_field = MapField::new(indexmap! 
{ + "0".to_owned() => vec!["variables.file".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_matches!( + result, + Err(FileUploadError::DuplicateVariableUsages(ref variables)) if variables == "$file", + ); + } + + #[test] + fn test_valid_subscription_node() { + let root_json = json!({ + "kind": "Subscription", + "primary": fake_subscription("uploads", vec!["file"]), + "rest": fake_fetch("subgraph", vec!["not_a_file"]), + }); + let query_plan = fake_query_plan(root_json.clone()); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_eq!(to_root_json(result.unwrap()), root_json); + } + + #[test] + fn test_valid_file_inside_of_subscription_rest() { + let query_plan = fake_query_plan(json!({ + "kind": "Subscription", + "primary": fake_subscription("uploads1", vec!["file2"]), + "rest": { + "kind": "Sequence", + "nodes": [ + // Note: order is invalid on purpose since we are testing that user get + // error about variables inside subscription instead of internal error. + fake_fetch("uploads1", vec!["file2"]), + fake_fetch("uploads2", vec!["file1"]) + ] + } + })); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file1".to_owned()], + "1".to_owned() => vec!["variables.file2".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_matches!( + result, + Err(FileUploadError::VariablesForbiddenInsideSubscription(ref variables)) if variables == "$file2, $file1", + ); + } + + #[test] + fn test_valid_defer_node() { + let root_json = json!({ + "kind": "Defer", + "primary": fake_primary(fake_fetch("uploads", vec!["file"])), + "deferred": [fake_deferred(fake_fetch("subgraph", vec!["not_a_file"]))], + }); + let query_plan = fake_query_plan(root_json.clone()); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_eq!(to_root_json(result.unwrap()), root_json); + } + + #[test] + fn test_file_inside_of_deffered() { + let query_plan = fake_query_plan(json!({ + "kind": "Defer", + "primary": fake_primary(fake_fetch("uploads", vec!["file"])), + "deferred": [ + fake_deferred(json!({ + "kind": "Sequence", + "nodes": [ + // Note: order is invalid on purpose since we are testing that user get + // error about variables inside deffered instead of internal error. + fake_fetch("uploads1", vec!["file2"]), + fake_fetch("uploads2", vec!["file1"]) + ] + })) + ], + })); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file1".to_owned()], + "1".to_owned() => vec!["variables.file2".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_matches!( + result, + Err(FileUploadError::VariablesForbiddenInsideDefer(ref variables)) if variables == "$file2, $file1", + ); + } + + #[test] + fn test_inner_error_within_defer_node() { + let query_plan = fake_query_plan(json!({ + "kind": "Defer", + "primary": fake_primary(json!({ + "kind": "Sequence", + "nodes": [ + fake_fetch("uploads1", vec!["file2"]), + fake_fetch("uploads2", vec!["file1"]) + ] + })), + "deferred": [] + })); + + let map_field = MapField::new(indexmap! 
{ + "0".to_owned() => vec!["variables.file1".to_owned()], + "1".to_owned() => vec!["variables.file2".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_matches!(result, Err(FileUploadError::MisorderedVariables)); + } + + #[test] + fn test_defer_node_overlapping_with_external_node() { + let query_plan = fake_query_plan(json!({ + "kind": "Sequence", + "nodes": [ + { + "kind": "Defer", + "primary": fake_primary(json!(fake_fetch("uploads1", vec!["file"]))), + "deferred": [] + }, + fake_fetch("uploads2", vec!["file"]), + ] + })); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_matches!( + result, + Err(FileUploadError::DuplicateVariableUsages(ref variables)) if variables == "$file", + ); + } + + #[test] + fn test_valid_flatten_node() { + let root_json = json!({ + "kind": "Flatten", + "path": [], + "node": fake_fetch("uploads", vec!["file"]), + }); + let query_plan = fake_query_plan(root_json.clone()); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_eq!(to_root_json(result.unwrap()), root_json); + } + + #[test] + fn test_inner_error_within_flatten_node() { + let query_plan = fake_query_plan(json!({ + "kind": "Flatten", + "path": [], + "node": { + "kind": "Sequence", + "nodes": [ + fake_fetch("uploads1", vec!["file2"]), + fake_fetch("uploads2", vec!["file1"]) + ] + }, + })); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file1".to_owned()], + "1".to_owned() => vec!["variables.file2".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_matches!(result, Err(FileUploadError::MisorderedVariables)); + } + + #[test] + fn test_flatten_node_overlapping_with_external_node() { + let query_plan = fake_query_plan(json!({ + "kind": "Sequence", + "nodes": [ + { + "kind": "Flatten", + "path": [], + "node": fake_fetch("uploads1", vec!["file"]), + }, + fake_fetch("uploads2", vec!["file"]), + ] + })); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_matches!( + result, + Err(FileUploadError::DuplicateVariableUsages(ref variables)) if variables == "$file", + ); + } + + #[test] + fn test_valid_sequence() { + let root_json = json!({ + "kind": "Sequence", + "nodes": [ + fake_fetch("uploads1", vec!["file1"]), + fake_fetch("uploads2", vec!["file2"]) + ] + }); + let query_plan = fake_query_plan(root_json.clone()); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file1".to_owned()], + "1".to_owned() => vec!["variables.file2".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_eq!(to_root_json(result.unwrap()), root_json); + } + + #[test] + fn test_missordered_sequence() { + let query_plan = fake_query_plan(json!({ + "kind": "Sequence", + "nodes": [ + fake_fetch("uploads1", vec!["file2"]), + fake_fetch("uploads2", vec!["file1"]) + ] + })); + + let map_field = MapField::new(indexmap! 
{ + "0".to_owned() => vec!["variables.file1".to_owned()], + "1".to_owned() => vec!["variables.file2".to_owned()], + }) + .unwrap(); - let variable_ranges = - HashMap::from([("file1", (Some(1), Some(1))), ("file0", (Some(0), Some(0)))]); - let root = rearrange_plan_node(&root, &mut HashMap::new(), &variable_ranges).unwrap_err(); - assert_eq!("References to variables containing files are ordered in the way that prevent streaming of files.".to_string(), root.to_string()); + let result = rearrange_query_plan(&query_plan, &map_field); + assert_matches!(result, Err(FileUploadError::MisorderedVariables)); + } + + #[test] + fn test_sequence_with_overlapping_variables() { + let query_plan = fake_query_plan(json!({ + "kind": "Sequence", + "nodes": [ + fake_fetch("uploads1", vec!["files1"]), + fake_fetch("uploads2", vec!["files2"]) + ] + })); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.files1.0".to_owned()], + "1".to_owned() => vec!["variables.files2.0".to_owned()], + "2".to_owned() => vec!["variables.files1.1".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_matches!(result, Err(FileUploadError::MisorderedVariables)); + } + + #[test] + fn test_sequence_with_duplicated_variables() { + let query_plan = fake_query_plan(json!({ + "kind": "Sequence", + "nodes": [ + fake_fetch("uploads1", vec!["file1"]), + fake_fetch("uploads2", vec!["file2", "file3"]), + fake_fetch("uploads3", vec!["file1"]), + fake_fetch("uploads4", vec!["file2", "file4"]) + ] + })); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file1".to_owned()], + "1".to_owned() => vec!["variables.file2".to_owned()], + "2".to_owned() => vec!["variables.file3".to_owned()], + "3".to_owned() => vec!["variables.file4".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_matches!( + result, + Err(FileUploadError::DuplicateVariableUsages(ref variables)) if variables == "$file1, $file2", + ); + } + + #[test] + fn test_keep_nodes_in_parallel() { + let query_plan = fake_query_plan(json!({ + "kind": "Parallel", + "nodes": [ + fake_fetch("subgraph1", vec!["not_a_file"]), + fake_fetch("subgraph2", vec!["not_a_file"]), + fake_fetch("uploads1", vec!["file1"]), + ] + })); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file1".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_eq!( + to_root_json(result.unwrap()), + json!({ + "kind": "Parallel", + "nodes": [ + fake_fetch("subgraph1", vec!["not_a_file"]), + fake_fetch("subgraph2", vec!["not_a_file"]), + fake_fetch("uploads1", vec!["file1"]), + ] + }) + ); + } + + #[test] + fn test_convert_parallel_to_sequence() { + let query_plan = fake_query_plan(json!({ + "kind": "Parallel", + "nodes": [ + fake_fetch("uploads1", vec!["file1"]), + fake_fetch("uploads2", vec!["file2"]) + ] + })); + + let map_field = MapField::new(indexmap! 
{ + "0".to_owned() => vec!["variables.file1".to_owned()], + "1".to_owned() => vec!["variables.file2".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_eq!( + to_root_json(result.unwrap()), + json!({ + "kind": "Sequence", + "nodes": [ + fake_fetch("uploads1", vec!["file1"]), + fake_fetch("uploads2", vec!["file2"]) + ] + }) + ); + } + + #[test] + fn test_embedded_sequence_into_parallel() { + let query_plan = fake_query_plan(json!({ + "kind": "Parallel", + "nodes": [ + fake_fetch("uploads1", vec!["file1"]), + fake_fetch("subgraph1", vec!["not_a_file"]), + fake_fetch("uploads2", vec!["file2"]) + ] + })); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file1".to_owned()], + "1".to_owned() => vec!["variables.file2".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_eq!( + to_root_json(result.unwrap()), + json!({ + "kind": "Parallel", + "nodes": [ + fake_fetch("subgraph1", vec!["not_a_file"]), + { + "kind": "Sequence", + "nodes": [ + fake_fetch("uploads1", vec!["file1"]), + fake_fetch("uploads2", vec!["file2"]) + ] + } + ] + }) + ); + } + + #[test] + fn test_fix_order_in_parallel() { + let query_plan = fake_query_plan(json!({ + "kind": "Parallel", + "nodes": [ + fake_fetch("uploads1", vec!["file1"]), + fake_fetch("uploads2", vec!["file0"]) + ] + })); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file0".to_owned()], + "1".to_owned() => vec!["variables.file1".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_eq!( + to_root_json(result.unwrap()), + json!({ + "kind": "Sequence", + "nodes": [ + fake_fetch("uploads2", vec!["file0"]), + fake_fetch("uploads1", vec!["file1"]) + ] + }) + ); + } + + #[test] + fn test_parallel_with_overlapping_variables() { + let query_plan = fake_query_plan(json!({ + "kind": "Parallel", + "nodes": [ + fake_fetch("uploads1", vec!["files1"]), + fake_fetch("uploads2", vec!["files2"]) + ] + })); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.files1.0".to_owned()], + "1".to_owned() => vec!["variables.files2.0".to_owned()], + "2".to_owned() => vec!["variables.files1.1".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_matches!(result, Err(FileUploadError::MisorderedVariables)); + } + + #[test] + fn test_parallel_with_overlapping_fetch_nodes() { + let query_plan = fake_query_plan(json!({ + "kind": "Parallel", + "nodes": [ + fake_fetch("uploads1", vec!["file1", "file3"]), + fake_fetch("uploads2", vec!["file2"]) + ] + })); + + let map_field = MapField::new(indexmap! { + "0".to_owned() => vec!["variables.file1".to_owned()], + "1".to_owned() => vec!["variables.file2".to_owned()], + "2".to_owned() => vec!["variables.file3".to_owned()], + }) + .unwrap(); + + let result = rearrange_query_plan(&query_plan, &map_field); + assert_matches!(result, Err(FileUploadError::MisorderedVariables)); + } + + #[test] + fn test_parallel_with_duplicated_variables() { + let query_plan = fake_query_plan(json!({ + "kind": "Parallel", + "nodes": [ + fake_fetch("uploads1", vec!["file1"]), + fake_fetch("uploads2", vec!["file2", "file3"]), + fake_fetch("uploads3", vec!["file1"]), + fake_fetch("uploads4", vec!["file2", "file4"]) + ] + })); + + let map_field = MapField::new(indexmap! 
{
+            "0".to_owned() => vec!["variables.file1".to_owned()],
+            "1".to_owned() => vec!["variables.file2".to_owned()],
+            "2".to_owned() => vec!["variables.file3".to_owned()],
+            "3".to_owned() => vec!["variables.file4".to_owned()],
+        })
+        .unwrap();
+
+        let result = rearrange_query_plan(&query_plan, &map_field);
+        assert_matches!(
+            result,
+            Err(FileUploadError::DuplicateVariableUsages(ref variables)) if variables == "$file1, $file2",
+        );
+    }
+}

From 6d36ef798e0a49f0187a57c142eba5f60ecea661 Mon Sep 17 00:00:00 2001
From: Edward Huang
Date: Tue, 16 Apr 2024 00:58:36 -0700
Subject: [PATCH 16/46] docs: query planner pool (#4928)

Docs for query planner pool (https://github.com/apollographql/router/pull/4897)

---
 docs/source/configuration/overview.mdx | 32 ++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/docs/source/configuration/overview.mdx b/docs/source/configuration/overview.mdx
index 7bba43ab2f..f348c4072d 100644
--- a/docs/source/configuration/overview.mdx
+++ b/docs/source/configuration/overview.mdx
@@ -546,6 +546,38 @@ You can configure certain caching behaviors for generated query plans and APQ (b
 - You can configure a Redis-backed _distributed_ cache that enables multiple router instances to share cached values. For details, see [Distributed caching in the Apollo Router](./distributed-caching/).
 - You can configure a Redis-backed _entity_ cache that enables a client query to retrieve cached entity data split between subgraph responses. For details, see [Subgraph entity caching in the Apollo Router](./entity-caching/).
+
+
+### Query planner pools
+
+
+
+
+
+You can improve the performance of the router's query planner by configuring parallelized query planning.
+
+By default, the query planner plans one operation at a time. It plans one operation to completion before planning the next one. This serial planning can be problematic when an operation takes a long time to plan and consequently blocks the query planner from working on other operations.
+
+To resolve such blocking scenarios, you can enable parallel query planning. Configure it in `router.yaml` with `supergraph.query_planner.experimental_parallelism`:
+
+```yaml title="router.yaml"
+supergraph:
+  query_planner:
+    experimental_parallelism: auto # number of available cpus
+```
+
+The value of `experimental_parallelism` is the number of query planners in the router's _query planner pool_. A query planner pool is a preallocated set of query planners that the router can use to plan operations. The size of the pool is the maximum number of query planners that can run in parallel, and therefore the maximum number of operations that can be worked on simultaneously.
+
+Valid values of `experimental_parallelism`:
+- Any integer starting from `1`
+- The special value `auto`, which sets the number of query planners equal to the number of available CPUs on the router's host machine
+
+The default value of `experimental_parallelism` is `1`.
+
+In practice, you should tune `experimental_parallelism` based on metrics and benchmarks gathered from your router.
+
 ### Safelisting with persisted queries
 
 You can enhance your graph's security by maintaining a persisted query list (PQL), an operation safelist made by your first-party apps. As opposed to automatic persisted queries (APQ) where operations are automatically cached, operations must be preregistered to the PQL. Once configured, the router checks incoming requests against the PQL.
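A note on the `auto` value documented above: the sketch below shows one way such a setting can resolve to a concrete pool size. The `Parallelism` enum and `pool_size` method are hypothetical names invented for this illustration, not the router's actual configuration types; the only real API used is `std::thread::available_parallelism` from the Rust standard library.

```rust
use std::num::NonZeroUsize;
use std::thread::available_parallelism;

/// Hypothetical stand-in for the `experimental_parallelism` setting:
/// either a fixed planner count or `auto`.
#[derive(Clone, Copy, Debug)]
enum Parallelism {
    Auto,
    Fixed(NonZeroUsize),
}

impl Parallelism {
    /// Resolve the configured value to the number of query planners to preallocate.
    fn pool_size(self) -> usize {
        match self {
            // `auto`: one planner per available CPU, falling back to a single
            // planner if the platform cannot report its CPU count.
            Parallelism::Auto => available_parallelism().map(NonZeroUsize::get).unwrap_or(1),
            Parallelism::Fixed(n) => n.get(),
        }
    }
}

fn main() {
    println!("`auto` resolves to {} query planners", Parallelism::Auto.pool_size());
}
```

Because the pool is preallocated, a larger resolved value trades more planners held in memory for more operations planned concurrently, which is presumably why the docs above recommend tuning the value against real metrics and benchmarks.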
From af404f29440b929c9c2c95a4610c753d6580f3b3 Mon Sep 17 00:00:00 2001 From: Jeremy Lempereur Date: Tue, 16 Apr 2024 11:01:55 +0200 Subject: [PATCH 17/46] Apply alias rewrites to arrays (#4958) [#2489](https://github.com/apollographql/router/pull/2489) introduced automatic aliasing rules to support `@interfaceObject`. These rules now properly apply to lists. --- .changesets/fix_watcher_raccoon_meat_crop.md | 8 ++ apollo-router/src/query_planner/rewrites.rs | 124 +++++++++++++++++++ 2 files changed, 132 insertions(+) create mode 100644 .changesets/fix_watcher_raccoon_meat_crop.md diff --git a/.changesets/fix_watcher_raccoon_meat_crop.md b/.changesets/fix_watcher_raccoon_meat_crop.md new file mode 100644 index 0000000000..51ddebc8b0 --- /dev/null +++ b/.changesets/fix_watcher_raccoon_meat_crop.md @@ -0,0 +1,8 @@ +### Apply alias rewrites to arrays ([PR #TODO](https://github.com/apollographql/router/pull/4958)) + + +[#2489](https://github.com/apollographql/router/pull/2489) introduced automatic aliasing rules to support `@interfaceObject`. + +These rules now properly apply to lists. + +By [@o0ignition0o](https://github.com/o0ignition0o) in https://github.com/apollographql/router/pull/4958 \ No newline at end of file diff --git a/apollo-router/src/query_planner/rewrites.rs b/apollo-router/src/query_planner/rewrites.rs index d9e5f2eb2e..90e92c6465 100644 --- a/apollo-router/src/query_planner/rewrites.rs +++ b/apollo-router/src/query_planner/rewrites.rs @@ -73,6 +73,16 @@ impl DataRewrite { obj.insert(renamer.rename_key_to.clone(), value); } } + + if let Some(arr) = selected.as_array_mut() { + for item in arr { + if let Some(obj) = item.as_object_mut() { + if let Some(value) = obj.remove(k.as_str()) { + obj.insert(renamer.rename_key_to.clone(), value); + } + } + } + } }); } } @@ -92,3 +102,117 @@ pub(crate) fn apply_rewrites( } } } + +#[cfg(test)] +mod tests { + use serde_json_bytes::json; + + use super::*; + + // The schema is not used for the tests + // but we need a valid one + const SCHEMA: &str = r#" + schema + @core(feature: "https://specs.apollo.dev/core/v0.1"), + @core(feature: "https://specs.apollo.dev/join/v0.1") + { + query: Query + } + directive @core(feature: String!) repeatable on SCHEMA + directive @join__graph(name: String!, url: String!) on ENUM_VALUE + + enum join__Graph { + FAKE @join__graph(name:"fake" url: "http://localhost:4001/fake") + } + + type Query { + i: [I] + } + + interface I { + x: Int + } + + type A implements I { + x: Int + } + + type B { + y: Int + } + "#; + + #[test] + fn test_key_renamer_object() { + let mut data = json!({ + "data": { + "__typename": "TestType", + "testField__alias_0": { + "__typename": "TestField", + "field":"thisisatest" + } + } + }); + + let dr = DataRewrite::KeyRenamer(DataKeyRenamer { + path: "data/testField__alias_0".into(), + rename_key_to: "testField".to_string(), + }); + + dr.maybe_apply( + &Schema::parse_test(SCHEMA, &Default::default()).unwrap(), + &mut data, + ); + + assert_eq!( + json! 
{{ + "data": { + "__typename": "TestType", + "testField": { + "__typename": "TestField", + "field":"thisisatest" + } + } + }}, + data + ); + } + + #[test] + fn test_key_renamer_array() { + let mut data = json!( + { + "data": [{ + "__typename": "TestType", + "testField__alias_0": { + "__typename": "TestField", + "field":"thisisatest" + } + }] + } + ); + + let dr = DataRewrite::KeyRenamer(DataKeyRenamer { + path: "data/testField__alias_0".into(), + rename_key_to: "testField".to_string(), + }); + + dr.maybe_apply( + &Schema::parse_test(SCHEMA, &Default::default()).unwrap(), + &mut data, + ); + + assert_eq!( + json! {{ + "data": [{ + "__typename": "TestType", + "testField": { + "__typename": "TestField", + "field":"thisisatest" + } + }] + }}, + data + ); + } +} From ca1d1ef61f2b713f4683dcb1c97e5d78c090d51b Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Tue, 16 Apr 2024 12:20:48 +0100 Subject: [PATCH 18/46] Subgraph support for query batching (#4661) This project is an extension of the existing work to support [client side batching in the router](https://github.com/apollographql/router/issues/126). The current implementation is experimental and is publicly [documented](https://www.apollographql.com/docs/router/executing-operations/query-batching/). The additional work to enable batching requests to subgraphs is captured in [this issue](https://github.com/apollographql/router/issues/2002). Currently the concept of a batch is preserved until the end of the RouterRequest processing. At this point we convert each batch request item into a separate SupergraphRequest. These are then planned and executed concurrently within the router and re-assembled into a batch when they complete. It's important to note that, with this implementation, the concept of a batch, from the perspective of an executing router, now disappears and each request is planned and executed separately. This extension will modify the router so that the concept of a batch is preserved, at least outwardly, so that multiple subgraph requests are "batched" (in exactly the same format as a client batch request) for onward transmission to subgraphs. The goal of this work is to provide an optimisation by reducing the number of round-trips to a subgraph from the router. Additionally, the work will address an [unresolved issue](https://github.com/apollographql/router/issues/4019) from the existing experimental implementation and promote the existing implementation from experimental to fully supported. Fixes #2002 --- **Review Guidance** This is a fairly big PR, so I've written these notes to help make the review more approachable. 1. 
The most important files to review are (in order):
- [.changesets/feat_garypen_2002_subgraph_batching.md](https://github.com/apollographql/router/pull/4661/files#diff-6376c91cfdd47332a662c760ac849bb5449a1b6df6891b30b72b43f041bd836f)
- [docs/source/executing-operations/query-batching.mdx](https://github.com/apollographql/router/pull/4661/files#diff-617468db3057857f71c387eaa0d1a6161e3c1b8bf9fcb2de6fc6eafedc147277)
- [apollo-router/src/services/router/service.rs](https://github.com/apollographql/router/pull/4661/files#diff-544579a213fda1bff6313834d30fe1746a8a28ffd7c0d6dfa1081fa36a487355)
- [apollo-router/src/services/supergraph/service.rs](https://github.com/apollographql/router/pull/4661/files#diff-5d72a88a68962a5926fb5bb115ea3efc186904612f74e697d72e3f009669c733)
- [apollo-router/src/query_planner/plan.rs](https://github.com/apollographql/router/pull/4661/files#diff-21a82d277d12e8f21b6b71398d62e95303a117130cc4a27510b85ebfceeb8208)
- [apollo-router/src/services/subgraph_service.rs](https://github.com/apollographql/router/pull/4661/files#diff-6ef5a208ca8622f30eef88f75c18566e0304d59856b66293dcd6811555e6382e)
- [apollo-router/src/batching.rs](https://github.com/apollographql/router/pull/4661/files#diff-3e884074ecad8176341159a2382aa81c49d74b851894b8ade9fa4718c434dec6)

First read the documentation. Hopefully that will make clear how this feature works.

I've picked these files as being most important (and ordered them for review) because:

router service => This is where we spot incoming batches and create context `BatchQuery` items to manage them through the router. We also re-assemble them on the way back and identify any batches which may need to be cancelled.

supergraph service => Here we pick up the information about how many fetches we believe each `BatchQuery` will need to make.

plan => The new `query_hashes()` does this fetch identification for us. This is the most important function in this feature.

subgraph service => Here is where we intercept the calls to subgraphs and park threads to wait for batch execution to be performed. We do a lot of work here, so this is where most of the intrusive changes are: assembling and disassembling batches and managing the co-ordination between a number of parked tasks.

batching => This is the implementation of batch co-ordination. Each batch has a task which manages a variety of channels to facilitate communication between the incoming batches, waiting tasks and outgoing (to subgraph) batches. I'm suggesting reading this *after* reading through the service changes because it should mainly just be implementation details and you will be able to follow what is happening without knowing all this detail initially. Once you understand the changes to the services, you will need to read this code. Feel free to peek ahead though if that's how you like to review stuff.

2. There are still a couple of TODOs which will be resolved early next week. They are both related to how we handle context cloning, so a decision is still pending there.

Obviously all the files need to be reviewed, but the remaining files should be fairly mechanical/straightforward.

---
**Checklist**

Complete the checklist (and note appropriate exceptions) before the PR is marked ready-for-review.
- [x] Changes are compatible[^1] - [x] Documentation[^2] completed - [x] Performance impact assessed and acceptable - Tests added and passing[^3] - [x] Unit Tests - [x] Integration Tests - ~[ ] Manual Tests~ **Exceptions** *Note any exceptions here* **Notes** [^1]: It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this. [^2]: Configuration is an important part of many changes. Where applicable please try to document configuration examples. [^3]: Tick whichever testing boxes are applicable. If you are adding Manual Tests, please document the manual testing (extensively) in the Exceptions. --------- Co-authored-by: Nicholas Cioli Co-authored-by: Edward Huang --- .../feat_garypen_2002_subgraph_batching.md | 41 + apollo-router/feature_discussions.json | 5 +- apollo-router/src/batching.rs | 712 ++++++++++++ apollo-router/src/configuration/metrics.rs | 2 +- .../migrations/0023-batching.yaml | 5 + apollo-router/src/configuration/mod.rs | 52 +- ...nfiguration__tests__schema_generation.snap | 104 +- ...grade_old_configuration@batching.yaml.snap | 8 + .../testdata/metrics/batching.router.yaml | 2 +- .../testdata/migrations/batching.yaml | 3 + apollo-router/src/configuration/tests.rs | 126 ++ apollo-router/src/error.rs | 42 +- apollo-router/src/json_ext.rs | 9 + apollo-router/src/lib.rs | 1 + .../plugins/traffic_shaping/deduplication.rs | 13 + apollo-router/src/query_planner/plan.rs | 61 + apollo-router/src/request.rs | 4 +- apollo-router/src/response.rs | 8 +- apollo-router/src/services/router/service.rs | 179 ++- apollo-router/src/services/router/tests.rs | 8 +- .../src/services/subgraph_service.rs | 690 +++++++++-- .../src/services/supergraph/service.rs | 21 +- .../src/uplink/license_enforcement.rs | 4 + .../fixtures/apollo_reports_batch.router.yaml | 4 +- .../fixtures/batching/all_enabled.router.yaml | 11 + .../fixtures/batching/block_request.rhai | 10 + .../fixtures/batching/coprocessor.router.yaml | 19 + .../fixtures/batching/rhai_script.router.yaml | 15 + .../tests/fixtures/batching/schema.graphql | 56 + .../batching/short_timeouts.router.yaml | 14 + apollo-router/tests/integration/batching.rs | 1026 +++++++++++++++++ apollo-router/tests/integration/mod.rs | 1 + ...n__lifecycle__cli_config_experimental.snap | 1 - docs/source/config.json | 2 +- docs/source/configuration/traffic-shaping.mdx | 6 +- .../executing-operations/query-batching.mdx | 169 ++- 36 files changed, 3216 insertions(+), 218 deletions(-) create mode 100644 .changesets/feat_garypen_2002_subgraph_batching.md create mode 100644 apollo-router/src/batching.rs create mode 100644 apollo-router/src/configuration/migrations/0023-batching.yaml create mode 100644 apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@batching.yaml.snap create mode 100644 apollo-router/src/configuration/testdata/migrations/batching.yaml create mode 100644 apollo-router/tests/fixtures/batching/all_enabled.router.yaml create mode 100644 apollo-router/tests/fixtures/batching/block_request.rhai create mode 100644 apollo-router/tests/fixtures/batching/coprocessor.router.yaml create mode 100644 apollo-router/tests/fixtures/batching/rhai_script.router.yaml create mode 100644 apollo-router/tests/fixtures/batching/schema.graphql create mode 100644 apollo-router/tests/fixtures/batching/short_timeouts.router.yaml create 
mode 100644 apollo-router/tests/integration/batching.rs diff --git a/.changesets/feat_garypen_2002_subgraph_batching.md b/.changesets/feat_garypen_2002_subgraph_batching.md new file mode 100644 index 0000000000..7a275093b9 --- /dev/null +++ b/.changesets/feat_garypen_2002_subgraph_batching.md @@ -0,0 +1,41 @@ +### Subgraph support for query batching ([Issue #2002](https://github.com/apollographql/router/issues/2002)) + +As an extension to the ongoing work to support [client-side query batching in the router](https://github.com/apollographql/router/issues/126), the router now supports batching of subgraph requests. Each subgraph batch request retains the same external format as a client batch request. This optimization reduces the number of round-trip requests from the router to subgraphs. + +Also, batching in the router is now a generally available feature: the `experimental_batching` router configuration option has been deprecated and is replaced by the `batching` option. + +Previously, the router preserved the concept of a batch until a `RouterRequest` finished processing. From that point, the router converted each batch request item into a separate `SupergraphRequest`, and the router planned and executed those requests concurrently within the router, then reassembled them into a batch of `RouterResponse` to return to the client. Now with the implementation in this release, the concept of a batch is extended so that batches are issued to configured subgraphs (all or named). Each batch request item is planned and executed separately, but the queries issued to subgraphs are optimally assembled into batches which observe the query constraints of the various batch items. + +To configure subgraph batching, you can enable `batching.subgraph.all` for all subgraphs. You can also enable batching per subgraph with `batching.subgraph.subgraphs.*`. For example: + +```yaml +batching: + enabled: true + mode: batch_http_link + subgraph: + # Enable batching on all subgraphs + all: + enabled: true +``` + +```yaml +batching: + enabled: true + mode: batch_http_link + subgraph: + # Disable batching on all subgraphs + all: + enabled: false + # Configure(over-ride) batching support per subgraph + subgraphs: + subgraph_1: + enabled: true + subgraph_2: + enabled: true +``` + +Note: `all` may be over-ridden by `subgraphs`. This applies in general for all router subgraph configuration options. + +To learn more, see [query batching in Apollo docs](https://www.apollographql.com/docs/router/executing-operations/query-batching/). 
+ +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/4661 diff --git a/apollo-router/feature_discussions.json b/apollo-router/feature_discussions.json index 59f5a84608..446162650a 100644 --- a/apollo-router/feature_discussions.json +++ b/apollo-router/feature_discussions.json @@ -2,10 +2,9 @@ "experimental": { "experimental_retry": "https://github.com/apollographql/router/discussions/2241", "experimental_response_trace_id": "https://github.com/apollographql/router/discussions/2147", - "experimental_when_header": "https://github.com/apollographql/router/discussions/1961", - "experimental_batching": "https://github.com/apollographql/router/discussions/3840" + "experimental_when_header": "https://github.com/apollographql/router/discussions/1961" }, "preview": { "preview_entity_cache": "https://github.com/apollographql/router/discussions/4592" } -} \ No newline at end of file +} diff --git a/apollo-router/src/batching.rs b/apollo-router/src/batching.rs new file mode 100644 index 0000000000..79a7e29f83 --- /dev/null +++ b/apollo-router/src/batching.rs @@ -0,0 +1,712 @@ +//! Various utility functions and core structures used to implement batching support within +//! the router. + +use std::collections::HashMap; +use std::collections::HashSet; +use std::fmt; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; +use std::sync::Arc; + +use hyper::Body; +use opentelemetry::trace::TraceContextExt; +use opentelemetry::Context as otelContext; +use parking_lot::Mutex as PMutex; +use tokio::sync::mpsc; +use tokio::sync::oneshot; +use tokio::sync::Mutex; +use tokio::task::JoinHandle; +use tower::BoxError; +use tracing::Instrument; +use tracing::Span; +use tracing_opentelemetry::OpenTelemetrySpanExt; + +use crate::error::FetchError; +use crate::error::SubgraphBatchingError; +use crate::graphql; +use crate::query_planner::fetch::QueryHash; +use crate::services::http::HttpClientServiceFactory; +use crate::services::process_batches; +use crate::services::SubgraphRequest; +use crate::services::SubgraphResponse; +use crate::Context; + +/// A query that is part of a batch. +/// Note: It's ok to make transient clones of this struct, but *do not* store clones anywhere apart +/// from the single copy in the extensions. The batching co-ordinator relies on the fact that all +/// senders are dropped to know when to finish processing. +#[derive(Clone, Debug)] +pub(crate) struct BatchQuery { + /// The index of this query relative to the entire batch + index: usize, + + /// A channel sender for sending updates to the entire batch + sender: Arc>>>, + + /// How many more progress updates are we expecting to send? + remaining: Arc, + + /// Batch to which this BatchQuery belongs + batch: Arc, +} + +impl fmt::Display for BatchQuery { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "index: {}, ", self.index)?; + write!(f, "remaining: {}, ", self.remaining.load(Ordering::Acquire))?; + write!(f, "sender: {:?}, ", self.sender)?; + write!(f, "batch: {:?}, ", self.batch)?; + Ok(()) + } +} + +impl BatchQuery { + /// Is this BatchQuery finished? 
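+    /// (a BatchQuery is finished once `remaining` reaches zero, i.e. every fetch it registered has either signalled progress or been cancelled)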
+ pub(crate) fn finished(&self) -> bool { + self.remaining.load(Ordering::Acquire) == 0 + } + + /// Inform the batch of query hashes representing fetches needed by this element of the batch query + pub(crate) async fn set_query_hashes( + &self, + query_hashes: Vec>, + ) -> Result<(), BoxError> { + self.remaining.store(query_hashes.len(), Ordering::Release); + + self.sender + .lock() + .await + .as_ref() + .ok_or(SubgraphBatchingError::SenderUnavailable)? + .send(BatchHandlerMessage::Begin { + index: self.index, + query_hashes, + }) + .await?; + Ok(()) + } + + /// Signal to the batch handler that this specific batch query has made some progress. + /// + /// The returned channel can be awaited to receive the GraphQL response, when ready. + pub(crate) async fn signal_progress( + &self, + client_factory: HttpClientServiceFactory, + request: SubgraphRequest, + gql_request: graphql::Request, + ) -> Result>, BoxError> { + // Create a receiver for this query so that it can eventually get the request meant for it + let (tx, rx) = oneshot::channel(); + + tracing::debug!( + "index: {}, REMAINING: {}", + self.index, + self.remaining.load(Ordering::Acquire) + ); + self.sender + .lock() + .await + .as_ref() + .ok_or(SubgraphBatchingError::SenderUnavailable)? + .send(BatchHandlerMessage::Progress { + index: self.index, + client_factory, + request, + gql_request, + response_sender: tx, + span_context: Span::current().context(), + }) + .await?; + + if !self.finished() { + self.remaining.fetch_sub(1, Ordering::AcqRel); + } + + // May now be finished + if self.finished() { + let mut sender = self.sender.lock().await; + *sender = None; + } + + Ok(rx) + } + + /// Signal to the batch handler that this specific batch query is cancelled + pub(crate) async fn signal_cancelled(&self, reason: String) -> Result<(), BoxError> { + self.sender + .lock() + .await + .as_ref() + .ok_or(SubgraphBatchingError::SenderUnavailable)? + .send(BatchHandlerMessage::Cancel { + index: self.index, + reason, + }) + .await?; + + if !self.finished() { + self.remaining.fetch_sub(1, Ordering::AcqRel); + } + + // May now be finished + if self.finished() { + let mut sender = self.sender.lock().await; + *sender = None; + } + + Ok(()) + } +} + +// #[derive(Debug)] +enum BatchHandlerMessage { + /// Cancel one of the batch items + Cancel { index: usize, reason: String }, + + /// A query has reached the subgraph service and we should update its state + Progress { + index: usize, + client_factory: HttpClientServiceFactory, + request: SubgraphRequest, + gql_request: graphql::Request, + response_sender: oneshot::Sender>, + span_context: otelContext, + }, + + /// A query has passed query planning and knows how many fetches are needed + /// to complete. + Begin { + index: usize, + query_hashes: Vec>, + }, +} + +/// Collection of info needed to resolve a batch query +pub(crate) struct BatchQueryInfo { + /// The owning subgraph request + request: SubgraphRequest, + + /// The GraphQL request tied to this subgraph request + gql_request: graphql::Request, + + /// Notifier for the subgraph service handler + /// + /// Note: This must be used or else the subgraph request will time out + sender: oneshot::Sender>, +} + +// TODO: Do we want to generate a UUID for a batch for observability reasons? +// TODO: Do we want to track the size of a batch? 
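+// A rough sketch of the expected call sequence for a single batch item,
+// inferred from the BatchQuery methods above (illustrative, not prescriptive):
+//
+//     // after planning: declare the fetches this item will contribute
+//     batch_query.set_query_hashes(query_hashes).await?;
+//     // per fetch: send the request to the coordinator and await the reply
+//     let rx = batch_query
+//         .signal_progress(client_factory, request, gql_request)
+//         .await?;
+//     let response = rx.await??;
+//     // ...or abandon any remaining fetches for this item
+//     batch_query.signal_cancelled("some reason".to_string()).await?;
+//
+// When `remaining` reaches zero the sender is set to None and dropped, which
+// is how the coordinator learns that this item needs no further attention.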
+#[derive(Debug)] +pub(crate) struct Batch { + /// A sender channel to communicate with the batching handler + senders: PMutex>>>, + + /// The spawned batching handler task handle + /// + /// Note: We keep this as a failsafe. If the task doesn't terminate _before_ the batch is + /// dropped, then we will abort() the task on drop. + spawn_handle: JoinHandle>, + + /// What is the size (number of input operations) of the batch? + #[allow(dead_code)] + size: usize, +} + +impl Batch { + /// Creates a new batch, spawning an async task for handling updates to the + /// batch lifecycle. + pub(crate) fn spawn_handler(size: usize) -> Self { + tracing::debug!("New batch created with size {size}"); + + // Create the message channel pair for sending update events to the spawned task + let (spawn_tx, mut rx) = mpsc::channel(size); + + // Populate Senders + let mut senders = vec![]; + + for _ in 0..size { + senders.push(Some(spawn_tx.clone())); + } + + let spawn_handle = tokio::spawn(async move { + /// Helper struct for keeping track of the state of each individual BatchQuery + /// + #[derive(Debug)] + struct BatchQueryState { + registered: HashSet>, + committed: HashSet>, + cancelled: HashSet>, + } + + impl BatchQueryState { + // We are ready when everything we registered is in either cancelled or + // committed. + fn is_ready(&self) -> bool { + self.registered.difference(&self.committed.union(&self.cancelled).cloned().collect()).collect::>().is_empty() + } + } + + // Progressively track the state of the various batch fetches that we expect to see. Keys are batch + // indices. + let mut batch_state: HashMap = HashMap::with_capacity(size); + + // We also need to keep track of all requests we need to make and their send handles + let mut requests: Vec> = + Vec::from_iter((0..size).map(|_| Vec::new())); + + let mut master_client_factory = None; + tracing::debug!("Batch about to await messages..."); + // Start handling messages from various portions of the request lifecycle + // When recv() returns None, we want to stop processing messages + while let Some(msg) = rx.recv().await { + match msg { + BatchHandlerMessage::Cancel { index, reason } => { + // Log the reason for cancelling, update the state + tracing::debug!("Cancelling index: {index}, {reason}"); + + if let Some(state) = batch_state.get_mut(&index) { + // Short-circuit any requests that are waiting for this cancelled request to complete. + let cancelled_requests = std::mem::take(&mut requests[index]); + for BatchQueryInfo { + request, sender, .. + } in cancelled_requests + { + let subgraph_name = request.subgraph_name.ok_or(SubgraphBatchingError::MissingSubgraphName)?; + if let Err(log_error) = sender.send(Err(Box::new(FetchError::SubrequestBatchingError { + service: subgraph_name.clone(), + reason: format!("request cancelled: {reason}"), + }))) { + tracing::error!(service=subgraph_name, error=?log_error, "failed to notify waiter that request is cancelled"); + } + } + + // Clear out everything that has committed, now that they are cancelled, and + // mark everything as having been cancelled. 
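+                            // With `committed` emptied and `cancelled` set equal to
+                            // `registered`, `is_ready()` now holds for this index.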
+ state.committed.clear(); + state.cancelled = state.registered.clone(); + } + } + + BatchHandlerMessage::Begin { + index, + query_hashes, + } => { + tracing::debug!("Beginning batch for index {index} with {query_hashes:?}"); + + batch_state.insert( + index, + BatchQueryState { + cancelled: HashSet::with_capacity(query_hashes.len()), + committed: HashSet::with_capacity(query_hashes.len()), + registered: HashSet::from_iter(query_hashes), + }, + ); + } + + BatchHandlerMessage::Progress { + index, + client_factory, + request, + gql_request, + response_sender, + span_context, + } => { + // Progress the index + + tracing::debug!("Progress index: {index}"); + + if let Some(state) = batch_state.get_mut(&index) { + state.committed.insert(request.query_hash.clone()); + } + + if master_client_factory.is_none() { + master_client_factory = Some(client_factory); + } + Span::current().add_link(span_context.span().span_context().clone()); + requests[index].push(BatchQueryInfo { + request, + gql_request, + sender: response_sender, + }) + } + } + } + + // Make sure that we are actually ready and haven't forgotten to update something somewhere + if batch_state.values().any(|f| !f.is_ready()) { + tracing::error!("All senders for the batch have dropped before reaching the ready state: {batch_state:#?}"); + // There's not much else we can do, so perform an early return + return Err(SubgraphBatchingError::ProcessingFailed("batch senders not ready when required".to_string()).into()); + } + + tracing::debug!("Assembling {size} requests into batches"); + + // We now have a bunch of requests which are organised by index and we would like to + // convert them into a bunch of requests organised by service... + + let all_in_one: Vec<_> = requests.into_iter().flatten().collect(); + + // Now build up a Service oriented view to use in constructing our batches + let mut svc_map: HashMap> = HashMap::new(); + for BatchQueryInfo { + request: sg_request, + gql_request, + sender: tx, + } in all_in_one + { + let subgraph_name = sg_request.subgraph_name.clone().ok_or(SubgraphBatchingError::MissingSubgraphName)?; + let value = svc_map + .entry( + subgraph_name, + ) + .or_default(); + value.push(BatchQueryInfo { + request: sg_request, + gql_request, + sender: tx, + }); + } + + // If we don't have a master_client_factory, we can't do anything. 
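+            // `master_client_factory` is only ever populated by Progress messages, so
+            // it can still be None here if every batch item was cancelled before
+            // reaching the subgraph service; in that case there is nothing to fetch.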
+            if let Some(client_factory) = master_client_factory {
+                process_batches(client_factory, svc_map).await?;
+            }
+            Ok(())
+        }.instrument(tracing::info_span!("batch_request", size)));
+
+        Self {
+            senders: PMutex::new(senders),
+            spawn_handle,
+            size,
+        }
+    }
+
+    /// Create a batch query for a specific index in this batch
+    ///
+    /// This function may fail if the index doesn't exist or has already been taken
+    pub(crate) fn query_for_index(
+        batch: Arc<Batch>,
+        index: usize,
+    ) -> Result<BatchQuery, SubgraphBatchingError> {
+        let mut guard = batch.senders.lock();
+        // It's a serious error if we try to get a query at an index which doesn't exist or which has already been taken
+        if index >= guard.len() {
+            return Err(SubgraphBatchingError::ProcessingFailed(format!(
+                "tried to retrieve sender for index: {index} which does not exist"
+            )));
+        }
+        let opt_sender = std::mem::take(&mut guard[index]);
+        if opt_sender.is_none() {
+            return Err(SubgraphBatchingError::ProcessingFailed(format!(
+                "tried to retrieve sender for index: {index} which has already been taken"
+            )));
+        }
+        drop(guard);
+        Ok(BatchQuery {
+            index,
+            sender: Arc::new(Mutex::new(opt_sender)),
+            remaining: Arc::new(AtomicUsize::new(0)),
+            batch,
+        })
+    }
+}
+
+impl Drop for Batch {
+    fn drop(&mut self) {
+        // Failsafe: make sure that we kill the background task if the batch itself is dropped
+        self.spawn_handle.abort();
+    }
+}
+
+// Assemble a single batch request to a subgraph
+pub(crate) async fn assemble_batch(
+    requests: Vec<BatchQueryInfo>,
+) -> Result<
+    (
+        String,
+        Context,
+        http::Request<Body>,
+        Vec<oneshot::Sender<Result<SubgraphResponse, BoxError>>>,
+    ),
+    BoxError,
+> {
+    // Extract the collection of parts from the requests
+    let (txs, request_pairs): (Vec<_>, Vec<_>) = requests
+        .into_iter()
+        .map(|r| (r.sender, (r.request, r.gql_request)))
+        .unzip();
+    let (requests, gql_requests): (Vec<_>, Vec<_>) = request_pairs.into_iter().unzip();
+
+    // Construct the actual byte body of the batched request
+    let bytes = hyper::body::to_bytes(serde_json::to_string(&gql_requests)?).await?;
+
+    // Grab the common info from the first request
+    let context = requests
+        .first()
+        .ok_or(SubgraphBatchingError::RequestsIsEmpty)?
+        .context
+        .clone();
+    let first_request = requests
+        .into_iter()
+        .next()
+        .ok_or(SubgraphBatchingError::RequestsIsEmpty)?
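+        // (the batch reuses this first request's HTTP parts: URI, method and headers)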
+ .subgraph_request; + let operation_name = first_request + .body() + .operation_name + .clone() + .unwrap_or_default(); + let (parts, _) = first_request.into_parts(); + + // Generate the final request and pass it up + let request = http::Request::from_parts(parts, Body::from(bytes)); + Ok((operation_name, context, request, txs)) +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + use std::time::Duration; + + use hyper::body::to_bytes; + use tokio::sync::oneshot; + + use super::assemble_batch; + use super::Batch; + use super::BatchQueryInfo; + use crate::graphql; + use crate::plugins::traffic_shaping::Http2Config; + use crate::query_planner::fetch::QueryHash; + use crate::services::http::HttpClientServiceFactory; + use crate::services::SubgraphRequest; + use crate::services::SubgraphResponse; + use crate::Configuration; + use crate::Context; + + #[tokio::test(flavor = "multi_thread")] + async fn it_assembles_batch() { + // Assemble a list of requests for testing + let (receivers, requests): (Vec<_>, Vec<_>) = (0..2) + .map(|index| { + let (tx, rx) = oneshot::channel(); + let gql_request = graphql::Request::fake_builder() + .operation_name(format!("batch_test_{index}")) + .query(format!("query batch_test {{ slot{index} }}")) + .build(); + + ( + rx, + BatchQueryInfo { + request: SubgraphRequest::fake_builder() + .subgraph_request( + http::Request::builder().body(gql_request.clone()).unwrap(), + ) + .subgraph_name(format!("slot{index}")) + .build(), + gql_request, + sender: tx, + }, + ) + }) + .unzip(); + + // Assemble them + let (op_name, _context, request, txs) = assemble_batch(requests) + .await + .expect("it can assemble a batch"); + + // Make sure that the name of the entire batch is that of the first + assert_eq!(op_name, "batch_test_0"); + + // We should see the aggregation of all of the requests + let actual: Vec = serde_json::from_str( + &String::from_utf8(to_bytes(request.into_body()).await.unwrap().to_vec()).unwrap(), + ) + .unwrap(); + + let expected: Vec<_> = (0..2) + .map(|index| { + graphql::Request::fake_builder() + .operation_name(format!("batch_test_{index}")) + .query(format!("query batch_test {{ slot{index} }}")) + .build() + }) + .collect(); + assert_eq!(actual, expected); + + // We should also have all of the correct senders and they should be linked to the correct waiter + // Note: We reverse the senders since they should be in reverse order when assembled + assert_eq!(txs.len(), receivers.len()); + for (index, (tx, rx)) in Iterator::zip(txs.into_iter(), receivers).enumerate() { + let data = serde_json_bytes::json!({ + "data": { + format!("slot{index}"): "valid" + } + }); + let response = SubgraphResponse { + response: http::Response::builder() + .body(graphql::Response::builder().data(data.clone()).build()) + .unwrap(), + context: Context::new(), + }; + + tx.send(Ok(response)).unwrap(); + + // We want to make sure that we don't hang the test if we don't get the correct message + let received = tokio::time::timeout(Duration::from_millis(10), rx) + .await + .unwrap() + .unwrap() + .unwrap(); + + assert_eq!(received.response.into_body().data, Some(data)); + } + } + + #[tokio::test(flavor = "multi_thread")] + async fn it_rejects_index_out_of_bounds() { + let batch = Arc::new(Batch::spawn_handler(2)); + + assert!(Batch::query_for_index(batch.clone(), 2).is_err()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn it_rejects_duplicated_index_get() { + let batch = Arc::new(Batch::spawn_handler(2)); + + assert!(Batch::query_for_index(batch.clone(), 0).is_ok()); + 
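+        // The first call took (std::mem::take) the sender for index 0, so asking
+        // for the same index again must fail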
assert!(Batch::query_for_index(batch.clone(), 0).is_err()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn it_limits_the_number_of_cancelled_sends() { + let batch = Arc::new(Batch::spawn_handler(2)); + + let bq = Batch::query_for_index(batch.clone(), 0).expect("its a valid index"); + + assert!(bq + .set_query_hashes(vec![Arc::new(QueryHash::default())]) + .await + .is_ok()); + assert!(!bq.finished()); + assert!(bq.signal_cancelled("why not?".to_string()).await.is_ok()); + assert!(bq.finished()); + assert!(bq + .signal_cancelled("only once though".to_string()) + .await + .is_err()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn it_limits_the_number_of_progressed_sends() { + let batch = Arc::new(Batch::spawn_handler(2)); + + let bq = Batch::query_for_index(batch.clone(), 0).expect("its a valid index"); + + let factory = HttpClientServiceFactory::from_config( + "testbatch", + &Configuration::default(), + Http2Config::Disable, + ); + let request = SubgraphRequest::fake_builder() + .subgraph_request( + http::Request::builder() + .body(graphql::Request::default()) + .unwrap(), + ) + .subgraph_name("whatever".to_string()) + .build(); + assert!(bq + .set_query_hashes(vec![Arc::new(QueryHash::default())]) + .await + .is_ok()); + assert!(!bq.finished()); + assert!(bq + .signal_progress( + factory.clone(), + request.clone(), + graphql::Request::default() + ) + .await + .is_ok()); + assert!(bq.finished()); + assert!(bq + .signal_progress(factory, request, graphql::Request::default()) + .await + .is_err()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn it_limits_the_number_of_mixed_sends() { + let batch = Arc::new(Batch::spawn_handler(2)); + + let bq = Batch::query_for_index(batch.clone(), 0).expect("its a valid index"); + + let factory = HttpClientServiceFactory::from_config( + "testbatch", + &Configuration::default(), + Http2Config::Disable, + ); + let request = SubgraphRequest::fake_builder() + .subgraph_request( + http::Request::builder() + .body(graphql::Request::default()) + .unwrap(), + ) + .subgraph_name("whatever".to_string()) + .build(); + assert!(bq + .set_query_hashes(vec![Arc::new(QueryHash::default())]) + .await + .is_ok()); + assert!(!bq.finished()); + assert!(bq + .signal_progress(factory, request, graphql::Request::default()) + .await + .is_ok()); + assert!(bq.finished()); + assert!(bq + .signal_cancelled("only once though".to_string()) + .await + .is_err()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn it_limits_the_number_of_mixed_sends_two_query_hashes() { + let batch = Arc::new(Batch::spawn_handler(2)); + + let bq = Batch::query_for_index(batch.clone(), 0).expect("its a valid index"); + + let factory = HttpClientServiceFactory::from_config( + "testbatch", + &Configuration::default(), + Http2Config::Disable, + ); + let request = SubgraphRequest::fake_builder() + .subgraph_request( + http::Request::builder() + .body(graphql::Request::default()) + .unwrap(), + ) + .subgraph_name("whatever".to_string()) + .build(); + let qh = Arc::new(QueryHash::default()); + assert!(bq.set_query_hashes(vec![qh.clone(), qh]).await.is_ok()); + assert!(!bq.finished()); + assert!(bq + .signal_progress(factory, request, graphql::Request::default()) + .await + .is_ok()); + assert!(!bq.finished()); + assert!(bq + .signal_cancelled("only twice though".to_string()) + .await + .is_ok()); + assert!(bq.finished()); + assert!(bq + .signal_cancelled("only twice though".to_string()) + .await + .is_err()); + } +} diff --git 
a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs index fa9a4d4a67..9e3f894970 100644 --- a/apollo-router/src/configuration/metrics.rs +++ b/apollo-router/src/configuration/metrics.rs @@ -336,7 +336,7 @@ impl InstrumentData { populate_config_instrument!( apollo.router.config.batching, - "$.experimental_batching[?(@.enabled == true)]", + "$.batching[?(@.enabled == true)]", opt.mode, "$.mode" ); diff --git a/apollo-router/src/configuration/migrations/0023-batching.yaml b/apollo-router/src/configuration/migrations/0023-batching.yaml new file mode 100644 index 0000000000..7457467524 --- /dev/null +++ b/apollo-router/src/configuration/migrations/0023-batching.yaml @@ -0,0 +1,5 @@ +description: Batching is no longer experimental +actions: + - type: move + from: experimental_batching + to: batching diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index ae5674b665..561b62e998 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -190,7 +190,7 @@ pub struct Configuration { /// Batching configuration. #[serde(default)] - pub(crate) experimental_batching: Batching, + pub(crate) batching: Batching, } impl PartialEq for Configuration { @@ -254,7 +254,7 @@ impl<'de> serde::Deserialize<'de> for Configuration { uplink: UplinkConfig, limits: Limits, experimental_chaos: Chaos, - experimental_batching: Batching, + batching: Batching, experimental_apollo_metrics_generation_mode: ApolloMetricsGenerationMode, } let ad_hoc: AdHocConfiguration = serde::Deserialize::deserialize(deserializer)?; @@ -273,7 +273,7 @@ impl<'de> serde::Deserialize<'de> for Configuration { .operation_limits(ad_hoc.limits) .chaos(ad_hoc.experimental_chaos) .uplink(ad_hoc.uplink) - .experimental_batching(ad_hoc.experimental_batching) + .batching(ad_hoc.batching) .experimental_apollo_metrics_generation_mode( ad_hoc.experimental_apollo_metrics_generation_mode, ) @@ -313,8 +313,8 @@ impl Configuration { chaos: Option, uplink: Option, experimental_api_schema_generation_mode: Option, + batching: Option, experimental_apollo_metrics_generation_mode: Option, - experimental_batching: Option, ) -> Result { #[cfg(not(test))] let notify_queue_cap = match apollo_plugins.get(APOLLO_SUBSCRIPTION_PLUGIN_NAME) { @@ -350,7 +350,7 @@ impl Configuration { }, tls: tls.unwrap_or_default(), uplink, - experimental_batching: experimental_batching.unwrap_or_default(), + batching: batching.unwrap_or_default(), #[cfg(test)] notify: notify.unwrap_or_default(), #[cfg(not(test))] @@ -388,7 +388,7 @@ impl Configuration { operation_limits: Option, chaos: Option, uplink: Option, - experimental_batching: Option, + batching: Option, experimental_api_schema_generation_mode: Option, experimental_apollo_metrics_generation_mode: Option, ) -> Result { @@ -416,7 +416,7 @@ impl Configuration { apq: apq.unwrap_or_default(), persisted_queries: persisted_query.unwrap_or_default(), uplink, - experimental_batching: experimental_batching.unwrap_or_default(), + batching: batching.unwrap_or_default(), }; configuration.validate() @@ -1573,4 +1573,42 @@ pub(crate) struct Batching { /// Batching mode pub(crate) mode: BatchingMode, + + /// Subgraph options for batching + pub(crate) subgraph: Option>, +} + +/// Common options for configuring subgraph batching +#[derive(Debug, Clone, Default, Deserialize, Serialize, JsonSchema)] +pub(crate) struct CommonBatchingConfig { + /// Whether this batching config should be enabled + pub(crate) enabled: bool, +} + +impl 
Batching { + // Check if we should enable batching for a particular subgraph (service_name) + pub(crate) fn batch_include(&self, service_name: &str) -> bool { + match &self.subgraph { + Some(subgraph_batching_config) => { + // Override by checking if all is enabled + if subgraph_batching_config.all.enabled { + // If it is, require: + // - no subgraph entry OR + // - an enabled subgraph entry + subgraph_batching_config + .subgraphs + .get(service_name) + .map_or(true, |x| x.enabled) + } else { + // If it isn't, require: + // - an enabled subgraph entry + subgraph_batching_config + .subgraphs + .get(service_name) + .is_some_and(|x| x.enabled) + } + } + None => false, + } + } } diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 687f514a2a..3d3d7fd527 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -742,6 +742,79 @@ expression: "&schema" } } }, + "batching": { + "description": "Batching configuration.", + "default": { + "enabled": false, + "mode": "batch_http_link", + "subgraph": null + }, + "type": "object", + "required": [ + "mode" + ], + "properties": { + "enabled": { + "description": "Activates Batching (disabled by default)", + "default": false, + "type": "boolean" + }, + "mode": { + "description": "Batching mode", + "oneOf": [ + { + "description": "batch_http_link", + "type": "string", + "enum": [ + "batch_http_link" + ] + } + ] + }, + "subgraph": { + "description": "Subgraph options for batching", + "type": "object", + "properties": { + "all": { + "description": "options applying to all subgraphs", + "default": { + "enabled": false + }, + "type": "object", + "required": [ + "enabled" + ], + "properties": { + "enabled": { + "description": "Whether this batching config should be enabled", + "type": "boolean" + } + } + }, + "subgraphs": { + "description": "per subgraph options", + "default": {}, + "type": "object", + "additionalProperties": { + "description": "Common options for configuring subgraph batching", + "type": "object", + "required": [ + "enabled" + ], + "properties": { + "enabled": { + "description": "Whether this batching config should be enabled", + "type": "boolean" + } + } + } + } + }, + "nullable": true + } + }, + "additionalProperties": false + }, "coprocessor": { "description": "Configures the externalization plugin", "type": "object", @@ -1375,37 +1448,6 @@ expression: "&schema" } ] }, - "experimental_batching": { - "description": "Batching configuration.", - "default": { - "enabled": false, - "mode": "batch_http_link" - }, - "type": "object", - "required": [ - "mode" - ], - "properties": { - "enabled": { - "description": "Activates Batching (disabled by default)", - "default": false, - "type": "boolean" - }, - "mode": { - "description": "Batching mode", - "oneOf": [ - { - "description": "batch_http_link", - "type": "string", - "enum": [ - "batch_http_link" - ] - } - ] - } - }, - "additionalProperties": false - }, "experimental_chaos": { "description": "Configuration for chaos testing, trying to reproduce bugs that require uncommon conditions. 
You probably don’t want this in production!",
      "default": {
diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@batching.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@batching.yaml.snap
new file mode 100644
index 0000000000..daec7b3f14
--- /dev/null
+++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@batching.yaml.snap
@@ -0,0 +1,8 @@
+---
+source: apollo-router/src/configuration/tests.rs
+expression: new_config
+---
+---
+batching:
+  enabled: true
+  mode: batch_http_link
diff --git a/apollo-router/src/configuration/testdata/metrics/batching.router.yaml b/apollo-router/src/configuration/testdata/metrics/batching.router.yaml
index c177d3f45e..169f3824a9 100644
--- a/apollo-router/src/configuration/testdata/metrics/batching.router.yaml
+++ b/apollo-router/src/configuration/testdata/metrics/batching.router.yaml
@@ -1,3 +1,3 @@
-experimental_batching:
+batching:
   enabled: true
   mode: batch_http_link
diff --git a/apollo-router/src/configuration/testdata/migrations/batching.yaml b/apollo-router/src/configuration/testdata/migrations/batching.yaml
new file mode 100644
index 0000000000..c177d3f45e
--- /dev/null
+++ b/apollo-router/src/configuration/testdata/migrations/batching.yaml
@@ -0,0 +1,3 @@
+experimental_batching:
+  enabled: true
+  mode: batch_http_link
diff --git a/apollo-router/src/configuration/tests.rs b/apollo-router/src/configuration/tests.rs
index f619a5018c..ffefd4ad54 100644
--- a/apollo-router/src/configuration/tests.rs
+++ b/apollo-router/src/configuration/tests.rs
@@ -968,6 +968,132 @@ fn it_adds_slash_to_custom_health_check_path_if_missing() {
     assert_eq!(&conf.health_check.path, "/healthz");
 }
 
+#[test]
+fn it_processes_batching_subgraph_all_enabled_correctly() {
+    let json_config = json!({
+        "enabled": true,
+        "mode": "batch_http_link",
+        "subgraph": {
+            "all": {
+                "enabled": true
+            }
+        }
+    });
+
+    let config: Batching = serde_json::from_value(json_config).unwrap();
+
+    assert!(config.batch_include("anything"));
+}
+
+#[test]
+fn it_processes_batching_subgraph_all_disabled_correctly() {
+    let json_config = json!({
+        "enabled": true,
+        "mode": "batch_http_link",
+        "subgraph": {
+            "all": {
+                "enabled": false
+            }
+        }
+    });
+
+    let config: Batching = serde_json::from_value(json_config).unwrap();
+
+    assert!(!config.batch_include("anything"));
+}
+
+#[test]
+fn it_processes_batching_subgraph_accounts_enabled_correctly() {
+    let json_config = json!({
+        "enabled": true,
+        "mode": "batch_http_link",
+        "subgraph": {
+            "all": {
+                "enabled": false
+            },
+            "subgraphs": {
+                "accounts": {
+                    "enabled": true
+                }
+            }
+        }
+    });
+
+    let config: Batching = serde_json::from_value(json_config).unwrap();
+
+    assert!(!config.batch_include("anything"));
+    assert!(config.batch_include("accounts"));
+}
+
+#[test]
+fn it_processes_batching_subgraph_accounts_disabled_correctly() {
+    let json_config = json!({
+        "enabled": true,
+        "mode": "batch_http_link",
+        "subgraph": {
+            "all": {
+                "enabled": false
+            },
+            "subgraphs": {
+                "accounts": {
+                    "enabled": false
+                }
+            }
+        }
+    });
+
+    let config: Batching = serde_json::from_value(json_config).unwrap();
+
+    assert!(!config.batch_include("anything"));
+    assert!(!config.batch_include("accounts"));
+}
+
+#[test]
+fn it_processes_batching_subgraph_accounts_override_disabled_correctly() {
+    let json_config = json!({
+        "enabled": true,
+        "mode": "batch_http_link",
+        "subgraph": {
+ "all": { + "enabled": true + }, + "subgraphs": { + "accounts": { + "enabled": false + } + } + } + }); + + let config: Batching = serde_json::from_value(json_config).unwrap(); + + assert!(config.batch_include("anything")); + assert!(!config.batch_include("accounts")); +} + +#[test] +fn it_processes_batching_subgraph_accounts_override_enabled_correctly() { + let json_config = json!({ + "enabled": true, + "mode": "batch_http_link", + "subgraph": { + "all": { + "enabled": false + }, + "subgraphs": { + "accounts": { + "enabled": true + } + } + } + }); + + let config: Batching = serde_json::from_value(json_config).unwrap(); + + assert!(!config.batch_include("anything")); + assert!(config.batch_include("accounts")); +} + fn has_field_level_serde_defaults(lines: &[&str], line_number: usize) -> bool { let serde_field_default = Regex::new( r#"^\s*#[\s\n]*\[serde\s*\((.*,)?\s*default\s*=\s*"[a-zA-Z0-9_:]+"\s*(,.*)?\)\s*\]\s*$"#, diff --git a/apollo-router/src/error.rs b/apollo-router/src/error.rs index c102059d8c..7fd226c12c 100644 --- a/apollo-router/src/error.rs +++ b/apollo-router/src/error.rs @@ -100,6 +100,15 @@ pub(crate) enum FetchError { /// could not find path: {reason} ExecutionPathNotFound { reason: String }, + + /// Batching error for '{service}': {reason} + SubrequestBatchingError { + /// The service for which batch processing failed. + service: String, + + /// The reason batch processing failed. + reason: String, + }, } impl FetchError { @@ -173,6 +182,7 @@ impl ErrorExtension for FetchError { FetchError::ExecutionPathNotFound { .. } => "EXECUTION_PATH_NOT_FOUND", FetchError::MalformedRequest { .. } => "MALFORMED_REQUEST", FetchError::MalformedResponse { .. } => "MALFORMED_RESPONSE", + FetchError::SubrequestBatchingError { .. } => "SUBREQUEST_BATCHING_ERROR", } .to_string() } @@ -191,16 +201,23 @@ impl From for FetchError { pub(crate) enum CacheResolverError { /// value retrieval failed: {0} RetrievalError(Arc), + /// batch processing failed: {0} + BatchingError(String), } impl IntoGraphQLErrors for CacheResolverError { fn into_graphql_errors(self) -> Result, Self> { - let CacheResolverError::RetrievalError(retrieval_error) = self; - retrieval_error - .deref() - .clone() - .into_graphql_errors() - .map_err(|_err| CacheResolverError::RetrievalError(retrieval_error)) + match self { + CacheResolverError::RetrievalError(retrieval_error) => retrieval_error + .deref() + .clone() + .into_graphql_errors() + .map_err(|_err| CacheResolverError::RetrievalError(retrieval_error)), + CacheResolverError::BatchingError(msg) => Ok(vec![Error::builder() + .message(msg) + .extension_code("BATCH_PROCESSING_FAILED") + .build()]), + } } } @@ -650,6 +667,19 @@ impl std::fmt::Display for ValidationErrors { } } +/// Error during subgraph batch processing +#[derive(Debug, Error, Display)] +pub(crate) enum SubgraphBatchingError { + /// Sender unavailable + SenderUnavailable, + /// Request does not have a subgraph name + MissingSubgraphName, + /// Requests is empty + RequestsIsEmpty, + /// Batch processing failed: {0} + ProcessingFailed(String), +} + #[cfg(test)] mod tests { use super::*; diff --git a/apollo-router/src/json_ext.rs b/apollo-router/src/json_ext.rs index 9967114741..b8cf588187 100644 --- a/apollo-router/src/json_ext.rs +++ b/apollo-router/src/json_ext.rs @@ -37,6 +37,15 @@ macro_rules! extract_key_value_from_object { }}; } +macro_rules! 
ensure_array { + ($value:expr) => {{ + match $value { + crate::json_ext::Value::Array(a) => Ok(a), + _ => Err("invalid type, expected an array"), + } + }}; +} + macro_rules! ensure_object { ($value:expr) => {{ match $value { diff --git a/apollo-router/src/lib.rs b/apollo-router/src/lib.rs index f10afe636a..4dc56b4ea9 100644 --- a/apollo-router/src/lib.rs +++ b/apollo-router/src/lib.rs @@ -52,6 +52,7 @@ pub(crate) mod metrics; mod apollo_studio_interop; pub(crate) mod axum_factory; +mod batching; mod cache; mod configuration; mod context; diff --git a/apollo-router/src/plugins/traffic_shaping/deduplication.rs b/apollo-router/src/plugins/traffic_shaping/deduplication.rs index b7f9ac6bf5..bae3f620bc 100644 --- a/apollo-router/src/plugins/traffic_shaping/deduplication.rs +++ b/apollo-router/src/plugins/traffic_shaping/deduplication.rs @@ -15,6 +15,7 @@ use tower::BoxError; use tower::Layer; use tower::ServiceExt; +use crate::batching::BatchQuery; use crate::graphql::Request; use crate::http_ext; use crate::plugins::authorization::CacheKeyMetadata; @@ -73,6 +74,18 @@ where wait_map: WaitMap, request: SubgraphRequest, ) -> Result { + // Check if the request is part of a batch. If it is, completely bypass dedup since it + // will break any request batches which this request is part of. + // This check is what enables Batching and Dedup to work together, so be very careful + // before making any changes to it. + if request + .context + .extensions() + .lock() + .contains_key::() + { + return service.ready_oneshot().await?.call(request).await; + } loop { let mut locked_wait_map = wait_map.lock().await; let authorization_cache_key = request.authorization.clone(); diff --git a/apollo-router/src/query_planner/plan.rs b/apollo-router/src/query_planner/plan.rs index 06e3519484..3ff16b0f5d 100644 --- a/apollo-router/src/query_planner/plan.rs +++ b/apollo-router/src/query_planner/plan.rs @@ -10,10 +10,12 @@ use serde::Serialize; pub(crate) use self::fetch::OperationKind; use super::fetch; use super::subscription::SubscriptionNode; +use crate::error::CacheResolverError; use crate::json_ext::Object; use crate::json_ext::Path; use crate::json_ext::Value; use crate::plugins::authorization::CacheKeyMetadata; +use crate::query_planner::fetch::QueryHash; use crate::spec::Query; /// A planner key. @@ -192,6 +194,65 @@ impl PlanNode { } } + /// Iteratively populate a Vec of QueryHashes representing Fetches in this plan. + /// + /// Do not include any operations which contain "requires" elements. + /// + /// This function is specifically designed to be used within the context of simple batching. It + /// explicitly fails if nodes which should *not* be encountered within that context are + /// encountered. e.g.: PlanNode::Defer + /// + /// It's unlikely/impossible that PlanNode::Defer or PlanNode::Subscription will ever be + /// supported, but it may be that PlanNode::Condition must eventually be supported (or other + /// new nodes types that are introduced). Explicitly fail each type to provide extra error + /// details and don't use _ so that future node types must be handled here. 
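+    // For example, given Sequence[Fetch(a), Flatten(Fetch(b)), Parallel[Fetch(c)]]
+    // where only `b` has a non-empty `requires`, this walk yields the hashes of
+    // `a` and `c`; `b` is skipped because it contains "requires" elements.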
+ pub(crate) fn query_hashes(&self) -> Result>, CacheResolverError> { + let mut query_hashes = vec![]; + let mut new_targets = vec![self]; + + loop { + let targets = new_targets; + if targets.is_empty() { + break; + } + + new_targets = vec![]; + for target in targets { + match target { + PlanNode::Sequence { nodes } | PlanNode::Parallel { nodes } => { + new_targets.extend(nodes); + } + PlanNode::Fetch(node) => { + // If requires.is_empty() we can batch it! + if node.requires.is_empty() { + query_hashes.push(node.schema_aware_hash.clone()); + } + } + PlanNode::Flatten(node) => new_targets.push(&node.node), + PlanNode::Defer { .. } => { + return Err(CacheResolverError::BatchingError( + "unexpected defer node encountered during query_hash processing" + .to_string(), + )) + } + PlanNode::Subscription { .. } => { + return Err(CacheResolverError::BatchingError( + "unexpected subscription node encountered during query_hash processing" + .to_string(), + )) + } + PlanNode::Condition { .. } => { + return Err(CacheResolverError::BatchingError( + "unexpected condition node encountered during query_hash processing" + .to_string(), + )) + } + } + } + } + Ok(query_hashes) + } + pub(crate) fn subgraph_fetches(&self) -> usize { match self { PlanNode::Sequence { nodes } => nodes.iter().map(|n| n.subgraph_fetches()).sum(), diff --git a/apollo-router/src/request.rs b/apollo-router/src/request.rs index eba779575b..1e51262dbf 100644 --- a/apollo-router/src/request.rs +++ b/apollo-router/src/request.rs @@ -177,8 +177,8 @@ impl Request { /// Convert Bytes into a GraphQL [`Request`]. /// - /// An error will be produced in the event that the query string parameters - /// cannot be turned into a valid GraphQL `Request`. + /// An error will be produced in the event that the bytes array cannot be + /// turned into a valid GraphQL `Request`. 
pub(crate) fn batch_from_bytes(bytes: &[u8]) -> Result, serde_json::Error> { let value: serde_json::Value = serde_json::from_slice(bytes).map_err(serde_json::Error::custom)?; diff --git a/apollo-router/src/response.rs b/apollo-router/src/response.rs index ad0da7f268..320b4c849d 100644 --- a/apollo-router/src/response.rs +++ b/apollo-router/src/response.rs @@ -102,12 +102,18 @@ impl Response { service: service_name.to_string(), reason: error.to_string(), })?; - let mut object = + let object = ensure_object!(value).map_err(|error| FetchError::SubrequestMalformedResponse { service: service_name.to_string(), reason: error.to_string(), })?; + Response::from_object(service_name, object) + } + pub(crate) fn from_object( + service_name: &str, + mut object: Object, + ) -> Result { let data = object.remove("data"); let errors = extract_key_value_from_object!(object, "errors", Value::Array(v) => v) .map_err(|err| FetchError::SubrequestMalformedResponse { diff --git a/apollo-router/src/services/router/service.rs b/apollo-router/src/services/router/service.rs index 2591fc3aca..837b14056f 100644 --- a/apollo-router/src/services/router/service.rs +++ b/apollo-router/src/services/router/service.rs @@ -36,6 +36,8 @@ use tracing::Instrument; use super::ClientRequestAccepts; use crate::axum_factory::CanceledRequest; +use crate::batching::Batch; +use crate::batching::BatchQuery; use crate::cache::DeduplicatingCache; use crate::configuration::Batching; use crate::configuration::BatchingMode; @@ -91,7 +93,7 @@ pub(crate) struct RouterService { persisted_query_layer: Arc, query_analysis_layer: QueryAnalysisLayer, http_max_request_bytes: usize, - experimental_batching: Batching, + batching: Batching, } impl RouterService { @@ -101,7 +103,7 @@ impl RouterService { persisted_query_layer: Arc, query_analysis_layer: QueryAnalysisLayer, http_max_request_bytes: usize, - experimental_batching: Batching, + batching: Batching, ) -> Self { RouterService { supergraph_creator, @@ -109,7 +111,7 @@ impl RouterService { persisted_query_layer, query_analysis_layer, http_max_request_bytes, - experimental_batching, + batching, } } } @@ -398,7 +400,7 @@ impl RouterService { async fn call_inner(&self, req: RouterRequest) -> Result { let context = req.context.clone(); - let supergraph_requests = match self.translate_request(req).await { + let (supergraph_requests, is_batch) = match self.translate_request(req).await { Ok(requests) => requests, Err(err) => { u64_counter!( @@ -424,22 +426,47 @@ impl RouterService { } }; + // We need to handle cases where a failure is part of a batch and thus must be cancelled. + // Requests can be cancelled at any point of the router pipeline, but all failures bubble back + // up through here, so we can catch them without having to specially handle batch queries in + // other portions of the codebase. let futures = supergraph_requests .into_iter() - .map(|supergraph_request| self.process_supergraph_request(supergraph_request)); + .map(|supergraph_request| async { + // We clone the context here, because if the request results in an Err, the + // response context will no longer exist. + let context = supergraph_request.context.clone(); + let result = self.process_supergraph_request(supergraph_request).await; + + // Regardless of the result, we need to make sure that we cancel any potential batch queries. This is because + // custom rust plugins, rhai scripts, and coprocessors can cancel requests at any time and return a GraphQL + // error wrapped in an `Ok` or in a `BoxError` wrapped in an `Err`. 
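+                // Note: `remove` rather than `get` here; the extensions hold the only
+                // long-lived clone of the BatchQuery, and the batching coordinator
+                // treats dropped senders as the end-of-item signal.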
+ let batch_query_opt = context.extensions().lock().remove::(); + if let Some(batch_query) = batch_query_opt { + // Only proceed with signalling cancelled if the batch_query is not finished + if !batch_query.finished() { + tracing::debug!("cancelling batch query in supergraph response"); + batch_query + .signal_cancelled("request terminated by user".to_string()) + .await?; + } + } + + result + }); // Use join_all to preserve ordering of concurrent operations // (Short circuit processing and propagate any errors in the batch) + // Note: We use `join_all` here since it awaits all futures before returning, thus allowing us to + // handle cancellation logic without fear of the other futures getting killed. let mut results: Vec = join_all(futures) .await .into_iter() .collect::, BoxError>>()?; - // If we only have one result, go ahead and return it. Otherwise, create a new result - // which is an array of all results. - if results.len() == 1 { - Ok(results.pop().expect("we should have at least one response")) - } else { + // If we detected we are processing a batch, return an array of results even if there is only + // one result + if is_batch { let mut results_it = results.into_iter(); let first = results_it .next() @@ -459,13 +486,16 @@ impl RouterService { response: http::Response::from_parts(parts, Body::from(bytes.freeze())), context, }) + } else { + Ok(results.pop().expect("we should have at least one response")) } } async fn translate_query_request( &self, parts: &Parts, - ) -> Result, TranslateError> { + ) -> Result<(Vec, bool), TranslateError> { + let mut is_batch = false; parts.uri.query().map(|q| { let mut result = vec![]; @@ -476,8 +506,8 @@ impl RouterService { Err(err) => { // It may be a batch of requests, so try that (if config allows) before // erroring out - if self.experimental_batching.enabled - && matches!(self.experimental_batching.mode, BatchingMode::BatchHttpLink) + if self.batching.enabled + && matches!(self.batching.mode, BatchingMode::BatchHttpLink) { result = graphql::Request::batch_from_urlencoded_query(q.to_string()) .map_err(|e| TranslateError { @@ -488,10 +518,11 @@ impl RouterService { "failed to decode a valid GraphQL request from path {e}" ), })?; + is_batch = true; } else if !q.is_empty() && q.as_bytes()[0] == b'[' { - let extension_details = if self.experimental_batching.enabled - && !matches!(self.experimental_batching.mode, BatchingMode::BatchHttpLink) { - format!("batching not supported for mode `{}`", self.experimental_batching.mode) + let extension_details = if self.batching.enabled + && !matches!(self.batching.mode, BatchingMode::BatchHttpLink) { + format!("batching not supported for mode `{}`", self.batching.mode) } else { "batching not enabled".to_string() }; @@ -513,7 +544,7 @@ impl RouterService { } } }; - Ok(result) + Ok((result, is_batch)) }).unwrap_or_else(|| { Err(TranslateError { status: StatusCode::BAD_REQUEST, @@ -527,16 +558,17 @@ impl RouterService { fn translate_bytes_request( &self, bytes: &Bytes, - ) -> Result, TranslateError> { + ) -> Result<(Vec, bool), TranslateError> { let mut result = vec![]; + let mut is_batch = false; match graphql::Request::deserialize_from_bytes(bytes) { Ok(request) => { result.push(request); } Err(err) => { - if self.experimental_batching.enabled - && matches!(self.experimental_batching.mode, BatchingMode::BatchHttpLink) + if self.batching.enabled + && matches!(self.batching.mode, BatchingMode::BatchHttpLink) { result = graphql::Request::batch_from_bytes(bytes).map_err(|e| TranslateError { @@ -547,14 +579,12 
@@ impl RouterService { "failed to deserialize the request body into JSON: {e}" ), })?; + is_batch = true; } else if !bytes.is_empty() && bytes[0] == b'[' { - let extension_details = if self.experimental_batching.enabled - && !matches!(self.experimental_batching.mode, BatchingMode::BatchHttpLink) + let extension_details = if self.batching.enabled + && !matches!(self.batching.mode, BatchingMode::BatchHttpLink) { - format!( - "batching not supported for mode `{}`", - self.experimental_batching.mode - ) + format!("batching not supported for mode `{}`", self.batching.mode) } else { "batching not enabled".to_string() }; @@ -576,13 +606,13 @@ impl RouterService { } } }; - Ok(result) + Ok((result, is_batch)) } async fn translate_request( &self, req: RouterRequest, - ) -> Result, TranslateError> { + ) -> Result<(Vec, bool), TranslateError> { let RouterRequest { router_request, context, @@ -590,7 +620,8 @@ impl RouterService { let (parts, body) = router_request.into_parts(); - let graphql_requests: Result, TranslateError> = if parts.method + let graphql_requests: Result<(Vec, bool), TranslateError> = if parts + .method == Method::GET { self.translate_query_request(&parts).await @@ -640,15 +671,28 @@ impl RouterService { } }; - let ok_results = graphql_requests?; + let (ok_results, is_batch) = graphql_requests?; let mut results = Vec::with_capacity(ok_results.len()); + let batch_size = ok_results.len(); - if ok_results.len() > 1 { - context - .extensions() - .lock() - .insert(self.experimental_batching.clone()); - } + // Modifying our Context extensions. + // If we are processing a batch (is_batch == true), insert our batching configuration. + // If subgraph batching configuration exists and is enabled for any of our subgraphs, we create our shared batch details + let shared_batch_details = (is_batch) + .then(|| { + context.extensions().lock().insert(self.batching.clone()); + + self.batching.subgraph.as_ref() + }) + .flatten() + .map(|subgraph_batching_config| { + subgraph_batching_config.all.enabled + || subgraph_batching_config + .subgraphs + .values() + .any(|v| v.enabled) + }) + .and_then(|a| a.then_some(Arc::new(Batch::spawn_handler(batch_size)))); let mut ok_results_it = ok_results.into_iter(); let first = ok_results_it @@ -661,16 +705,17 @@ impl RouterService { // through the pipeline. This is because there is simply no way to clone http // extensions. // - // Secondly, we can't clone private_entries, but we need to propagate at least + // Secondly, we can't clone extensions, but we need to propagate at least // ClientRequestAccepts to ensure correct processing of the response. We do that manually, - // but the concern is that there may be other private_entries that wish to propagate into + // but the concern is that there may be other extensions that wish to propagate into // each request or we may add them in future and not know about it here... // - // (Technically we could clone private entries, since it is held under an `Arc`, but that - // would mean all the requests in a batch shared the same set of private entries and review + // (Technically we could clone extensions, since it is held under an `Arc`, but that + // would mean all the requests in a batch shared the same set of extensions and review // comments expressed the sentiment that this may be a bad thing...) // - for graphql_request in ok_results_it { + // Note: If we enter this loop, then we must be processing a batch. + for (index, graphql_request) in ok_results_it.enumerate() { // XXX Lose http extensions, is that ok? 
let mut new = http_ext::clone_http_request(&sg); *new.body_mut() = graphql_request; @@ -682,22 +727,45 @@ impl RouterService { .lock() .get::() .cloned(); - if let Some(client_request_accepts) = client_request_accepts_opt { - new_context - .extensions() - .lock() - .insert(client_request_accepts); + // Sub-scope so that new_context_guard is dropped before pushing into the new + // SupergraphRequest + { + let mut new_context_guard = new_context.extensions().lock(); + if let Some(client_request_accepts) = client_request_accepts_opt { + new_context_guard.insert(client_request_accepts); + } + new_context_guard.insert(self.batching.clone()); + // We are only going to insert a BatchQuery if Subgraph processing is enabled + if let Some(shared_batch_details) = &shared_batch_details { + new_context_guard.insert( + Batch::query_for_index(shared_batch_details.clone(), index + 1).map_err( + |err| TranslateError { + status: StatusCode::INTERNAL_SERVER_ERROR, + error: "failed to create batch", + extension_code: "BATCHING_ERROR", + extension_details: format!("failed to create batch entry: {err}"), + }, + )?, + ); + } } - new_context - .extensions() - .lock() - .insert(self.experimental_batching.clone()); results.push(SupergraphRequest { supergraph_request: new, - // Build a new context. Cloning would cause issues. context: new_context, }); } + + if let Some(shared_batch_details) = shared_batch_details { + context.extensions().lock().insert( + Batch::query_for_index(shared_batch_details, 0).map_err(|err| TranslateError { + status: StatusCode::INTERNAL_SERVER_ERROR, + error: "failed to create batch", + extension_code: "BATCHING_ERROR", + extension_details: format!("failed to create batch entry: {err}"), + })?, + ); + } + results.insert( 0, SupergraphRequest { @@ -705,7 +773,8 @@ impl RouterService { context, }, ); - Ok(results) + + Ok((results, is_batch)) } fn count_errors(errors: &[graphql::Error]) { @@ -756,7 +825,7 @@ pub(crate) struct RouterCreator { pub(crate) persisted_query_layer: Arc, query_analysis_layer: QueryAnalysisLayer, http_max_request_bytes: usize, - experimental_batching: Batching, + batching: Batching, } impl ServiceFactory for RouterCreator { @@ -807,7 +876,7 @@ impl RouterCreator { query_analysis_layer, http_max_request_bytes: configuration.limits.http_max_request_bytes, persisted_query_layer, - experimental_batching: configuration.experimental_batching.clone(), + batching: configuration.batching.clone(), }) } @@ -825,7 +894,7 @@ impl RouterCreator { self.persisted_query_layer.clone(), self.query_analysis_layer.clone(), self.http_max_request_bytes, - self.experimental_batching.clone(), + self.batching.clone(), )); ServiceBuilder::new() diff --git a/apollo-router/src/services/router/tests.rs b/apollo-router/src/services/router/tests.rs index 884b5d0c3a..3e58e50b44 100644 --- a/apollo-router/src/services/router/tests.rs +++ b/apollo-router/src/services/router/tests.rs @@ -305,7 +305,7 @@ async fn it_processes_a_valid_query_batch() { hyper::Body::from(result) }); let config = serde_json::json!({ - "experimental_batching": { + "batching": { "enabled": true, "mode" : "batch_http_link" } @@ -394,7 +394,7 @@ async fn it_will_not_process_a_poorly_formatted_query_batch() { hyper::Body::from(result) }); let config = serde_json::json!({ - "experimental_batching": { + "batching": { "enabled": true, "mode" : "batch_http_link" } @@ -448,7 +448,7 @@ async fn it_will_process_a_non_batched_defered_query() { hyper::Body::from(bytes) }); let config = serde_json::json!({ - "experimental_batching": { + 
"batching": { "enabled": true, "mode" : "batch_http_link" } @@ -508,7 +508,7 @@ async fn it_will_not_process_a_batched_deferred_query() { hyper::Body::from(result) }); let config = serde_json::json!({ - "experimental_batching": { + "batching": { "enabled": true, "mode" : "batch_http_link" } diff --git a/apollo-router/src/services/subgraph_service.rs b/apollo-router/src/services/subgraph_service.rs index 048541c32e..e5666544f4 100644 --- a/apollo-router/src/services/subgraph_service.rs +++ b/apollo-router/src/services/subgraph_service.rs @@ -18,12 +18,14 @@ use http::HeaderValue; use http::Request; use hyper::Body; use hyper_rustls::ConfigBuilderExt; +use itertools::Itertools; use mediatype::names::APPLICATION; use mediatype::names::JSON; use mediatype::MediaType; use mime::APPLICATION_JSON; use rustls::RootCertStore; use serde::Serialize; +use tokio::sync::oneshot; use tokio_tungstenite::connect_async; use tokio_tungstenite::connect_async_tls_with_config; use tokio_tungstenite::tungstenite::client::IntoClientRequest; @@ -31,6 +33,7 @@ use tower::util::BoxService; use tower::BoxError; use tower::Service; use tower::ServiceExt; +use tracing::instrument; use tracing::Instrument; use uuid::Uuid; @@ -38,8 +41,14 @@ use super::http::HttpClientServiceFactory; use super::http::HttpRequest; use super::layers::content_negotiation::GRAPHQL_JSON_RESPONSE_HEADER_VALUE; use super::Plugins; +use crate::batching::assemble_batch; +use crate::batching::BatchQuery; +use crate::batching::BatchQueryInfo; +use crate::configuration::Batching; +use crate::configuration::BatchingMode; use crate::configuration::TlsClientAuth; use crate::error::FetchError; +use crate::error::SubgraphBatchingError; use crate::graphql; use crate::json_ext::Object; use crate::plugins::authentication::subgraph::SigningParamsConfig; @@ -122,7 +131,7 @@ impl SubgraphService { service: impl Into, configuration: &Configuration, subscription_config: Option, - client_factory: crate::services::http::HttpClientServiceFactory, + client_factory: HttpClientServiceFactory, ) -> Result { let name: String = service.into(); @@ -233,6 +242,7 @@ impl tower::Service for SubgraphService { let arc_apq_enabled = self.apq.clone(); let mut notify = self.notify.clone(); + let make_calls = async move { // Subscription handling if request.operation_kind == OperationKind::Subscription @@ -355,13 +365,18 @@ impl tower::Service for SubgraphService { } } - let client = client_factory.create(&service_name); - // If APQ is not enabled, simply make the graphql call // with the same request body. 
let apq_enabled = arc_apq_enabled.as_ref(); if !apq_enabled.load(Relaxed) { - return call_http(request, body, context, client, &service_name).await; + return call_http( + request, + body, + context, + client_factory.clone(), + &service_name, + ) + .await; } // Else, if APQ is enabled, @@ -395,7 +410,7 @@ impl tower::Service for SubgraphService { request.clone(), apq_body.clone(), context.clone(), - client_factory.create(&service_name), + client_factory.clone(), &service_name, ) .await?; @@ -408,11 +423,25 @@ impl tower::Service for SubgraphService { match get_apq_error(gql_response) { APQError::PersistedQueryNotSupported => { apq_enabled.store(false, Relaxed); - call_http(request, body, context, client, &service_name).await + call_http( + request, + body, + context, + client_factory.clone(), + &service_name, + ) + .await } APQError::PersistedQueryNotFound => { apq_body.query = query; - call_http(request, apq_body, context, client, &service_name).await + call_http( + request, + apq_body, + context, + client_factory.clone(), + &service_name, + ) + .await } _ => Ok(response), } @@ -607,39 +636,10 @@ async fn call_websocket( )) } -/// call_http makes http calls with modified graphql::Request (body) -async fn call_http( - request: SubgraphRequest, - body: graphql::Request, - context: Context, - client: crate::services::http::BoxService, - service_name: &str, -) -> Result { - let SubgraphRequest { - subgraph_request, .. - } = request; - - let operation_name = subgraph_request - .body() - .operation_name - .clone() - .unwrap_or_default(); - - let (parts, _) = subgraph_request.into_parts(); - let body = serde_json::to_string(&body).expect("JSON serialization should not fail"); - let mut request = http::Request::from_parts(parts, Body::from(body)); - - request - .headers_mut() - .insert(CONTENT_TYPE, APPLICATION_JSON_HEADER_VALUE.clone()); - request - .headers_mut() - .append(ACCEPT, ACCEPT_GRAPHQL_JSON.clone()); - - let schema_uri = request.uri(); - let host = schema_uri.host().unwrap_or_default(); - let port = schema_uri.port_u16().unwrap_or_else(|| { - let scheme = schema_uri.scheme_str(); +// Utility function to extract uri details. +fn get_uri_details(uri: &hyper::Uri) -> (&str, u16, &str) { + let port = uri.port_u16().unwrap_or_else(|| { + let scheme = uri.scheme_str(); if scheme == Some("https") { 443 } else if scheme == Some("http") { @@ -649,52 +649,16 @@ async fn call_http( } }); - let path = schema_uri.path(); - - let subgraph_req_span = tracing::info_span!("subgraph_request", - "otel.kind" = "CLIENT", - "net.peer.name" = %host, - "net.peer.port" = %port, - "http.route" = %path, - "http.url" = %schema_uri, - "net.transport" = "ip_tcp", - "apollo.subgraph.name" = %service_name, - "graphql.operation.name" = %operation_name, - ); - - // The graphql spec is lax about what strategy to use for processing responses: https://github.com/graphql/graphql-over-http/blob/main/spec/GraphQLOverHTTP.md#processing-the-response - // - // "If the response uses a non-200 status code and the media type of the response payload is application/json - // then the client MUST NOT rely on the body to be a well-formed GraphQL response since the source of the response - // may not be the server but instead some intermediary such as API gateways, proxies, firewalls, etc." - // - // The TLDR of this is that it's really asking us to do the best we can with whatever information we have with some modifications depending on content type. 
- // Our goal is to give the user the most relevant information possible in the response errors - // - // Rules: - // 1. If the content type of the response is not `application/json` or `application/graphql-response+json` then we won't try to parse. - // 2. If an HTTP status is not 2xx it will always be attached as a graphql error. - // 3. If the response type is `application/json` and status is not 2xx and the body the entire body will be output if the response is not valid graphql. - - let display_body = context.contains_key(LOGGING_DISPLAY_BODY); - - // TODO: Temporary solution to plug FileUploads plugin until 'http_client' will be fixed https://github.com/apollographql/router/pull/4666 - let request = file_uploads::http_request_wrapper(request).await; - - // Perform the actual fetch. If this fails then we didn't manage to make the call at all, so we can't do anything with it. - let (parts, content_type, body) = - do_fetch(client, &context, service_name, request, display_body) - .instrument(subgraph_req_span) - .await?; - - if display_body { - if let Some(Ok(b)) = &body { - tracing::info!( - response.body = %String::from_utf8_lossy(b), apollo.subgraph.name = %service_name, "Raw response body from subgraph {service_name:?} received" - ); - } - } + (uri.host().unwrap_or_default(), port, uri.path()) +} +// Utility function to create a graphql response from HTTP response components +fn http_response_to_graphql_response( + service_name: &str, + content_type: Result, + body: Option>, + parts: &Parts, +) -> graphql::Response { let mut graphql_response = match (content_type, body, parts.status.is_success()) { (Ok(ContentType::ApplicationGraphqlResponseJson), Some(Ok(body)), _) | (Ok(ContentType::ApplicationJson), Some(Ok(body)), true) => { @@ -761,11 +725,409 @@ async fn call_http( .to_graphql_error(None), ) } + graphql_response +} + +/// Process a single subgraph batch request +#[instrument(skip(client_factory, context, request))] +pub(crate) async fn process_batch( + client_factory: HttpClientServiceFactory, + service: String, + context: Context, + mut request: http::Request, + listener_count: usize, +) -> Result, FetchError> { + // Now we need to "batch up" our data and send it to our subgraphs + request + .headers_mut() + .insert(CONTENT_TYPE, APPLICATION_JSON_HEADER_VALUE.clone()); + request + .headers_mut() + .append(ACCEPT, ACCEPT_GRAPHQL_JSON.clone()); + + let schema_uri = request.uri(); + let (host, port, path) = get_uri_details(schema_uri); + + // We can't provide a single operation name in the span (since we may be processing multiple + // operations). Product decision, use the hard coded value "batch". + let subgraph_req_span = tracing::info_span!("subgraph_request", + "otel.kind" = "CLIENT", + "net.peer.name" = %host, + "net.peer.port" = %port, + "http.route" = %path, + "http.url" = %schema_uri, + "net.transport" = "ip_tcp", + "apollo.subgraph.name" = %&service, + "graphql.operation.name" = "batch" + ); + + // The graphql spec is lax about what strategy to use for processing responses: https://github.com/graphql/graphql-over-http/blob/main/spec/GraphQLOverHTTP.md#processing-the-response + // + // "If the response uses a non-200 status code and the media type of the response payload is application/json + // then the client MUST NOT rely on the body to be a well-formed GraphQL response since the source of the response + // may not be the server but instead some intermediary such as API gateways, proxies, firewalls, etc." 
+ // + // The TLDR of this is that it's really asking us to do the best we can with whatever information we have with some modifications depending on content type. + // Our goal is to give the user the most relevant information possible in the response errors + // + // Rules: + // 1. If the content type of the response is not `application/json` or `application/graphql-response+json` then we won't try to parse. + // 2. If an HTTP status is not 2xx it will always be attached as a graphql error. + // 3. If the response type is `application/json` and status is not 2xx and the body the entire body will be output if the response is not valid graphql. + + let display_body = context.contains_key(LOGGING_DISPLAY_BODY); + let client = client_factory.create(&service); + + // Update our batching metrics (just before we fetch) + tracing::info!(histogram.apollo.router.operations.batching.size = listener_count as f64, + mode = %BatchingMode::BatchHttpLink, // Only supported mode right now + subgraph = &service + ); + + tracing::info!(monotonic_counter.apollo.router.operations.batching = 1u64, + mode = %BatchingMode::BatchHttpLink, // Only supported mode right now + subgraph = &service + ); + + // Perform the actual fetch. If this fails then we didn't manage to make the call at all, so we can't do anything with it. + tracing::debug!("fetching from subgraph: {service}"); + let (parts, content_type, body) = do_fetch(client, &context, &service, request, display_body) + .instrument(subgraph_req_span) + .await?; + + if display_body { + if let Some(Ok(b)) = &body { + tracing::info!( + response.body = %String::from_utf8_lossy(b), apollo.subgraph.name = %&service, "Raw response body from subgraph {service:?} received" + ); + } + } + + tracing::debug!("parts: {parts:?}, content_type: {content_type:?}, body: {body:?}"); + let value = + serde_json::from_slice(&body.ok_or(FetchError::SubrequestMalformedResponse { + service: service.to_string(), + reason: "no body in response".to_string(), + })??) 
+ .map_err(|error| FetchError::SubrequestMalformedResponse { + service: service.to_string(), + reason: error.to_string(), + })?; + + tracing::debug!("json value from body is: {value:?}"); + + let array = ensure_array!(value).map_err(|error| FetchError::SubrequestMalformedResponse { + service: service.to_string(), + reason: error.to_string(), + })?; + let mut graphql_responses = Vec::with_capacity(array.len()); + for value in array { + let object = + ensure_object!(value).map_err(|error| FetchError::SubrequestMalformedResponse { + service: service.to_string(), + reason: error.to_string(), + })?; + + // Map our Vec into Bytes + // Map our serde conversion error to a FetchError + let body = Some( + serde_json::to_vec(&object) + .map(|v| v.into()) + .map_err(|error| FetchError::SubrequestMalformedResponse { + service: service.to_string(), + reason: error.to_string(), + }), + ); + + let graphql_response = + http_response_to_graphql_response(&service, content_type.clone(), body, &parts); + graphql_responses.push(graphql_response); + } + + tracing::debug!("we have a vec of graphql_responses: {graphql_responses:?}"); + // Build an http Response for each graphql response + let subgraph_responses: Result, _> = graphql_responses + .into_iter() + .map(|res| { + http::Response::builder() + .status(parts.status) + .version(parts.version) + .body(res) + .map(|mut http_res| { + *http_res.headers_mut() = parts.headers.clone(); + let resp = SubgraphResponse::new_from_response(http_res, context.clone()); + + tracing::debug!("we have a resp: {resp:?}"); + resp + }) + .map_err(|e| FetchError::MalformedResponse { + reason: e.to_string(), + }) + }) + .collect(); + + tracing::debug!("we have a vec of subgraph_responses: {subgraph_responses:?}"); + subgraph_responses +} + +/// Notify all listeners of a batch query of the results +pub(crate) async fn notify_batch_query( + service: String, + senders: Vec>>, + responses: Result, FetchError>, +) -> Result<(), BoxError> { + tracing::debug!( + "handling response for service '{service}' with {} listeners: {responses:#?}", + senders.len() + ); + + match responses { + // If we had an error processing the batch, then pipe that error to all of the listeners + Err(e) => { + for tx in senders { + // Try to notify all waiters. If we can't notify an individual sender, then log an error + if let Err(log_error) = tx.send(Err(Box::new(e.clone()))).map_err(|error| { + FetchError::SubrequestBatchingError { + service: service.clone(), + reason: format!("tx send failed: {error:?}"), + } + }) { + tracing::error!(service, error=%log_error, "failed to notify sender that batch processing failed"); + } + } + } + + Ok(rs) => { + // Before we process our graphql responses, ensure that we have a tx for each + // response + if senders.len() != rs.len() { + return Err(Box::new(FetchError::SubrequestBatchingError { + service, + reason: format!( + "number of txs ({}) is not equal to number of graphql responses ({})", + senders.len(), + rs.len() + ), + })); + } + + // We have checked before we started looping that we had a tx for every + // graphql_response, so zip_eq shouldn't panic. + // Use the tx to send a graphql_response message to each waiter. 
+ for (response, sender) in rs.into_iter().zip_eq(senders) { + if let Err(log_error) = + sender + .send(Ok(response)) + .map_err(|error| FetchError::SubrequestBatchingError { + service: service.to_string(), + reason: format!("tx send failed: {error:?}"), + }) + { + tracing::error!(service, error=%log_error, "failed to notify sender that batch processing succeeded"); + } + } + } + } + + Ok(()) +} + +type BatchInfo = ( + (String, http::Request, Context, usize), + Vec>>, +); + +/// Collect all batch requests and process them concurrently +#[instrument(skip_all)] +pub(crate) async fn process_batches( + client_factory: HttpClientServiceFactory, + svc_map: HashMap>, +) -> Result<(), BoxError> { + // We need to strip out the senders so that we can work with them separately. + let mut errors = vec![]; + let (info, txs): (Vec<_>, Vec<_>) = + futures::future::join_all(svc_map.into_iter().map(|(service, requests)| async { + let (_op_name, context, request, txs) = assemble_batch(requests).await?; + + Ok(((service, request, context, txs.len()), txs)) + })) + .await + .into_iter() + .filter_map(|x: Result| x.map_err(|e| errors.push(e)).ok()) + .unzip(); + + // If errors isn't empty, then process_batches cannot proceed. Let's log out the errors and + // return + if !errors.is_empty() { + for error in errors { + tracing::error!("assembling batch failed: {error}"); + } + return Err(SubgraphBatchingError::ProcessingFailed( + "assembling batches failed".to_string(), + ) + .into()); + } + // Collect all of the processing logic and run them concurrently, collecting all errors + let cf = &client_factory; + // It is not ok to panic if the length of the txs and info do not match. Let's make sure they + // do + if txs.len() != info.len() { + return Err(SubgraphBatchingError::ProcessingFailed( + "length of txs and info are not equal".to_string(), + ) + .into()); + } + let batch_futures = info.into_iter().zip_eq(txs).map( + |((service, request, context, listener_count), senders)| async move { + let batch_result = process_batch( + cf.clone(), + service.clone(), + context, + request, + listener_count, + ) + .await; + + notify_batch_query(service, senders, batch_result).await + }, + ); + + futures::future::try_join_all(batch_futures).await?; + + Ok(()) +} + +async fn call_http( + request: SubgraphRequest, + body: graphql::Request, + context: Context, + client_factory: HttpClientServiceFactory, + service_name: &str, +) -> Result { + // We use configuration to determine if calls may be batched. If we have Batching + // configuration, then we check (batch_include()) if the current subgraph has batching enabled + // in configuration. If it does, we then start to process a potential batch. + // + // If we are processing a batch, then we'd like to park tasks here, but we can't park them whilst + // we have the context extensions lock held. That would be very bad... 
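+    // (Specifically, awaiting while the extensions lock is held would stall every
+    // other task that needs the context, so the guard below lives in a short
+    // scope and only the cloned BatchQuery escapes it.)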
+ // We grab the (potential) BatchQuery and then operate on it later + let opt_batch_query = { + let extensions_guard = context.extensions().lock(); + + // We need to make sure to remove the BatchQuery from the context as it holds a sender to + // the owning batch + extensions_guard + .get::() + .and_then(|batching_config| batching_config.batch_include(service_name).then_some(())) + .and_then(|_| extensions_guard.get::().cloned()) + .and_then(|bq| (!bq.finished()).then_some(bq)) + }; + + // If we have a batch query, then it's time for batching + if let Some(query) = opt_batch_query { + // Let the owning batch know that this query is ready to process, getting back the channel + // from which we'll eventually receive our response. + let response_rx = query.signal_progress(client_factory, request, body).await?; + + // Park this query until we have our response and pass it back up + response_rx + .await + .map_err(|err| FetchError::SubrequestBatchingError { + service: service_name.to_string(), + reason: format!("tx receive failed: {err}"), + })? + } else { + tracing::debug!("we called http"); + let client = client_factory.create(service_name); + call_single_http(request, body, context, client, service_name).await + } +} + +/// call_single_http makes http calls with modified graphql::Request (body) +pub(crate) async fn call_single_http( + request: SubgraphRequest, + body: graphql::Request, + context: Context, + client: crate::services::http::BoxService, + service_name: &str, +) -> Result { + let SubgraphRequest { + subgraph_request, .. + } = request; + + let operation_name = subgraph_request + .body() + .operation_name + .clone() + .unwrap_or_default(); + + let (parts, _) = subgraph_request.into_parts(); + let body = serde_json::to_string(&body)?; + tracing::debug!("our JSON body: {body:?}"); + let mut request = http::Request::from_parts(parts, Body::from(body)); + + request + .headers_mut() + .insert(CONTENT_TYPE, APPLICATION_JSON_HEADER_VALUE.clone()); + request + .headers_mut() + .append(ACCEPT, ACCEPT_GRAPHQL_JSON.clone()); + + let schema_uri = request.uri(); + let (host, port, path) = get_uri_details(schema_uri); + + let subgraph_req_span = tracing::info_span!("subgraph_request", + "otel.kind" = "CLIENT", + "net.peer.name" = %host, + "net.peer.port" = %port, + "http.route" = %path, + "http.url" = %schema_uri, + "net.transport" = "ip_tcp", + "apollo.subgraph.name" = %service_name, + "graphql.operation.name" = %operation_name, + ); + + // The graphql spec is lax about what strategy to use for processing responses: https://github.com/graphql/graphql-over-http/blob/main/spec/GraphQLOverHTTP.md#processing-the-response + // + // "If the response uses a non-200 status code and the media type of the response payload is application/json + // then the client MUST NOT rely on the body to be a well-formed GraphQL response since the source of the response + // may not be the server but instead some intermediary such as API gateways, proxies, firewalls, etc." + // + // The TLDR of this is that it's really asking us to do the best we can with whatever information we have with some modifications depending on content type. + // Our goal is to give the user the most relevant information possible in the response errors + // + // Rules: + // 1. If the content type of the response is not `application/json` or `application/graphql-response+json` then we won't try to parse. + // 2. If an HTTP status is not 2xx it will always be attached as a graphql error. + // 3. 
If the response type is `application/json` and status is not 2xx and the body the entire body will be output if the response is not valid graphql. + + let display_body = context.contains_key(LOGGING_DISPLAY_BODY); + + // TODO: Temporary solution to plug FileUploads plugin until 'http_client' will be fixed https://github.com/apollographql/router/pull/4666 + let request = file_uploads::http_request_wrapper(request).await; + + // Perform the actual fetch. If this fails then we didn't manage to make the call at all, so we can't do anything with it. + let (parts, content_type, body) = + do_fetch(client, &context, service_name, request, display_body) + .instrument(subgraph_req_span) + .await?; + + if display_body { + if let Some(Ok(b)) = &body { + tracing::info!( + response.body = %String::from_utf8_lossy(b), apollo.subgraph.name = %service_name, "Raw response body from subgraph {service_name:?} received" + ); + } + } + + let graphql_response = + http_response_to_graphql_response(service_name, content_type, body, &parts); let resp = http::Response::from_parts(parts, graphql_response); Ok(SubgraphResponse::new_from_response(resp, context)) } +#[derive(Clone, Debug)] enum ContentType { ApplicationJson, ApplicationGraphqlResponseJson, @@ -2373,4 +2735,162 @@ mod tests { assert_eq!(resp.response.body(), &expected_resp); } + + #[test] + fn it_gets_uri_details() { + let path = "https://example.com/path".parse().unwrap(); + let (host, port, path) = super::get_uri_details(&path); + + assert_eq!(host, "example.com"); + assert_eq!(port, 443); + assert_eq!(path, "/path"); + } + + #[test] + fn it_converts_ok_http_to_graphql() { + let (parts, body) = http::Response::builder() + .status(StatusCode::OK) + .body(None) + .unwrap() + .into_parts(); + let actual = super::http_response_to_graphql_response( + "test_service", + Ok(ContentType::ApplicationGraphqlResponseJson), + body, + &parts, + ); + + let expected = graphql::Response::builder().build(); + assert_eq!(actual, expected); + } + + #[test] + fn it_converts_error_http_to_graphql() { + let (parts, body) = http::Response::builder() + .status(StatusCode::IM_A_TEAPOT) + .body(None) + .unwrap() + .into_parts(); + let actual = super::http_response_to_graphql_response( + "test_service", + Ok(ContentType::ApplicationGraphqlResponseJson), + body, + &parts, + ); + + let expected = graphql::Response::builder() + .error( + super::FetchError::SubrequestHttpError { + status_code: Some(418), + service: "test_service".into(), + reason: "418: I'm a teapot".into(), + } + .to_graphql_error(None), + ) + .build(); + assert_eq!(actual, expected); + } + + #[test] + fn it_converts_http_with_body_to_graphql() { + let mut json = serde_json::json!({ + "data": { + "some_field": "some_value" + } + }); + + let (parts, body) = http::Response::builder() + .status(StatusCode::OK) + .body(Some(Ok(Bytes::from(json.to_string())))) + .unwrap() + .into_parts(); + + let actual = super::http_response_to_graphql_response( + "test_service", + Ok(ContentType::ApplicationGraphqlResponseJson), + body, + &parts, + ); + + let expected = graphql::Response::builder() + .data(json["data"].take()) + .build(); + assert_eq!(actual, expected); + } + + #[test] + fn it_converts_http_with_graphql_errors_to_graphql() { + let error = graphql::Error::builder() + .message("error was encountered for test") + .extension_code("SOME_EXTENSION") + .build(); + let mut json = serde_json::json!({ + "data": { + "some_field": "some_value", + "error_field": null, + }, + "errors": [error], + }); + + let (parts, body) = 
http::Response::builder() + .status(StatusCode::OK) + .body(Some(Ok(Bytes::from(json.to_string())))) + .unwrap() + .into_parts(); + + let actual = super::http_response_to_graphql_response( + "test_service", + Ok(ContentType::ApplicationGraphqlResponseJson), + body, + &parts, + ); + + let expected = graphql::Response::builder() + .data(json["data"].take()) + .error(error) + .build(); + assert_eq!(actual, expected); + } + + #[test] + fn it_converts_error_http_with_graphql_errors_to_graphql() { + let error = graphql::Error::builder() + .message("error was encountered for test") + .extension_code("SOME_EXTENSION") + .build(); + let mut json = serde_json::json!({ + "data": { + "some_field": "some_value", + "error_field": null, + }, + "errors": [error], + }); + + let (parts, body) = http::Response::builder() + .status(StatusCode::IM_A_TEAPOT) + .body(Some(Ok(Bytes::from(json.to_string())))) + .unwrap() + .into_parts(); + + let actual = super::http_response_to_graphql_response( + "test_service", + Ok(ContentType::ApplicationGraphqlResponseJson), + body, + &parts, + ); + + let expected = graphql::Response::builder() + .data(json["data"].take()) + .error( + super::FetchError::SubrequestHttpError { + status_code: Some(418), + service: "test_service".into(), + reason: "418: I'm a teapot".into(), + } + .to_graphql_error(None), + ) + .error(error) + .build(); + assert_eq!(actual, expected); + } } diff --git a/apollo-router/src/services/supergraph/service.rs b/apollo-router/src/services/supergraph/service.rs index 3019a1f986..4ba4c85455 100644 --- a/apollo-router/src/services/supergraph/service.rs +++ b/apollo-router/src/services/supergraph/service.rs @@ -24,6 +24,7 @@ use tracing::field; use tracing::Span; use tracing_futures::Instrument; +use crate::batching::BatchQuery; use crate::configuration::Batching; use crate::context::OPERATION_NAME; use crate::error::CacheResolverError; @@ -617,19 +618,33 @@ async fn plan_query( .insert::(doc); } - planning + let qpr = planning .call( query_planner::CachingRequest::builder() .query(query_str) .and_operation_name(operation_name) - .context(context) + .context(context.clone()) .build(), ) .instrument(tracing::info_span!( QUERY_PLANNING_SPAN_NAME, "otel.kind" = "INTERNAL" )) - .await + .await?; + + let batching = context.extensions().lock().get::().cloned(); + if let Some(batch_query) = batching { + if let Some(QueryPlannerContent::Plan { plan, .. 
}) = &qpr.content { + let query_hashes = plan.root.query_hashes()?; + batch_query + .set_query_hashes(query_hashes) + .await + .map_err(|e| CacheResolverError::BatchingError(e.to_string()))?; + tracing::debug!("batch registered: {}", batch_query); + } + } + + Ok(qpr) } fn clone_supergraph_request( diff --git a/apollo-router/src/uplink/license_enforcement.rs b/apollo-router/src/uplink/license_enforcement.rs index cc33346818..50b6c410c6 100644 --- a/apollo-router/src/uplink/license_enforcement.rs +++ b/apollo-router/src/uplink/license_enforcement.rs @@ -382,6 +382,10 @@ impl LicenseEnforcementReport { .path("$.preview_file_uploads") .name("File uploads plugin") .build(), + ConfigurationRestriction::builder() + .path("$.batching") + .name("Batching support") + .build(), ConfigurationRestriction::builder() .path("$.experimental_demand_control") .name("Demand control plugin") diff --git a/apollo-router/tests/fixtures/apollo_reports_batch.router.yaml b/apollo-router/tests/fixtures/apollo_reports_batch.router.yaml index fdbb1ed4dd..238b8a00dd 100644 --- a/apollo-router/tests/fixtures/apollo_reports_batch.router.yaml +++ b/apollo-router/tests/fixtures/apollo_reports_batch.router.yaml @@ -1,4 +1,4 @@ -experimental_batching: +batching: enabled: true mode: batch_http_link rhai: @@ -28,5 +28,3 @@ telemetry: send_variable_values: only: - "sendValue" - - diff --git a/apollo-router/tests/fixtures/batching/all_enabled.router.yaml b/apollo-router/tests/fixtures/batching/all_enabled.router.yaml new file mode 100644 index 0000000000..24c9818562 --- /dev/null +++ b/apollo-router/tests/fixtures/batching/all_enabled.router.yaml @@ -0,0 +1,11 @@ +# Simple config to enable batching for all subgraphs + +batching: + enabled: true + mode: batch_http_link + subgraph: + all: + enabled: true + +include_subgraph_errors: + all: true diff --git a/apollo-router/tests/fixtures/batching/block_request.rhai b/apollo-router/tests/fixtures/batching/block_request.rhai new file mode 100644 index 0000000000..c0ec2e6ae0 --- /dev/null +++ b/apollo-router/tests/fixtures/batching/block_request.rhai @@ -0,0 +1,10 @@ +// Simple rhai script to block a request for batching testing +fn execution_service(service) { + let request_callback = |request| { + if request.body.query.contains("failMe") { + throw "cancelled expected failure" + } + }; + + service.map_request(request_callback); +} diff --git a/apollo-router/tests/fixtures/batching/coprocessor.router.yaml b/apollo-router/tests/fixtures/batching/coprocessor.router.yaml new file mode 100644 index 0000000000..7292662239 --- /dev/null +++ b/apollo-router/tests/fixtures/batching/coprocessor.router.yaml @@ -0,0 +1,19 @@ +# Simple config to enable batching and a coprocessor for testing killed requests + +batching: + enabled: true + mode: batch_http_link + subgraph: + all: + enabled: true + +coprocessor: + url: http://127.0.0.1:REPLACEME # Will be overwritten by the test + subgraph: + all: + request: + service_name: true + body: true + +include_subgraph_errors: + all: true diff --git a/apollo-router/tests/fixtures/batching/rhai_script.router.yaml b/apollo-router/tests/fixtures/batching/rhai_script.router.yaml new file mode 100644 index 0000000000..b4b488be39 --- /dev/null +++ b/apollo-router/tests/fixtures/batching/rhai_script.router.yaml @@ -0,0 +1,15 @@ +# Simple config to enable batching and rhai scripts for testing + +batching: + enabled: true + mode: batch_http_link + subgraph: + all: + enabled: true + +rhai: + scripts: ./tests/fixtures/batching + main: block_request.rhai + 
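+# block_request.rhai (shown above) throws for any query containing "failMe",
+# cancelling that request before it reaches the subgraph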
+include_subgraph_errors: + all: true diff --git a/apollo-router/tests/fixtures/batching/schema.graphql b/apollo-router/tests/fixtures/batching/schema.graphql new file mode 100644 index 0000000000..0968c300bb --- /dev/null +++ b/apollo-router/tests/fixtures/batching/schema.graphql @@ -0,0 +1,56 @@ +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query +} + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +scalar join__FieldSet + +enum join__Graph { + A @join__graph(name: "a", url: "http://127.0.0.1:4005/a") + B @join__graph(name: "b", url: "http://127.0.0.1:4005/b") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Structure + @join__type(graph: A, key: "index") + @join__type(graph: B, key: "index") +{ + index: Int, +} + +type Query + @join__type(graph: A) + @join__type(graph: B) +{ + entryA(count: Int): Structure @join__field(graph: A) + entryB(count: Int): Structure @join__field(graph: B) +} diff --git a/apollo-router/tests/fixtures/batching/short_timeouts.router.yaml b/apollo-router/tests/fixtures/batching/short_timeouts.router.yaml new file mode 100644 index 0000000000..747688fa2d --- /dev/null +++ b/apollo-router/tests/fixtures/batching/short_timeouts.router.yaml @@ -0,0 +1,14 @@ +# Batching config with short timeouts for testing + +batching: + enabled: true + mode: batch_http_link + subgraph: + all: + enabled: true +traffic_shaping: + all: + timeout: 1s + +include_subgraph_errors: + all: true diff --git a/apollo-router/tests/integration/batching.rs b/apollo-router/tests/integration/batching.rs new file mode 100644 index 0000000000..a9e8e3234d --- /dev/null +++ b/apollo-router/tests/integration/batching.rs @@ -0,0 +1,1026 @@ +use apollo_router::graphql::Request; +use insta::assert_yaml_snapshot; +use itertools::Itertools; +use tower::BoxError; +use wiremock::ResponseTemplate; + +use crate::integration::common::ValueExt as _; + +const CONFIG: &str = include_str!("../fixtures/batching/all_enabled.router.yaml"); +const SHORT_TIMEOUTS_CONFIG: &str = include_str!("../fixtures/batching/short_timeouts.router.yaml"); + +fn test_is_enabled() -> bool { + std::env::var("TEST_APOLLO_KEY").is_ok() && std::env::var("TEST_APOLLO_GRAPH_REF").is_ok() +} + +#[tokio::test(flavor = "multi_thread")] +async fn it_supports_single_subgraph_batching() -> Result<(), BoxError> { + const REQUEST_COUNT: usize = 5; + + let requests: Vec<_> = (0..REQUEST_COUNT) + 
.map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryA(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }) + .collect(); + let responses = helper::run_test( + CONFIG, + &requests[..], + Some(helper::expect_batch), + None::, + ) + .await?; + + if test_is_enabled() { + // Make sure that we got back what we wanted + assert_yaml_snapshot!(responses, @r###" + --- + - data: + entryA: + index: 0 + - data: + entryA: + index: 1 + - data: + entryA: + index: 2 + - data: + entryA: + index: 3 + - data: + entryA: + index: 4 + "###); + } + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn it_supports_multi_subgraph_batching() -> Result<(), BoxError> { + const REQUEST_COUNT: usize = 3; + + let requests_a = (0..REQUEST_COUNT).map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryA(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }); + let requests_b = (0..REQUEST_COUNT).map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryB(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }); + + // Interleave requests so that we can verify that they get properly separated + let requests: Vec<_> = requests_a.interleave(requests_b).collect(); + let responses = helper::run_test( + CONFIG, + &requests, + Some(helper::expect_batch), + Some(helper::expect_batch), + ) + .await?; + + if test_is_enabled() { + // Make sure that we got back what we wanted + assert_yaml_snapshot!(responses, @r###" + --- + - data: + entryA: + index: 0 + - data: + entryB: + index: 0 + - data: + entryA: + index: 1 + - data: + entryB: + index: 1 + - data: + entryA: + index: 2 + - data: + entryB: + index: 2 + "###); + } + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn it_batches_with_errors_in_single_graph() -> Result<(), BoxError> { + const REQUEST_COUNT: usize = 4; + + let requests: Vec<_> = (0..REQUEST_COUNT) + .map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryA(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }) + .collect(); + let responses = helper::run_test( + CONFIG, + &requests[..], + Some(helper::fail_second_batch_request), + None::, + ) + .await?; + + if test_is_enabled() { + // Make sure that we got back what we wanted + assert_yaml_snapshot!(responses, @r###" + --- + - data: + entryA: + index: 0 + - errors: + - message: expected error in A + - data: + entryA: + index: 2 + - data: + entryA: + index: 3 + "###); + } + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn it_batches_with_errors_in_multi_graph() -> Result<(), BoxError> { + const REQUEST_COUNT: usize = 3; + + let requests_a = (0..REQUEST_COUNT).map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryA(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }); + let requests_b = (0..REQUEST_COUNT).map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryB(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }); + + // Interleave requests so that we can verify that they get properly separated + let requests: Vec<_> = requests_a.interleave(requests_b).collect(); + let responses = helper::run_test( + CONFIG, + &requests, + Some(helper::fail_second_batch_request), + Some(helper::fail_second_batch_request), + ) + .await?; + + if test_is_enabled() { + assert_yaml_snapshot!(responses, @r###" + --- + - data: + entryA: + index: 0 + - data: + entryB: + index: 0 + - errors: + - message: expected error in A + - errors: + - 
message: expected error in B + - data: + entryA: + index: 2 + - data: + entryB: + index: 2 + "###); + } + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn it_handles_short_timeouts() -> Result<(), BoxError> { + const REQUEST_COUNT: usize = 2; + + let requests_a = (0..REQUEST_COUNT).map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryA(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }); + let requests_b = (0..REQUEST_COUNT).map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryB(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }); + + // Interleave requests so that we can verify that they get properly separated + // Have the B subgraph timeout + let requests: Vec<_> = requests_a.interleave(requests_b).collect(); + let responses = helper::run_test( + SHORT_TIMEOUTS_CONFIG, + &requests, + Some(helper::expect_batch), + Some(helper::never_respond), + ) + .await?; + + if test_is_enabled() { + assert_yaml_snapshot!(responses, @r###" + --- + - data: + entryA: + index: 0 + - errors: + - message: "HTTP fetch failed from 'b': request timed out" + path: [] + extensions: + code: SUBREQUEST_HTTP_ERROR + service: b + reason: request timed out + - data: + entryA: + index: 1 + - errors: + - message: "HTTP fetch failed from 'b': request timed out" + path: [] + extensions: + code: SUBREQUEST_HTTP_ERROR + service: b + reason: request timed out + "###); + } + + Ok(()) +} + +// This test makes two simultaneous requests to the router, with the first +// being never resolved. This is to make sure that the router doesn't hang while +// processing a separate batch request. +#[tokio::test(flavor = "multi_thread")] +async fn it_handles_indefinite_timeouts() -> Result<(), BoxError> { + const REQUEST_COUNT: usize = 3; + + let requests_a: Vec<_> = (0..REQUEST_COUNT) + .map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryA(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }) + .collect(); + let requests_b: Vec<_> = (0..REQUEST_COUNT) + .map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryB(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }) + .collect(); + + let responses_a = helper::run_test( + SHORT_TIMEOUTS_CONFIG, + &requests_a, + Some(helper::expect_batch), + None::, + ); + let responses_b = helper::run_test( + SHORT_TIMEOUTS_CONFIG, + &requests_b, + None::, + Some(helper::never_respond), + ); + + // Run both requests simultaneously + let (results_a, results_b) = futures::try_join!(responses_a, responses_b)?; + + // verify the output + let responses = [results_a, results_b].concat(); + if test_is_enabled() { + assert_yaml_snapshot!(responses, @r###" + --- + - data: + entryA: + index: 0 + - data: + entryA: + index: 1 + - data: + entryA: + index: 2 + - errors: + - message: "HTTP fetch failed from 'b': request timed out" + path: [] + extensions: + code: SUBREQUEST_HTTP_ERROR + service: b + reason: request timed out + - errors: + - message: "HTTP fetch failed from 'b': request timed out" + path: [] + extensions: + code: SUBREQUEST_HTTP_ERROR + service: b + reason: request timed out + - errors: + - message: "HTTP fetch failed from 'b': request timed out" + path: [] + extensions: + code: SUBREQUEST_HTTP_ERROR + service: b + reason: request timed out + "###); + } + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn it_handles_cancelled_by_rhai() -> Result<(), BoxError> { + const REQUEST_COUNT: usize = 2; + const RHAI_CONFIG: &str = 
include_str!("../fixtures/batching/rhai_script.router.yaml"); + + let requests_a = (0..REQUEST_COUNT).map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryA(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }); + let requests_b = (0..REQUEST_COUNT).map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}_failMe{{ entryB(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }); + + // Interleave requests so that we can verify that they get properly separated + // Have the B subgraph get all of its requests cancelled by a rhai script + let requests: Vec<_> = requests_a.interleave(requests_b).collect(); + let responses = helper::run_test( + RHAI_CONFIG, + &requests, + Some(helper::expect_batch), + None::, + ) + .await?; + + if test_is_enabled() { + assert_yaml_snapshot!(responses, @r###" + --- + - data: + entryA: + index: 0 + - errors: + - message: "rhai execution error: 'Runtime error: cancelled expected failure (line 5, position 13)\nin closure call'" + - data: + entryA: + index: 1 + - errors: + - message: "rhai execution error: 'Runtime error: cancelled expected failure (line 5, position 13)\nin closure call'" + "###); + } + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn it_handles_single_request_cancelled_by_rhai() -> Result<(), BoxError> { + const REQUEST_COUNT: usize = 2; + const RHAI_CONFIG: &str = include_str!("../fixtures/batching/rhai_script.router.yaml"); + + let requests_a = (0..REQUEST_COUNT).map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryA(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }); + let requests_b = (0..REQUEST_COUNT).map(|index| { + Request::fake_builder() + .query(format!( + "query {}{{ entryB(count: {REQUEST_COUNT}) {{ index }} }}", + (index == 1) + .then_some("failMe".to_string()) + .unwrap_or(format!("op{index}")) + )) + .build() + }); + + // Custom validation for subgraph B + fn handle_b(request: &wiremock::Request) -> ResponseTemplate { + let requests: Vec = request.body_json().unwrap(); + + // We should have gotten all of the regular elements minus the second + assert_eq!(requests.len(), REQUEST_COUNT - 1); + + // Each element should have be for the specified subgraph and should have a field selection + // of index. The index should be 0..n without 1. 
+ // Note: The router appends info to the query, so we append it at this check + for (request, index) in requests.into_iter().zip((0..).filter(|&i| i != 1)) { + assert_eq!( + request.query, + Some(format!( + "query op{index}__b__0{{entryB(count:{REQUEST_COUNT}){{index}}}}", + )) + ); + } + + ResponseTemplate::new(200).set_body_json( + (0..REQUEST_COUNT) + .filter(|&i| i != 1) + .map(|index| { + serde_json::json!({ + "data": { + "entryB": { + "index": index + } + } + }) + }) + .collect::>(), + ) + } + + // Interleave requests so that we can verify that they get properly separated + // Have the B subgraph get all of its requests cancelled by a rhai script + let requests: Vec<_> = requests_a.interleave(requests_b).collect(); + let responses = helper::run_test( + RHAI_CONFIG, + &requests, + Some(helper::expect_batch), + Some(handle_b), + ) + .await?; + + if test_is_enabled() { + assert_yaml_snapshot!(responses, @r###" + --- + - data: + entryA: + index: 0 + - data: + entryB: + index: 0 + - data: + entryA: + index: 1 + - errors: + - message: "rhai execution error: 'Runtime error: cancelled expected failure (line 5, position 13)\nin closure call'" + "###); + } + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn it_handles_cancelled_by_coprocessor() -> Result<(), BoxError> { + const REQUEST_COUNT: usize = 2; + const COPROCESSOR_CONFIG: &str = include_str!("../fixtures/batching/coprocessor.router.yaml"); + + let requests_a = (0..REQUEST_COUNT).map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryA(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }); + let requests_b = (0..REQUEST_COUNT).map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryB(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }); + + // Spin up a coprocessor for cancelling requests to A + let coprocessor = wiremock::MockServer::builder().start().await; + let subgraph_a_canceller = wiremock::Mock::given(wiremock::matchers::method("POST")) + .respond_with(|request: &wiremock::Request| { + let info: serde_json::Value = request.body_json().unwrap(); + let subgraph = info + .as_object() + .unwrap() + .get("serviceName") + .unwrap() + .as_string() + .unwrap(); + + // Pass through the request if the subgraph isn't 'A' + let response = if subgraph != "a" { + info + } else { + // Patch it otherwise to stop execution + let mut res = info; + let block = res.as_object_mut().unwrap(); + block.insert("control".to_string(), serde_json::json!({ "break": 403 })); + block.insert( + "body".to_string(), + serde_json::json!({ + "errors": [{ + "message": "Subgraph A is not allowed", + "extensions": { + "code": "ERR_NOT_ALLOWED", + }, + }], + }), + ); + + res + }; + ResponseTemplate::new(200).set_body_json(response) + }) + .named("coprocessor POST /"); + coprocessor.register(subgraph_a_canceller).await; + + // Make sure to patch the config with the coprocessor's port + let config = COPROCESSOR_CONFIG.replace("REPLACEME", &coprocessor.address().port().to_string()); + + // Interleave requests so that we can verify that they get properly separated + // Have the A subgraph get all of its requests cancelled by a coprocessor + let requests: Vec<_> = requests_a.interleave(requests_b).collect(); + let responses = helper::run_test( + config.as_str(), + &requests, + None::, + Some(helper::expect_batch), + ) + .await?; + + if test_is_enabled() { + assert_yaml_snapshot!(responses, @r###" + --- + - errors: + - message: Subgraph A is not allowed + extensions: + code: 
ERR_NOT_ALLOWED + - data: + entryB: + index: 0 + - errors: + - message: Subgraph A is not allowed + extensions: + code: ERR_NOT_ALLOWED + - data: + entryB: + index: 1 + "###); + } + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn it_handles_single_request_cancelled_by_coprocessor() -> Result<(), BoxError> { + const REQUEST_COUNT: usize = 4; + const COPROCESSOR_CONFIG: &str = include_str!("../fixtures/batching/coprocessor.router.yaml"); + + let requests_a = (0..REQUEST_COUNT).map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryA(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }); + let requests_b = (0..REQUEST_COUNT).map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryB(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }); + + // Spin up a coprocessor for cancelling requests to A + let coprocessor = wiremock::MockServer::builder().start().await; + let subgraph_a_canceller = wiremock::Mock::given(wiremock::matchers::method("POST")) + .respond_with(|request: &wiremock::Request| { + let info: serde_json::Value = request.body_json().unwrap(); + let subgraph = info + .as_object() + .unwrap() + .get("serviceName") + .unwrap() + .as_string() + .unwrap(); + let query = info + .as_object() + .unwrap() + .get("body") + .unwrap() + .as_object() + .unwrap() + .get("query") + .unwrap() + .as_string() + .unwrap(); + + // Cancel the request if we're in subgraph A, index 2 + let response = if subgraph == "a" && query.contains("op2") { + // Patch it to stop execution + let mut res = info; + let block = res.as_object_mut().unwrap(); + block.insert("control".to_string(), serde_json::json!({ "break": 403 })); + block.insert( + "body".to_string(), + serde_json::json!({ + "errors": [{ + "message": "Subgraph A index 2 is not allowed", + "extensions": { + "code": "ERR_NOT_ALLOWED", + }, + }], + }), + ); + + res + } else { + info + }; + ResponseTemplate::new(200).set_body_json(response) + }) + .named("coprocessor POST /"); + coprocessor.register(subgraph_a_canceller).await; + + // We aren't expecting the whole batch anymore, so we need a handler here for it + fn handle_a(request: &wiremock::Request) -> ResponseTemplate { + let requests: Vec = request.body_json().unwrap(); + + // We should have gotten all of the regular elements minus the third + assert_eq!(requests.len(), REQUEST_COUNT - 1); + + // Each element should have be for the specified subgraph and should have a field selection + // of index. The index should be 0..n without 2. 
+ // Note: The router appends info to the query, so we append it at this check + for (request, index) in requests.into_iter().zip((0..).filter(|&i| i != 2)) { + assert_eq!( + request.query, + Some(format!( + "query op{index}__a__0{{entryA(count:{REQUEST_COUNT}){{index}}}}", + )) + ); + } + + ResponseTemplate::new(200).set_body_json( + (0..REQUEST_COUNT) + .filter(|&i| i != 2) + .map(|index| { + serde_json::json!({ + "data": { + "entryA": { + "index": index + } + } + }) + }) + .collect::>(), + ) + } + + // Make sure to patch the config with the coprocessor's port + let config = COPROCESSOR_CONFIG.replace("REPLACEME", &coprocessor.address().port().to_string()); + + // Interleave requests so that we can verify that they get properly separated + // Have the A subgraph get all of its requests cancelled by a coprocessor + let requests: Vec<_> = requests_a.interleave(requests_b).collect(); + let responses = helper::run_test( + config.as_str(), + &requests, + Some(handle_a), + Some(helper::expect_batch), + ) + .await?; + + if test_is_enabled() { + assert_yaml_snapshot!(responses, @r###" + --- + - data: + entryA: + index: 0 + - data: + entryB: + index: 0 + - data: + entryA: + index: 1 + - data: + entryB: + index: 1 + - errors: + - message: Subgraph A index 2 is not allowed + extensions: + code: ERR_NOT_ALLOWED + - data: + entryB: + index: 2 + - data: + entryA: + index: 3 + - data: + entryB: + index: 3 + "###); + } + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn it_handles_single_invalid_graphql() -> Result<(), BoxError> { + const REQUEST_COUNT: usize = 5; + + let mut requests: Vec<_> = (0..REQUEST_COUNT) + .map(|index| { + Request::fake_builder() + .query(format!( + "query op{index}{{ entryA(count: {REQUEST_COUNT}) {{ index }} }}" + )) + .build() + }) + .collect(); + + // Mess up the 4th one + requests[3].query = Some("query op3".into()); + + // We aren't expecting the whole batch anymore, so we need a handler here for it + fn handle_a(request: &wiremock::Request) -> ResponseTemplate { + let requests: Vec = request.body_json().unwrap(); + + // We should have gotten all of the regular elements minus the third + assert_eq!(requests.len(), REQUEST_COUNT - 1); + + // Each element should have be for the specified subgraph and should have a field selection + // of index. The index should be 0..n without 3. 
+ // Note: The router appends info to the query, so we append it at this check + for (request, index) in requests.into_iter().zip((0..).filter(|&i| i != 3)) { + assert_eq!( + request.query, + Some(format!( + "query op{index}__a__0{{entryA(count:{REQUEST_COUNT}){{index}}}}", + )) + ); + } + + ResponseTemplate::new(200).set_body_json( + (0..REQUEST_COUNT) + .filter(|&i| i != 3) + .map(|index| { + serde_json::json!({ + "data": { + "entryA": { + "index": index + } + } + }) + }) + .collect::>(), + ) + } + + let responses = helper::run_test( + CONFIG, + &requests[..], + Some(handle_a), + None::, + ) + .await?; + + if test_is_enabled() { + // Make sure that we got back what we wanted + assert_yaml_snapshot!(responses, @r###" + --- + - data: + entryA: + index: 0 + - data: + entryA: + index: 1 + - data: + entryA: + index: 2 + - errors: + - message: "parsing error: syntax error: expected a Selection Set" + locations: + - line: 1 + column: 10 + extensions: + code: PARSING_ERROR + - data: + entryA: + index: 4 + "###); + } + + Ok(()) +} + +/// Utility methods for these tests +mod helper { + use std::time::Duration; + + use apollo_router::graphql::Request; + use apollo_router::graphql::Response; + use tower::BoxError; + use wiremock::matchers; + use wiremock::MockServer; + use wiremock::Respond; + use wiremock::ResponseTemplate; + + use super::test_is_enabled; + use crate::integration::common::IntegrationTest; + + /// Helper type for specifying a valid handler + pub type Handler = fn(&wiremock::Request) -> ResponseTemplate; + + /// Helper method for creating a wiremock handler from a handler + /// + /// If the handler is `None`, then the fallback is to always fail any request to the mock server + macro_rules! make_handler { + ($subgraph_path:expr, $handler:expr) => { + if let Some(f) = $handler { + wiremock::Mock::given(matchers::method("POST")) + .and(matchers::path($subgraph_path)) + .respond_with(f) + .expect(1) + .named(stringify!(batching POST $subgraph_path)) + } else { + wiremock::Mock::given(matchers::method("POST")) + .and(matchers::path($subgraph_path)) + .respond_with(always_fail) + .expect(0) + .named(stringify!(batching POST $subgraph_path)) + } + } + } + + /// Set up the integration test stack + pub async fn run_test( + config: &str, + requests: &[Request], + handler_a: Option, + handler_b: Option, + ) -> Result, BoxError> { + // Ensure that we have the test keys before running + // Note: The [IntegrationTest] ensures that these test credentials get + // set before running the router. 
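+        // (When the test keys are absent we return an empty response list, so
+        // these tests become no-ops on environments without credentials.)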
+        if !test_is_enabled() {
+            return Ok(Vec::new());
+        };
+
+        // Create a wiremock server for each handler
+        let mock_server_a = MockServer::start().await;
+        let mock_server_b = MockServer::start().await;
+        mock_server_a.register(make_handler!("/a", handler_a)).await;
+        mock_server_b.register(make_handler!("/b", handler_b)).await;
+
+        // Start up the router with the mocked subgraphs
+        let mut router = IntegrationTest::builder()
+            .config(config)
+            .supergraph("tests/fixtures/batching/schema.graphql")
+            .subgraph_override("a", format!("{}/a", mock_server_a.uri()))
+            .subgraph_override("b", format!("{}/b", mock_server_b.uri()))
+            .build()
+            .await;
+
+        router.start().await;
+        router.assert_started().await;
+
+        // Execute the request
+        let request = serde_json::to_value(requests)?;
+        let (_span, response) = router.execute_query(&request).await;
+
+        serde_json::from_slice::<Vec<Response>>(&response.bytes().await?).map_err(BoxError::from)
+    }
+
+    /// Subgraph handler for receiving a batch of requests
+    pub fn expect_batch(request: &wiremock::Request) -> ResponseTemplate {
+        let requests: Vec<Request> = request.body_json().unwrap();
+
+        // Extract info about this operation
+        let (subgraph, count): (String, usize) = {
+            let re = regex::Regex::new(r"entry([AB])\(count:([0-9]+)\)").unwrap();
+            let captures = re.captures(requests[0].query.as_ref().unwrap()).unwrap();
+
+            (captures[1].to_string(), captures[2].parse().unwrap())
+        };
+
+        // We should have gotten `count` elements
+        assert_eq!(requests.len(), count);
+
+        // Each element should be for the specified subgraph and should have a field selection
+        // of index.
+        // Note: The router appends info to the query, so we account for it in this check
+        for (index, request) in requests.into_iter().enumerate() {
+            assert_eq!(
+                request.query,
+                Some(format!(
+                    "query op{index}__{}__0{{entry{}(count:{count}){{index}}}}",
+                    subgraph.to_lowercase(),
+                    subgraph
+                ))
+            );
+        }
+
+        ResponseTemplate::new(200).set_body_json(
+            (0..count)
+                .map(|index| {
+                    serde_json::json!({
+                        "data": {
+                            format!("entry{subgraph}"): {
+                                "index": index
+                            }
+                        }
+                    })
+                })
+                .collect::<Vec<_>>(),
+        )
+    }
+
+    /// Handler that always returns an error for the second batch field
+    pub fn fail_second_batch_request(request: &wiremock::Request) -> ResponseTemplate {
+        let requests: Vec<Request> = request.body_json().unwrap();
+
+        // Extract info about this operation
+        let (subgraph, count): (String, usize) = {
+            let re = regex::Regex::new(r"entry([AB])\(count:([0-9]+)\)").unwrap();
+            let captures = re.captures(requests[0].query.as_ref().unwrap()).unwrap();
+
+            (captures[1].to_string(), captures[2].parse().unwrap())
+        };
+
+        // We should have gotten `count` elements
+        assert_eq!(requests.len(), count);
+
+        // Create the response with the second element as an error
+        let responses = {
+            let mut rs: Vec<_> = (0..count)
+                .map(|index| {
+                    serde_json::json!({
+                        "data": {
+                            format!("entry{subgraph}"): {
+                                "index": index
+                            }
+                        }
+                    })
+                })
+                .collect();
+
+            rs[1] = serde_json::json!({ "errors": [{ "message": format!("expected error in {subgraph}") }] });
+            rs
+        };
+
+        // Respond with an error on the second element but valid data for the rest
+        ResponseTemplate::new(200).set_body_json(responses)
+    }
+
+    /// Subgraph handler that delays indefinitely
+    ///
+    /// Useful for testing timeouts at the batch level
+    pub fn never_respond(request: &wiremock::Request) -> ResponseTemplate {
+        let requests: Vec<Request> = request.body_json().unwrap();
+
+        // Extract info about this operation
+        let (_, count): (String, usize) = {
+            let re =
regex::Regex::new(r"entry([AB])\(count:([0-9]+)\)").unwrap(); + let captures = re.captures(requests[0].query.as_ref().unwrap()).unwrap(); + + (captures[1].to_string(), captures[2].parse().unwrap()) + }; + + // We should have gotten `count` elements + assert_eq!(requests.len(), count); + + // Respond as normal but with a long delay + ResponseTemplate::new(200).set_delay(Duration::from_secs(365 * 24 * 60 * 60)) + } + + /// Subgraph handler that always fails + /// + /// Useful for subgraphs tests that should never actually be called + fn always_fail(_request: &wiremock::Request) -> ResponseTemplate { + ResponseTemplate::new(400).set_body_json(serde_json::json!({ + "errors": [{ + "message": "called into subgraph that should not have happened", + }] + })) + } +} diff --git a/apollo-router/tests/integration/mod.rs b/apollo-router/tests/integration/mod.rs index 80ee7c18f5..97937bac53 100644 --- a/apollo-router/tests/integration/mod.rs +++ b/apollo-router/tests/integration/mod.rs @@ -1,3 +1,4 @@ +mod batching; #[path = "../common.rs"] pub(crate) mod common; pub(crate) use common::IntegrationTest; diff --git a/apollo-router/tests/integration/snapshots/integration_tests__integration__lifecycle__cli_config_experimental.snap b/apollo-router/tests/integration/snapshots/integration_tests__integration__lifecycle__cli_config_experimental.snap index e98c33cfce..928c8e8cb8 100644 --- a/apollo-router/tests/integration/snapshots/integration_tests__integration__lifecycle__cli_config_experimental.snap +++ b/apollo-router/tests/integration/snapshots/integration_tests__integration__lifecycle__cli_config_experimental.snap @@ -9,7 +9,6 @@ stderr: stdout: List of all experimental configurations with related GitHub discussions: - - experimental_batching: https://github.com/apollographql/router/discussions/3840 - experimental_response_trace_id: https://github.com/apollographql/router/discussions/2147 - experimental_retry: https://github.com/apollographql/router/discussions/2241 - experimental_when_header: https://github.com/apollographql/router/discussions/1961 diff --git a/docs/source/config.json b/docs/source/config.json index 1b247ec085..c4b9a039bd 100644 --- a/docs/source/config.json +++ b/docs/source/config.json @@ -79,7 +79,7 @@ "Query batching": [ "/executing-operations/query-batching", [ - "experimental" + "enterprise" ] ], "GraphQL Subscriptions": { diff --git a/docs/source/configuration/traffic-shaping.mdx b/docs/source/configuration/traffic-shaping.mdx index 25c2e0df98..df53d9c34c 100644 --- a/docs/source/configuration/traffic-shaping.mdx +++ b/docs/source/configuration/traffic-shaping.mdx @@ -66,7 +66,7 @@ You can change the default timeout for client requests to the router like so: ```yaml title="router.yaml" traffic_shaping: - router: + router: timeout: 50s # If client requests to the router take more than 50 seconds, cancel the request (30 seconds by default) ``` @@ -74,7 +74,7 @@ You can change the default timeout for all requests between the router and subgr ```yaml title="router.yaml" traffic_shaping: - all: + all: timeout: 50s # If subgraph requests take more than 50 seconds, cancel the request (30 seconds by default) ``` @@ -93,7 +93,7 @@ Compression is automatically supported on the client side, depending on the `Acc The Apollo Router has _experimental_ support for receiving client query batches: ```yaml title="router.yaml" -experimental_batching: +batching: enabled: true mode: batch_http_link ``` diff --git a/docs/source/executing-operations/query-batching.mdx 
b/docs/source/executing-operations/query-batching.mdx index 2fdebf80a5..e9b6ed9537 100644 --- a/docs/source/executing-operations/query-batching.mdx +++ b/docs/source/executing-operations/query-batching.mdx @@ -1,9 +1,9 @@ --- title: Query batching -description: Receive query batches with the Apollo Router +description: Receive query batches with the Apollo Router --- - + Learn about query batching and how to configure the Apollo Router to receive query batches. @@ -11,24 +11,36 @@ Learn about query batching and how to configure the Apollo Router to receive que Modern applications often require several requests to render a single page. This is usually the result of a component-based architecture where individual micro-frontends (MFE) make requests separately to fetch data relevant to them. Not only does this cause a performance overheadโ€”different components may be requesting the same dataโ€”it can also cause a consistency issue. To combat this, MFE-based UIs batch multiple client operations, issued close together, into a single HTTP request. This is supported in Apollo Client and Apollo Server. -The Apollo Router supports client query batching. If youโ€™re using Apollo Client, you can leverage the built-in support for batching to reduce the number of individual operations sent to the router. +The router's batching support is provided by two sets of functionality: + - client batching + - subgraph batching -Once configured, Apollo Client automatically combines multiple operations into a single HTTP request. The number of operations within a batch is client-configurable, including the maximum number in a batch and the maximum duration to wait for operations to accumulate before sending the batch. +With client batching, the router accepts batched requests from a client and processes each request of a batch separately. Consequently, the router doesn't present requests to subgraphs in batch form, so subgraphs must process the requests of a batch individually. -The Apollo Router must be configured to receive query batches, otherwise it rejects them. When processing a batch, the router deserializes and processes each operation of a batch independently, and it responds to the client only after all operations of the batch have been completed. Each operation executes concurrently with respect to other operations in the batch. +With subgraph batching, the router analyzes input client batch requests and issues batch requests to subgraphs. Subgraph batching is an extension to client batching and requires participating subgraphs to support batching requests. See the examples below to see illustrations of how this works in practice. -## Configure query batching +The Apollo Router supports client and subgraph query batching. + +If youโ€™re using Apollo Client, you can leverage the built-in support for batching to reduce the number of individual operations sent to the router. + +Once configured, Apollo Client automatically combines multiple operations into a single HTTP request. The number of operations within a batch is client-configurable, including the maximum number in a batch and the maximum duration to wait for operations to accumulate before sending the batch. + +The Apollo Router must be configured to receive query batches, otherwise it rejects them. When processing a batch, the router deserializes and processes each operation of a batch independently. It responds to the client only after all operations of the batch have been completed. 
Each operation executes concurrently with respect to other operations in the batch. + +## Configure client query batching Both the Apollo Router and client need to be configured to support query batching. ### Configure router -By default, receiving client query batches is _not_ enabled in the Apollo Router. +#### Client query batching + +By default, receiving client query batches is _not_ enabled in the Apollo Router. To enable query batching, set the following fields in your `router.yaml` configuration file: ```yaml title="router.yaml" -experimental_batching: +batching: enabled: true mode: batch_http_link ``` @@ -38,6 +50,138 @@ experimental_batching: | `enabled` | Flag to enable reception of client query batches | boolean | `false` | | `mode` | Supported client batching mode | `batch_http_link`: the client uses Apollo Link and its [`BatchHttpLink`](/react/api/link/apollo-link-batch-http) link. | No Default | +#### Subgraph query batching + +If client query batching is enabled, and the router's subgraphs [support query batching](/apollo-server/api/apollo-server#allowbatchedhttprequests), then subgraph query batching can be enabled by setting the following fields in your `router.yaml` configuration file: + +```yaml title="router.all_enabled.yaml" +batching: + enabled: true + mode: batch_http_link + subgraph: + # Enable batching on all subgraphs + all: + enabled: true +``` + +```yaml title="router.yaml" +batching: + enabled: true + mode: batch_http_link + subgraph: + # Disable batching on all subgraphs + all: + enabled: false + # Configure (override) batching support per subgraph + subgraphs: + subgraph_1: + enabled: true + subgraph_2: + enabled: true +``` + + + +- The router can be configured to support batching either for all subgraphs or individually per subgraph. + +- There are limitations on the ability of the router to preserve batches from the client request into the subgraph requests. In particular, certain forms of queries require data to be present before they are processed, so the router can only generate batches from queries which don't contain such constraints. This may result in the router issuing multiple batches or requests. + +- If [query deduplication](../configuration/traffic-shaping/#query-deduplication) is enabled, it will not apply to batched queries. Batching will take precedence over query deduplication. Query deduplication will still be performed for non-batched queries. + + + +##### Example: simple subgraph batching + +This example shows how the router can batch subgraph requests in the most efficient scenario, where the queries of a batch don't have required fetch constraints. + +Assume the federated graph contains three subgraphs: `accounts`, `products`, and `reviews`.
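On the wire, a client batch is just a JSON array of standard GraphQL request objects. As a minimal illustrative sketch (not router code; it assumes only the `serde_json` crate), a payload like the fifteen-operation batch below could be assembled like this:

```rust
use serde_json::{json, Value};

// Build a batch of alternating `me { id }` / `me { name }` operations,
// mirroring the fifteen-operation payload shown below.
fn build_batch(count: usize) -> Value {
    let ops: Vec<Value> = (1..=count)
        .map(|i| {
            let field = if i % 2 == 1 { "id" } else { "name" };
            json!({ "query": format!("query MeQuery{i} {{\n me {{\n {field}\n }}\n}}") })
        })
        .collect();
    Value::Array(ops)
}

fn main() {
    println!("{}", build_batch(15));
}
```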
+ +The input client query to the federated graph: + +```json title="simple-batch.json" +[ + {"query":"query MeQuery1 {\n me {\n id\n }\n}"}, + {"query":"query MeQuery2 {\n me {\n name\n }\n}"}, + {"query":"query MeQuery3 {\n me {\n id\n }\n}"}, + {"query":"query MeQuery4 {\n me {\n name\n }\n}"}, + {"query":"query MeQuery5 {\n me {\n id\n }\n}"}, + {"query":"query MeQuery6 {\n me {\n name\n }\n}"}, + {"query":"query MeQuery7 {\n me {\n id\n }\n}"}, + {"query":"query MeQuery8 {\n me {\n name\n }\n}"}, + {"query":"query MeQuery9 {\n me {\n id\n }\n}"}, + {"query":"query MeQuery10 {\n me {\n name\n }\n}"}, + {"query":"query MeQuery11 {\n me {\n id\n }\n}"}, + {"query":"query MeQuery12 {\n me {\n name\n }\n}"}, + {"query":"query MeQuery13 {\n me {\n id\n }\n}"}, + {"query":"query MeQuery14 {\n me {\n name\n }\n}"}, + {"query":"query MeQuery15 {\n me {\n id\n }\n}"} +] +``` + +From the input query, the router generates a set of subgraph queries: +``` +"query MeQuery1__accounts__0{me{id}}", +"query MeQuery2__accounts__0{me{name}}", +"query MeQuery3__accounts__0{me{id}}", +"query MeQuery4__accounts__0{me{name}}", +"query MeQuery5__accounts__0{me{id}}", +"query MeQuery6__accounts__0{me{name}}", +"query MeQuery7__accounts__0{me{id}}", +"query MeQuery8__accounts__0{me{name}}", +"query MeQuery9__accounts__0{me{id}}", +"query MeQuery10__accounts__0{me{name}}", +"query MeQuery11__accounts__0{me{id}}", +"query MeQuery12__accounts__0{me{name}}", +"query MeQuery13__accounts__0{me{id}}", +"query MeQuery14__accounts__0{me{name}}", +"query MeQuery15__accounts__0{me{id}}", +``` +All of the queries can be combined into a single batch. So instead of 15 (non-batch) subgraph fetches, the router only has to make one fetch. + +| Subgraph | Fetch Count (without)| Fetch Count (with) | +|----------|----------------------|--------------------| +| accounts | 15 | 1 | + +##### Example: complex subgraph batching + +This example shows how the router might batch subgraph requests for a graph, where the client batch contains a query for an entity. + +Assume the federated graph contains three subgraphs: `accounts`, `products`, and `reviews`. + +The input client query to the federated graph: + +```json title="federated-batch.json" +[ + {"query":"query MeQuery1 {\n me {\n id\n }\n}"}, + {"query":"query MeQuery2 {\n me {\n reviews {\n body\n }\n }\n}"}, + {"query":"query MeQuery3 {\n topProducts {\n upc\n reviews {\n author {\n name\n }\n }\n }\n me {\n name\n }\n}"}, + {"query":"query MeQuery4 {\n me {\n name\n }\n}"}, + {"query":"query MeQuery5 {\n me {\n id\n }\n}"} +] +``` + +From the input query, the router generates a set of subgraph queries: +``` +"query MeQuery1__accounts__0{me{id}}", +"query MeQuery2__accounts__0{me{__typename id}}", +"query MeQuery3__products__0{topProducts{__typename upc}}", +"query MeQuery3__accounts__3{me{name}}", +"query MeQuery4__accounts__0{me{name}}", +"query MeQuery5__accounts__0{me{id}}", +"query MeQuery2__reviews__1($representations:[_Any!]!){_entities(representations:$representations){...on User{reviews{body}}}}", +"query MeQuery3__reviews__1($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{author{__typename id}}}}}", +"query MeQuery3__accounts__2($representations:[_Any!]!){_entities(representations:$representations){...on User{name}}}", +``` +The first six queries can be combined into two batches: one for `accounts` and one for `products`. They must be fetched before the final three queries can be executed individually.
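The reason the final three fetches can't join those batches is that each takes a `$representations` variable built from the output of an earlier fetch. As a purely illustrative sketch (the `upc` values here are invented), the variables for the `MeQuery3` reviews fetch are shaped roughly like this and can only be constructed once the `products` fetch has returned:

```rust
use serde_json::json;

fn main() {
    // Entity representations derived from the earlier `products` fetch;
    // these values exist only after that fetch has completed.
    let variables = json!({
        "representations": [
            { "__typename": "Product", "upc": "1" },
            { "__typename": "Product", "upc": "2" }
        ]
    });
    println!("{variables}");
}
```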
+ +Overall, without subgraph batching, the router would make nine fetches in total across the three subgraphs, but with subgraph batching, that total is reduced to five fetches. + +| Subgraph | Fetch Count (without)| Fetch Count (with) | +|----------|----------------------|--------------------| +| accounts | 6 | 2 | +| products | 1 | 1 | +| reviews | 2 | 2 | + ### Configure client To enable batching in an Apollo client, configure `BatchHttpLink`. For details on implementing `BatchHttpLink`, see [batching operations](/react/api/link/apollo-link-batch-http/). @@ -69,11 +213,12 @@ Metrics in the Apollo Router for query batching: mode +[subgraph] -Counter for the number of received batches. +Counter for the number of received (from client) or dispatched (to subgraph) batches. @@ -87,6 +232,7 @@ Counter for the number of received batches. mode +[subgraph] @@ -98,6 +244,7 @@ Histogram for the size of received batches. +The `subgraph` attribute is optional. If the attribute isn't present, the metric identifies batches received from clients. If the attribute is present, the metric identifies batches sent to a particular subgraph. ## Query batch formats @@ -166,7 +313,7 @@ As a result, the router returns an invalid batch error: ### Individual query error -If a single query in a batch cannot be processed, this results in an individual error. +If a single query in a batch cannot be processed, this results in an individual error. For example, the query `MyFirstQuery` is accessing a field that doesn't exist, while the rest of the batch query is valid. @@ -203,7 +350,7 @@ As a result, an error is returned for the individual invalid query and the other ## Known limitations ### Unsupported query modes - + When batching is enabled, any batch operation that results in a stream of responses is unsupported, including: - [`@defer`](/graphos/operations/defer/) - [subscriptions](/graphos/operations/subscriptions/) From ab069ef2df14d41c37b8087f4aa9a3b60fc38242 Mon Sep 17 00:00:00 2001 From: Lucas Leadbetter <5595530+lleadbet@users.noreply.github.com> Date: Tue, 16 Apr 2024 07:23:56 -0400 Subject: [PATCH 19/46] add sha256 support to rhai (#4940) This adds a new `sha256::digest` function to Rhai to enable hashing values. Fixes #4939 --- **Checklist** Complete the checklist (and note appropriate exceptions) before the PR is marked ready-for-review. - [x] Changes are compatible[^1] - [x] Documentation[^2] completed - [ ] Performance impact assessed and acceptable - Tests added and passing[^3] - [x] Unit Tests - [ ] Integration Tests - [ ] Manual Tests --- .changesets/feat_feat_sha256_in_rhai.md | 17 +++++++++++++++++ apollo-router/src/plugins/rhai/engine.rs | 14 ++++++++++++++ apollo-router/src/plugins/rhai/tests.rs | 11 +++++++++++ docs/source/customizations/rhai-api.mdx | 18 ++++++++++++++++++ 4 files changed, 60 insertions(+) create mode 100644 .changesets/feat_feat_sha256_in_rhai.md diff --git a/.changesets/feat_feat_sha256_in_rhai.md b/.changesets/feat_feat_sha256_in_rhai.md new file mode 100644 index 0000000000..5d7e15fe73 --- /dev/null +++ b/.changesets/feat_feat_sha256_in_rhai.md @@ -0,0 +1,17 @@ +### Add support for SHA256 hashing in Rhai ([Issue #4939](https://github.com/apollographql/router/issues/4939)) + +This adds a new `sha256` module to create SHA256 hashes within Rhai scripts.
An example looks like: + +```rhai +fn supergraph_service(service){ + service.map_request(|request|{ + log_info("hello world"); + let sha = sha256::digest("hello world"); + log_info(sha); + }); +} +``` + +The only function currently is `digest`. + +By [@lleadbet](https://github.com/lleadbet) in https://github.com/apollographql/router/pull/4940 diff --git a/apollo-router/src/plugins/rhai/engine.rs b/apollo-router/src/plugins/rhai/engine.rs index 65a73326c8..eaf288dfbd 100644 --- a/apollo-router/src/plugins/rhai/engine.rs +++ b/apollo-router/src/plugins/rhai/engine.rs @@ -173,6 +173,17 @@ mod router_json { } } +#[export_module] +mod router_sha256 { + use sha2::Digest; + + #[rhai_fn(pure)] + pub(crate) fn digest(input: &mut ImmutableString) -> String { + let hash = sha2::Sha256::digest(input.as_bytes()); + hex::encode(hash) + } +} + #[export_module] mod router_expansion { pub(crate) type Expansion = expansion::Expansion; @@ -1625,6 +1636,7 @@ impl Rhai { let base64_module = exported_module!(router_base64); let json_module = exported_module!(router_json); + let sha256_module = exported_module!(router_sha256); let expansion_module = exported_module!(router_expansion); @@ -1651,6 +1663,8 @@ impl Rhai { .register_static_module("base64", base64_module.into()) // Register our json module (not global) .register_static_module("json", json_module.into()) + // Register our SHA256 module (not global) + .register_static_module("sha256", sha256_module.into()) // Register our expansion module (not global) // Hide the fact that it is an expansion module by calling it "env" .register_static_module("env", expansion_module.into()) diff --git a/apollo-router/src/plugins/rhai/tests.rs b/apollo-router/src/plugins/rhai/tests.rs index 22a004d807..41a59ced1b 100644 --- a/apollo-router/src/plugins/rhai/tests.rs +++ b/apollo-router/src/plugins/rhai/tests.rs @@ -11,6 +11,7 @@ use http::StatusCode; use rhai::Engine; use rhai::EvalAltResult; use serde_json::Value; +use sha2::Digest; use tower::util::BoxService; use tower::BoxError; use tower::Service; @@ -584,6 +585,16 @@ fn it_can_generate_uuid() { assert_eq!(uuid_v4_rhai, uuid_parsed.to_string()); } +#[test] +fn it_can_sha256_string() { + let engine = new_rhai_test_engine(); + let hash = sha2::Sha256::digest("hello world".as_bytes()); + let hash_rhai: String = engine + .eval(r#"sha256::digest("hello world")"#) + .expect("can decode string"); + assert_eq!(hash_rhai, hex::encode(hash)); +} + async fn base_globals_function(fn_name: &str) -> Result> { let dyn_plugin: Box = crate::plugin::plugins() .find(|factory| factory.name == "apollo.rhai") diff --git a/docs/source/customizations/rhai-api.mdx b/docs/source/customizations/rhai-api.mdx index bdafa98344..00dd9adbf5 100644 --- a/docs/source/customizations/rhai-api.mdx +++ b/docs/source/customizations/rhai-api.mdx @@ -242,6 +242,24 @@ You don't need to import the "base64" module. It is imported in the router. +## sha256 hash strings + +Your Rhai customization can use the function `sha256::digest()` to hash strings using the SHA256 hashing algorithm. + +```rhai +fn supergraph_service(service){ + service.map_request(|request|{ + let sha = sha256::digest("hello world"); + log_info(sha); + }); +} +``` + + +You don't need to import the "sha256" module. It is imported in the router. + + + ### Different alphabets Base64 supports multiple alphabets to encode data, depending on the supported characters where it is used.
The router supports the following alphabets: From bf6d05ed53f8f71d4a210d85f0963ec86fa23775 Mon Sep 17 00:00:00 2001 From: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> Date: Tue, 16 Apr 2024 13:57:06 +0200 Subject: [PATCH 20/46] improve xtask release task Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- .gitignore | 1 + xtask/src/commands/release.rs | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index ad362c28b0..c8cb2b01a8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ # Generated by Cargo # will have compiled files and executables **/target/ +.cargo_check # These are backup files generated by rustfmt **/*.rs.bk diff --git a/xtask/src/commands/release.rs b/xtask/src/commands/release.rs index 7375753f33..61217cf9d3 100644 --- a/xtask/src/commands/release.rs +++ b/xtask/src/commands/release.rs @@ -59,6 +59,9 @@ pub struct Prepare { /// Skip the license check #[clap(long)] skip_license_check: bool, + /// It's a pre-release so skip the changelog generation + #[clap(long)] + pre_release: bool, /// The new version that is being created OR to bump (major|minor|patch|current). version: Version, @@ -109,7 +112,9 @@ impl Prepare { self.update_helm_charts(&version)?; self.update_docs(&version)?; self.docker_files(&version)?; - self.finalize_changelog(&version)?; + if !self.pre_release { + self.finalize_changelog(&version)?; + } } Ok(()) From 9b5ffc8c67d18c302f8f3208fa75f77ba6c73347 Mon Sep 17 00:00:00 2001 From: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> Date: Tue, 16 Apr 2024 14:00:17 +0200 Subject: [PATCH 21/46] prep release: v1.45.0-alpha.0 --- Cargo.lock | 6 +- RELEASE_CHECKLIST.md | 19 +-- apollo-router-benchmarks/Cargo.toml | 2 +- apollo-router-scaffold/Cargo.toml | 2 +- .../templates/base/Cargo.toml | 2 +- .../templates/base/xtask/Cargo.toml | 2 +- apollo-router/Cargo.toml | 2 +- .../tracing/docker-compose.datadog.yml | 2 +- dockerfiles/tracing/docker-compose.jaeger.yml | 2 +- dockerfiles/tracing/docker-compose.zipkin.yml | 2 +- helm/chart/router/Chart.yaml | 4 +- helm/chart/router/README.md | 8 +- licenses.html | 144 ++++++++++++++---- scripts/install.sh | 2 +- 14 files changed, 136 insertions(+), 63 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 418d74cb73..61eaeccf07 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -250,7 +250,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.44.0" +version = "1.45.0-alpha.0" dependencies = [ "access-json", "anyhow", @@ -411,7 +411,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.44.0" +version = "1.45.0-alpha.0" dependencies = [ "apollo-parser", "apollo-router", @@ -427,7 +427,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.44.0" +version = "1.45.0-alpha.0" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/RELEASE_CHECKLIST.md b/RELEASE_CHECKLIST.md index 45fbc8cf54..8fb327dfdd 100644 --- a/RELEASE_CHECKLIST.md +++ b/RELEASE_CHECKLIST.md @@ -50,6 +50,7 @@ Make sure you have the following software installed and available in your `PATH` - `gh`: [The GitHub CLI](https://cli.github.com/) - `cargo`: [Cargo & Rust Installation](https://doc.rust-lang.org/cargo/getting-started/installation.html) + - `helm`: see - `helm-docs`: see - `cargo-about`: install with `cargo install --locked cargo-about` - `cargo-deny`: install with `cargo install --locked cargo-deny` @@ -166,7 +167,7 @@ Start following the steps below to start a release PR. 
The process is **not ful 6. Run the release automation script using this command to use the environment variable set previously: ``` - cargo xtask release prepare "${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}" + cargo xtask release prepare --pre-release "${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}" ``` Running this command will: @@ -175,29 +176,19 @@ Start following the steps below to start a release PR. The process is **not ful - Run our compliance checks and update the `licenses.html` file as appropriate. - Ensure we're not using any incompatible licenses in the release. - Currently, it will also do one step which we will **immediately undo** in the next step, since it is not desirable for pre-release versions: - - - Migrate the current set of `/.changesets/*.md` files into `/CHANGELOG.md` using the version specified. - -7. Revert the changes to the `CHANGELOG.md` made in the last step since we don't finalize the changelog from the `.changesets` until the final release is prepared. (This really could be replaced with a `--skip-changesets` flag.) - - ``` - git checkout -- .changesets/ CHANGELOG.md - ``` - -8. Now, review and stage the changes produced by the previous step. This is most safely done using the `--patch` (or `-p`) flag to `git add` (`-u` ignores untracked files). +7. Now, review and stage the changes produced by the previous step. This is most safely done using the `--patch` (or `-p`) flag to `git add` (`-u` ignores untracked files). ``` git add -up . ``` -9. Now commit those changes locally, using a brief message: +8. Now commit those changes locally, using a brief message: ``` git commit -m "prep release: v${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}" ``` -10. Push this commit up to the existing release PR: +9. Push this commit up to the existing release PR: ``` git push "${APOLLO_ROUTER_RELEASE_GIT_ORIGIN}" "${APOLLO_ROUTER_RELEASE_VERSION}" diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index 04c97a6e26..a418bcd2a2 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.44.0" +version = "1.45.0-alpha.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index ff6054dca0..e5b46e8ec3 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.44.0" +version = "1.45.0-alpha.0" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index 56e237bfce..d89d4860c5 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.44.0" +apollo-router = "1.45.0-alpha.0" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index 0f1d41f37b..2897e97759 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.44.0" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0-alpha.0" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 651ba3d4d5..d67e53727b 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.44.0" +version = "1.45.0-alpha.0" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index 0115029b0b..d2340a04c8 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.44.0 + image: ghcr.io/apollographql/router:v1.45.0-alpha.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 04c0c57799..7ff6f49f7c 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.44.0 + image: ghcr.io/apollographql/router:v1.45.0-alpha.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index f7a431ea72..db276fd901 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.44.0 + image: ghcr.io/apollographql/router:v1.45.0-alpha.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index 
bfbfc803cf..cd24fa8e08 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.44.0 +version: 1.45.0-alpha.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "v1.44.0" +appVersion: "v1.45.0-alpha.0" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index b997146903..f1d5e3cb05 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.44.0](https://img.shields.io/badge/Version-1.44.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.44.0](https://img.shields.io/badge/AppVersion-v1.44.0-informational?style=flat-square) +![Version: 1.45.0-alpha.0](https://img.shields.io/badge/Version-1.45.0--alpha.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0-alpha.0](https://img.shields.io/badge/AppVersion-v1.45.0--alpha.0-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.44.0 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.0 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.44.0 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.44.0 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.0 --values my-values.yaml ``` _See [configuration](#configuration) below._ @@ -94,5 +94,3 @@ helm show values oci://ghcr.io/apollographql/helm-charts/router | topologySpreadConstraints | list | `[]` | Sets the [topology spread constraints](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/) for Deployment pods | | virtualservice.enabled | bool | `false` | | ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.2](https://github.com/norwoodj/helm-docs/releases/v1.11.2) diff --git a/licenses.html b/licenses.html index e36dcba570..0ec8fa2b56 100644 --- a/licenses.html +++ b/licenses.html @@ -45,7 +45,7 @@

      Third Party Licenses

      Overview of licenses:

                                       Apache License
                                  Version 2.0, January 2004
      @@ -11530,6 +11530,14 @@ 

      Used by:

      additional terms or conditions.
    • +
    • +

      Apache License 2.0

      +

      Used by:

      + +
      ../../LICENSE-APACHE
      +
    • Apache License 2.0

      Used by:

      @@ -12177,17 +12185,15 @@

      Used by:

      Apache License 2.0

      Used by:

    • + +
    • +

      Apache License 2.0

      +

      Used by:

      + +
      Copyright 2023 The allocator-api2 project developers
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +	http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +
    • +
    • +

      Apache License 2.0

      +

      Used by:

      + +
      Copyright [2022] [Bryn Cooke]
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +    http://www.apache.org/licenses/LICENSE-2.0
      +
       Unless required by applicable law or agreed to in writing, software
       distributed under the License is distributed on an "AS IS" BASIS,
       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      @@ -12352,29 +12399,6 @@ 

      Used by:

    • zstd-sys
    MIT or Apache-2.0
    -
    - -
  • -

    Apache License 2.0

    -

    Used by:

    - -
    The Apache License, Version 2.0 (Apache-2.0)
    -
    -Copyright 2015-2020 the fiat-crypto authors (see the AUTHORS file)
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -    http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
     
  • @@ -14202,6 +14226,66 @@

    Used by:

    shall be included in all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. +
  • + +
  • +

    MIT License

    +

    Used by:

    + +
    Copyright (c) 2019 Carl Lerche
    +
    +Permission is hereby granted, free of charge, to any
    +person obtaining a copy of this software and associated
    +documentation files (the "Software"), to deal in the
    +Software without restriction, including without
    +limitation the rights to use, copy, modify, merge,
    +publish, distribute, sublicense, and/or sell copies of
    +the Software, and to permit persons to whom the Software
    +is furnished to do so, subject to the following
    +conditions:
    +
    +The above copyright notice and this permission notice
    +shall be included in all copies or substantial portions
    +of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
    +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
    +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
    +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
    +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
    +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
    +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    +DEALINGS IN THE SOFTWARE.
    +
    +Copyright (c) 2018 David Tolnay
    +
    +Permission is hereby granted, free of charge, to any
    +person obtaining a copy of this software and associated
    +documentation files (the "Software"), to deal in the
    +Software without restriction, including without
    +limitation the rights to use, copy, modify, merge,
    +publish, distribute, sublicense, and/or sell copies of
    +the Software, and to permit persons to whom the Software
    +is furnished to do so, subject to the following
    +conditions:
    +
    +The above copyright notice and this permission notice
    +shall be included in all copies or substantial portions
    +of the Software.
    +
     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
     ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
     TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
    @@ -15113,8 +15197,6 @@ 

    Used by:

    MIT License

    Used by:

      -
    • async-stream
    • -
    • async-stream-impl
    • base64-simd
    • convert_case
    • crunchy
    • @@ -15986,6 +16068,8 @@

      Used by:

      MIT License

      Used by:

        +
      • aho-corasick
      • +
      • byteorder
      • globset
      • memchr
      • regex-automata
      • diff --git a/scripts/install.sh b/scripts/install.sh index 64afab308c..7d7a2e70f1 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. -PACKAGE_VERSION="v1.44.0" +PACKAGE_VERSION="v1.45.0-alpha.0" download_binary() { downloader --check From f37813c25c02fb5abb00e9f4518a0065f8a0d209 Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Tue, 16 Apr 2024 17:57:25 +0200 Subject: [PATCH 22/46] fix(configuration): add a migration for experimental_graphql_validation_mode (#4964) Fixup of #4551 Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- .../src/configuration/migrations/0024-graphql_validation.yaml | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 apollo-router/src/configuration/migrations/0024-graphql_validation.yaml diff --git a/apollo-router/src/configuration/migrations/0024-graphql_validation.yaml b/apollo-router/src/configuration/migrations/0024-graphql_validation.yaml new file mode 100644 index 0000000000..f830067a53 --- /dev/null +++ b/apollo-router/src/configuration/migrations/0024-graphql_validation.yaml @@ -0,0 +1,4 @@ +description: experimental_graphql_validation_mode is no longer supported +actions: + - type: delete + path: experimental_graphql_validation_mode From c02dee4a968841418c5ed33ca9d01b1b4a0ae182 Mon Sep 17 00:00:00 2001 From: Nick Marsh Date: Wed, 17 Apr 2024 16:00:56 +1000 Subject: [PATCH 23/46] Signature and reference generation tweaks (#4951) --- .changesets/fix_njm_p_681_pr_tweaks.md | 5 + .../src/apollo_studio_interop/mod.rs | 1431 +---------------- .../src/apollo_studio_interop/tests.rs | 1407 ++++++++++++++++ licenses.html | 124 +- 4 files changed, 1535 insertions(+), 1432 deletions(-) create mode 100644 .changesets/fix_njm_p_681_pr_tweaks.md create mode 100644 apollo-router/src/apollo_studio_interop/tests.rs diff --git a/.changesets/fix_njm_p_681_pr_tweaks.md b/.changesets/fix_njm_p_681_pr_tweaks.md new file mode 100644 index 0000000000..841ce5a42f --- /dev/null +++ b/.changesets/fix_njm_p_681_pr_tweaks.md @@ -0,0 +1,5 @@ +### Performance tweaks of Apollo usage report field generation ([PR 4951](https://github.com/apollographql/router/pull/4951)) + +Improves performance of the Apollo usage report signature/stats key and referenced field generation. 
+ +By [@bonnici](https://github.com/bonnici) in https://github.com/apollographql/router/pull/4951 \ No newline at end of file diff --git a/apollo-router/src/apollo_studio_interop/mod.rs b/apollo-router/src/apollo_studio_interop/mod.rs index a07842a7ab..b1afad3f34 100644 --- a/apollo-router/src/apollo_studio_interop/mod.rs +++ b/apollo-router/src/apollo_studio_interop/mod.rs @@ -213,7 +213,7 @@ impl UsageReportingGenerator<'_> { OperationType::Mutation => "Mutation", OperationType::Subscription => "Subscription", }; - self.extract_fields(&operation_type.into(), &operation.selection_set); + self.extract_fields(operation_type, &operation.selection_set); self.fields_by_type .iter() @@ -237,19 +237,19 @@ impl UsageReportingGenerator<'_> { } } - fn extract_fields(&mut self, parent_type: &String, selection_set: &SelectionSet) { + fn extract_fields(&mut self, parent_type: &str, selection_set: &SelectionSet) { if !self.fields_by_interface.contains_key(parent_type) { - let field_schema_type = self.schema.types.get(parent_type.as_str()); + let field_schema_type = self.schema.types.get(parent_type); let is_interface = field_schema_type.is_some_and(|t| t.is_interface()); self.fields_by_interface - .insert(parent_type.clone(), is_interface); + .insert(parent_type.into(), is_interface); } for selection in &selection_set.selections { match selection { Selection::Field(field) => { self.fields_by_type - .entry(parent_type.clone()) + .entry(parent_type.into()) .or_default() .insert(field.name.to_string()); @@ -259,7 +259,7 @@ impl UsageReportingGenerator<'_> { Selection::InlineFragment(fragment) => { let frag_type_name = match fragment.type_condition.clone() { Some(fragment_type) => fragment_type.to_string(), - None => parent_type.clone(), + None => parent_type.into(), }; self.extract_fields(&frag_type_name, &fragment.selection_set); } @@ -525,1421 +525,4 @@ fn format_value(value: &Value, f: &mut fmt::Formatter) -> fmt::Result { } #[cfg(test)] -mod tests { - use apollo_compiler::Schema; - use router_bridge::planner::PlanOptions; - use router_bridge::planner::Planner; - use router_bridge::planner::QueryPlannerConfig; - use test_log::test; - - use super::*; - - // Generate the signature and referenced fields using router-bridge to confirm that the expected value we used is correct. - // We can remove this when we no longer use the bridge but should keep the rust implementation verifications. 
- async fn assert_bridge_results( - schema_str: &str, - query_str: &str, - expected_sig: &str, - expected_refs: &HashMap, - ) { - let planner = Planner::::new( - schema_str.to_string(), - QueryPlannerConfig::default(), - ) - .await - .unwrap(); - let plan = planner - .plan(query_str.to_string(), None, PlanOptions::default()) - .await - .unwrap(); - let bridge_result = ComparableUsageReporting { - result: plan.usage_reporting, - }; - let expected_result = UsageReporting { - stats_report_key: expected_sig.to_string(), - referenced_fields_by_type: expected_refs.clone(), - }; - assert!(matches!( - bridge_result.compare(&expected_result), - UsageReportingComparisonResult::Equal - )); - } - - fn assert_expected_results( - actual: &ComparableUsageReporting, - expected_sig: &str, - expected_refs: &HashMap, - ) { - let expected_result = UsageReporting { - stats_report_key: expected_sig.to_string(), - referenced_fields_by_type: expected_refs.clone(), - }; - assert!(matches!( - actual.compare(&expected_result), - UsageReportingComparisonResult::Equal - )); - } - - #[test(tokio::test)] - async fn test_complex_query() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"query UnusedQuery { - noInputQuery { - enumResponse - } - } - - fragment UnusedFragment on EverythingResponse { - enumResponse - } - - fragment Fragment2 on EverythingResponse { - basicTypes { - nullableFloat - } - } - - query TransformedQuery { - - - scalarInputQuery(idInput: "a1", listInput: [], boolInput: true, intInput: 1, stringInput: "x", floatInput: 1.2) @skip(if: false) @include(if: true) { - ...Fragment2, - - - objectTypeWithInputField(boolInput: true, secondInput: false) { - stringField - __typename - intField - } - - enumResponse - interfaceResponse { - sharedField - ... on InterfaceImplementation2 { - implementation2Field - } - ... 
on InterfaceImplementation1 { - implementation1Field - } - } - ...Fragment1, - } - } - - fragment Fragment1 on EverythingResponse { - basicTypes { - nonNullFloat - } - }"#; - - let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = - generate_usage_reporting(&doc, &doc, &Some("TransformedQuery".into()), &schema); - - let expected_sig = "# TransformedQuery\nfragment Fragment1 on EverythingResponse{basicTypes{nonNullFloat}}fragment Fragment2 on EverythingResponse{basicTypes{nullableFloat}}query TransformedQuery{scalarInputQuery(boolInput:true floatInput:0 idInput:\"\"intInput:0 listInput:[]stringInput:\"\")@skip(if:false)@include(if:true){enumResponse interfaceResponse{sharedField...on InterfaceImplementation2{implementation2Field}...on InterfaceImplementation1{implementation1Field}}objectTypeWithInputField(boolInput:true,secondInput:false){__typename intField stringField}...Fragment1...Fragment2}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["scalarInputQuery".into()], - is_interface: false, - }, - ), - ( - "BasicTypesResponse".into(), - ReferencedFieldsForType { - field_names: vec!["nullableFloat".into(), "nonNullFloat".into()], - is_interface: false, - }, - ), - ( - "EverythingResponse".into(), - ReferencedFieldsForType { - field_names: vec![ - "basicTypes".into(), - "objectTypeWithInputField".into(), - "enumResponse".into(), - "interfaceResponse".into(), - ], - is_interface: false, - }, - ), - ( - "AnInterface".into(), - ReferencedFieldsForType { - field_names: vec!["sharedField".into()], - is_interface: true, - }, - ), - ( - "ObjectTypeResponse".into(), - ReferencedFieldsForType { - field_names: vec!["stringField".into(), "__typename".into(), "intField".into()], - is_interface: false, - }, - ), - ( - "InterfaceImplementation1".into(), - ReferencedFieldsForType { - field_names: vec!["implementation1Field".into()], - is_interface: false, - }, - ), - ( - "InterfaceImplementation2".into(), - ReferencedFieldsForType { - field_names: vec!["implementation2Field".into()], - is_interface: false, - }, - ), - ]); - assert_expected_results(&generated, expected_sig, &expected_refs); - - // the router-bridge planner will throw errors on unused fragments/queries so we remove them here - let sanitised_query_str = r#"fragment Fragment2 on EverythingResponse { - basicTypes { - nullableFloat - } - } - - query TransformedQuery { - - - scalarInputQuery(idInput: "a1", listInput: [], boolInput: true, intInput: 1, stringInput: "x", floatInput: 1.2) @skip(if: false) @include(if: true) { - ...Fragment2, - - - objectTypeWithInputField(boolInput: true, secondInput: false) { - stringField - __typename - intField - } - - enumResponse - interfaceResponse { - sharedField - ... on InterfaceImplementation2 { - implementation2Field - } - ... on InterfaceImplementation1 { - implementation1Field - } - } - ...Fragment1, - } - } - - fragment Fragment1 on EverythingResponse { - basicTypes { - nonNullFloat - } - }"#; - - assert_bridge_results( - schema_str, - sanitised_query_str, - expected_sig, - &expected_refs, - ) - .await; - } - - #[test(tokio::test)] - async fn test_complex_references() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"query Query($secondInput: Boolean!) 
{ - scalarResponseQuery - noInputQuery { - basicTypes { - nonNullId - nonNullInt - } - enumResponse - interfaceImplementationResponse { - sharedField - implementation2Field - } - interfaceResponse { - ... on InterfaceImplementation1 { - implementation1Field - sharedField - } - ... on InterfaceImplementation2 { - implementation2Field - sharedField - } - } - listOfUnions { - ... on UnionType1 { - nullableString - } - } - objectTypeWithInputField(secondInput: $secondInput) { - intField - } - } - basicInputTypeQuery(input: { someFloat: 1 }) { - unionResponse { - ... on UnionType1 { - nullableString - } - } - unionType2Response { - unionType2Field - } - listOfObjects { - stringField - } - } - }"#; - - let schema: Valid = - Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = generate_usage_reporting(&doc, &doc, &Some("Query".into()), &schema); - - let expected_sig = "# Query\nquery Query($secondInput:Boolean!){basicInputTypeQuery(input:{}){listOfObjects{stringField}unionResponse{...on UnionType1{nullableString}}unionType2Response{unionType2Field}}noInputQuery{basicTypes{nonNullId nonNullInt}enumResponse interfaceImplementationResponse{implementation2Field sharedField}interfaceResponse{...on InterfaceImplementation1{implementation1Field sharedField}...on InterfaceImplementation2{implementation2Field sharedField}}listOfUnions{...on UnionType1{nullableString}}objectTypeWithInputField(secondInput:$secondInput){intField}}scalarResponseQuery}"; - let expected_refs: HashMap = HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec![ - "scalarResponseQuery".into(), - "noInputQuery".into(), - "basicInputTypeQuery".into(), - ], - is_interface: false, - }, - ), - ( - "BasicTypesResponse".into(), - ReferencedFieldsForType { - field_names: vec!["nonNullId".into(), "nonNullInt".into()], - is_interface: false, - }, - ), - ( - "ObjectTypeResponse".into(), - ReferencedFieldsForType { - field_names: vec!["intField".into(), "stringField".into()], - is_interface: false, - }, - ), - ( - "UnionType2".into(), - ReferencedFieldsForType { - field_names: vec!["unionType2Field".into()], - is_interface: false, - }, - ), - ( - "EverythingResponse".into(), - ReferencedFieldsForType { - field_names: vec![ - "basicTypes".into(), - "enumResponse".into(), - "interfaceImplementationResponse".into(), - "interfaceResponse".into(), - "listOfUnions".into(), - "objectTypeWithInputField".into(), - "unionResponse".into(), - "unionType2Response".into(), - "listOfObjects".into(), - ], - is_interface: false, - }, - ), - ( - "InterfaceImplementation1".into(), - ReferencedFieldsForType { - field_names: vec!["implementation1Field".into(), "sharedField".into()], - is_interface: false, - }, - ), - ( - "UnionType1".into(), - ReferencedFieldsForType { - field_names: vec!["nullableString".into()], - is_interface: false, - }, - ), - ( - "InterfaceImplementation2".into(), - ReferencedFieldsForType { - field_names: vec!["sharedField".into(), "implementation2Field".into()], - is_interface: false, - }, - ), - ]); - assert_expected_results(&generated, expected_sig, &expected_refs); - - assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; - } - - #[test(tokio::test)] - async fn test_basic_whitespace() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"query MyQuery { - noInputQuery { - id - } - }"#; - - let schema = 
Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = generate_usage_reporting(&doc, &doc, &Some("MyQuery".into()), &schema); - - let expected_sig = "# MyQuery\nquery MyQuery{noInputQuery{id}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["noInputQuery".into()], - is_interface: false, - }, - ), - ( - "EverythingResponse".into(), - ReferencedFieldsForType { - field_names: vec!["id".into()], - is_interface: false, - }, - ), - ]); - - assert_expected_results(&generated, expected_sig, &expected_refs); - assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; - } - - #[test(tokio::test)] - async fn test_anonymous_query() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"query { - noInputQuery { - id - } - }"#; - - let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = generate_usage_reporting(&doc, &doc, &None, &schema); - - let expected_sig = "# -\n{noInputQuery{id}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["noInputQuery".into()], - is_interface: false, - }, - ), - ( - "EverythingResponse".into(), - ReferencedFieldsForType { - field_names: vec!["id".into()], - is_interface: false, - }, - ), - ]); - - assert_expected_results(&generated, expected_sig, &expected_refs); - assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; - } - - #[test(tokio::test)] - async fn test_anonymous_mutation() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"mutation { - noInputMutation { - id - } - }"#; - - let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = generate_usage_reporting(&doc, &doc, &None, &schema); - - let expected_sig = "# -\nmutation{noInputMutation{id}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "Mutation".into(), - ReferencedFieldsForType { - field_names: vec!["noInputMutation".into()], - is_interface: false, - }, - ), - ( - "EverythingResponse".into(), - ReferencedFieldsForType { - field_names: vec!["id".into()], - is_interface: false, - }, - ), - ]); - - assert_expected_results(&generated, expected_sig, &expected_refs); - assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; - } - - #[test(tokio::test)] - async fn test_anonymous_subscription() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str: &str = r#"subscription { - noInputSubscription { - id - } - }"#; - - let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = generate_usage_reporting(&doc, &doc, &None, &schema); - - let expected_sig = "# -\nsubscription{noInputSubscription{id}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "Subscription".into(), - ReferencedFieldsForType { - field_names: vec!["noInputSubscription".into()], - is_interface: false, - }, - ), - ( - "EverythingResponse".into(), - ReferencedFieldsForType { - field_names: vec!["id".into()], - is_interface: 
false, - }, - ), - ]); - - assert_expected_results(&generated, expected_sig, &expected_refs); - assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; - } - - #[test(tokio::test)] - async fn test_ordered_fields_and_variables() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"query VariableScalarInputQuery($idInput: ID!, $boolInput: Boolean!, $floatInput: Float!, $intInput: Int!, $listInput: [String!]!, $stringInput: String!, $nullableStringInput: String) { - sortQuery( - idInput: $idInput - boolInput: $boolInput - floatInput: $floatInput - INTInput: $intInput - listInput: $listInput - stringInput: $stringInput - nullableStringInput: $nullableStringInput - ) { - zzz - CCC - nullableId - aaa - id - } - }"#; - - let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = generate_usage_reporting( - &doc, - &doc, - &Some("VariableScalarInputQuery".into()), - &schema, - ); - - let expected_sig = "# VariableScalarInputQuery\nquery VariableScalarInputQuery($boolInput:Boolean!,$floatInput:Float!,$idInput:ID!,$intInput:Int!,$listInput:[String!]!,$nullableStringInput:String,$stringInput:String!){sortQuery(INTInput:$intInput boolInput:$boolInput floatInput:$floatInput idInput:$idInput listInput:$listInput nullableStringInput:$nullableStringInput stringInput:$stringInput){CCC aaa id nullableId zzz}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["sortQuery".into()], - is_interface: false, - }, - ), - ( - "SortResponse".into(), - ReferencedFieldsForType { - field_names: vec![ - "aaa".into(), - "CCC".into(), - "id".into(), - "nullableId".into(), - "zzz".into(), - ], - is_interface: false, - }, - ), - ]); - - assert_expected_results(&generated, expected_sig, &expected_refs); - assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; - } - - #[test(tokio::test)] - async fn test_fragments() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"query FragmentQuery { - noInputQuery { - listOfBools - interfaceResponse { - sharedField - ... on InterfaceImplementation2 { - implementation2Field - } - ...bbbInterfaceFragment - ...aaaInterfaceFragment - ... { - ... on InterfaceImplementation1 { - implementation1Field - } - } - ... on InterfaceImplementation1 { - implementation1Field - } - } - unionResponse { - ... on UnionType2 { - unionType2Field - } - ... 
on UnionType1 { - unionType1Field - } - } - ...zzzFragment - ...aaaFragment - ...ZZZFragment - } - } - - fragment zzzFragment on EverythingResponse { - listOfInterfaces { - sharedField - } - } - - fragment ZZZFragment on EverythingResponse { - listOfInterfaces { - sharedField - } - } - - fragment aaaFragment on EverythingResponse { - listOfInterfaces { - sharedField - } - } - - fragment UnusedFragment on InterfaceImplementation2 { - sharedField - implementation2Field - } - - fragment bbbInterfaceFragment on InterfaceImplementation2 { - sharedField - implementation2Field - } - - fragment aaaInterfaceFragment on InterfaceImplementation1 { - sharedField - }"#; - - let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = - generate_usage_reporting(&doc, &doc, &Some("FragmentQuery".into()), &schema); - - let expected_sig = "# FragmentQuery\nfragment ZZZFragment on EverythingResponse{listOfInterfaces{sharedField}}fragment aaaFragment on EverythingResponse{listOfInterfaces{sharedField}}fragment aaaInterfaceFragment on InterfaceImplementation1{sharedField}fragment bbbInterfaceFragment on InterfaceImplementation2{implementation2Field sharedField}fragment zzzFragment on EverythingResponse{listOfInterfaces{sharedField}}query FragmentQuery{noInputQuery{interfaceResponse{sharedField...aaaInterfaceFragment...bbbInterfaceFragment...on InterfaceImplementation2{implementation2Field}...{...on InterfaceImplementation1{implementation1Field}}...on InterfaceImplementation1{implementation1Field}}listOfBools unionResponse{...on UnionType2{unionType2Field}...on UnionType1{unionType1Field}}...ZZZFragment...aaaFragment...zzzFragment}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "UnionType1".into(), - ReferencedFieldsForType { - field_names: vec!["unionType1Field".into()], - is_interface: false, - }, - ), - ( - "UnionType2".into(), - ReferencedFieldsForType { - field_names: vec!["unionType2Field".into()], - is_interface: false, - }, - ), - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["noInputQuery".into()], - is_interface: false, - }, - ), - ( - "EverythingResponse".into(), - ReferencedFieldsForType { - field_names: vec![ - "listOfInterfaces".into(), - "listOfBools".into(), - "interfaceResponse".into(), - "unionResponse".into(), - ], - is_interface: false, - }, - ), - ( - "InterfaceImplementation1".into(), - ReferencedFieldsForType { - field_names: vec!["sharedField".into(), "implementation1Field".into()], - is_interface: false, - }, - ), - ( - "InterfaceImplementation1".into(), - ReferencedFieldsForType { - field_names: vec!["implementation1Field".into(), "sharedField".into()], - is_interface: false, - }, - ), - ( - "AnInterface".into(), - ReferencedFieldsForType { - field_names: vec!["sharedField".into()], - is_interface: true, - }, - ), - ( - "InterfaceImplementation2".into(), - ReferencedFieldsForType { - field_names: vec!["sharedField".into(), "implementation2Field".into()], - is_interface: false, - }, - ), - ]); - - assert_expected_results(&generated, expected_sig, &expected_refs); - - // the router-bridge planner will throw errors on unused fragments/queries so we remove them here - let sanitised_query_str = r#"query FragmentQuery { - noInputQuery { - listOfBools - interfaceResponse { - sharedField - ... on InterfaceImplementation2 { - implementation2Field - } - ...bbbInterfaceFragment - ...aaaInterfaceFragment - ... { - ... 
on InterfaceImplementation1 { - implementation1Field - } - } - ... on InterfaceImplementation1 { - implementation1Field - } - } - unionResponse { - ... on UnionType2 { - unionType2Field - } - ... on UnionType1 { - unionType1Field - } - } - ...zzzFragment - ...aaaFragment - ...ZZZFragment - } - } - - fragment zzzFragment on EverythingResponse { - listOfInterfaces { - sharedField - } - } - - fragment ZZZFragment on EverythingResponse { - listOfInterfaces { - sharedField - } - } - - fragment aaaFragment on EverythingResponse { - listOfInterfaces { - sharedField - } - } - - fragment bbbInterfaceFragment on InterfaceImplementation2 { - sharedField - implementation2Field - } - - fragment aaaInterfaceFragment on InterfaceImplementation1 { - sharedField - }"#; - assert_bridge_results( - schema_str, - sanitised_query_str, - expected_sig, - &expected_refs, - ) - .await; - } - - #[test(tokio::test)] - async fn test_directives() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"fragment Fragment1 on InterfaceImplementation1 { - sharedField - implementation1Field - } - - fragment Fragment2 on InterfaceImplementation2 @withArgs(arg2: "" arg1: "test" arg3: true arg5: [1,2] arg4: 2) @noArgs { - sharedField - implementation2Field - } - - query DirectiveQuery @withArgs(arg2: "" arg1: "test") @noArgs { - noInputQuery { - enumResponse @withArgs(arg3: false arg5: [1,2] arg4: 2) @noArgs - unionResponse { - ... on UnionType1 @withArgs(arg2: "" arg1: "test") @noArgs { - unionType1Field - } - } - interfaceResponse { - ... Fragment1 @withArgs(arg1: "test") @noArgs - ... Fragment2 - } - } - }"#; - - let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = - generate_usage_reporting(&doc, &doc, &Some("DirectiveQuery".into()), &schema); - - let expected_sig = "# DirectiveQuery\nfragment Fragment1 on InterfaceImplementation1{implementation1Field sharedField}fragment Fragment2 on InterfaceImplementation2@noArgs@withArgs(arg1:\"\",arg2:\"\",arg3:true,arg4:0,arg5:[]){implementation2Field sharedField}query DirectiveQuery@withArgs(arg1:\"\",arg2:\"\")@noArgs{noInputQuery{enumResponse@withArgs(arg3:false,arg4:0,arg5:[])@noArgs interfaceResponse{...Fragment1@noArgs@withArgs(arg1:\"\")...Fragment2}unionResponse{...on UnionType1@noArgs@withArgs(arg1:\"\",arg2:\"\"){unionType1Field}}}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "UnionType1".into(), - ReferencedFieldsForType { - field_names: vec!["unionType1Field".into()], - is_interface: false, - }, - ), - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["noInputQuery".into()], - is_interface: false, - }, - ), - ( - "EverythingResponse".into(), - ReferencedFieldsForType { - field_names: vec![ - "enumResponse".into(), - "interfaceResponse".into(), - "unionResponse".into(), - ], - is_interface: false, - }, - ), - ( - "InterfaceImplementation1".into(), - ReferencedFieldsForType { - field_names: vec!["sharedField".into(), "implementation1Field".into()], - is_interface: false, - }, - ), - ( - "InterfaceImplementation1".into(), - ReferencedFieldsForType { - field_names: vec!["implementation1Field".into(), "sharedField".into()], - is_interface: false, - }, - ), - ( - "InterfaceImplementation2".into(), - ReferencedFieldsForType { - field_names: vec!["sharedField".into(), "implementation2Field".into()], - is_interface: false, - }, - ), - ]); - - assert_expected_results(&generated, 
expected_sig, &expected_refs); - assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; - } - - #[test(tokio::test)] - async fn test_aliases() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"query AliasQuery { - xxAlias: enumInputQuery(enumInput: SOME_VALUE_1) { - aliased: enumResponse - } - aaAlias: enumInputQuery(enumInput: SOME_VALUE_2) { - aliasedAgain: enumResponse - } - ZZAlias: enumInputQuery(enumInput: SOME_VALUE_3) { - enumResponse - } - }"#; - - let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = generate_usage_reporting(&doc, &doc, &Some("AliasQuery".into()), &schema); - - let expected_sig = "# AliasQuery\nquery AliasQuery{enumInputQuery(enumInput:SOME_VALUE_1){enumResponse}enumInputQuery(enumInput:SOME_VALUE_2){enumResponse}enumInputQuery(enumInput:SOME_VALUE_3){enumResponse}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "EverythingResponse".into(), - ReferencedFieldsForType { - field_names: vec!["enumResponse".into()], - is_interface: false, - }, - ), - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["enumInputQuery".into()], - is_interface: false, - }, - ), - ]); - - assert_expected_results(&generated, expected_sig, &expected_refs); - assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; - } - - #[test(tokio::test)] - async fn test_inline_values() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"query InlineInputTypeQuery { - inputTypeQuery(input: { - inputString: "foo", - inputInt: 42, - inputBoolean: null, - nestedType: { someFloat: 4.2 }, - enumInput: SOME_VALUE_1, - nestedTypeList: [ { someFloat: 4.2, someNullableFloat: null } ], - listInput: [1, 2, 3] - }) { - enumResponse - } - }"#; - let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = - generate_usage_reporting(&doc, &doc, &Some("InlineInputTypeQuery".into()), &schema); - - let expected_sig = "# InlineInputTypeQuery\nquery InlineInputTypeQuery{inputTypeQuery(input:{}){enumResponse}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "EverythingResponse".into(), - ReferencedFieldsForType { - field_names: vec!["enumResponse".into()], - is_interface: false, - }, - ), - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["inputTypeQuery".into()], - is_interface: false, - }, - ), - ]); - - assert_expected_results(&generated, expected_sig, &expected_refs); - assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; - } - - #[test(tokio::test)] - async fn test_root_type_fragment() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"query SomeQuery { - ... on Query { - ... 
{ - basicResponseQuery { - id - } - } - } - noInputQuery { - enumResponse - } - }"#; - let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = generate_usage_reporting(&doc, &doc, &None, &schema); - - let expected_sig = "# SomeQuery\nquery SomeQuery{noInputQuery{enumResponse}...on Query{...{basicResponseQuery{id}}}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "BasicResponse".into(), - ReferencedFieldsForType { - field_names: vec!["id".into()], - is_interface: false, - }, - ), - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into(), "noInputQuery".into()], - is_interface: false, - }, - ), - ( - "EverythingResponse".into(), - ReferencedFieldsForType { - field_names: vec!["enumResponse".into()], - is_interface: false, - }, - ), - ]); - - assert_expected_results(&generated, expected_sig, &expected_refs); - assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; - } - - #[test(tokio::test)] - async fn test_directive_arg_spacing() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"query { - basicResponseQuery { - id @withArgs(arg1: "") - id - } - }"#; - let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = generate_usage_reporting(&doc, &doc, &None, &schema); - - let expected_sig = "# -\n{basicResponseQuery{id@withArgs(arg1:\"\")id}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "BasicResponse".into(), - ReferencedFieldsForType { - field_names: vec!["id".into()], - is_interface: false, - }, - ), - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into()], - is_interface: false, - }, - ), - ]); - - assert_expected_results(&generated, expected_sig, &expected_refs); - assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; - } - - #[test(tokio::test)] - async fn test_operation_with_single_variable() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"query QueryWithVar($input_enum: SomeEnum) { - enumInputQuery(enumInput: $input_enum) { - listOfBools - } - }"#; - - let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = generate_usage_reporting(&doc, &doc, &Some("QueryWithVar".into()), &schema); - - let expected_sig = "# QueryWithVar\nquery QueryWithVar($input_enum:SomeEnum){enumInputQuery(enumInput:$input_enum){listOfBools}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["enumInputQuery".into()], - is_interface: false, - }, - ), - ( - "EverythingResponse".into(), - ReferencedFieldsForType { - field_names: vec!["listOfBools".into()], - is_interface: false, - }, - ), - ]); - - assert_expected_results(&generated, expected_sig, &expected_refs); - assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; - } - - #[test(tokio::test)] - async fn test_operation_with_multiple_variables() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"query QueryWithVars($stringInput: String!, $floatInput: Float!, $boolInput: Boolean!) 
{ - scalarInputQuery(listInput: ["x"], stringInput: $stringInput, intInput: 6, floatInput: $floatInput, boolInput: $boolInput, idInput: "y") { - enumResponse - } - inputTypeQuery(input: { inputInt: 2, inputString: "z", listInput: [], nestedType: { someFloat: 5 }}) { - enumResponse - } - }"#; - - let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = - generate_usage_reporting(&doc, &doc, &Some("QueryWithVars".into()), &schema); - - let expected_sig = "# QueryWithVars\nquery QueryWithVars($boolInput:Boolean!,$floatInput:Float!,$stringInput:String!){inputTypeQuery(input:{}){enumResponse}scalarInputQuery(boolInput:$boolInput floatInput:$floatInput idInput:\"\"intInput:0 listInput:[]stringInput:$stringInput){enumResponse}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["scalarInputQuery".into(), "inputTypeQuery".into()], - is_interface: false, - }, - ), - ( - "EverythingResponse".into(), - ReferencedFieldsForType { - field_names: vec!["enumResponse".into()], - is_interface: false, - }, - ), - ]); - - assert_expected_results(&generated, expected_sig, &expected_refs); - assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; - } - - #[test(tokio::test)] - async fn test_field_arg_comma_or_space() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"query QueryArgLength($StringInputWithAVeryyyLongNameSoLineLengthIs80: String!, $inputType: AnotherInputType, $enumInputWithAVryLongNameSoLineLengthIsOver80: SomeEnum, $enumInputType: EnumInputType) { - enumInputQuery (enumInput:$enumInputWithAVryLongNameSoLineLengthIsOver80,inputType:$enumInputType) { - enumResponse - } - defaultArgQuery(stringInput:$StringInputWithAVeryyyLongNameSoLineLengthIs80,inputType:$inputType) { - id - } - }"#; - - let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = - generate_usage_reporting(&doc, &doc, &Some("QueryArgLength".into()), &schema); - - // enumInputQuery has a variable line length of 81, so it should be separated by spaces (which are converted from newlines - // in the original implementation). - // enumInputQuery has a variable line length of 80, so it should be separated by commas. 
- let expected_sig = "# QueryArgLength\nquery QueryArgLength($StringInputWithAVeryyyLongNameSoLineLengthIs80:String!,$enumInputType:EnumInputType,$enumInputWithAVryLongNameSoLineLengthIsOver80:SomeEnum,$inputType:AnotherInputType){defaultArgQuery(inputType:$inputType stringInput:$StringInputWithAVeryyyLongNameSoLineLengthIs80){id}enumInputQuery(enumInput:$enumInputWithAVryLongNameSoLineLengthIsOver80 inputType:$enumInputType){enumResponse}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["enumInputQuery".into(), "defaultArgQuery".into()], - is_interface: false, - }, - ), - ( - "EverythingResponse".into(), - ReferencedFieldsForType { - field_names: vec!["enumResponse".into()], - is_interface: false, - }, - ), - ( - "BasicResponse".into(), - ReferencedFieldsForType { - field_names: vec!["id".into()], - is_interface: false, - }, - ), - ]); - - assert_expected_results(&generated, expected_sig, &expected_refs); - assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; - } - - #[test(tokio::test)] - async fn test_operation_arg_always_commas() { - let schema_str = include_str!("testdata/schema_interop.graphql"); - - let query_str = r#"query QueryArgLength($enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80: SomeEnum, $enumInputType: EnumInputType) { - enumInputQuery (enumInput:$enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80,inputType:$enumInputType) { - enumResponse - } - }"#; - - let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); - - let generated = - generate_usage_reporting(&doc, &doc, &Some("QueryArgLength".into()), &schema); - - // operation variables shouldn't ever be converted to spaces, since the line length check is only on field variables - // in the original implementation - let expected_sig = "# QueryArgLength\nquery QueryArgLength($enumInputType:EnumInputType,$enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80:SomeEnum){enumInputQuery(enumInput:$enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80 inputType:$enumInputType){enumResponse}}"; - let expected_refs: HashMap = HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["enumInputQuery".into()], - is_interface: false, - }, - ), - ( - "EverythingResponse".into(), - ReferencedFieldsForType { - field_names: vec!["enumResponse".into()], - is_interface: false, - }, - ), - ]); - - assert_expected_results(&generated, expected_sig, &expected_refs); - assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; - } - - #[test(tokio::test)] - async fn test_compare() { - let source = ComparableUsageReporting { - result: UsageReporting { - stats_report_key: "# -\n{basicResponseQuery{field1 field2}}".into(), - referenced_fields_by_type: HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into()], - is_interface: false, - }, - ), - ( - "SomeResponse".into(), - ReferencedFieldsForType { - field_names: vec!["field1".into(), "field2".into()], - is_interface: false, - }, - ), - ]), - }, - }; - - // Same signature and ref fields should match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: source.result.stats_report_key.clone(), - referenced_fields_by_type: source.result.referenced_fields_by_type.clone(), - }), - UsageReportingComparisonResult::Equal - )); - - // Reordered 
signature should not match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: "# -\n{basicResponseQuery{field2 field1}}".into(), - referenced_fields_by_type: source.result.referenced_fields_by_type.clone(), - }), - UsageReportingComparisonResult::StatsReportKeyNotEqual - )); - - // Different signature should not match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: - "# NamedQuery\nquery NamedQuery {basicResponseQuery{field1 field2}}".into(), - referenced_fields_by_type: source.result.referenced_fields_by_type.clone(), - }), - UsageReportingComparisonResult::StatsReportKeyNotEqual - )); - - // Reordered parent type should match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: source.result.stats_report_key.clone(), - referenced_fields_by_type: HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into()], - is_interface: false, - }, - ), - ( - "SomeResponse".into(), - ReferencedFieldsForType { - field_names: vec!["field1".into(), "field2".into()], - is_interface: false, - }, - ), - ]) - }), - UsageReportingComparisonResult::Equal - )); - - // Reordered fields should match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: source.result.stats_report_key.clone(), - referenced_fields_by_type: HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into()], - is_interface: false, - }, - ), - ( - "SomeResponse".into(), - ReferencedFieldsForType { - field_names: vec!["field2".into(), "field1".into()], - is_interface: false, - }, - ), - ]) - }), - UsageReportingComparisonResult::Equal - )); - - // Added parent type should not match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: source.result.stats_report_key.clone(), - referenced_fields_by_type: HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into()], - is_interface: false, - }, - ), - ( - "SomeResponse".into(), - ReferencedFieldsForType { - field_names: vec!["field1".into(), "field2".into()], - is_interface: false, - }, - ), - ( - "OtherType".into(), - ReferencedFieldsForType { - field_names: vec!["otherField".into()], - is_interface: false, - }, - ), - ]) - }), - UsageReportingComparisonResult::ReferencedFieldsNotEqual - )); - - // Added field should not match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: source.result.stats_report_key.clone(), - referenced_fields_by_type: HashMap::from([ - ( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into()], - is_interface: false, - }, - ), - ( - "SomeResponse".into(), - ReferencedFieldsForType { - field_names: vec!["field1".into(), "field2".into(), "field3".into()], - is_interface: false, - }, - ), - ]) - }), - UsageReportingComparisonResult::ReferencedFieldsNotEqual - )); - - // Missing parent type should not match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: source.result.stats_report_key.clone(), - referenced_fields_by_type: HashMap::from([( - "Query".into(), - ReferencedFieldsForType { - field_names: vec!["basicResponseQuery".into()], - is_interface: false, - }, - ),]) - }), - UsageReportingComparisonResult::ReferencedFieldsNotEqual - )); - - // Missing field should not match - assert!(matches!( - source.compare(&UsageReporting { - stats_report_key: source.result.stats_report_key.clone(), - referenced_fields_by_type: 
HashMap::from([
-                    (
-                        "Query".into(),
-                        ReferencedFieldsForType {
-                            field_names: vec!["basicResponseQuery".into()],
-                            is_interface: false,
-                        },
-                    ),
-                    (
-                        "SomeResponse".into(),
-                        ReferencedFieldsForType {
-                            field_names: vec!["field1".into()],
-                            is_interface: false,
-                        },
-                    ),
-                ])
-            }),
-            UsageReportingComparisonResult::ReferencedFieldsNotEqual
-        ));
-
-        // Both different should not match
-        assert!(matches!(
-            source.compare(&UsageReporting {
-                stats_report_key: "# -\n{basicResponseQuery{field2 field1}}".into(),
-                referenced_fields_by_type: HashMap::from([(
-                    "Query".into(),
-                    ReferencedFieldsForType {
-                        field_names: vec!["basicResponseQuery".into()],
-                        is_interface: false,
-                    },
-                ),])
-            }),
-            UsageReportingComparisonResult::BothNotEqual
-        ));
-    }
-}
+mod tests;
diff --git a/apollo-router/src/apollo_studio_interop/tests.rs b/apollo-router/src/apollo_studio_interop/tests.rs
new file mode 100644
index 0000000000..c3ae55ce6c
--- /dev/null
+++ b/apollo-router/src/apollo_studio_interop/tests.rs
@@ -0,0 +1,1407 @@
+use apollo_compiler::Schema;
+use router_bridge::planner::PlanOptions;
+use router_bridge::planner::Planner;
+use router_bridge::planner::QueryPlannerConfig;
+use test_log::test;
+
+use super::*;
+
+// Generate the signature and referenced fields using router-bridge to confirm that the expected value we used is correct.
+// We can remove this when we no longer use the bridge but should keep the rust implementation verifications.
+async fn assert_bridge_results(
+    schema_str: &str,
+    query_str: &str,
+    expected_sig: &str,
+    expected_refs: &HashMap<String, ReferencedFieldsForType>,
+) {
+    let planner =
+        Planner::<serde_json::Value>::new(schema_str.to_string(), QueryPlannerConfig::default())
+            .await
+            .unwrap();
+    let plan = planner
+        .plan(query_str.to_string(), None, PlanOptions::default())
+        .await
+        .unwrap();
+    let bridge_result = ComparableUsageReporting {
+        result: plan.usage_reporting,
+    };
+    let expected_result = UsageReporting {
+        stats_report_key: expected_sig.to_string(),
+        referenced_fields_by_type: expected_refs.clone(),
+    };
+    assert!(matches!(
+        bridge_result.compare(&expected_result),
+        UsageReportingComparisonResult::Equal
+    ));
+}
+
+fn assert_expected_results(
+    actual: &ComparableUsageReporting,
+    expected_sig: &str,
+    expected_refs: &HashMap<String, ReferencedFieldsForType>,
+) {
+    let expected_result = UsageReporting {
+        stats_report_key: expected_sig.to_string(),
+        referenced_fields_by_type: expected_refs.clone(),
+    };
+    assert!(matches!(
+        actual.compare(&expected_result),
+        UsageReportingComparisonResult::Equal
+    ));
+}
+
+#[test(tokio::test)]
+async fn test_complex_query() {
+    let schema_str = include_str!("testdata/schema_interop.graphql");
+
+    let query_str = r#"query UnusedQuery {
+          noInputQuery {
+            enumResponse
+          }
+        }
+
+        fragment UnusedFragment on EverythingResponse {
+          enumResponse
+        }
+
+        fragment Fragment2 on EverythingResponse {
+          basicTypes {
+            nullableFloat
+          }
+        }
+
+        query TransformedQuery {
+
+
+          scalarInputQuery(idInput: "a1", listInput: [], boolInput: true, intInput: 1, stringInput: "x", floatInput: 1.2) @skip(if: false) @include(if: true) {
+            ...Fragment2,
+
+
+            objectTypeWithInputField(boolInput: true, secondInput: false) {
+              stringField
+              __typename
+              intField
+            }
+
+            enumResponse
+            interfaceResponse {
+              sharedField
+              ... on InterfaceImplementation2 {
+                implementation2Field
+              }
+              ... 
on InterfaceImplementation1 { + implementation1Field + } + } + ...Fragment1, + } + } + + fragment Fragment1 on EverythingResponse { + basicTypes { + nonNullFloat + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &Some("TransformedQuery".into()), &schema); + + let expected_sig = "# TransformedQuery\nfragment Fragment1 on EverythingResponse{basicTypes{nonNullFloat}}fragment Fragment2 on EverythingResponse{basicTypes{nullableFloat}}query TransformedQuery{scalarInputQuery(boolInput:true floatInput:0 idInput:\"\"intInput:0 listInput:[]stringInput:\"\")@skip(if:false)@include(if:true){enumResponse interfaceResponse{sharedField...on InterfaceImplementation2{implementation2Field}...on InterfaceImplementation1{implementation1Field}}objectTypeWithInputField(boolInput:true,secondInput:false){__typename intField stringField}...Fragment1...Fragment2}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["scalarInputQuery".into()], + is_interface: false, + }, + ), + ( + "BasicTypesResponse".into(), + ReferencedFieldsForType { + field_names: vec!["nullableFloat".into(), "nonNullFloat".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec![ + "basicTypes".into(), + "objectTypeWithInputField".into(), + "enumResponse".into(), + "interfaceResponse".into(), + ], + is_interface: false, + }, + ), + ( + "AnInterface".into(), + ReferencedFieldsForType { + field_names: vec!["sharedField".into()], + is_interface: true, + }, + ), + ( + "ObjectTypeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["stringField".into(), "__typename".into(), "intField".into()], + is_interface: false, + }, + ), + ( + "InterfaceImplementation1".into(), + ReferencedFieldsForType { + field_names: vec!["implementation1Field".into()], + is_interface: false, + }, + ), + ( + "InterfaceImplementation2".into(), + ReferencedFieldsForType { + field_names: vec!["implementation2Field".into()], + is_interface: false, + }, + ), + ]); + assert_expected_results(&generated, expected_sig, &expected_refs); + + // the router-bridge planner will throw errors on unused fragments/queries so we remove them here + let sanitised_query_str = r#"fragment Fragment2 on EverythingResponse { + basicTypes { + nullableFloat + } + } + + query TransformedQuery { + + + scalarInputQuery(idInput: "a1", listInput: [], boolInput: true, intInput: 1, stringInput: "x", floatInput: 1.2) @skip(if: false) @include(if: true) { + ...Fragment2, + + + objectTypeWithInputField(boolInput: true, secondInput: false) { + stringField + __typename + intField + } + + enumResponse + interfaceResponse { + sharedField + ... on InterfaceImplementation2 { + implementation2Field + } + ... on InterfaceImplementation1 { + implementation1Field + } + } + ...Fragment1, + } + } + + fragment Fragment1 on EverythingResponse { + basicTypes { + nonNullFloat + } + }"#; + + assert_bridge_results( + schema_str, + sanitised_query_str, + expected_sig, + &expected_refs, + ) + .await; +} + +#[test(tokio::test)] +async fn test_complex_references() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query Query($secondInput: Boolean!) 
{ + scalarResponseQuery + noInputQuery { + basicTypes { + nonNullId + nonNullInt + } + enumResponse + interfaceImplementationResponse { + sharedField + implementation2Field + } + interfaceResponse { + ... on InterfaceImplementation1 { + implementation1Field + sharedField + } + ... on InterfaceImplementation2 { + implementation2Field + sharedField + } + } + listOfUnions { + ... on UnionType1 { + nullableString + } + } + objectTypeWithInputField(secondInput: $secondInput) { + intField + } + } + basicInputTypeQuery(input: { someFloat: 1 }) { + unionResponse { + ... on UnionType1 { + nullableString + } + } + unionType2Response { + unionType2Field + } + listOfObjects { + stringField + } + } + }"#; + + let schema: Valid = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &Some("Query".into()), &schema); + + let expected_sig = "# Query\nquery Query($secondInput:Boolean!){basicInputTypeQuery(input:{}){listOfObjects{stringField}unionResponse{...on UnionType1{nullableString}}unionType2Response{unionType2Field}}noInputQuery{basicTypes{nonNullId nonNullInt}enumResponse interfaceImplementationResponse{implementation2Field sharedField}interfaceResponse{...on InterfaceImplementation1{implementation1Field sharedField}...on InterfaceImplementation2{implementation2Field sharedField}}listOfUnions{...on UnionType1{nullableString}}objectTypeWithInputField(secondInput:$secondInput){intField}}scalarResponseQuery}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec![ + "scalarResponseQuery".into(), + "noInputQuery".into(), + "basicInputTypeQuery".into(), + ], + is_interface: false, + }, + ), + ( + "BasicTypesResponse".into(), + ReferencedFieldsForType { + field_names: vec!["nonNullId".into(), "nonNullInt".into()], + is_interface: false, + }, + ), + ( + "ObjectTypeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["intField".into(), "stringField".into()], + is_interface: false, + }, + ), + ( + "UnionType2".into(), + ReferencedFieldsForType { + field_names: vec!["unionType2Field".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec![ + "basicTypes".into(), + "enumResponse".into(), + "interfaceImplementationResponse".into(), + "interfaceResponse".into(), + "listOfUnions".into(), + "objectTypeWithInputField".into(), + "unionResponse".into(), + "unionType2Response".into(), + "listOfObjects".into(), + ], + is_interface: false, + }, + ), + ( + "InterfaceImplementation1".into(), + ReferencedFieldsForType { + field_names: vec!["implementation1Field".into(), "sharedField".into()], + is_interface: false, + }, + ), + ( + "UnionType1".into(), + ReferencedFieldsForType { + field_names: vec!["nullableString".into()], + is_interface: false, + }, + ), + ( + "InterfaceImplementation2".into(), + ReferencedFieldsForType { + field_names: vec!["sharedField".into(), "implementation2Field".into()], + is_interface: false, + }, + ), + ]); + assert_expected_results(&generated, expected_sig, &expected_refs); + + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; +} + +#[test(tokio::test)] +async fn test_basic_whitespace() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query MyQuery { + noInputQuery { + id + } + }"#; + + let schema = 
Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &Some("MyQuery".into()), &schema); + + let expected_sig = "# MyQuery\nquery MyQuery{noInputQuery{id}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["noInputQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["id".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; +} + +#[test(tokio::test)] +async fn test_anonymous_query() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query { + noInputQuery { + id + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &None, &schema); + + let expected_sig = "# -\n{noInputQuery{id}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["noInputQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["id".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; +} + +#[test(tokio::test)] +async fn test_anonymous_mutation() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"mutation { + noInputMutation { + id + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &None, &schema); + + let expected_sig = "# -\nmutation{noInputMutation{id}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Mutation".into(), + ReferencedFieldsForType { + field_names: vec!["noInputMutation".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["id".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; +} + +#[test(tokio::test)] +async fn test_anonymous_subscription() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str: &str = r#"subscription { + noInputSubscription { + id + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &None, &schema); + + let expected_sig = "# -\nsubscription{noInputSubscription{id}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Subscription".into(), + ReferencedFieldsForType { + field_names: vec!["noInputSubscription".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["id".into()], + is_interface: false, + }, 
+ ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; +} + +#[test(tokio::test)] +async fn test_ordered_fields_and_variables() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query VariableScalarInputQuery($idInput: ID!, $boolInput: Boolean!, $floatInput: Float!, $intInput: Int!, $listInput: [String!]!, $stringInput: String!, $nullableStringInput: String) { + sortQuery( + idInput: $idInput + boolInput: $boolInput + floatInput: $floatInput + INTInput: $intInput + listInput: $listInput + stringInput: $stringInput + nullableStringInput: $nullableStringInput + ) { + zzz + CCC + nullableId + aaa + id + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting( + &doc, + &doc, + &Some("VariableScalarInputQuery".into()), + &schema, + ); + + let expected_sig = "# VariableScalarInputQuery\nquery VariableScalarInputQuery($boolInput:Boolean!,$floatInput:Float!,$idInput:ID!,$intInput:Int!,$listInput:[String!]!,$nullableStringInput:String,$stringInput:String!){sortQuery(INTInput:$intInput boolInput:$boolInput floatInput:$floatInput idInput:$idInput listInput:$listInput nullableStringInput:$nullableStringInput stringInput:$stringInput){CCC aaa id nullableId zzz}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["sortQuery".into()], + is_interface: false, + }, + ), + ( + "SortResponse".into(), + ReferencedFieldsForType { + field_names: vec![ + "aaa".into(), + "CCC".into(), + "id".into(), + "nullableId".into(), + "zzz".into(), + ], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; +} + +#[test(tokio::test)] +async fn test_fragments() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query FragmentQuery { + noInputQuery { + listOfBools + interfaceResponse { + sharedField + ... on InterfaceImplementation2 { + implementation2Field + } + ...bbbInterfaceFragment + ...aaaInterfaceFragment + ... { + ... on InterfaceImplementation1 { + implementation1Field + } + } + ... on InterfaceImplementation1 { + implementation1Field + } + } + unionResponse { + ... on UnionType2 { + unionType2Field + } + ... 
on UnionType1 { + unionType1Field + } + } + ...zzzFragment + ...aaaFragment + ...ZZZFragment + } + } + + fragment zzzFragment on EverythingResponse { + listOfInterfaces { + sharedField + } + } + + fragment ZZZFragment on EverythingResponse { + listOfInterfaces { + sharedField + } + } + + fragment aaaFragment on EverythingResponse { + listOfInterfaces { + sharedField + } + } + + fragment UnusedFragment on InterfaceImplementation2 { + sharedField + implementation2Field + } + + fragment bbbInterfaceFragment on InterfaceImplementation2 { + sharedField + implementation2Field + } + + fragment aaaInterfaceFragment on InterfaceImplementation1 { + sharedField + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &Some("FragmentQuery".into()), &schema); + + let expected_sig = "# FragmentQuery\nfragment ZZZFragment on EverythingResponse{listOfInterfaces{sharedField}}fragment aaaFragment on EverythingResponse{listOfInterfaces{sharedField}}fragment aaaInterfaceFragment on InterfaceImplementation1{sharedField}fragment bbbInterfaceFragment on InterfaceImplementation2{implementation2Field sharedField}fragment zzzFragment on EverythingResponse{listOfInterfaces{sharedField}}query FragmentQuery{noInputQuery{interfaceResponse{sharedField...aaaInterfaceFragment...bbbInterfaceFragment...on InterfaceImplementation2{implementation2Field}...{...on InterfaceImplementation1{implementation1Field}}...on InterfaceImplementation1{implementation1Field}}listOfBools unionResponse{...on UnionType2{unionType2Field}...on UnionType1{unionType1Field}}...ZZZFragment...aaaFragment...zzzFragment}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "UnionType1".into(), + ReferencedFieldsForType { + field_names: vec!["unionType1Field".into()], + is_interface: false, + }, + ), + ( + "UnionType2".into(), + ReferencedFieldsForType { + field_names: vec!["unionType2Field".into()], + is_interface: false, + }, + ), + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["noInputQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec![ + "listOfInterfaces".into(), + "listOfBools".into(), + "interfaceResponse".into(), + "unionResponse".into(), + ], + is_interface: false, + }, + ), + ( + "InterfaceImplementation1".into(), + ReferencedFieldsForType { + field_names: vec!["sharedField".into(), "implementation1Field".into()], + is_interface: false, + }, + ), + ( + "InterfaceImplementation1".into(), + ReferencedFieldsForType { + field_names: vec!["implementation1Field".into(), "sharedField".into()], + is_interface: false, + }, + ), + ( + "AnInterface".into(), + ReferencedFieldsForType { + field_names: vec!["sharedField".into()], + is_interface: true, + }, + ), + ( + "InterfaceImplementation2".into(), + ReferencedFieldsForType { + field_names: vec!["sharedField".into(), "implementation2Field".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + + // the router-bridge planner will throw errors on unused fragments/queries so we remove them here + let sanitised_query_str = r#"query FragmentQuery { + noInputQuery { + listOfBools + interfaceResponse { + sharedField + ... on InterfaceImplementation2 { + implementation2Field + } + ...bbbInterfaceFragment + ...aaaInterfaceFragment + ... { + ... 
on InterfaceImplementation1 { + implementation1Field + } + } + ... on InterfaceImplementation1 { + implementation1Field + } + } + unionResponse { + ... on UnionType2 { + unionType2Field + } + ... on UnionType1 { + unionType1Field + } + } + ...zzzFragment + ...aaaFragment + ...ZZZFragment + } + } + + fragment zzzFragment on EverythingResponse { + listOfInterfaces { + sharedField + } + } + + fragment ZZZFragment on EverythingResponse { + listOfInterfaces { + sharedField + } + } + + fragment aaaFragment on EverythingResponse { + listOfInterfaces { + sharedField + } + } + + fragment bbbInterfaceFragment on InterfaceImplementation2 { + sharedField + implementation2Field + } + + fragment aaaInterfaceFragment on InterfaceImplementation1 { + sharedField + }"#; + assert_bridge_results( + schema_str, + sanitised_query_str, + expected_sig, + &expected_refs, + ) + .await; +} + +#[test(tokio::test)] +async fn test_directives() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"fragment Fragment1 on InterfaceImplementation1 { + sharedField + implementation1Field + } + + fragment Fragment2 on InterfaceImplementation2 @withArgs(arg2: "" arg1: "test" arg3: true arg5: [1,2] arg4: 2) @noArgs { + sharedField + implementation2Field + } + + query DirectiveQuery @withArgs(arg2: "" arg1: "test") @noArgs { + noInputQuery { + enumResponse @withArgs(arg3: false arg5: [1,2] arg4: 2) @noArgs + unionResponse { + ... on UnionType1 @withArgs(arg2: "" arg1: "test") @noArgs { + unionType1Field + } + } + interfaceResponse { + ... Fragment1 @withArgs(arg1: "test") @noArgs + ... Fragment2 + } + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &Some("DirectiveQuery".into()), &schema); + + let expected_sig = "# DirectiveQuery\nfragment Fragment1 on InterfaceImplementation1{implementation1Field sharedField}fragment Fragment2 on InterfaceImplementation2@noArgs@withArgs(arg1:\"\",arg2:\"\",arg3:true,arg4:0,arg5:[]){implementation2Field sharedField}query DirectiveQuery@withArgs(arg1:\"\",arg2:\"\")@noArgs{noInputQuery{enumResponse@withArgs(arg3:false,arg4:0,arg5:[])@noArgs interfaceResponse{...Fragment1@noArgs@withArgs(arg1:\"\")...Fragment2}unionResponse{...on UnionType1@noArgs@withArgs(arg1:\"\",arg2:\"\"){unionType1Field}}}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "UnionType1".into(), + ReferencedFieldsForType { + field_names: vec!["unionType1Field".into()], + is_interface: false, + }, + ), + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["noInputQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec![ + "enumResponse".into(), + "interfaceResponse".into(), + "unionResponse".into(), + ], + is_interface: false, + }, + ), + ( + "InterfaceImplementation1".into(), + ReferencedFieldsForType { + field_names: vec!["sharedField".into(), "implementation1Field".into()], + is_interface: false, + }, + ), + ( + "InterfaceImplementation1".into(), + ReferencedFieldsForType { + field_names: vec!["implementation1Field".into(), "sharedField".into()], + is_interface: false, + }, + ), + ( + "InterfaceImplementation2".into(), + ReferencedFieldsForType { + field_names: vec!["sharedField".into(), "implementation2Field".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, 
&expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; +} + +#[test(tokio::test)] +async fn test_aliases() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query AliasQuery { + xxAlias: enumInputQuery(enumInput: SOME_VALUE_1) { + aliased: enumResponse + } + aaAlias: enumInputQuery(enumInput: SOME_VALUE_2) { + aliasedAgain: enumResponse + } + ZZAlias: enumInputQuery(enumInput: SOME_VALUE_3) { + enumResponse + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &Some("AliasQuery".into()), &schema); + + let expected_sig = "# AliasQuery\nquery AliasQuery{enumInputQuery(enumInput:SOME_VALUE_1){enumResponse}enumInputQuery(enumInput:SOME_VALUE_2){enumResponse}enumInputQuery(enumInput:SOME_VALUE_3){enumResponse}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["enumResponse".into()], + is_interface: false, + }, + ), + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["enumInputQuery".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; +} + +#[test(tokio::test)] +async fn test_inline_values() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query InlineInputTypeQuery { + inputTypeQuery(input: { + inputString: "foo", + inputInt: 42, + inputBoolean: null, + nestedType: { someFloat: 4.2 }, + enumInput: SOME_VALUE_1, + nestedTypeList: [ { someFloat: 4.2, someNullableFloat: null } ], + listInput: [1, 2, 3] + }) { + enumResponse + } + }"#; + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = + generate_usage_reporting(&doc, &doc, &Some("InlineInputTypeQuery".into()), &schema); + + let expected_sig = "# InlineInputTypeQuery\nquery InlineInputTypeQuery{inputTypeQuery(input:{}){enumResponse}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["enumResponse".into()], + is_interface: false, + }, + ), + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["inputTypeQuery".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; +} + +#[test(tokio::test)] +async fn test_root_type_fragment() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query SomeQuery { + ... on Query { + ... 
{ + basicResponseQuery { + id + } + } + } + noInputQuery { + enumResponse + } + }"#; + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &None, &schema); + + let expected_sig = "# SomeQuery\nquery SomeQuery{noInputQuery{enumResponse}...on Query{...{basicResponseQuery{id}}}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "BasicResponse".into(), + ReferencedFieldsForType { + field_names: vec!["id".into()], + is_interface: false, + }, + ), + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into(), "noInputQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["enumResponse".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; +} + +#[test(tokio::test)] +async fn test_directive_arg_spacing() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query { + basicResponseQuery { + id @withArgs(arg1: "") + id + } + }"#; + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &None, &schema); + + let expected_sig = "# -\n{basicResponseQuery{id@withArgs(arg1:\"\")id}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "BasicResponse".into(), + ReferencedFieldsForType { + field_names: vec!["id".into()], + is_interface: false, + }, + ), + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; +} + +#[test(tokio::test)] +async fn test_operation_with_single_variable() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query QueryWithVar($input_enum: SomeEnum) { + enumInputQuery(enumInput: $input_enum) { + listOfBools + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &Some("QueryWithVar".into()), &schema); + + let expected_sig = "# QueryWithVar\nquery QueryWithVar($input_enum:SomeEnum){enumInputQuery(enumInput:$input_enum){listOfBools}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["enumInputQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["listOfBools".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; +} + +#[test(tokio::test)] +async fn test_operation_with_multiple_variables() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query QueryWithVars($stringInput: String!, $floatInput: Float!, $boolInput: Boolean!) 
{
+        scalarInputQuery(listInput: ["x"], stringInput: $stringInput, intInput: 6, floatInput: $floatInput, boolInput: $boolInput, idInput: "y") {
+          enumResponse
+        }
+        inputTypeQuery(input: { inputInt: 2, inputString: "z", listInput: [], nestedType: { someFloat: 5 }}) {
+          enumResponse
+        }
+      }"#;
+
+    let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+    let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+    let generated = generate_usage_reporting(&doc, &doc, &Some("QueryWithVars".into()), &schema);
+
+    let expected_sig = "# QueryWithVars\nquery QueryWithVars($boolInput:Boolean!,$floatInput:Float!,$stringInput:String!){inputTypeQuery(input:{}){enumResponse}scalarInputQuery(boolInput:$boolInput floatInput:$floatInput idInput:\"\"intInput:0 listInput:[]stringInput:$stringInput){enumResponse}}";
+    let expected_refs: HashMap<String, ReferencedFieldsForType> = HashMap::from([
+        (
+            "Query".into(),
+            ReferencedFieldsForType {
+                field_names: vec!["scalarInputQuery".into(), "inputTypeQuery".into()],
+                is_interface: false,
+            },
+        ),
+        (
+            "EverythingResponse".into(),
+            ReferencedFieldsForType {
+                field_names: vec!["enumResponse".into()],
+                is_interface: false,
+            },
+        ),
+    ]);
+
+    assert_expected_results(&generated, expected_sig, &expected_refs);
+    assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await;
+}
+
+#[test(tokio::test)]
+async fn test_field_arg_comma_or_space() {
+    let schema_str = include_str!("testdata/schema_interop.graphql");
+
+    let query_str = r#"query QueryArgLength($StringInputWithAVeryyyLongNameSoLineLengthIs80: String!, $inputType: AnotherInputType, $enumInputWithAVryLongNameSoLineLengthIsOver80: SomeEnum, $enumInputType: EnumInputType) {
+        enumInputQuery (enumInput:$enumInputWithAVryLongNameSoLineLengthIsOver80,inputType:$enumInputType) {
+          enumResponse
+        }
+        defaultArgQuery(stringInput:$StringInputWithAVeryyyLongNameSoLineLengthIs80,inputType:$inputType) {
+          id
+        }
+      }"#;
+
+    let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap();
+    let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap();
+
+    let generated = generate_usage_reporting(&doc, &doc, &Some("QueryArgLength".into()), &schema);
+
+    // enumInputQuery has a variable line length of 81, so it should be separated by spaces (which are converted from newlines
+    // in the original implementation).
+    // defaultArgQuery has a variable line length of 80, so it should be separated by commas.
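// A minimal sketch (not from this patch) of the separator rule those two comments
// describe, assuming a hypothetical helper name; the real logic lives in the router's
// signature-formatting code rather than in these tests:
fn field_arg_separator(formatted_arg_line: &str) -> &'static str {
    // Over 80 characters, the newlines of the canonical form become spaces;
    // at 80 characters or fewer, arguments are joined with commas.
    if formatted_arg_line.len() > 80 {
        " "
    } else {
        ","
    }
}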
+ let expected_sig = "# QueryArgLength\nquery QueryArgLength($StringInputWithAVeryyyLongNameSoLineLengthIs80:String!,$enumInputType:EnumInputType,$enumInputWithAVryLongNameSoLineLengthIsOver80:SomeEnum,$inputType:AnotherInputType){defaultArgQuery(inputType:$inputType stringInput:$StringInputWithAVeryyyLongNameSoLineLengthIs80){id}enumInputQuery(enumInput:$enumInputWithAVryLongNameSoLineLengthIsOver80 inputType:$enumInputType){enumResponse}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["enumInputQuery".into(), "defaultArgQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["enumResponse".into()], + is_interface: false, + }, + ), + ( + "BasicResponse".into(), + ReferencedFieldsForType { + field_names: vec!["id".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; +} + +#[test(tokio::test)] +async fn test_operation_arg_always_commas() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query QueryArgLength($enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80: SomeEnum, $enumInputType: EnumInputType) { + enumInputQuery (enumInput:$enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80,inputType:$enumInputType) { + enumResponse + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = generate_usage_reporting(&doc, &doc, &Some("QueryArgLength".into()), &schema); + + // operation variables shouldn't ever be converted to spaces, since the line length check is only on field variables + // in the original implementation + let expected_sig = "# QueryArgLength\nquery QueryArgLength($enumInputType:EnumInputType,$enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80:SomeEnum){enumInputQuery(enumInput:$enumInputWithAVerrrrrrrrrrrryLongNameSoLineLengthIsOver80 inputType:$enumInputType){enumResponse}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["enumInputQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["enumResponse".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; +} + +#[test(tokio::test)] +async fn test_compare() { + let source = ComparableUsageReporting { + result: UsageReporting { + stats_report_key: "# -\n{basicResponseQuery{field1 field2}}".into(), + referenced_fields_by_type: HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ), + ( + "SomeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["field1".into(), "field2".into()], + is_interface: false, + }, + ), + ]), + }, + }; + + // Same signature and ref fields should match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: source.result.stats_report_key.clone(), + referenced_fields_by_type: source.result.referenced_fields_by_type.clone(), + }), + UsageReportingComparisonResult::Equal + )); + + // Reordered signature should 
not match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: "# -\n{basicResponseQuery{field2 field1}}".into(), + referenced_fields_by_type: source.result.referenced_fields_by_type.clone(), + }), + UsageReportingComparisonResult::StatsReportKeyNotEqual + )); + + // Different signature should not match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: "# NamedQuery\nquery NamedQuery {basicResponseQuery{field1 field2}}" + .into(), + referenced_fields_by_type: source.result.referenced_fields_by_type.clone(), + }), + UsageReportingComparisonResult::StatsReportKeyNotEqual + )); + + // Reordered parent type should match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: source.result.stats_report_key.clone(), + referenced_fields_by_type: HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ), + ( + "SomeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["field1".into(), "field2".into()], + is_interface: false, + }, + ), + ]) + }), + UsageReportingComparisonResult::Equal + )); + + // Reordered fields should match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: source.result.stats_report_key.clone(), + referenced_fields_by_type: HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ), + ( + "SomeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["field2".into(), "field1".into()], + is_interface: false, + }, + ), + ]) + }), + UsageReportingComparisonResult::Equal + )); + + // Added parent type should not match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: source.result.stats_report_key.clone(), + referenced_fields_by_type: HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ), + ( + "SomeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["field1".into(), "field2".into()], + is_interface: false, + }, + ), + ( + "OtherType".into(), + ReferencedFieldsForType { + field_names: vec!["otherField".into()], + is_interface: false, + }, + ), + ]) + }), + UsageReportingComparisonResult::ReferencedFieldsNotEqual + )); + + // Added field should not match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: source.result.stats_report_key.clone(), + referenced_fields_by_type: HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ), + ( + "SomeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["field1".into(), "field2".into(), "field3".into()], + is_interface: false, + }, + ), + ]) + }), + UsageReportingComparisonResult::ReferencedFieldsNotEqual + )); + + // Missing parent type should not match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: source.result.stats_report_key.clone(), + referenced_fields_by_type: HashMap::from([( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ),]) + }), + UsageReportingComparisonResult::ReferencedFieldsNotEqual + )); + + // Missing field should not match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: source.result.stats_report_key.clone(), + referenced_fields_by_type: HashMap::from([ 
+ ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ), + ( + "SomeResponse".into(), + ReferencedFieldsForType { + field_names: vec!["field1".into()], + is_interface: false, + }, + ), + ]) + }), + UsageReportingComparisonResult::ReferencedFieldsNotEqual + )); + + // Both different should not match + assert!(matches!( + source.compare(&UsageReporting { + stats_report_key: "# -\n{basicResponseQuery{field2 field1}}".into(), + referenced_fields_by_type: HashMap::from([( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["basicResponseQuery".into()], + is_interface: false, + }, + ),]) + }), + UsageReportingComparisonResult::BothNotEqual + )); +} diff --git a/licenses.html b/licenses.html index e36dcba570..6ac3eb5f8e 100644 --- a/licenses.html +++ b/licenses.html @@ -44,8 +44,8 @@

        Third Party Licenses

        Overview of licenses:

  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    ../../LICENSE-APACHE
    +
  • Apache License 2.0

    Used by:

    @@ -12177,12 +12188,8 @@

    Used by:

    Apache License 2.0

    Used by:

      -
    • allocator-api2
    • apollo-compiler
    • -
    • apollo-encoder
    • apollo-parser
    • -
    • apollo-smith
    • -
    • buildstructor
    • curve25519-dalek-derive
    • deadpool-runtime
    • deno-proc-macro-rules
    • @@ -12281,6 +12288,47 @@

      Used by:

      http://www.apache.org/licenses/LICENSE-2.0
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +
      +
    • +

      Apache License 2.0

      +

      Used by:

      +
      +Copyright 2023 The allocator-api2 project developers
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +	http://www.apache.org/licenses/LICENSE-2.0
      +
      +Unless required by applicable law or agreed to in writing, software
      +distributed under the License is distributed on an "AS IS" BASIS,
      +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      +See the License for the specific language governing permissions and
      +limitations under the License.
      +
    • +
    • +

      Apache License 2.0

      +

      Used by:

      +
      +Copyright [2022] [Bryn Cooke]
      +
      +Licensed under the Apache License, Version 2.0 (the "License");
      +you may not use this file except in compliance with the License.
      +You may obtain a copy of the License at
      +
      +    http://www.apache.org/licenses/LICENSE-2.0
      +
       Unless required by applicable law or agreed to in writing, software
       distributed under the License is distributed on an "AS IS" BASIS,
       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      @@ -14202,6 +14250,66 @@ 

      Used by:

      shall be included in all copies or substantial portions of the Software.
      +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
      +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
      +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
      +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
      +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
      +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
      +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
      +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
      +DEALINGS IN THE SOFTWARE.
      +
      +
    • +
    • +

      MIT License

      +

      Used by:

      +
      +Copyright (c) 2019 Carl Lerche
      +
      +Permission is hereby granted, free of charge, to any
      +person obtaining a copy of this software and associated
      +documentation files (the "Software"), to deal in the
      +Software without restriction, including without
      +limitation the rights to use, copy, modify, merge,
      +publish, distribute, sublicense, and/or sell copies of
      +the Software, and to permit persons to whom the Software
      +is furnished to do so, subject to the following
      +conditions:
      +
      +The above copyright notice and this permission notice
      +shall be included in all copies or substantial portions
      +of the Software.
      +
      +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
      +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
      +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
      +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
      +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
      +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
      +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
      +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
      +DEALINGS IN THE SOFTWARE.
      +
      +Copyright (c) 2018 David Tolnay
      +
      +Permission is hereby granted, free of charge, to any
      +person obtaining a copy of this software and associated
      +documentation files (the "Software"), to deal in the
      +Software without restriction, including without
      +limitation the rights to use, copy, modify, merge,
      +publish, distribute, sublicense, and/or sell copies of
      +the Software, and to permit persons to whom the Software
      +is furnished to do so, subject to the following
      +conditions:
      +
      +The above copyright notice and this permission notice
      +shall be included in all copies or substantial portions
      +of the Software.
      +
       THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
       ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
       TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
      @@ -15113,8 +15221,6 @@ 

      Used by:

      MIT License

      Used by:

        -
      • async-stream
      • -
      • async-stream-impl
      • base64-simd
      • convert_case
      • crunchy
      • @@ -15986,6 +16092,8 @@

        Used by:

        MIT License

        Used by:

          +
        • aho-corasick
        • +
        • byteorder
        • globset
        • memchr
        • regex-automata
        • From d2d7f78693bd1bf7f8b23460f4987e9e919caa34 Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Wed, 17 Apr 2024 12:58:17 +0200 Subject: [PATCH 24/46] skip useless intergration tests (#4970) Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- apollo-router/tests/integration_tests.rs | 101 ----------------------- 1 file changed, 101 deletions(-) diff --git a/apollo-router/tests/integration_tests.rs b/apollo-router/tests/integration_tests.rs index cb2a45496d..c35d17ae29 100644 --- a/apollo-router/tests/integration_tests.rs +++ b/apollo-router/tests/integration_tests.rs @@ -27,7 +27,6 @@ use http::StatusCode; use http::Uri; use maplit::hashmap; use mime::APPLICATION_JSON; -use serde_json::to_string_pretty; use serde_json_bytes::json; use serde_json_bytes::Value; use tower::BoxError; @@ -37,71 +36,6 @@ use walkdir::WalkDir; mod integration; -macro_rules! assert_federated_response { - ($query:expr, $service_requests:expr $(,)?) => { - let request = supergraph::Request::fake_builder() - .query($query) - .variable("topProductsFirst", 2_i32) - .variable("reviewsForAuthorAuthorId", 1_i32) - .method(Method::POST) - .build() - .unwrap(); - - let expected = match query_node(&request).await { - Ok(e) => e, - Err(err) => { - panic!("query_node failed: {err}. Probably caused by missing gateway during testing"); - } - }; - assert_eq!(expected.errors, []); - - let (actual, registry) = query_rust(request).await; - assert_eq!(actual.errors, []); - - tracing::debug!("query:\n{}\n", $query); - - assert!( - expected.data.as_ref().unwrap().is_object(), - "nodejs: no response's data: please check that the gateway and the subgraphs are running", - ); - - tracing::debug!("expected: {}", to_string_pretty(&expected).unwrap()); - tracing::debug!("actual: {}", to_string_pretty(&actual).unwrap()); - - let expected = expected.data.as_ref().expect("expected data should not be none"); - let actual = actual.data.as_ref().expect("received data should not be none"); - assert!( - expected.eq_and_ordered(actual), - "the gateway and the router didn't return the same data:\ngateway:\n{}\nrouter\n{}", - expected, - actual - ); - assert_eq!(registry.totals(), $service_requests); - }; -} - -#[tokio::test(flavor = "multi_thread")] -async fn basic_request() { - assert_federated_response!( - r#"{ topProducts { name name2:name } }"#, - hashmap! { - "products".to_string()=>1, - }, - ); -} - -#[tokio::test(flavor = "multi_thread")] -async fn basic_composition() { - assert_federated_response!( - r#"{ topProducts { upc name reviews {id product { name } author { id name } } } }"#, - hashmap! { - "products".to_string()=>2, - "reviews".to_string()=>1, - "accounts".to_string()=>1, - }, - ); -} - #[tokio::test(flavor = "multi_thread")] async fn api_schema_hides_field() { let request = supergraph::Request::fake_builder() @@ -146,29 +80,6 @@ async fn validation_errors_from_rust() { insta::assert_json_snapshot!(response.errors); } -#[tokio::test(flavor = "multi_thread")] -async fn basic_mutation() { - assert_federated_response!( - r#"mutation { - createProduct(upc:"8", name:"Bob") { - upc - name - reviews { - body - } - } - createReview(upc: "8", id:"100", body: "Bif"){ - id - body - } - }"#, - hashmap! 
{ - "products".to_string()=>1, - "reviews".to_string()=>2, - }, - ); -} - #[tokio::test(flavor = "multi_thread")] async fn queries_should_work_over_get() { // get request @@ -1222,18 +1133,6 @@ async fn query_operation_id() { .is_none()); } -async fn query_node(request: &supergraph::Request) -> Result { - reqwest::Client::new() - .post("https://federation-demo-gateway.fly.dev/") - .json(request.supergraph_request.body()) - .send() - .await - .map_err(|err| format!("HTTP fetch failed from 'test node': {err}"))? - .json() - .await - .map_err(|err| format!("service 'test node' response was malformed: {err}")) -} - async fn http_query_rust( request: supergraph::Request, ) -> (router::Response, CountingServiceRegistry) { From 18ef65b805e43b0b88b388f5ee9aac42d3a0ae8d Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Wed, 17 Apr 2024 12:24:00 +0100 Subject: [PATCH 25/46] Don't try to process empty batches of requests (#4969) During testing we found an existing issue with batching support which causes the router to panic if an empty batch is submitted to the router. This is only a problem if batching support is enabled. We now explicitly check for empty batches and return a helpful error message if they are detected. --- apollo-router/src/services/router/service.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/apollo-router/src/services/router/service.rs b/apollo-router/src/services/router/service.rs index 837b14056f..ba35fd6c0e 100644 --- a/apollo-router/src/services/router/service.rs +++ b/apollo-router/src/services/router/service.rs @@ -518,6 +518,14 @@ impl RouterService { "failed to decode a valid GraphQL request from path {e}" ), })?; + if result.is_empty() { + return Err(TranslateError { + status: StatusCode::BAD_REQUEST, + error: "failed to decode a valid GraphQL request from path", + extension_code: "INVALID_GRAPHQL_REQUEST", + extension_details: "failed to decode a valid GraphQL request from path: empty array ".to_string() + }); + } is_batch = true; } else if !q.is_empty() && q.as_bytes()[0] == b'[' { let extension_details = if self.batching.enabled @@ -579,6 +587,16 @@ impl RouterService { "failed to deserialize the request body into JSON: {e}" ), })?; + if result.is_empty() { + return Err(TranslateError { + status: StatusCode::BAD_REQUEST, + error: "failed to decode a valid GraphQL request from path", + extension_code: "INVALID_GRAPHQL_REQUEST", + extension_details: + "failed to decode a valid GraphQL request from path: empty array " + .to_string(), + }); + } is_batch = true; } else if !bytes.is_empty() && bytes[0] == b'[' { let extension_details = if self.batching.enabled From c9ea3235376cd9619ac9617f08368a472e10c35a Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 17 Apr 2024 05:24:47 -0700 Subject: [PATCH 26/46] changelog: fix a few typos in headers (#4959) Two of my recent changes had sloppy headers. 
--- CHANGELOG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ad0441a09b..09891b855c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -132,7 +132,7 @@ Additionally, the router now verifies that a TTL is configured for all subgraphs By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4882 -### Helm: include all standard labels in pod spec but complete sentence that stands on its own ([PR #4862](https://github.com/apollographql/router/pull/4862)) +### Helm: include all standard labels in pod spec ([PR #4862](https://github.com/apollographql/router/pull/4862)) The templates for the router's Helm chart have been updated so that the `helm.sh/chart`, `app.kubernetes.io/version`, and `app.kubernetes.io/managed-by` labels are now included on pods, as they already were for all other resources created by the Helm chart. @@ -140,7 +140,7 @@ The specific change to the template is that the pod spec template now uses the ` By [@glasser](https://github.com/glasser) in https://github.com/apollographql/router/pull/4862 -### Persisted queries return 4xx errors ([PR #4887](https://github.com/apollographql/router/pull/4887) +### Persisted queries return 4xx errors ([PR #4887](https://github.com/apollographql/router/pull/4887)) Previously, sending an invalid persisted query request could return a 200 status code to the client when they should have returned errors. These requests now return errors as 4xx status codes: From acda2d7216ed9af6234b65d39cd9c8ec7aab7f23 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Wed, 17 Apr 2024 12:50:39 +0000 Subject: [PATCH 27/46] prep release: v1.45.0-alpha.1 --- Cargo.lock | 6 +- apollo-router-benchmarks/Cargo.toml | 2 +- apollo-router-scaffold/Cargo.toml | 2 +- .../templates/base/Cargo.toml | 2 +- .../templates/base/xtask/Cargo.toml | 2 +- apollo-router/Cargo.toml | 2 +- .../tracing/docker-compose.datadog.yml | 2 +- dockerfiles/tracing/docker-compose.jaeger.yml | 2 +- dockerfiles/tracing/docker-compose.zipkin.yml | 2 +- helm/chart/router/Chart.yaml | 4 +- helm/chart/router/README.md | 8 +- licenses.html | 253 +++++++++++++++++- scripts/install.sh | 2 +- 13 files changed, 269 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 61eaeccf07..b88325a737 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -250,7 +250,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.45.0-alpha.0" +version = "1.45.0-alpha.1" dependencies = [ "access-json", "anyhow", @@ -411,7 +411,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.45.0-alpha.0" +version = "1.45.0-alpha.1" dependencies = [ "apollo-parser", "apollo-router", @@ -427,7 +427,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.45.0-alpha.0" +version = "1.45.0-alpha.1" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index a418bcd2a2..da07367c9f 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.45.0-alpha.0" +version = "1.45.0-alpha.1" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index e5b46e8ec3..faaaa8362d 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.45.0-alpha.0" +version = "1.45.0-alpha.1" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index d89d4860c5..f469446858 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.45.0-alpha.0" +apollo-router = "1.45.0-alpha.1" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index 2897e97759..d5e2cb17ea 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0-alpha.0" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0-alpha.1" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index d67e53727b..80b4b6ecbc 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.45.0-alpha.0" +version = "1.45.0-alpha.1" authors = ["Apollo Graph, Inc. 
"] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index d2340a04c8..1b016b8fa4 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.45.0-alpha.0 + image: ghcr.io/apollographql/router:v1.45.0-alpha.1 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 7ff6f49f7c..1c98c51c1f 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.45.0-alpha.0 + image: ghcr.io/apollographql/router:v1.45.0-alpha.1 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index db276fd901..f34bd46154 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.45.0-alpha.0 + image: ghcr.io/apollographql/router:v1.45.0-alpha.1 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index cd24fa8e08..92d52b04e7 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.45.0-alpha.0 +version: 1.45.0-alpha.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "v1.45.0-alpha.0" +appVersion: "v1.45.0-alpha.1" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index f1d5e3cb05..e4c7c8b7a6 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.45.0-alpha.0](https://img.shields.io/badge/Version-1.45.0--alpha.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0-alpha.0](https://img.shields.io/badge/AppVersion-v1.45.0--alpha.0-informational?style=flat-square) +![Version: 1.45.0-alpha.1](https://img.shields.io/badge/Version-1.45.0--alpha.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0-alpha.1](https://img.shields.io/badge/AppVersion-v1.45.0--alpha.1-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.0 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.1 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha. **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.0 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.1 --values my-values.yaml ``` _See [configuration](#configuration) below._ @@ -94,3 +94,5 @@ helm show values oci://ghcr.io/apollographql/helm-charts/router | topologySpreadConstraints | list | `[]` | Sets the [topology spread constraints](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/) for Deployment pods | | virtualservice.enabled | bool | `false` | | +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.3](https://github.com/norwoodj/helm-docs/releases/v1.11.3) diff --git a/licenses.html b/licenses.html index 0ec8fa2b56..730fe407df 100644 --- a/licenses.html +++ b/licenses.html @@ -44,7 +44,7 @@

          Third Party Licenses

          Overview of licenses:

                                           Apache License
                                      Version 2.0, January 2004
          @@ -4651,6 +4652,204 @@ 

          Used by:

          of your accepting any such warranty or additional liability.
          END OF TERMS AND CONDITIONS
          +
          + +
        • +

          Apache License 2.0

          +

          Used by:

          +
          +                                 Apache License
          +                           Version 2.0, January 2004
          +                        http://www.apache.org/licenses/
          +
          +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
          +
          +   1. Definitions.
          +
          +      "License" shall mean the terms and conditions for use, reproduction,
          +      and distribution as defined by Sections 1 through 9 of this document.
          +
          +      "Licensor" shall mean the copyright owner or entity authorized by
          +      the copyright owner that is granting the License.
          +
          +      "Legal Entity" shall mean the union of the acting entity and all
          +      other entities that control, are controlled by, or are under common
          +      control with that entity. For the purposes of this definition,
          +      "control" means (i) the power, direct or indirect, to cause the
          +      direction or management of such entity, whether by contract or
          +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
          +      outstanding shares, or (iii) beneficial ownership of such entity.
          +
          +      "You" (or "Your") shall mean an individual or Legal Entity
          +      exercising permissions granted by this License.
          +
          +      "Source" form shall mean the preferred form for making modifications,
          +      including but not limited to software source code, documentation
          +      source, and configuration files.
          +
          +      "Object" form shall mean any form resulting from mechanical
          +      transformation or translation of a Source form, including but
          +      not limited to compiled object code, generated documentation,
          +      and conversions to other media types.
          +
          +      "Work" shall mean the work of authorship, whether in Source or
          +      Object form, made available under the License, as indicated by a
          +      copyright notice that is included in or attached to the work
          +      (an example is provided in the Appendix below).
          +
          +      "Derivative Works" shall mean any work, whether in Source or Object
          +      form, that is based on (or derived from) the Work and for which the
          +      editorial revisions, annotations, elaborations, or other modifications
          +      represent, as a whole, an original work of authorship. For the purposes
          +      of this License, Derivative Works shall not include works that remain
          +      separable from, or merely link (or bind by name) to the interfaces of,
          +      the Work and Derivative Works thereof.
          +
          +      "Contribution" shall mean any work of authorship, including
          +      the original version of the Work and any modifications or additions
          +      to that Work or Derivative Works thereof, that is intentionally
          +      submitted to Licensor for inclusion in the Work by the copyright owner
          +      or by an individual or Legal Entity authorized to submit on behalf of
          +      the copyright owner. For the purposes of this definition, "submitted"
          +      means any form of electronic, verbal, or written communication sent
          +      to the Licensor or its representatives, including but not limited to
          +      communication on electronic mailing lists, source code control systems,
          +      and issue tracking systems that are managed by, or on behalf of, the
          +      Licensor for the purpose of discussing and improving the Work, but
          +      excluding communication that is conspicuously marked or otherwise
          +      designated in writing by the copyright owner as "Not a Contribution."
          +
          +      "Contributor" shall mean Licensor and any individual or Legal Entity
          +      on behalf of whom a Contribution has been received by Licensor and
          +      subsequently incorporated within the Work.
          +
          +   2. Grant of Copyright License. Subject to the terms and conditions of
          +      this License, each Contributor hereby grants to You a perpetual,
          +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
          +      copyright license to reproduce, prepare Derivative Works of,
          +      publicly display, publicly perform, sublicense, and distribute the
          +      Work and such Derivative Works in Source or Object form.
          +
          +   3. Grant of Patent License. Subject to the terms and conditions of
          +      this License, each Contributor hereby grants to You a perpetual,
          +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
          +      (except as stated in this section) patent license to make, have made,
          +      use, offer to sell, sell, import, and otherwise transfer the Work,
          +      where such license applies only to those patent claims licensable
          +      by such Contributor that are necessarily infringed by their
          +      Contribution(s) alone or by combination of their Contribution(s)
          +      with the Work to which such Contribution(s) was submitted. If You
          +      institute patent litigation against any entity (including a
          +      cross-claim or counterclaim in a lawsuit) alleging that the Work
          +      or a Contribution incorporated within the Work constitutes direct
          +      or contributory patent infringement, then any patent licenses
          +      granted to You under this License for that Work shall terminate
          +      as of the date such litigation is filed.
          +
          +   4. Redistribution. You may reproduce and distribute copies of the
          +      Work or Derivative Works thereof in any medium, with or without
          +      modifications, and in Source or Object form, provided that You
          +      meet the following conditions:
          +
          +      (a) You must give any other recipients of the Work or
          +          Derivative Works a copy of this License; and
          +
          +      (b) You must cause any modified files to carry prominent notices
          +          stating that You changed the files; and
          +
          +      (c) You must retain, in the Source form of any Derivative Works
          +          that You distribute, all copyright, patent, trademark, and
          +          attribution notices from the Source form of the Work,
          +          excluding those notices that do not pertain to any part of
          +          the Derivative Works; and
          +
          +      (d) If the Work includes a "NOTICE" text file as part of its
          +          distribution, then any Derivative Works that You distribute must
          +          include a readable copy of the attribution notices contained
          +          within such NOTICE file, excluding those notices that do not
          +          pertain to any part of the Derivative Works, in at least one
          +          of the following places: within a NOTICE text file distributed
          +          as part of the Derivative Works; within the Source form or
          +          documentation, if provided along with the Derivative Works; or,
          +          within a display generated by the Derivative Works, if and
          +          wherever such third-party notices normally appear. The contents
          +          of the NOTICE file are for informational purposes only and
          +          do not modify the License. You may add Your own attribution
          +          notices within Derivative Works that You distribute, alongside
          +          or as an addendum to the NOTICE text from the Work, provided
          +          that such additional attribution notices cannot be construed
          +          as modifying the License.
          +
          +      You may add Your own copyright statement to Your modifications and
          +      may provide additional or different license terms and conditions
          +      for use, reproduction, or distribution of Your modifications, or
          +      for any such Derivative Works as a whole, provided Your use,
          +      reproduction, and distribution of the Work otherwise complies with
          +      the conditions stated in this License.
          +
          +   5. Submission of Contributions. Unless You explicitly state otherwise,
          +      any Contribution intentionally submitted for inclusion in the Work
          +      by You to the Licensor shall be under the terms and conditions of
          +      this License, without any additional terms or conditions.
          +      Notwithstanding the above, nothing herein shall supersede or modify
          +      the terms of any separate license agreement you may have executed
          +      with Licensor regarding such Contributions.
          +
          +   6. Trademarks. This License does not grant permission to use the trade
          +      names, trademarks, service marks, or product names of the Licensor,
          +      except as required for reasonable and customary use in describing the
          +      origin of the Work and reproducing the content of the NOTICE file.
          +
          +   7. Disclaimer of Warranty. Unless required by applicable law or
          +      agreed to in writing, Licensor provides the Work (and each
          +      Contributor provides its Contributions) on an "AS IS" BASIS,
          +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
          +      implied, including, without limitation, any warranties or conditions
          +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
          +      PARTICULAR PURPOSE. You are solely responsible for determining the
          +      appropriateness of using or redistributing the Work and assume any
          +      risks associated with Your exercise of permissions under this License.
          +
          +   8. Limitation of Liability. In no event and under no legal theory,
          +      whether in tort (including negligence), contract, or otherwise,
          +      unless required by applicable law (such as deliberate and grossly
          +      negligent acts) or agreed to in writing, shall any Contributor be
          +      liable to You for damages, including any direct, indirect, special,
          +      incidental, or consequential damages of any character arising as a
          +      result of this License or out of the use or inability to use the
          +      Work (including but not limited to damages for loss of goodwill,
          +      work stoppage, computer failure or malfunction, or any and all
          +      other commercial damages or losses), even if such Contributor
          +      has been advised of the possibility of such damages.
          +
          +   9. Accepting Warranty or Additional Liability. While redistributing
          +      the Work or Derivative Works thereof, You may choose to offer,
          +      and charge a fee for, acceptance of support, warranty, indemnity,
          +      or other liability obligations and/or rights consistent with this
          +      License. However, in accepting such obligations, You may act only
          +      on Your own behalf and on Your sole responsibility, not on behalf
          +      of any other Contributor, and only if You agree to indemnify,
          +      defend, and hold each Contributor harmless for any liability
          +      incurred by, or claims asserted against, such Contributor by reason
          +      of your accepting any such warranty or additional liability.
          +
          +   END OF TERMS AND CONDITIONS
          +
          +   Copyright 2019 Yoshua Wuyts
          +
          +   Licensed under the Apache License, Version 2.0 (the "License");
          +   you may not use this file except in compliance with the License.
          +   You may obtain a copy of the License at
          +
          +       http://www.apache.org/licenses/LICENSE-2.0
          +
          +   Unless required by applicable law or agreed to in writing, software
          +   distributed under the License is distributed on an "AS IS" BASIS,
          +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          +   See the License for the specific language governing permissions and
          +   limitations under the License.
           
        • @@ -8212,13 +8411,26 @@

          Used by:

        • arbitrary
        • arc-swap
        • async-channel
        • +
        • async-channel
        • async-compression
        • +
        • async-executor
        • +
        • async-global-executor
        • +
        • async-io
        • +
        • async-io
        • +
        • async-lock
        • +
        • async-lock
        • +
        • async-process
        • +
        • async-signal
        • +
        • async-std
        • +
        • async-task
        • +
        • atomic-waker
        • autocfg
        • backtrace
        • base64
        • base64
        • bitflags
        • bitflags
        • +
        • blocking
        • bstr
        • bumpalo
        • bytes-utils
        • @@ -8241,6 +8453,9 @@

          Used by:

        • envmnt
        • equivalent
        • event-listener
        • +
        • event-listener
        • +
        • event-listener
        • +
        • event-listener-strategy
        • fastrand
        • fastrand
        • filetime
        • @@ -8251,6 +8466,7 @@

          Used by:

        • fraction
        • fsio
        • futures-lite
        • +
        • futures-lite
        • gimli
        • git2
        • group
        • @@ -8269,6 +8485,7 @@

          Used by:

        • indexmap
        • indexmap
        • inventory
        • +
        • io-lifetimes
        • ipconfig
        • itertools
        • itertools
        • @@ -8282,6 +8499,7 @@

          Used by:

        • libz-ng-sys
        • libz-sys
        • linux-raw-sys
        • +
        • linux-raw-sys
        • lock_api
        • log
        • maplit
        • @@ -8313,8 +8531,10 @@

          Used by:

        • pest_generator
        • pest_meta
        • petgraph
        • +
        • piper
        • pkg-config
        • platforms
        • +
        • polling
        • proc-macro2
        • prost
        • prost
        • @@ -8336,6 +8556,7 @@

          Used by:

        • rustc_version
        • rustc_version
        • rustix
        • +
        • rustix
        • rustls
        • rustls-native-certs
        • rustls-pemfile
        • @@ -8353,6 +8574,7 @@

          Used by:

        • similar
        • smallvec
        • socket2
        • +
        • socket2
        • stable_deref_trait
        • syn
        • syn
        • @@ -8377,6 +8599,7 @@

          Used by:

        • unicode-xid
        • url
        • uuid
        • +
        • value-bag
        • version_check
        • waker-fn
        • wasi
        • @@ -10882,6 +11105,7 @@

          Used by:

                                        Apache License
          @@ -11535,6 +11759,7 @@ 

          Apache License 2.0

          Used by:

          ../../LICENSE-APACHE
          @@ -12187,13 +12412,12 @@

          Used by:

          MIT or Apache-2.0
          +
          + +
        • +

          Apache License 2.0

          +

          Used by:

          +
          +The Apache License, Version 2.0 (Apache-2.0)
          +
          +Copyright 2015-2020 the fiat-crypto authors (see the AUTHORS file)
          +
          +Licensed under the Apache License, Version 2.0 (the "License");
          +you may not use this file except in compliance with the License.
          +You may obtain a copy of the License at
          +
          +    http://www.apache.org/licenses/LICENSE-2.0
          +
          +Unless required by applicable law or agreed to in writing, software
          +distributed under the License is distributed on an "AS IS" BASIS,
          +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
          +See the License for the specific language governing permissions and
          +limitations under the License.
           
        • diff --git a/scripts/install.sh b/scripts/install.sh index 7d7a2e70f1..bf1017fad7 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. -PACKAGE_VERSION="v1.45.0-alpha.0" +PACKAGE_VERSION="v1.45.0-alpha.1" download_binary() { downloader --check From 9d54568738c6587a3a3ec40e40836552846c5b37 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Wed, 17 Apr 2024 16:25:42 +0300 Subject: [PATCH 28/46] Update RELEASE_CHECKLIST.md pre-release step. (#4971) Adding missing branch push to existing command. --- RELEASE_CHECKLIST.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/RELEASE_CHECKLIST.md b/RELEASE_CHECKLIST.md index 8fb327dfdd..8b5fd81c05 100644 --- a/RELEASE_CHECKLIST.md +++ b/RELEASE_CHECKLIST.md @@ -194,13 +194,13 @@ Start following the steps below to start a release PR. The process is **not ful git push "${APOLLO_ROUTER_RELEASE_GIT_ORIGIN}" "${APOLLO_ROUTER_RELEASE_VERSION}" ``` -10. Git tag & push the pre-release: +10. Git tag the current commit and & push the branch and the pre-release tag simultaneously: This process will kick off the bulk of the release process on CircleCI, including building each architecture on its own infrastructure and notarizing the macOS binary. ``` git tag -a "v${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}" -m "${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}" && \ - git push "${APOLLO_ROUTER_RELEASE_GIT_ORIGIN}" "v${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}" + git push "${APOLLO_ROUTER_RELEASE_GIT_ORIGIN}" "${APOLLO_ROUTER_RELEASE_VERSION}" "v${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}" ``` ### Preparing the final release From bf3a9d121948356d9be8db5481683727fd0c6edd Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Wed, 17 Apr 2024 17:31:30 +0200 Subject: [PATCH 29/46] move back the router-bridge version in the router's Cargo.toml (#4972) When running `cargo publish`, the build script looks for a workspace Cargo.toml that does not exist because that does not execute from inside the repository --- **Checklist** Complete the checklist (and note appropriate exceptions) before the PR is marked ready-for-review. - [ ] Changes are compatible[^1] - [ ] Documentation[^2] completed - [ ] Performance impact assessed and acceptable - Tests added and passing[^3] - [ ] Unit Tests - [ ] Integration Tests - [ ] Manual Tests **Exceptions** *Note any exceptions here* **Notes** [^1]: It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this. [^2]: Configuration is an important part of many changes. Where applicable please try to document configuration examples. [^3]: Tick whichever testing boxes are applicable. If you are adding Manual Tests, please document the manual testing (extensively) in the Exceptions. 
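To make the fix below concrete: under `cargo publish` the build script runs from an unpacked crate, so only the crate's own manifest (reachable via `CARGO_MANIFEST_DIR`) is guaranteed to exist, not a workspace-root `Cargo.toml` one directory up. Here is a minimal, self-contained sketch of that crate-local lookup — the `basic_toml`/`serde_json` usage mirrors the diff, but the assertion and its messages are illustrative, not the router's actual code:

```rust
// build.rs — hedged sketch; assumes `basic_toml` and `serde_json` are build-dependencies.
use std::fs;
use std::path::PathBuf;

fn main() {
    // CARGO_MANIFEST_DIR points at the crate being built, which still exists
    // under `cargo publish`; a workspace-root Cargo.toml may not.
    let manifest_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("Cargo.toml");
    let manifest: serde_json::Value = basic_toml::from_str(
        &fs::read_to_string(manifest_path).expect("could not read Cargo.toml"),
    )
    .expect("could not parse Cargo.toml");

    // Illustrative check that the dependency stays pinned with an `=` prefix.
    let version = manifest["dependencies"]["router-bridge"]
        .as_str()
        .expect("router-bridge should be declared as a plain version string");
    assert!(version.starts_with('='), "router-bridge must be pinned");
}
```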
--- apollo-router/Cargo.toml | 3 ++- apollo-router/build/main.rs | 13 ++----------- .../src/query_planner/bridge_query_planner.rs | 13 ++----------- 3 files changed, 6 insertions(+), 23 deletions(-) diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 80b4b6ecbc..128f6ee101 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -180,7 +180,8 @@ rand = "0.8.5" rhai = { version = "=1.17.1", features = ["sync", "serde", "internals"] } regex = "1.10.3" reqwest.workspace = true -router-bridge.workspace = true +# note: this dependency should _always_ be pinned, prefix the version with an `=` +router-bridge = "=0.5.18+v2.7.2" rust-embed = "8.2.0" rustls = "0.21.10" rustls-native-certs = "0.6.3" diff --git a/apollo-router/build/main.rs b/apollo-router/build/main.rs index c5b1a4e7ad..763d894df0 100644 --- a/apollo-router/build/main.rs +++ b/apollo-router/build/main.rs @@ -5,21 +5,12 @@ mod studio; fn main() -> Result<(), Box> { let cargo_manifest: serde_json::Value = basic_toml::from_str( - &fs::read_to_string( - PathBuf::from(&env!("CARGO_MANIFEST_DIR")) - .parent() - .unwrap() - .join("Cargo.toml"), - ) - .expect("could not read Cargo.toml"), + &fs::read_to_string(PathBuf::from(&env!("CARGO_MANIFEST_DIR")).join("Cargo.toml")) + .expect("could not read Cargo.toml"), ) .expect("could not parse Cargo.toml"); let router_bridge = cargo_manifest - .get("workspace") - .expect("Cargo.toml does not contain workspace") - .as_object() - .expect("Cargo.toml workspace key is not an object") .get("dependencies") .expect("Cargo.toml does not contain dependencies") .as_object() diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index bdde60d459..5bba5e2a01 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -1484,20 +1484,11 @@ mod tests { #[test] fn router_bridge_dependency_is_pinned() { let cargo_manifest: serde_json::Value = basic_toml::from_str( - &fs::read_to_string( - PathBuf::from(&env!("CARGO_MANIFEST_DIR")) - .parent() - .unwrap() - .join("Cargo.toml"), - ) - .expect("could not read Cargo.toml"), + &fs::read_to_string(PathBuf::from(&env!("CARGO_MANIFEST_DIR")).join("Cargo.toml")) + .expect("could not read Cargo.toml"), ) .expect("could not parse Cargo.toml"); let router_bridge_version = cargo_manifest - .get("workspace") - .expect("Cargo.toml does not contain workspace") - .as_object() - .expect("Cargo.toml workspace key is not an object") .get("dependencies") .expect("Cargo.toml does not contain dependencies") .as_object() From 249e941a5f9f9267f187ff2bf01d9186a2ae19b7 Mon Sep 17 00:00:00 2001 From: Simon Sapin Date: Wed, 17 Apr 2024 18:39:08 +0200 Subject: [PATCH 30/46] Make test_supergraph_timeout more robust (#4955) * Use an available port number * Add an artificial delay to make sure the short timeout is triggered The latter is an attempt to fix the test sometimes returning HTTP 200 instead of the expect 504 Gateway Timeout. I suspect either that the future wrapped in timeout would return `Ready` immediately (our `plugins::traffic_shaping::timeout` only polls the `sleep` future when the main future returns `Pending`), or the normal flow would complete within a kernel tick (which is likely much less frequent than 1ns), so `sleep` would measure zero elapsed time. 
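To make the suspected failure mode concrete, here is a hedged, self-contained sketch — illustrative names only, not the router's `plugins::traffic_shaping::timeout` implementation — of a timeout future that consults its deadline only after the inner future returns `Pending`. An inner future that is immediately ready then completes even with a 1ns deadline, which is why the test below needs an artificial delay to make the timeout observable:

```rust
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;

// Illustrative wrapper: the deadline is only polled once the inner future
// has returned `Pending` at least once.
struct LazyTimeout<F> {
    inner: Pin<Box<F>>,
    deadline: Pin<Box<tokio::time::Sleep>>,
}

impl<F: Future> Future for LazyTimeout<F> {
    type Output = Result<F::Output, &'static str>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Inner future first: if it is already `Ready`, the deadline is never observed.
        if let Poll::Ready(out) = self.inner.as_mut().poll(cx) {
            return Poll::Ready(Ok(out));
        }
        match self.deadline.as_mut().poll(cx) {
            Poll::Ready(()) => Poll::Ready(Err("timed out")),
            Poll::Pending => Poll::Pending,
        }
    }
}

#[tokio::main]
async fn main() {
    let fut = LazyTimeout {
        inner: Box::pin(async { "response" }), // completes without ever yielding
        deadline: Box::pin(tokio::time::sleep(Duration::from_nanos(1))),
    };
    // Succeeds despite the 1ns deadline, mirroring the HTTP 200s seen in the flaky test.
    assert_eq!(fut.await, Ok("response"));
}
```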
Fixes https://github.com/apollographql/router/issues/4910 --- apollo-router/src/axum_factory/tests.rs | 34 ++++++++++++++++++++++--- apollo-router/src/configuration/mod.rs | 3 +-- 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/apollo-router/src/axum_factory/tests.rs b/apollo-router/src/axum_factory/tests.rs index c3ac8eb2cc..e4140ce2dc 100644 --- a/apollo-router/src/axum_factory/tests.rs +++ b/apollo-router/src/axum_factory/tests.rs @@ -69,6 +69,7 @@ use crate::query_planner::BridgeQueryPlannerPool; use crate::router_factory::create_plugins; use crate::router_factory::Endpoint; use crate::router_factory::RouterFactory; +use crate::services::execution; use crate::services::layers::persisted_queries::PersistedQueryLayer; use crate::services::layers::query_analysis::QueryAnalysisLayer; use crate::services::layers::static_page::home_page_content; @@ -2295,6 +2296,7 @@ async fn test_supergraph_and_health_check_same_port_different_listener() { async fn test_supergraph_timeout() { let config = serde_json::json!({ "supergraph": { + "listen": "127.0.0.1:0", "defer_support": false, }, "traffic_shaping": { @@ -2318,10 +2320,32 @@ async fn test_supergraph_timeout() { // we do the entire supergraph rebuilding instead of using `from_supergraph_mock_callback_and_configuration` // because we need the plugins to apply on the supergraph - let plugins = create_plugins(&conf, &schema, planner.subgraph_schemas(), None, None) + let mut plugins = create_plugins(&conf, &schema, planner.subgraph_schemas(), None, None) .await .unwrap(); + plugins.insert("delay".into(), Box::new(Delay)); + + struct Delay; + + #[async_trait::async_trait] + impl crate::plugin::Plugin for Delay { + type Config = (); + + async fn new(_: crate::plugin::PluginInit<()>) -> Result { + Ok(Self) + } + + fn execution_service(&self, service: execution::BoxService) -> execution::BoxService { + service + .map_future(|fut| async { + tokio::time::sleep(Duration::from_millis(10)).await; + fut.await + }) + .boxed() + } + } + let builder = PluggableSupergraphServiceBuilder::new(planner) .with_configuration(conf.clone()) .with_subgraph_service("accounts", MockSubgraph::new(HashMap::new())); @@ -2343,10 +2367,14 @@ async fn test_supergraph_timeout() { .make(); // keep the server handle around otherwise it will immediately shutdown - let (_server, client) = init_with_config(service, conf.clone(), MultiMap::new()) + let (server, client) = init_with_config(service, conf.clone(), MultiMap::new()) .await .unwrap(); - let url = "http://localhost:4000/"; + let url = server + .graphql_listen_address() + .as_ref() + .unwrap() + .to_string(); let response = client .post(url) diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index 561b62e998..5ece74cc65 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -288,8 +288,7 @@ fn default_graphql_listen() -> ListenAddr { SocketAddr::from_str("127.0.0.1:4000").unwrap().into() } -// This isn't dead code! 
we use it in buildstructor's fake_new -#[allow(dead_code)] +#[cfg(test)] fn test_listen() -> ListenAddr { SocketAddr::from_str("127.0.0.1:0").unwrap().into() } From bad87741892cbe9f367fab1f5cba7861a6629916 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Wed, 17 Apr 2024 22:11:00 +0300 Subject: [PATCH 31/46] =?UTF-8?q?docs:=20Update=20RELEASE=5FCHECKLIST.md?= =?UTF-8?q?=20to=20include=20cargo=20publish=20for=20pre-re=E2=80=A6=20(#4?= =?UTF-8?q?976)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit โ€ฆlease Also, some formatting and justification for verbosity. --- RELEASE_CHECKLIST.md | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/RELEASE_CHECKLIST.md b/RELEASE_CHECKLIST.md index 8b5fd81c05..c69c852b47 100644 --- a/RELEASE_CHECKLIST.md +++ b/RELEASE_CHECKLIST.md @@ -190,9 +190,9 @@ Start following the steps below to start a release PR. The process is **not ful 9. Push this commit up to the existing release PR: - ``` - git push "${APOLLO_ROUTER_RELEASE_GIT_ORIGIN}" "${APOLLO_ROUTER_RELEASE_VERSION}" - ``` + ``` + git push "${APOLLO_ROUTER_RELEASE_GIT_ORIGIN}" "${APOLLO_ROUTER_RELEASE_VERSION}" + ``` 10. Git tag the current commit and & push the branch and the pre-release tag simultaneously: @@ -203,6 +203,14 @@ Start following the steps below to start a release PR. The process is **not ful git push "${APOLLO_ROUTER_RELEASE_GIT_ORIGIN}" "${APOLLO_ROUTER_RELEASE_VERSION}" "v${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}" ``` +11. Finally, publish the Crate from your local computer (this also needs to be moved to CI, but requires changing the release containers to be Rust-enabled and to restore the caches): + + > Note: This command may appear unnecessarily specific, but it will help avoid publishing a version to Crates.io that doesn't match what you're currently releasing. (e.g., in the event that you've changed branches in another window) + + ``` + cargo publish -p apollo-router@"${APOLLO_ROUTER_RELEASE_VERSION}${APOLLO_ROUTER_PRERELEASE_SUFFIX}" + ``` + ### Preparing the final release 1. Make sure you have all the [Software Requirements](#software-requirements) above fulfilled. @@ -433,8 +441,10 @@ Start following the steps below to start a release PR. The process is **not ful 17. Finally, publish the Crate from your local computer from the `main` branch (this also needs to be moved to CI, but requires changing the release containers to be Rust-enabled and to restore the caches): + > Note: This command may appear unnecessarily specific, but it will help avoid publishing a version to Crates.io that doesn't match what you're currently releasing. (e.g., in the event that you've changed branches in another window) + ``` - cargo publish -p apollo-router + cargo publish -p apollo-router@"${APOLLO_ROUTER_RELEASE_VERSION}" ``` 18. 
(Optional) To have a "social banner" for this release, run [this `htmlq` command](https://crates.io/crates/htmlq) (`cargo install htmlq`, or on MacOS `brew install htmlq`; its `jq` for HTML), open the link it produces, copy the image to your clipboard: From a75a0e2c4f2191fc08dbffbd8b0c394d9f4997d6 Mon Sep 17 00:00:00 2001 From: Shane Myrick Date: Wed, 17 Apr 2024 16:36:02 -0700 Subject: [PATCH 32/46] [docs] Update caching with caveats (#4872) Add some words about what the operation cache intentions are --------- Co-authored-by: Jesse Rosenberger Co-authored-by: Edward Huang Co-authored-by: Geoffroy Couprie --- .../docs_press_finish_musket_reindeer.md | 5 ++++ .../configuration/in-memory-caching.mdx | 3 +++ .../telemetry/exporters/metrics/overview.mdx | 2 +- .../telemetry/instrumentation/instruments.mdx | 25 ++++++++----------- .../instrumentation/standard-instruments.mdx | 2 +- .../configuration/telemetry/overview.mdx | 10 +++++--- docs/source/errors.mdx | 7 ++++++ 7 files changed, 34 insertions(+), 20 deletions(-) create mode 100644 .changesets/docs_press_finish_musket_reindeer.md diff --git a/.changesets/docs_press_finish_musket_reindeer.md b/.changesets/docs_press_finish_musket_reindeer.md new file mode 100644 index 0000000000..f345d072d5 --- /dev/null +++ b/.changesets/docs_press_finish_musket_reindeer.md @@ -0,0 +1,5 @@ +### [docs] Update caching with caveats ([PR #4872](https://github.com/apollographql/router/pull/4872)) + +Add some words about what the operation cache intentions are + +By [@smyrick](https://github.com/smyrick) in https://github.com/apollographql/router/pull/4872 diff --git a/docs/source/configuration/in-memory-caching.mdx b/docs/source/configuration/in-memory-caching.mdx index ef60b5bf59..9be7083f9e 100644 --- a/docs/source/configuration/in-memory-caching.mdx +++ b/docs/source/configuration/in-memory-caching.mdx @@ -16,6 +16,9 @@ If you have a GraphOS Enterprise plan, you can also configure a Redis-backed _di +## Performance improvements vs stability +The Router is a highly scalable and low-latency runtime. Even with all caching **disabled**, the time to process operations and query plans will be very minimal (nanoseconds to milliseconds) when compared to the overall supergraph request, except in the edge cases of extremely large operations and supergraphs. Caching offers stability to those running a large graph so that your overhead for given operations stays consistent, not that it dramatically improves. If you would like to validate the performance wins of operation caching, check out the [traces and metrics in the Router](/router/configuration/telemetry/instrumentation/standard-instruments#performance) to take measurements before and after. In extremely large edge cases though, we have seen the cache save 2-10x time to create the query plan, which is still a small part of the overall request. + ## Caching query plans Whenever your router receives an incoming GraphQL operation, it generates a [query plan](/federation/query-plans/) to determine which subgraphs it needs to query to resolve that operation. 
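As a companion to the cache-sizing advice above, a hedged configuration example for the in-memory query-plan cache — the key names follow the router's documented configuration shape, and the limit value is purely illustrative:

```yaml
# router.yaml — illustrative sizing of the in-memory query plan cache
supergraph:
  query_planning:
    cache:
      in_memory:
        # Maximum number of query plans kept in memory; tune this after
        # measuring before/after with the telemetry instruments referenced above.
        limit: 512
```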
diff --git a/docs/source/configuration/telemetry/exporters/metrics/overview.mdx b/docs/source/configuration/telemetry/exporters/metrics/overview.mdx index 26918d9ef0..fad0471ff1 100644 --- a/docs/source/configuration/telemetry/exporters/metrics/overview.mdx +++ b/docs/source/configuration/telemetry/exporters/metrics/overview.mdx @@ -1,5 +1,5 @@ --- -title: Router Metrics +title: Metrics exporters subtitle: Export Apollo Router metrics description: Collect and export metrics from the Apollo Router for Prometheus, OpenTelemetry Protocol (OTLP), Datadog, and New Relic --- diff --git a/docs/source/configuration/telemetry/instrumentation/instruments.mdx b/docs/source/configuration/telemetry/instrumentation/instruments.mdx index cff1cfd7ff..6d26b18b67 100644 --- a/docs/source/configuration/telemetry/instrumentation/instruments.mdx +++ b/docs/source/configuration/telemetry/instrumentation/instruments.mdx @@ -8,25 +8,20 @@ import RouterServices from '../../../../shared/router-lifecycle-services.mdx'; -An **instrument** is used to collect data and report measurements to a metric backend. The Apollo Router supports the following metric instruments: +An _instrument_ in the router collects data and reports measurements to a metric backend. Supported instruments include standard instruments from OpenTelemetry, standard instruments for the router's request lifecycle, and custom instruments. Supported instrument kinds are counters and histograms. -* Counter -* Histogram +You can configure instruments in `router.yaml` with `telemetry.instrumentation.instruments`. -Instruments in the router are configurable in `router.yaml` as `telemetry.instruments`. +### OpenTelemetry standard instruments -### Standard instruments - -OpenTelemetry semantic conventions define several standard metric instruments that are available and configurable in the router: +OpenTelemetry specifies multiple [standard metric instruments](https://opentelemetry.io/docs/specs/semconv/http/http-metrics/) that are available in the router: * `http.server.active_requests` - The number of active requests in flight. * `http.server.request.body.size` - A histogram of request body sizes for requests handled by the router. * `http.server.request.duration` - A histogram of request durations for requests handled by the router. * `http.server.response.body.size` - A histogram of response body sizes for requests handled by the router. -For more information, see the [OpenTelemetry semantic conventions for HTTP metrics](https://opentelemetry.io/docs/specs/semconv/http/http-metrics/). - -These standard instruments are configurable in `router.yaml`: +These instruments are configurable in `router.yaml`: ```yaml title="router.yaml" telemetry: @@ -39,7 +34,7 @@ telemetry: http.server.response.body.size: true # (default false) ``` -Standard instruments can be customized by attaching or removing attributes. +They can be customized by attaching or removing attributes. See [attributes](#attributes) to learn more about configuring attributes. ```yaml title="router.yaml" telemetry: @@ -52,17 +47,19 @@ telemetry: http.request.method: true ``` -See the [attributes](#attributes) configuration for more information. +### Apollo standard instruments + +To learn about standard metric instruments for the router's request lifecycle, see [Apollo Router instruments](./standard-instruments). ### Custom instruments -You can define custom instruments on the router, supergraph and subgraph services in the router pipeline. 
+You can define custom instruments on the router, supergraph, and subgraph services in the router pipeline. -When defining a custom instrument, make sure to reference the [OpenTelemetry semantic conventions](https://opentelemetry.io/docs/specs/semconv/general/metrics/). +When defining a custom instrument, make sure to reference [OpenTelemetry semantic conventions](https://opentelemetry.io/docs/specs/semconv/general/metrics/). diff --git a/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx b/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx index 25d55c165e..474b5f604d 100644 --- a/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx +++ b/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx @@ -6,7 +6,7 @@ description: Reference of standard metric instruments for the Apollo Router's re ## Standard metric instruments -The Apollo Router provides a set of non-configurable metric instruments that expose detailed information about the Router's request lifecycle. +The Apollo Router provides a set of non-configurable metric instruments that expose detailed information about the router's request lifecycle. These instruments can be consumed by configuring a [metrics exporter](../exporters/metrics/overview). diff --git a/docs/source/configuration/telemetry/overview.mdx b/docs/source/configuration/telemetry/overview.mdx index 821f4e707c..374ab96970 100644 --- a/docs/source/configuration/telemetry/overview.mdx +++ b/docs/source/configuration/telemetry/overview.mdx @@ -94,15 +94,17 @@ Logs can be consumed by [logging exporters](./exporters/logging/overview) and as ### Metrics and instruments -Metrics monitor aggregate information about the Apollo Router. Metrics include histograms, gauges, and counts. An individual metric is called an **instrument**. Examples of instruments include: +Metrics are measurements of the router's behavior that can be exported and monitored. Different kinds of metrics include histograms, gauges, and counts. + +Metrics can be consumed by _exporters_. See [Metrics exporters](./exporters/metrics/overview) for an overview of supported exporters. + +An individual metric is called an _instrument_. Example instruments of the router include: * Number of received requests * Histogram of request durations * Number of in-flight requests -You can find a full list of instruments in the [instrument documentation](./instrumentation/standard-instruments). - -Metrics can be consumed via [metrics exporters](./exporters/metrics/overview). +See [Instruments](./instrumentation/instruments) for an overview of available instruments and a guide for configuring and customizing instruments. ### Traces and spans diff --git a/docs/source/errors.mdx b/docs/source/errors.mdx index 52a2befbd7..7b7cb8d43b 100644 --- a/docs/source/errors.mdx +++ b/docs/source/errors.mdx @@ -42,6 +42,7 @@ A request's HTTP `Accept` header didn't contain any of the router's supported mi - `multipart/mixed;subscriptionSpec=1.0`. + Request traffic exceeded configured rate limits. See [client side traffic shaping](./configuration/traffic-shaping/#client-side-traffic-shaping). @@ -56,6 +57,12 @@ The request was canceled because the client closed the connection, possibly due The router encountered an unexpected issue. [Report](https://github.com/apollographql/router/issues/new?assignees=&labels=raised+by+user&projects=&template=bug_report.md&title=) this possible bug to the router team. 
+ + + + +The request was not able to complete within a configured amount of time. See [client side traffic shaping timeouts](./configuration/traffic-shaping/#timeouts). + From 30b362c8f2117a37aa5210f8f4e88154ae8aa3e9 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Thu, 18 Apr 2024 05:29:43 +0000 Subject: [PATCH 33/46] prep release: v1.45.0-alpha.2 --- Cargo.lock | 6 +++--- apollo-router-benchmarks/Cargo.toml | 2 +- apollo-router-scaffold/Cargo.toml | 2 +- apollo-router-scaffold/templates/base/Cargo.toml | 2 +- apollo-router-scaffold/templates/base/xtask/Cargo.toml | 2 +- apollo-router/Cargo.toml | 2 +- dockerfiles/tracing/docker-compose.datadog.yml | 2 +- dockerfiles/tracing/docker-compose.jaeger.yml | 2 +- dockerfiles/tracing/docker-compose.zipkin.yml | 2 +- helm/chart/router/Chart.yaml | 4 ++-- helm/chart/router/README.md | 6 +++--- scripts/install.sh | 2 +- 12 files changed, 17 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b88325a737..d4dec88f3d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -250,7 +250,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.45.0-alpha.1" +version = "1.45.0-alpha.2" dependencies = [ "access-json", "anyhow", @@ -411,7 +411,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.45.0-alpha.1" +version = "1.45.0-alpha.2" dependencies = [ "apollo-parser", "apollo-router", @@ -427,7 +427,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.45.0-alpha.1" +version = "1.45.0-alpha.2" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index da07367c9f..a00bebf65b 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.45.0-alpha.1" +version = "1.45.0-alpha.2" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index faaaa8362d..48958a8e44 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.45.0-alpha.1" +version = "1.45.0-alpha.2" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index f469446858..b815f39a29 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.45.0-alpha.1" +apollo-router = "1.45.0-alpha.2" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index d5e2cb17ea..e11424efa0 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0-alpha.1" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0-alpha.2" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 128f6ee101..6eb5a800d5 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.45.0-alpha.1" +version = "1.45.0-alpha.2" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index 1b016b8fa4..bcf772395b 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.45.0-alpha.1 + image: ghcr.io/apollographql/router:v1.45.0-alpha.2 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 1c98c51c1f..612637aea3 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.45.0-alpha.1 + image: ghcr.io/apollographql/router:v1.45.0-alpha.2 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index f34bd46154..8c689988b9 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.45.0-alpha.1 + image: ghcr.io/apollographql/router:v1.45.0-alpha.2 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/helm/chart/router/Chart.yaml 
b/helm/chart/router/Chart.yaml index 92d52b04e7..c4f17db0b0 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.45.0-alpha.1 +version: 1.45.0-alpha.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "v1.45.0-alpha.1" +appVersion: "v1.45.0-alpha.2" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index e4c7c8b7a6..811acfed85 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.45.0-alpha.1](https://img.shields.io/badge/Version-1.45.0--alpha.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0-alpha.1](https://img.shields.io/badge/AppVersion-v1.45.0--alpha.1-informational?style=flat-square) +![Version: 1.45.0-alpha.2](https://img.shields.io/badge/Version-1.45.0--alpha.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0-alpha.2](https://img.shields.io/badge/AppVersion-v1.45.0--alpha.2-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.1 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.2 ``` ## Install Chart @@ -19,7 +19,7 @@ helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha. **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.1 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.2 --values my-values.yaml ``` _See [configuration](#configuration) below._ diff --git a/scripts/install.sh b/scripts/install.sh index bf1017fad7..6d0da838bb 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. -PACKAGE_VERSION="v1.45.0-alpha.1" +PACKAGE_VERSION="v1.45.0-alpha.2" download_binary() { downloader --check From cbec4f27ebff24f45f97298a1df3f119853f42bd Mon Sep 17 00:00:00 2001 From: Nick Marsh Date: Thu, 18 Apr 2024 21:38:46 +1000 Subject: [PATCH 34/46] Update proto and related code/snapshots (#4967) Updates the protobuf to the latest (soon to be released) version, removes code that was using the deprecated field, and updates test snapshots.
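For context on the wire-format mechanics, here is an illustrative sketch (not the actual `reports.proto` contents): when a protobuf field is deleted, its tag number should be reserved so it can never be reused with a different meaning, keeping old and new readers wire-compatible.

```proto
// Hypothetical message demonstrating the pattern used when a field is
// removed: the old tag (here, 3) is reserved so it cannot be reassigned.
message Example {
  string kept_field = 1;
  // Tag 3 previously held a deprecated repeated field; reserving it
  // prevents accidental reuse by future schema changes.
  reserved 3;
}
```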
--- .changesets/maint_240417_proto_update.md | 5 +++++ apollo-router/src/plugins/telemetry/apollo_exporter.rs | 9 +-------- apollo-router/src/plugins/telemetry/proto/reports.proto | 6 ++---- .../snapshots/apollo_reports__batch_send_header-2.snap | 2 -- .../snapshots/apollo_reports__batch_send_header.snap | 2 -- .../snapshots/apollo_reports__batch_trace_id-2.snap | 2 -- .../tests/snapshots/apollo_reports__batch_trace_id.snap | 2 -- .../tests/snapshots/apollo_reports__client_name-2.snap | 2 -- .../tests/snapshots/apollo_reports__client_name.snap | 2 -- .../snapshots/apollo_reports__client_version-2.snap | 2 -- .../tests/snapshots/apollo_reports__client_version.snap | 2 -- .../snapshots/apollo_reports__condition_else-2.snap | 2 -- .../tests/snapshots/apollo_reports__condition_else.snap | 2 -- .../tests/snapshots/apollo_reports__condition_if-2.snap | 2 -- .../tests/snapshots/apollo_reports__condition_if.snap | 2 -- .../tests/snapshots/apollo_reports__non_defer-2.snap | 2 -- .../tests/snapshots/apollo_reports__non_defer.snap | 2 -- .../tests/snapshots/apollo_reports__send_header-2.snap | 2 -- .../tests/snapshots/apollo_reports__send_header.snap | 2 -- .../snapshots/apollo_reports__send_variable_value-2.snap | 2 -- .../snapshots/apollo_reports__send_variable_value.snap | 2 -- apollo-router/tests/snapshots/apollo_reports__stats.snap | 2 -- .../tests/snapshots/apollo_reports__trace_id-2.snap | 2 -- .../tests/snapshots/apollo_reports__trace_id.snap | 2 -- 24 files changed, 8 insertions(+), 54 deletions(-) create mode 100644 .changesets/maint_240417_proto_update.md diff --git a/.changesets/maint_240417_proto_update.md b/.changesets/maint_240417_proto_update.md new file mode 100644 index 0000000000..e3050beba7 --- /dev/null +++ b/.changesets/maint_240417_proto_update.md @@ -0,0 +1,5 @@ +### Updates the Apollo reporting protobuf to the latest version ([PR 4967](https://github.com/apollographql/router/pull/4967)) + +Updates the protobuf file and related snapshots, and removes code that used the deprecated field. + +By [@bonnici](https://github.com/bonnici) in https://github.com/apollographql/router/pull/4967 \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/apollo_exporter.rs b/apollo-router/src/plugins/telemetry/apollo_exporter.rs index 4f6e4b0dfb..531cc94dcb 100644 --- a/apollo-router/src/plugins/telemetry/apollo_exporter.rs +++ b/apollo-router/src/plugins/telemetry/apollo_exporter.rs @@ -240,17 +240,10 @@ impl ApolloExporter { let mut has_traces = false; for (_, traces_and_stats) in proto_report.traces_per_query.iter_mut() { - if !traces_and_stats.trace.is_empty() - || !traces_and_stats - .internal_traces_contributing_to_stats - .is_empty() - { + if !traces_and_stats.trace.is_empty() { has_traces = true; if self.strip_traces.load(Ordering::SeqCst) { traces_and_stats.trace.clear(); - traces_and_stats - .internal_traces_contributing_to_stats - .clear(); } } } diff --git a/apollo-router/src/plugins/telemetry/proto/reports.proto b/apollo-router/src/plugins/telemetry/proto/reports.proto index c07948573b..8e24ac18e0 100644 --- a/apollo-router/src/plugins/telemetry/proto/reports.proto +++ b/apollo-router/src/plugins/telemetry/proto/reports.proto @@ -516,12 +516,10 @@ message TracesAndStats { // (as FieldStats will include the concrete object type for fields referenced // via an interface type). map referenced_fields_by_type = 4; - // This field is used to validate that the algorithm used to construct `stats_with_context` - // matches similar algorithms in Apollo's servers. 
It is otherwise ignored and should not - // be included in reports. - repeated Trace internal_traces_contributing_to_stats = 3 [(js_preEncoded) = true]; // This is an optional field that is used to provide more context to the key of this object within the // traces_per_query map. If it's omitted, we assume the key is a standard operation name and signature key. QueryMetadata query_metadata = 5; + + reserved 3; } diff --git a/apollo-router/tests/snapshots/apollo_reports__batch_send_header-2.snap b/apollo-router/tests/snapshots/apollo_reports__batch_send_header-2.snap index e3f9d20c84..8c72378643 100644 --- a/apollo-router/tests/snapshots/apollo_reports__batch_send_header-2.snap +++ b/apollo-router/tests/snapshots/apollo_reports__batch_send_header-2.snap @@ -1087,10 +1087,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__batch_send_header.snap b/apollo-router/tests/snapshots/apollo_reports__batch_send_header.snap index e3f9d20c84..8c72378643 100644 --- a/apollo-router/tests/snapshots/apollo_reports__batch_send_header.snap +++ b/apollo-router/tests/snapshots/apollo_reports__batch_send_header.snap @@ -1087,10 +1087,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__batch_trace_id-2.snap b/apollo-router/tests/snapshots/apollo_reports__batch_trace_id-2.snap index 969be0b867..9d49165680 100644 --- a/apollo-router/tests/snapshots/apollo_reports__batch_trace_id-2.snap +++ b/apollo-router/tests/snapshots/apollo_reports__batch_trace_id-2.snap @@ -1081,10 +1081,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__batch_trace_id.snap b/apollo-router/tests/snapshots/apollo_reports__batch_trace_id.snap index 969be0b867..9d49165680 100644 --- a/apollo-router/tests/snapshots/apollo_reports__batch_trace_id.snap +++ b/apollo-router/tests/snapshots/apollo_reports__batch_trace_id.snap @@ -1081,10 +1081,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__client_name-2.snap b/apollo-router/tests/snapshots/apollo_reports__client_name-2.snap index 1d86bef692..05f1eb4350 100644 --- a/apollo-router/tests/snapshots/apollo_reports__client_name-2.snap +++ b/apollo-router/tests/snapshots/apollo_reports__client_name-2.snap @@ -548,10 +548,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git 
a/apollo-router/tests/snapshots/apollo_reports__client_name.snap b/apollo-router/tests/snapshots/apollo_reports__client_name.snap index 1d86bef692..05f1eb4350 100644 --- a/apollo-router/tests/snapshots/apollo_reports__client_name.snap +++ b/apollo-router/tests/snapshots/apollo_reports__client_name.snap @@ -548,10 +548,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__client_version-2.snap b/apollo-router/tests/snapshots/apollo_reports__client_version-2.snap index f498319bb2..8b44f27880 100644 --- a/apollo-router/tests/snapshots/apollo_reports__client_version-2.snap +++ b/apollo-router/tests/snapshots/apollo_reports__client_version-2.snap @@ -548,10 +548,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__client_version.snap b/apollo-router/tests/snapshots/apollo_reports__client_version.snap index f498319bb2..8b44f27880 100644 --- a/apollo-router/tests/snapshots/apollo_reports__client_version.snap +++ b/apollo-router/tests/snapshots/apollo_reports__client_version.snap @@ -548,10 +548,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__condition_else-2.snap b/apollo-router/tests/snapshots/apollo_reports__condition_else-2.snap index 1533c1ffe0..10b01ce754 100644 --- a/apollo-router/tests/snapshots/apollo_reports__condition_else-2.snap +++ b/apollo-router/tests/snapshots/apollo_reports__condition_else-2.snap @@ -554,10 +554,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__condition_else.snap b/apollo-router/tests/snapshots/apollo_reports__condition_else.snap index 1533c1ffe0..10b01ce754 100644 --- a/apollo-router/tests/snapshots/apollo_reports__condition_else.snap +++ b/apollo-router/tests/snapshots/apollo_reports__condition_else.snap @@ -554,10 +554,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__condition_if-2.snap b/apollo-router/tests/snapshots/apollo_reports__condition_if-2.snap index 7647544107..71754c71bc 100644 --- a/apollo-router/tests/snapshots/apollo_reports__condition_if-2.snap +++ b/apollo-router/tests/snapshots/apollo_reports__condition_if-2.snap @@ -567,10 +567,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: 
"[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__condition_if.snap b/apollo-router/tests/snapshots/apollo_reports__condition_if.snap index 7647544107..71754c71bc 100644 --- a/apollo-router/tests/snapshots/apollo_reports__condition_if.snap +++ b/apollo-router/tests/snapshots/apollo_reports__condition_if.snap @@ -567,10 +567,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__non_defer-2.snap b/apollo-router/tests/snapshots/apollo_reports__non_defer-2.snap index f157a249fc..140015c24b 100644 --- a/apollo-router/tests/snapshots/apollo_reports__non_defer-2.snap +++ b/apollo-router/tests/snapshots/apollo_reports__non_defer-2.snap @@ -548,10 +548,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__non_defer.snap b/apollo-router/tests/snapshots/apollo_reports__non_defer.snap index f157a249fc..140015c24b 100644 --- a/apollo-router/tests/snapshots/apollo_reports__non_defer.snap +++ b/apollo-router/tests/snapshots/apollo_reports__non_defer.snap @@ -548,10 +548,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__send_header-2.snap b/apollo-router/tests/snapshots/apollo_reports__send_header-2.snap index a64d572e40..913116e1f5 100644 --- a/apollo-router/tests/snapshots/apollo_reports__send_header-2.snap +++ b/apollo-router/tests/snapshots/apollo_reports__send_header-2.snap @@ -551,10 +551,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__send_header.snap b/apollo-router/tests/snapshots/apollo_reports__send_header.snap index a64d572e40..913116e1f5 100644 --- a/apollo-router/tests/snapshots/apollo_reports__send_header.snap +++ b/apollo-router/tests/snapshots/apollo_reports__send_header.snap @@ -551,10 +551,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__send_variable_value-2.snap b/apollo-router/tests/snapshots/apollo_reports__send_variable_value-2.snap index 89b4799c2c..e0b887547a 100644 --- a/apollo-router/tests/snapshots/apollo_reports__send_variable_value-2.snap +++ b/apollo-router/tests/snapshots/apollo_reports__send_variable_value-2.snap @@ -550,10 +550,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - 
internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__send_variable_value.snap b/apollo-router/tests/snapshots/apollo_reports__send_variable_value.snap index 89b4799c2c..e0b887547a 100644 --- a/apollo-router/tests/snapshots/apollo_reports__send_variable_value.snap +++ b/apollo-router/tests/snapshots/apollo_reports__send_variable_value.snap @@ -550,10 +550,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__stats.snap b/apollo-router/tests/snapshots/apollo_reports__stats.snap index 22a641e2c0..eb84bc40a2 100644 --- a/apollo-router/tests/snapshots/apollo_reports__stats.snap +++ b/apollo-router/tests/snapshots/apollo_reports__stats.snap @@ -119,7 +119,6 @@ traces_per_query: field_names: - name is_interface: false - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 @@ -128,4 +127,3 @@ operation_count_by_type: subtype: "" operation_count: 1 traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__trace_id-2.snap b/apollo-router/tests/snapshots/apollo_reports__trace_id-2.snap index f157a249fc..140015c24b 100644 --- a/apollo-router/tests/snapshots/apollo_reports__trace_id-2.snap +++ b/apollo-router/tests/snapshots/apollo_reports__trace_id-2.snap @@ -548,10 +548,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - diff --git a/apollo-router/tests/snapshots/apollo_reports__trace_id.snap b/apollo-router/tests/snapshots/apollo_reports__trace_id.snap index f157a249fc..140015c24b 100644 --- a/apollo-router/tests/snapshots/apollo_reports__trace_id.snap +++ b/apollo-router/tests/snapshots/apollo_reports__trace_id.snap @@ -548,10 +548,8 @@ traces_per_query: field_execution_weight: 1 stats_with_context: [] referenced_fields_by_type: {} - internal_traces_contributing_to_stats: [] query_metadata: ~ end_time: "[end_time]" operation_count: 0 operation_count_by_type: [] traces_pre_aggregated: true - From 9adbeb6229bafdfdcd9d3fd2ed9d417a2a5f7132 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Thu, 18 Apr 2024 12:05:48 +0000 Subject: [PATCH 35/46] prep release: v1.45.0-alpha.3 --- Cargo.lock | 6 +++--- apollo-router-benchmarks/Cargo.toml | 2 +- apollo-router-scaffold/Cargo.toml | 2 +- apollo-router-scaffold/templates/base/Cargo.toml | 2 +- apollo-router-scaffold/templates/base/xtask/Cargo.toml | 2 +- apollo-router/Cargo.toml | 2 +- dockerfiles/tracing/docker-compose.datadog.yml | 2 +- dockerfiles/tracing/docker-compose.jaeger.yml | 2 +- dockerfiles/tracing/docker-compose.zipkin.yml | 2 +- helm/chart/router/Chart.yaml | 4 ++-- helm/chart/router/README.md | 6 +++--- scripts/install.sh | 2 +- 12 files changed, 17 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d4dec88f3d..2b4817a2f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -250,7 +250,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.45.0-alpha.2" +version = "1.45.0-alpha.3" dependencies = [ 
"access-json", "anyhow", @@ -411,7 +411,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.45.0-alpha.2" +version = "1.45.0-alpha.3" dependencies = [ "apollo-parser", "apollo-router", @@ -427,7 +427,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.45.0-alpha.2" +version = "1.45.0-alpha.3" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index a00bebf65b..c9c1be7ebd 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.45.0-alpha.2" +version = "1.45.0-alpha.3" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index 48958a8e44..d4a0423a4b 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.45.0-alpha.2" +version = "1.45.0-alpha.3" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index b815f39a29..8e7af00505 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.45.0-alpha.2" +apollo-router = "1.45.0-alpha.3" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index e11424efa0..5dc2278a8e 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0-alpha.2" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0-alpha.3" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 6eb5a800d5..8e75a1f9f9 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.45.0-alpha.2" +version = "1.45.0-alpha.3" authors = ["Apollo Graph, Inc. 
"] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index bcf772395b..78c04a78a3 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.45.0-alpha.2 + image: ghcr.io/apollographql/router:v1.45.0-alpha.3 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 612637aea3..12118d60d4 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.45.0-alpha.2 + image: ghcr.io/apollographql/router:v1.45.0-alpha.3 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index 8c689988b9..aa6a3ba964 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.45.0-alpha.2 + image: ghcr.io/apollographql/router:v1.45.0-alpha.3 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index c4f17db0b0..32c953da61 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.45.0-alpha.2 +version: 1.45.0-alpha.3 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "v1.45.0-alpha.2" +appVersion: "v1.45.0-alpha.3" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index 811acfed85..224a038dce 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.45.0-alpha.2](https://img.shields.io/badge/Version-1.45.0--alpha.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0-alpha.2](https://img.shields.io/badge/AppVersion-v1.45.0--alpha.2-informational?style=flat-square) +![Version: 1.45.0-alpha.3](https://img.shields.io/badge/Version-1.45.0--alpha.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0-alpha.3](https://img.shields.io/badge/AppVersion-v1.45.0--alpha.3-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.2 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.3 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha. **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.2 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.3 --values my-values.yaml ``` _See [configuration](#configuration) below._ diff --git a/scripts/install.sh b/scripts/install.sh index 6d0da838bb..e508f71e6d 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. -PACKAGE_VERSION="v1.45.0-alpha.2" +PACKAGE_VERSION="v1.45.0-alpha.3" download_binary() { downloader --check From c33557d85d1cb202737343905596c7237736b54e Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Thu, 18 Apr 2024 17:31:37 +0300 Subject: [PATCH 36/46] ci: Only validate Helm against Kubernetes versions with "previous patches" (#4983) We test our Helm chart against known stable Kubernetes versions. [Kubernetes v1.30.0] came out last night, however, the [manifest] on which we rely in order to uses a particularly odd format when this happens, leaving both `previousPatches` empty, but also not a nice representation of "current version" that we can rely on without doing some funny inference. (YAML comes into play here) This fix wouldn't be necessary after the first patch (v1.30.1), but the work to fix this in a better way is disproportional to just not testing the very first patch version of a new Kubernetes minor, which is probably a totally fine trade-off for the first month of a brand new Kubernetes release. (v1.30.1 already shows in the manifest as coming on 2024-05-15). For now, this unblocks CI. We could consider other approaches in the future. 
[manifest]: https://raw.githubusercontent.com/kubernetes/website/main/data/releases/schedule.yaml [Kubernetes v1.30.0]: https://github.com/kubernetes/kubernetes/releases/tag/v1.30.0 --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 584a165dcc..acd03f9460 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -408,7 +408,7 @@ commands: # Create list of kube versions CURRENT_KUBE_VERSIONS=$(curl -s -L https://raw.githubusercontent.com/kubernetes/website/main/data/releases/schedule.yaml \ | yq -o json '.' \ - | jq --raw-output '.schedules[] | select((now | strftime("%Y-%m-%dT00:00:00Z")) as $date | .releaseDate < $date and .endOfLifeDate > $date) | .previousPatches[].release') + | jq --raw-output '.schedules[] | select((now | strftime("%Y-%m-%dT00:00:00Z")) as $date | .releaseDate < $date and .endOfLifeDate > $date) | select(.previousPatches != null) | .previousPatches[].release') TEMPLATE_DIR=$(mktemp -d) MINOR_VERSION="${kube_version%.*}" From 598b9de4feecf083d9b9f86297df39f78daca095 Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Thu, 18 Apr 2024 17:34:50 +0100 Subject: [PATCH 37/46] Unify `query_planning` and `query_planner` (#4980) Both config and metrics have similar but differing names. These have now all been moved to `query_planning`. In addition, metrics have been updated with names that follow current naming conventions, and the docs have been updated. --- **Checklist** Complete the checklist (and note appropriate exceptions) before the PR is marked ready-for-review. - [ ] Changes are compatible[^1] - [ ] Documentation[^2] completed - [ ] Performance impact assessed and acceptable - Tests added and passing[^3] - [ ] Unit Tests - [ ] Integration Tests - [ ] Manual Tests **Exceptions** *Note any exceptions here* **Notes** [^1]: It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this. [^2]: Configuration is an important part of many changes. Where applicable please try to document configuration examples. [^3]: Tick whichever testing boxes are applicable. If you are adding Manual Tests, please document the manual testing (extensively) in the Exceptions.
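For anyone tracking the rename, the configuration move is a pure key-path change. A sketch based on the test fixtures in this diff; values are unchanged:

```yaml
# Before: supergraph.query_planner.experimental_parallelism
supergraph:
  query_planner:
    experimental_parallelism: auto

# After: supergraph.query_planning.experimental_parallelism
supergraph:
  query_planning:
    experimental_parallelism: auto
```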
--------- Co-authored-by: bryn Co-authored-by: Jesse Rosenberger --- .../exp_carton_ginger_magnet_beacon.md | 13 ++-- apollo-router/examples/router.yaml | 2 +- apollo-router/src/configuration/metrics.rs | 6 +- apollo-router/src/configuration/mod.rs | 62 ++++++++----------- ..._planner_parallelism_auto.router.yaml.snap | 2 +- ...lanner_parallelism_static.router.yaml.snap | 2 +- ...nfiguration__tests__schema_generation.snap | 57 +++++++---------- ...query_planner_parallelism_auto.router.yaml | 2 +- ...ery_planner_parallelism_static.router.yaml | 2 +- .../bridge_query_planner_pool.rs | 19 ++++-- apollo-router/src/router_factory.rs | 4 +- .../configuration/in-memory-caching.mdx | 4 +- docs/source/configuration/overview.mdx | 4 +- .../instrumentation/standard-instruments.mdx | 8 ++- 14 files changed, 88 insertions(+), 99 deletions(-) diff --git a/.changesets/exp_carton_ginger_magnet_beacon.md b/.changesets/exp_carton_ginger_magnet_beacon.md index 1d8ad7aef7..3a9981ed52 100644 --- a/.changesets/exp_carton_ginger_magnet_beacon.md +++ b/.changesets/exp_carton_ginger_magnet_beacon.md @@ -2,16 +2,19 @@ The router supports a new experimental feature: a pool of query planners to parallelize query planning. -You can configure query planner pools with the `supergraph.query_planner.experimental_parallelism` option: +You can configure query planner pools with the `supergraph.query_planning.experimental_parallelism` option: ```yaml supergraph: - query_planner: + query_planning: experimental_parallelism: auto # number of available cpus ``` -Its value is the number of query planners that run in parallel, and its default value is `1`. You can set it to the special value `auto` to automatically set it equal to the number of available CPUs. +Its value is the number of query planners that run in parallel, and its default value is `1`. You can set it to the +special value `auto` to automatically set it equal to the number of available CPUs. -You can discuss and comment about query planner pools in this [GitHub discussion](https://github.com/apollographql/router/discussions/4917). +You can discuss and comment about query planner pools in +this [GitHub discussion](https://github.com/apollographql/router/discussions/4917). 
-By [@xuorig](https://github.com/xuorig) and [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/4897 +By [@xuorig](https://github.com/xuorig) and [@o0Ignition0o](https://github.com/o0Ignition0o) +in https://github.com/apollographql/router/pull/4897 diff --git a/apollo-router/examples/router.yaml b/apollo-router/examples/router.yaml index 1981c5da3b..6936d57e32 100644 --- a/apollo-router/examples/router.yaml +++ b/apollo-router/examples/router.yaml @@ -1,7 +1,7 @@ supergraph: listen: 0.0.0.0:4100 introspection: true - query_planner: + query_planning: experimental_parallelism: auto # or any number plugins: experimental.expose_query_plan: true diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs index 9e3f894970..67b405b318 100644 --- a/apollo-router/src/configuration/metrics.rs +++ b/apollo-router/src/configuration/metrics.rs @@ -436,7 +436,7 @@ impl InstrumentData { ) { let query_planner_parallelism_config = configuration .supergraph - .query_planner + .query_planning .experimental_parallelism; if query_planner_parallelism_config != Default::default() { @@ -451,11 +451,11 @@ impl InstrumentData { .into(), ); self.data.insert( - "apollo.router.config.query_planner.parallelism".to_string(), + "apollo.router.config.query_planning.parallelism".to_string(), ( configuration .supergraph - .query_planner + .query_planning .experimental_query_planner_parallelism() .map(|n| { #[cfg(test)] diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index 5ece74cc65..2b8eb42115 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -1,16 +1,4 @@ //! Logic for loading configuration in to an object model -pub(crate) mod cors; -pub(crate) mod expansion; -mod experimental; -pub(crate) mod metrics; -mod persisted_queries; -mod schema; -pub(crate) mod subgraph; -#[cfg(test)] -mod tests; -mod upgrade; -mod yaml; - use std::fmt; use std::io; use std::io::BufReader; @@ -70,6 +58,18 @@ use crate::plugins::subscription::APOLLO_SUBSCRIPTION_PLUGIN_NAME; use crate::uplink::UplinkConfig; use crate::ApolloRouterError; +pub(crate) mod cors; +pub(crate) mod expansion; +mod experimental; +pub(crate) mod metrics; +mod persisted_queries; +mod schema; +pub(crate) mod subgraph; +#[cfg(test)] +mod tests; +mod upgrade; +mod yaml; + // TODO: Talk it through with the teams #[cfg(not(test))] static HEARTBEAT_TIMEOUT_DURATION_SECONDS: u64 = 15; @@ -631,18 +631,6 @@ pub(crate) struct Supergraph { /// Log a message if the client closes the connection before the response is sent. /// Default: false. pub(crate) experimental_log_on_broken_pipe: bool, - - /// Configuration options pertaining to the query planner component. - pub(crate) query_planner: QueryPlanner, -} - -/// Configuration options pertaining to the query planner component. -#[derive(Debug, Clone, Default, Deserialize, Serialize, JsonSchema)] -#[serde(deny_unknown_fields)] -pub(crate) struct QueryPlanner { - /// Set the size of a pool of workers to enable query planning parallelism. - /// Default: 1. 
- pub(crate) experimental_parallelism: AvailableParallelism, } #[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] @@ -664,15 +652,6 @@ impl Default for AvailableParallelism { } } -impl QueryPlanner { - pub(crate) fn experimental_query_planner_parallelism(&self) -> io::Result { - match self.experimental_parallelism { - AvailableParallelism::Auto(Auto::Auto) => std::thread::available_parallelism(), - AvailableParallelism::Fixed(n) => Ok(n), - } - } -} - fn default_defer_support() -> bool { true } @@ -690,7 +669,6 @@ impl Supergraph { generate_query_fragments: Option, early_cancel: Option, experimental_log_on_broken_pipe: Option, - query_planner: Option, ) -> Self { Self { listen: listen.unwrap_or_else(default_graphql_listen), @@ -710,7 +688,6 @@ impl Supergraph { generate_query_fragments: generate_query_fragments.unwrap_or_default(), early_cancel: early_cancel.unwrap_or_default(), experimental_log_on_broken_pipe: experimental_log_on_broken_pipe.unwrap_or_default(), - query_planner: query_planner.unwrap_or_default(), } } } @@ -729,7 +706,6 @@ impl Supergraph { generate_query_fragments: Option, early_cancel: Option, experimental_log_on_broken_pipe: Option, - query_planner: Option, ) -> Self { Self { listen: listen.unwrap_or_else(test_listen), @@ -749,7 +725,6 @@ impl Supergraph { generate_query_fragments: generate_query_fragments.unwrap_or_default(), early_cancel: early_cancel.unwrap_or_default(), experimental_log_on_broken_pipe: experimental_log_on_broken_pipe.unwrap_or_default(), - query_planner: query_planner.unwrap_or_default(), } } } @@ -970,6 +945,19 @@ pub(crate) struct QueryPlanning { /// If cache warm up is configured, this will allow the router to keep a query plan created with /// the old schema, if it determines that the schema update does not affect the corresponding query pub(crate) experimental_reuse_query_plans: bool, + + /// Set the size of a pool of workers to enable query planning parallelism. + /// Default: 1. 
+ pub(crate) experimental_parallelism: AvailableParallelism, +} + +impl QueryPlanning { + pub(crate) fn experimental_query_planner_parallelism(&self) -> io::Result { + match self.experimental_parallelism { + AvailableParallelism::Auto(Auto::Auto) => std::thread::available_parallelism(), + AvailableParallelism::Fixed(n) => Ok(n), + } + } } /// Cache configuration diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_auto.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_auto.router.yaml.snap index 36eeed98cf..b54b336914 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_auto.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_auto.router.yaml.snap @@ -2,7 +2,7 @@ source: apollo-router/src/configuration/metrics.rs expression: "&metrics.non_zero()" --- -- name: apollo.router.config.query_planner.parallelism +- name: apollo.router.config.query_planning.parallelism data: datapoints: - value: 8 diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_static.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_static.router.yaml.snap index d9970a38fd..07bb2c2ea4 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_static.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@query_planner_parallelism_static.router.yaml.snap @@ -2,7 +2,7 @@ source: apollo-router/src/configuration/metrics.rs expression: "&metrics.non_zero()" --- -- name: apollo.router.config.query_planner.parallelism +- name: apollo.router.config.query_planning.parallelism data: datapoints: - value: 10 diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 3d3d7fd527..504605271e 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -2673,13 +2673,11 @@ expression: "&schema" "warmed_up_queries": null, "experimental_plans_limit": null, "experimental_paths_limit": null, - "experimental_reuse_query_plans": false + "experimental_reuse_query_plans": false, + "experimental_parallelism": 1 }, "early_cancel": false, - "experimental_log_on_broken_pipe": false, - "query_planner": { - "experimental_parallelism": 1 - } + "experimental_log_on_broken_pipe": false }, "type": "object", "properties": { @@ -2733,35 +2731,6 @@ expression: "&schema" "default": "/", "type": "string" }, - "query_planner": { - "description": "Configuration options pertaining to the query planner component.", - "default": { - "experimental_parallelism": 1 - }, - "type": "object", - "required": [ - "experimental_parallelism" - ], - "properties": { - "experimental_parallelism": { - "description": "Set the size of a pool of workers to enable query planning parallelism. 
Default: 1.", - "anyOf": [ - { - "type": "string", - "enum": [ - "auto" - ] - }, - { - "type": "integer", - "format": "uint", - "minimum": 1.0 - } - ] - } - }, - "additionalProperties": false - }, "query_planning": { "description": "Query planning options", "default": { @@ -2774,7 +2743,8 @@ expression: "&schema" "warmed_up_queries": null, "experimental_plans_limit": null, "experimental_paths_limit": null, - "experimental_reuse_query_plans": false + "experimental_reuse_query_plans": false, + "experimental_parallelism": 1 }, "type": "object", "properties": { @@ -2908,6 +2878,23 @@ expression: "&schema" }, "additionalProperties": false }, + "experimental_parallelism": { + "description": "Set the size of a pool of workers to enable query planning parallelism. Default: 1.", + "default": 1, + "anyOf": [ + { + "type": "string", + "enum": [ + "auto" + ] + }, + { + "type": "integer", + "format": "uint", + "minimum": 1.0 + } + ] + }, "experimental_paths_limit": { "description": "Before creating query plans, for each path of fields in the query we compute all the possible options to traverse that path via the subgraphs. Multiple options can arise because fields in the path can be provided by multiple subgraphs, and abstract types (i.e. unions and interfaces) returned by fields sometimes require the query planner to traverse through each constituent object type. The number of options generated in this computation can grow large if the schema or query are sufficiently complex, and that will increase the time spent planning.\n\nThis config allows specifying a per-path limit to the number of options considered. If any path's options exceeds this limit, query planning will abort and the operation will fail.\n\nThe default value is None, which specifies no limit.", "default": null, diff --git a/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_auto.router.yaml b/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_auto.router.yaml index d915dcdadc..e29357f06d 100644 --- a/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_auto.router.yaml +++ b/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_auto.router.yaml @@ -1,3 +1,3 @@ supergraph: - query_planner: + query_planning: experimental_parallelism: auto diff --git a/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_static.router.yaml b/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_static.router.yaml index 586eb5abb8..8861ab2777 100644 --- a/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_static.router.yaml +++ b/apollo-router/src/configuration/testdata/metrics/query_planner_parallelism_static.router.yaml @@ -1,3 +1,3 @@ supergraph: - query_planner: + query_planning: experimental_parallelism: 10 diff --git a/apollo-router/src/query_planner/bridge_query_planner_pool.rs b/apollo-router/src/query_planner/bridge_query_planner_pool.rs index cad419cf8f..4b7dd47fff 100644 --- a/apollo-router/src/query_planner/bridge_query_planner_pool.rs +++ b/apollo-router/src/query_planner/bridge_query_planner_pool.rs @@ -7,7 +7,7 @@ use apollo_compiler::validation::Valid; use async_channel::bounded; use async_channel::Sender; use futures::future::BoxFuture; -use opentelemetry::KeyValue; +use opentelemetry::metrics::MeterProvider; use router_bridge::planner::Planner; use tokio::sync::oneshot; use tokio::task::JoinSet; @@ -18,6 +18,7 @@ use super::bridge_query_planner::BridgeQueryPlanner; use 
super::QueryPlanResult; use crate::error::QueryPlannerError; use crate::error::ServiceBuildError; +use crate::metrics::meter_provider; use crate::services::QueryPlannerRequest; use crate::services::QueryPlannerResponse; use crate::spec::Schema; @@ -34,6 +35,7 @@ pub(crate) struct BridgeQueryPlannerPool { )>, schema: Arc, subgraph_schemas: Arc>>>, + _pool_size_gauge: opentelemetry::metrics::ObservableGauge, } impl BridgeQueryPlannerPool { @@ -122,22 +124,29 @@ impl BridgeQueryPlannerPool { let res = svc.call(request).await; f64_histogram!( - "apollo.router.query_planner.duration", + "apollo.router.query_planning.plan.duration", "Duration of the query planning.", start.elapsed().as_secs_f64(), - [KeyValue::new("workerId", worker_id.to_string())] + "workerId" = worker_id.to_string() ); let _ = res_sender.send(res); } }); } + let sender_for_gauge = sender.clone(); + let pool_size_gauge = meter_provider() + .meter("apollo/router") + .u64_observable_gauge("apollo.router.query_planning.queued") + .with_callback(move |m| m.observe(sender_for_gauge.len() as u64, &[])) + .init(); Ok(Self { planners, sender, schema, subgraph_schemas, + _pool_size_gauge: pool_size_gauge, }) } @@ -184,14 +193,12 @@ impl tower::Service for BridgeQueryPlannerPool { let start = Instant::now(); let _ = sender.send((req, response_sender)).await; - tracing::info!(value.apollo_router_query_planner_queue_size = sender.len()); let res = response_receiver .await .map_err(|_| QueryPlannerError::UnhandledPlannerResult)?; - tracing::info!(value.apollo_router_query_planner_queue_size = sender.len()); f64_histogram!( - "apollo_router_query_planning_time", + "apollo.router.query_planning.total.duration", "Duration of the time the router waited for a query plan, including both the queue time and planning time.", start.elapsed().as_secs_f64(), [] diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index 58f31f9236..8274993c10 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -286,7 +286,7 @@ impl YamlRouterFactory { configuration.clone(), configuration .supergraph - .query_planner + .query_planning .experimental_query_planner_parallelism()?, ) .instrument(query_planner_span) @@ -299,7 +299,7 @@ impl YamlRouterFactory { configuration.clone(), configuration .supergraph - .query_planner + .query_planning .experimental_query_planner_parallelism()?, ) .instrument(query_planner_span) diff --git a/docs/source/configuration/in-memory-caching.mdx b/docs/source/configuration/in-memory-caching.mdx index 9be7083f9e..ccbb10ec3d 100644 --- a/docs/source/configuration/in-memory-caching.mdx +++ b/docs/source/configuration/in-memory-caching.mdx @@ -74,13 +74,13 @@ To get more information on the planning and warm-up process use the following me * `apollo_router_cache_miss_count{kind="query planner", storage="}` * histograms: - * `apollo_router_query_planning_time`: time spent planning queries + * `apollo.router.query_planning.plan.duration`: time spent planning queries * `apollo_router_schema_loading_time`: time spent loading a schema * `apollo_router_cache_hit_time{kind="query planner", storage="}`: time to get a value from the cache * `apollo_router_cache_miss_time{kind="query planner", storage="}` Typically, we would look at `apollo_router_cache_size` and the cache hit rate to define the right size of the in memory cache, -then look at `apollo_router_schema_loading_time` and `apollo_router_query_planning_time` to decide how much time we want to spend warming up queries. 
+then look at `apollo_router_schema_loading_time` and `apollo.router.query_planning.plan.duration` to decide how much time we want to spend warming up queries.

 #### Cache warm-up with distributed caching

diff --git a/docs/source/configuration/overview.mdx b/docs/source/configuration/overview.mdx
index f348c4072d..fd3bf99f30 100644
--- a/docs/source/configuration/overview.mdx
+++ b/docs/source/configuration/overview.mdx
@@ -559,11 +559,11 @@ You can improve the performance of the router's query planner by configuring par

 By default, the query planner plans one operation at a time. It plans one operation to completion before planning the next one. This serial planning can be problematic when an operation takes a long time to plan and consequently blocks the query planner from working on other operations.

-To resolve such blocking scenarios, you can enable parallel query planning. Configure it in `router.yaml` with `supergraph.query_planner.experimental_parallelism`:
+To resolve such blocking scenarios, you can enable parallel query planning. Configure it in `router.yaml` with `supergraph.query_planning.experimental_parallelism`:

 ```yaml title="router.yaml"
 supergraph:
-  query_planner:
+  query_planning:
     experimental_parallelism: auto # number of available cpus
 ```

diff --git a/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx b/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx
index 474b5f604d..c919845978 100644
--- a/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx
+++ b/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx
@@ -57,10 +57,14 @@ The coprocessor operations metric has the following attributes:

 ### Performance

 - `apollo_router_processing_time` - Time spent processing a request (outside of waiting for external or subgraph requests) in seconds.
-- `apollo_router_query_planning_time` - Time spent planning queries in seconds.
-- `apollo_router_query_planning_warmup_duration` - Time spent planning queries in seconds.
 - `apollo_router_schema_load_duration` - Time spent loading the schema in seconds.

+### Query planning
+- `apollo_router.query_planning.warmup.duration` - Time spent warming up the query planner with queries, in seconds.
+- `apollo.router.query_planning.plan.duration` - Histogram of plan durations.
+- `apollo.router.query_planning.total.duration` - Histogram of plan durations, including queue time.
+- `apollo.router.query_planning.queued` - A gauge of the number of queued query plan requests.
+
 ### Uplink

From def4a2ec6ef1197f23acbfd45757325b11d8e55c Mon Sep 17 00:00:00 2001
From: Nick Marsh
Date: Fri, 19 Apr 2024 03:21:35 +1000
Subject: [PATCH 38/46] Fix a small formatting issue with Rust signature generation (#4981)

Fixes an issue with signature generation where commas were sometimes not inserted when they should be.
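For illustration, using the arguments from the `test_comma_edge_case` test added
below (the "before" output is reconstructed from the old condition, which only
emitted the separator when the previous argument string ended in an alphanumeric
character; it is not captured from an actual run):

```graphql
# Before the fix (reconstructed): the comma after anotherStr:"" is dropped,
# because the argument string ends in a quote rather than an alphanumeric char.
enumInputQuery(anotherStr:""enumInput:SOME_VALUE_1,stringInput:"")

# After the fix: commas are always inserted between arguments.
enumInputQuery(anotherStr:"",enumInput:SOME_VALUE_1,stringInput:"")
```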
---------

Co-authored-by: Jesse Rosenberger
---
 .../src/apollo_studio_interop/mod.rs          | 12 +++--
 .../testdata/schema_interop.graphql           |  2 +-
 .../src/apollo_studio_interop/tests.rs        | 38 +++++++++++++++
 .../src/query_planner/bridge_query_planner.rs | 46 ++++++++-----------
 4 files changed, 66 insertions(+), 32 deletions(-)

diff --git a/apollo-router/src/apollo_studio_interop/mod.rs b/apollo-router/src/apollo_studio_interop/mod.rs
index b1afad3f34..7cd27c3a7d 100644
--- a/apollo-router/src/apollo_studio_interop/mod.rs
+++ b/apollo-router/src/apollo_studio_interop/mod.rs
@@ -430,12 +430,14 @@ fn format_field(field: &Node, f: &mut fmt::Formatter) -> fmt::Result {
     for (index, arg_string) in arg_strings.iter().enumerate() {
         f.write_str(arg_string)?;

-        // We only need to insert a separating space it's not the last arg and if the string ends in an alphanumeric character
+        // We only need to insert a separating space if it's not the last arg and the string ends in an alphanumeric character.
+        // If the separator is a comma, we always insert it when it's not the last arg.
         if index < arg_strings.len() - 1
-            && arg_string
-                .chars()
-                .last()
-                .map_or(true, |c| c.is_alphanumeric())
+            && (separator == ","
+                || arg_string
+                    .chars()
+                    .last()
+                    .map_or(true, |c| c.is_alphanumeric()))
         {
             f.write_str(separator)?;
         }
diff --git a/apollo-router/src/apollo_studio_interop/testdata/schema_interop.graphql b/apollo-router/src/apollo_studio_interop/testdata/schema_interop.graphql
index f97f128c10..e41e500782 100644
--- a/apollo-router/src/apollo_studio_interop/testdata/schema_interop.graphql
+++ b/apollo-router/src/apollo_studio_interop/testdata/schema_interop.graphql
@@ -177,7 +177,7 @@ type Query
   noInputQuery: EverythingResponse!
   basicInputTypeQuery(input: NestedInputType!): EverythingResponse!
   anotherInputTypeQuery(input: AnotherInputType): EverythingResponse!
-  enumInputQuery(enumInput: SomeEnum, inputType: EnumInputType): EverythingResponse!
+  enumInputQuery(enumInput: SomeEnum, inputType: EnumInputType, stringInput: String, anotherStr: String): EverythingResponse!
   basicResponseQuery: BasicResponse!
   scalarResponseQuery: String
   defaultArgQuery(stringInput: String! = "default", inputType: AnotherInputType = {anotherInput: "inputDefault"}): BasicResponse!
diff --git a/apollo-router/src/apollo_studio_interop/tests.rs b/apollo-router/src/apollo_studio_interop/tests.rs index c3ae55ce6c..ac2dd78215 100644 --- a/apollo-router/src/apollo_studio_interop/tests.rs +++ b/apollo-router/src/apollo_studio_interop/tests.rs @@ -1196,6 +1196,44 @@ async fn test_operation_arg_always_commas() { assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; } +#[test(tokio::test)] +async fn test_comma_edge_case() { + let schema_str = include_str!("testdata/schema_interop.graphql"); + + let query_str = r#"query QueryCommaEdgeCase { + enumInputQuery (anotherStr:"",enumInput:SOME_VALUE_1,stringInput:"") { + enumResponse + } + }"#; + + let schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); + let doc = ExecutableDocument::parse(&schema, query_str, "query.graphql").unwrap(); + + let generated = + generate_usage_reporting(&doc, &doc, &Some("QueryCommaEdgeCase".into()), &schema); + + let expected_sig = "# QueryCommaEdgeCase\nquery QueryCommaEdgeCase{enumInputQuery(anotherStr:\"\",enumInput:SOME_VALUE_1,stringInput:\"\"){enumResponse}}"; + let expected_refs: HashMap = HashMap::from([ + ( + "Query".into(), + ReferencedFieldsForType { + field_names: vec!["enumInputQuery".into()], + is_interface: false, + }, + ), + ( + "EverythingResponse".into(), + ReferencedFieldsForType { + field_names: vec!["enumResponse".into()], + is_interface: false, + }, + ), + ]); + + assert_expected_results(&generated, expected_sig, &expected_refs); + assert_bridge_results(schema_str, query_str, expected_sig, &expected_refs).await; +} + #[test(tokio::test)] async fn test_compare() { let source = ComparableUsageReporting { diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index 5bba5e2a01..e740f0aa4c 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -464,10 +464,11 @@ impl BridgeQueryPlanner { UsageReportingComparisonResult::StatsReportKeyNotEqual | UsageReportingComparisonResult::BothNotEqual ) { - tracing::warn!( - monotonic_counter.apollo.router.operations.telemetry.studio.signature = 1u64, - generation.is_matched = false, - "Mismatch between the Apollo usage reporting signature generated in router and router-bridge" + u64_counter!( + "apollo.router.operations.telemetry.studio.signature", + "The match status of the Apollo reporting signature generated by the JS implementation vs the Rust implementation", + 1, + "generation.is_matched" = "false" ); tracing::debug!( "Different signatures generated between router and router-bridge:\n{}\n{}", @@ -475,15 +476,11 @@ impl BridgeQueryPlanner { usage_reporting.stats_report_key, ); } else { - tracing::info!( - monotonic_counter - .apollo - .router - .operations - .telemetry - .studio - .signature = 1u64, - generation.is_matched = true, + u64_counter!( + "apollo.router.operations.telemetry.studio.signature", + "The match status of the Apollo reporting signature generated by the JS implementation vs the Rust implementation", + 1, + "generation.is_matched" = "true" ); } @@ -492,10 +489,11 @@ impl BridgeQueryPlanner { UsageReportingComparisonResult::ReferencedFieldsNotEqual | UsageReportingComparisonResult::BothNotEqual ) { - tracing::warn!( - monotonic_counter.apollo.router.operations.telemetry.studio.references = 1u64, - generation.is_matched = false, - "Mismatch between the Apollo usage report referenced fields generated in router and router-bridge" + 
u64_counter!( + "apollo.router.operations.telemetry.studio.references", + "The match status of the Apollo reporting references generated by the JS implementation vs the Rust implementation", + 1, + "generation.is_matched" = "false" ); tracing::debug!( "Different referenced fields generated between router and router-bridge:\n{:?}\n{:?}", @@ -503,15 +501,11 @@ impl BridgeQueryPlanner { usage_reporting.referenced_fields_by_type, ); } else { - tracing::info!( - monotonic_counter - .apollo - .router - .operations - .telemetry - .studio - .references = 1u64, - generation.is_matched = true, + u64_counter!( + "apollo.router.operations.telemetry.studio.references", + "The match status of the Apollo reporting references generated by the JS implementation vs the Rust implementation", + 1, + "generation.is_matched" = "true" ); } } else if matches!( From e6ec38ab6e9fb5f5cedc627328475571d9f53d8b Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Thu, 18 Apr 2024 17:42:48 +0000 Subject: [PATCH 39/46] prep release: v1.45.0-alpha.4 --- Cargo.lock | 6 +++--- apollo-router-benchmarks/Cargo.toml | 2 +- apollo-router-scaffold/Cargo.toml | 2 +- apollo-router-scaffold/templates/base/Cargo.toml | 2 +- apollo-router-scaffold/templates/base/xtask/Cargo.toml | 2 +- apollo-router/Cargo.toml | 2 +- dockerfiles/tracing/docker-compose.datadog.yml | 2 +- dockerfiles/tracing/docker-compose.jaeger.yml | 2 +- dockerfiles/tracing/docker-compose.zipkin.yml | 2 +- helm/chart/router/Chart.yaml | 4 ++-- helm/chart/router/README.md | 6 +++--- scripts/install.sh | 2 +- 12 files changed, 17 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2b4817a2f7..53ea744e53 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -250,7 +250,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.45.0-alpha.3" +version = "1.45.0-alpha.4" dependencies = [ "access-json", "anyhow", @@ -411,7 +411,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.45.0-alpha.3" +version = "1.45.0-alpha.4" dependencies = [ "apollo-parser", "apollo-router", @@ -427,7 +427,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.45.0-alpha.3" +version = "1.45.0-alpha.4" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index c9c1be7ebd..93941df80b 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.45.0-alpha.3" +version = "1.45.0-alpha.4" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index d4a0423a4b..b47d65298a 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.45.0-alpha.3" +version = "1.45.0-alpha.4" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index 8e7af00505..57783a03b0 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.45.0-alpha.3" +apollo-router = "1.45.0-alpha.4" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index 5dc2278a8e..ba557771bd 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0-alpha.3" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0-alpha.4" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 8e75a1f9f9..8e6df7a5d2 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.45.0-alpha.3" +version = "1.45.0-alpha.4" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index 78c04a78a3..c7987a5c3e 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.45.0-alpha.3 + image: ghcr.io/apollographql/router:v1.45.0-alpha.4 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 12118d60d4..09036ba6f2 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.45.0-alpha.3 + image: ghcr.io/apollographql/router:v1.45.0-alpha.4 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index aa6a3ba964..9e13c1cd38 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.45.0-alpha.3 + image: ghcr.io/apollographql/router:v1.45.0-alpha.4 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/helm/chart/router/Chart.yaml 
b/helm/chart/router/Chart.yaml index 32c953da61..4a62722e74 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.45.0-alpha.3 +version: 1.45.0-alpha.4 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "v1.45.0-alpha.3" +appVersion: "v1.45.0-alpha.4" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index 224a038dce..49329ef0c3 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.45.0-alpha.3](https://img.shields.io/badge/Version-1.45.0--alpha.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0-alpha.3](https://img.shields.io/badge/AppVersion-v1.45.0--alpha.3-informational?style=flat-square) +![Version: 1.45.0-alpha.4](https://img.shields.io/badge/Version-1.45.0--alpha.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0-alpha.4](https://img.shields.io/badge/AppVersion-v1.45.0--alpha.4-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.3 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.4 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha. **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.3 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.4 --values my-values.yaml ``` _See [configuration](#configuration) below._ diff --git a/scripts/install.sh b/scripts/install.sh index e508f71e6d..30b945c7f0 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. -PACKAGE_VERSION="v1.45.0-alpha.3" +PACKAGE_VERSION="v1.45.0-alpha.4" download_binary() { downloader --check From 0adad6608c2f274448630880b025b29ac6942cb6 Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Fri, 19 Apr 2024 08:58:18 +0100 Subject: [PATCH 40/46] Add support for condition processing to subgraph batching (#4986) In certain use cases, ConditionNode will arrive in a batch and we need to decide how to deal with it. 
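For context, a ConditionNode typically appears in a query plan when an operation
uses `@skip` or `@include` with a variable argument. A minimal sketch of such an
operation (the operation and the `$withReviews` variable are illustrative, not
taken from this patch):

```graphql
query Product($withReviews: Boolean!) {
  product {
    name
    # This selection becomes a ConditionNode keyed on the "withReviews" variable.
    reviews @include(if: $withReviews) {
      body
    }
  }
}
```

With this change, `query_hashes` resolves the condition variable against the
request's variables and only descends into the branch that will actually
execute, defaulting to the if-branch when the variable cannot be resolved.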
--- apollo-router/src/query_planner/plan.rs | 36 +++++++++++++++---- .../src/services/supergraph/service.rs | 22 ++++++------ 2 files changed, 40 insertions(+), 18 deletions(-) diff --git a/apollo-router/src/query_planner/plan.rs b/apollo-router/src/query_planner/plan.rs index 3ff16b0f5d..4d0f76e37d 100644 --- a/apollo-router/src/query_planner/plan.rs +++ b/apollo-router/src/query_planner/plan.rs @@ -74,6 +74,14 @@ impl QueryPlan { None => false, } } + + pub(crate) fn query_hashes( + &self, + operation: Option<&str>, + variables: &Object, + ) -> Result>, CacheResolverError> { + self.root.query_hashes(operation, variables, &self.query) + } } /// Query plans are composed of a set of nodes. @@ -206,7 +214,12 @@ impl PlanNode { /// supported, but it may be that PlanNode::Condition must eventually be supported (or other /// new nodes types that are introduced). Explicitly fail each type to provide extra error /// details and don't use _ so that future node types must be handled here. - pub(crate) fn query_hashes(&self) -> Result>, CacheResolverError> { + pub(crate) fn query_hashes( + &self, + operation: Option<&str>, + variables: &Object, + query: &Query, + ) -> Result>, CacheResolverError> { let mut query_hashes = vec![]; let mut new_targets = vec![self]; @@ -241,11 +254,22 @@ impl PlanNode { .to_string(), )) } - PlanNode::Condition { .. } => { - return Err(CacheResolverError::BatchingError( - "unexpected condition node encountered during query_hash processing" - .to_string(), - )) + PlanNode::Condition { + if_clause, + else_clause, + condition, + } => { + if query + .variable_value(operation, condition.as_str(), variables) + .map(|v| *v == Value::Bool(true)) + .unwrap_or(true) + { + if let Some(node) = if_clause { + new_targets.push(node); + } + } else if let Some(node) = else_clause { + new_targets.push(node); + } } } } diff --git a/apollo-router/src/services/supergraph/service.rs b/apollo-router/src/services/supergraph/service.rs index 4ba4c85455..426256536b 100644 --- a/apollo-router/src/services/supergraph/service.rs +++ b/apollo-router/src/services/supergraph/service.rs @@ -250,6 +250,16 @@ async fn service_call( *response.response.status_mut() = StatusCode::NOT_ACCEPTABLE; return Ok(response); } + // Now perform query batch analysis + let batching = context.extensions().lock().get::().cloned(); + if let Some(batch_query) = batching { + let query_hashes = plan.query_hashes(operation_name.as_deref(), &variables)?; + batch_query + .set_query_hashes(query_hashes) + .await + .map_err(|e| CacheResolverError::BatchingError(e.to_string()))?; + tracing::debug!("batch registered: {}", batch_query); + } } let ClientRequestAccepts { @@ -632,18 +642,6 @@ async fn plan_query( )) .await?; - let batching = context.extensions().lock().get::().cloned(); - if let Some(batch_query) = batching { - if let Some(QueryPlannerContent::Plan { plan, .. 
}) = &qpr.content { - let query_hashes = plan.root.query_hashes()?; - batch_query - .set_query_hashes(query_hashes) - .await - .map_err(|e| CacheResolverError::BatchingError(e.to_string()))?; - tracing::debug!("batch registered: {}", batch_query); - } - } - Ok(qpr) } From c078e5777be408062a46f468090c419ca3d167d2 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Fri, 19 Apr 2024 11:10:19 +0300 Subject: [PATCH 41/46] Apply suggestions from code review Co-authored-by: Edward Huang --- .changesets/docs_press_finish_musket_reindeer.md | 7 +++++-- .changesets/exp_experimental_rust_apollo_reporting.md | 6 ++++-- .changesets/feat_feat_sha256_in_rhai.md | 5 +++-- .changesets/feat_garypen_2002_subgraph_batching.md | 4 ++-- .changesets/feat_geal_remove_legacy_validation.md | 8 ++++---- .changesets/fix_geal_coprocessor_metrics.md | 6 ++++-- .changesets/fix_njm_p_681_pr_tweaks.md | 4 ++-- .changesets/fix_watcher_raccoon_meat_crop.md | 5 +---- 8 files changed, 25 insertions(+), 20 deletions(-) diff --git a/.changesets/docs_press_finish_musket_reindeer.md b/.changesets/docs_press_finish_musket_reindeer.md index f345d072d5..c9473ad35c 100644 --- a/.changesets/docs_press_finish_musket_reindeer.md +++ b/.changesets/docs_press_finish_musket_reindeer.md @@ -1,5 +1,8 @@ -### [docs] Update caching with caveats ([PR #4872](https://github.com/apollographql/router/pull/4872)) +### Documentation updates for caching and metrics instruments ([PR #4872](https://github.com/apollographql/router/pull/4872)) -Add some words about what the operation cache intentions are +Router documentation has been updated for a couple topics: +- [Performance improvements vs. stability concerns](https://www.apollographql.com/docs/router/configuration/in-memory-caching#performance-improvements-vs-stability) when using the router's operation cache +- [Overview of standard and custom metrics instruments](https://www.apollographql.com/docs/router/configuration/telemetry/instrumentation/instruments) + By [@smyrick](https://github.com/smyrick) in https://github.com/apollographql/router/pull/4872 diff --git a/.changesets/exp_experimental_rust_apollo_reporting.md b/.changesets/exp_experimental_rust_apollo_reporting.md index b1f94ee146..a6702e7800 100644 --- a/.changesets/exp_experimental_rust_apollo_reporting.md +++ b/.changesets/exp_experimental_rust_apollo_reporting.md @@ -1,5 +1,7 @@ -### Experimental implementation of Apollo usage report field generation ([PR 4796](https://github.com/apollographql/router/pull/4796)) +### Experimental: Rust implementation of Apollo usage report field generation ([PR 4796](https://github.com/apollographql/router/pull/4796)) -This adds a new and experimental Rust implementation of the generation of the stats report key and referenced fields that are sent in Apollo usage reports, as part of the effort to replace the router-bridge with native Rust code. For now, we recommend that the `experimental_apollo_metrics_generation_mode` setting should be left at the default value while we confirm that it generates identical payloads to router-bridge. +The router supports a new experimental Rust implementation for generating the stats report keys and referenced fields that are sent in Apollo usage reports. This implementation is one part of the effort to replace the router-bridge with native Rust code. + +The feature is configured with the `experimental_apollo_metrics_generation_mode` setting. 
We recommend that you use its default value, so we can verify that it generates the same payloads as the previous implementation. By [@bonnici](https://github.com/bonnici) in https://github.com/apollographql/router/pull/4796 \ No newline at end of file diff --git a/.changesets/feat_feat_sha256_in_rhai.md b/.changesets/feat_feat_sha256_in_rhai.md index 5d7e15fe73..ec7deb63d1 100644 --- a/.changesets/feat_feat_sha256_in_rhai.md +++ b/.changesets/feat_feat_sha256_in_rhai.md @@ -1,6 +1,8 @@ ### Add support for SHA256 hashing in Rhai ([Issue #4939](https://github.com/apollographql/router/issues/4939)) -This adds a new `sha256` module to create SHA256 hashes within Rhai scripts. An example looks like: +The router supports a new `sha256` module to create SHA256 hashes in Rhai scripts. The module supports the `sha256::digest` function. + +An example script that uses the module: ```rs fn supergraph_service(service){ @@ -12,6 +14,5 @@ fn supergraph_service(service){ } ``` -The only function currently is `digest`. By [@lleadbet](https://github.com/lleadbet) in https://github.com/apollographql/router/pull/4940 diff --git a/.changesets/feat_garypen_2002_subgraph_batching.md b/.changesets/feat_garypen_2002_subgraph_batching.md index 7a275093b9..c109c29d72 100644 --- a/.changesets/feat_garypen_2002_subgraph_batching.md +++ b/.changesets/feat_garypen_2002_subgraph_batching.md @@ -26,7 +26,7 @@ batching: # Disable batching on all subgraphs all: enabled: false - # Configure(over-ride) batching support per subgraph + # Configure (override) batching support per subgraph subgraphs: subgraph_1: enabled: true @@ -34,7 +34,7 @@ batching: enabled: true ``` -Note: `all` may be over-ridden by `subgraphs`. This applies in general for all router subgraph configuration options. +Note: `all` can be overridden by `subgraphs`. This applies in general for all router subgraph configuration options. To learn more, see [query batching in Apollo docs](https://www.apollographql.com/docs/router/executing-operations/query-batching/). diff --git a/.changesets/feat_geal_remove_legacy_validation.md b/.changesets/feat_geal_remove_legacy_validation.md index 7ef2bcbd24..28240e4c71 100644 --- a/.changesets/feat_geal_remove_legacy_validation.md +++ b/.changesets/feat_geal_remove_legacy_validation.md @@ -1,9 +1,9 @@ -### Remove legacy validation ([PR #4551](https://github.com/apollographql/router/pull/4551)) +### Query validation process with Rust ([PR #4551](https://github.com/apollographql/router/pull/4551)) -GraphQL query validation was initially performed by the query planner in JavaScript, which caused some performance issues. Here, we are introducing a new Rust-based validation process using `apollo-compiler` from the `apollo-rs` project. This validation is also happening much earlier in the process, inside the "router service" instead of the query planner, which will reduce the load on the query planner and give back some room in the query planner cache. +The router has been updated with a new Rust-based query validation process using `apollo-compiler` from the `apollo-rs` project. It replaces the Javascript implementation in the query planner. It improves query planner performance by moving the validation out of the query planner and into the router service, which frees up space in the query planner cache. -Because validation now happens early, some error paths deeper inside the router will no longer be hit, causing observable differences in error messages. The new messages should be clearer and more useful. 
+Because validation now happens earlier in the router service and not in the query planner, error paths in the query planner are no longer encountered. The new error messages should be clearer. -This new validation process has been running in production for months concurrently with the JavaScript version, allowing us to detect and fix any discrepancies in the new implementation. We now have enough confidence in the new Rust-based validation to entirely switch off the less performant, JavaScript validation. +We've tested the new validation process by running it for months in production, concurrently with the JavaScript implementation, and have now completely transitioned to the Rust-based implementation. By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4551 diff --git a/.changesets/fix_geal_coprocessor_metrics.md b/.changesets/fix_geal_coprocessor_metrics.md index 03248ee1ba..f0f3848c02 100644 --- a/.changesets/fix_geal_coprocessor_metrics.md +++ b/.changesets/fix_geal_coprocessor_metrics.md @@ -1,5 +1,7 @@ -### align coprocessor metric creation ([PR #4930](https://github.com/apollographql/router/pull/4930)) +### Fix compatibility of coprocessor metric creation ([PR #4930](https://github.com/apollographql/router/pull/4930)) -There are currently 2 different ways to create metrics, with slight incompatibilities. This makes sure that the execution stage coprocessor metrics are generated in the same way as the other stages +Previously, the router's execution stage created coprocessor metrics differently than other stages. This produced metrics with slight incompatibilities. + +This release fixes the issue by creating coprocessor metrics in the same way as all other stages. By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4930 \ No newline at end of file diff --git a/.changesets/fix_njm_p_681_pr_tweaks.md b/.changesets/fix_njm_p_681_pr_tweaks.md index 841ce5a42f..dc0c779747 100644 --- a/.changesets/fix_njm_p_681_pr_tweaks.md +++ b/.changesets/fix_njm_p_681_pr_tweaks.md @@ -1,5 +1,5 @@ -### Performance tweaks of Apollo usage report field generation ([PR 4951](https://github.com/apollographql/router/pull/4951)) +### Performance improvements for Apollo usage report field generation ([PR 4951](https://github.com/apollographql/router/pull/4951)) -Improves performance of the Apollo usage report signature/stats key and referenced field generation. +The performance of generating Apollo usage report signatures, stats keys, and referenced fields has been improved. By [@bonnici](https://github.com/bonnici) in https://github.com/apollographql/router/pull/4951 \ No newline at end of file diff --git a/.changesets/fix_watcher_raccoon_meat_crop.md b/.changesets/fix_watcher_raccoon_meat_crop.md index 51ddebc8b0..a1b6299ba2 100644 --- a/.changesets/fix_watcher_raccoon_meat_crop.md +++ b/.changesets/fix_watcher_raccoon_meat_crop.md @@ -1,8 +1,5 @@ ### Apply alias rewrites to arrays ([PR #TODO](https://github.com/apollographql/router/pull/4958)) - -[#2489](https://github.com/apollographql/router/pull/2489) introduced automatic aliasing rules to support `@interfaceObject`. - -These rules now properly apply to lists. +The automatic aliasing rules introduced in [#2489](https://github.com/apollographql/router/pull/2489) to support `@interfaceObject` are now properly applied to lists. 
By [@o0ignition0o](https://github.com/o0ignition0o) in https://github.com/apollographql/router/pull/4958 \ No newline at end of file From 47ce0ca9c4b55e775d95955f12926c15c313cd1b Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Fri, 19 Apr 2024 11:11:12 +0300 Subject: [PATCH 42/46] Delete .changesets/maint_240417_proto_update.md --- .changesets/maint_240417_proto_update.md | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 .changesets/maint_240417_proto_update.md diff --git a/.changesets/maint_240417_proto_update.md b/.changesets/maint_240417_proto_update.md deleted file mode 100644 index e3050beba7..0000000000 --- a/.changesets/maint_240417_proto_update.md +++ /dev/null @@ -1,5 +0,0 @@ -### Updates the Apollo reporting protobuf to the latest version ([PR 4967](https://github.com/apollographql/router/pull/4967)) - -Updates the protobuf file and related snapshots, and removes code that used the deprecated field. - -By [@bonnici](https://github.com/bonnici) in https://github.com/apollographql/router/pull/4967 \ No newline at end of file From 953333a886a6ffb9f8f2800eff13e7d1307c0f63 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Fri, 19 Apr 2024 09:38:42 +0000 Subject: [PATCH 43/46] prep release: v1.45.0-rc.0 --- Cargo.lock | 6 +++--- apollo-router-benchmarks/Cargo.toml | 2 +- apollo-router-scaffold/Cargo.toml | 2 +- apollo-router-scaffold/templates/base/Cargo.toml | 2 +- apollo-router-scaffold/templates/base/xtask/Cargo.toml | 2 +- apollo-router/Cargo.toml | 2 +- dockerfiles/tracing/docker-compose.datadog.yml | 2 +- dockerfiles/tracing/docker-compose.jaeger.yml | 2 +- dockerfiles/tracing/docker-compose.zipkin.yml | 2 +- helm/chart/router/Chart.yaml | 4 ++-- helm/chart/router/README.md | 6 +++--- scripts/install.sh | 2 +- 12 files changed, 17 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 53ea744e53..5828d75808 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -250,7 +250,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.45.0-alpha.4" +version = "1.45.0-rc.0" dependencies = [ "access-json", "anyhow", @@ -411,7 +411,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.45.0-alpha.4" +version = "1.45.0-rc.0" dependencies = [ "apollo-parser", "apollo-router", @@ -427,7 +427,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.45.0-alpha.4" +version = "1.45.0-rc.0" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index 93941df80b..f145459c30 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.45.0-alpha.4" +version = "1.45.0-rc.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index b47d65298a..a64b85231e 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.45.0-alpha.4" +version = "1.45.0-rc.0" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index 57783a03b0..b172efc3d0 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.45.0-alpha.4" +apollo-router = "1.45.0-rc.0" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index ba557771bd..35ea21d458 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0-alpha.4" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0-rc.0" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 8e6df7a5d2..18b6eaa01b 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.45.0-alpha.4" +version = "1.45.0-rc.0" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index c7987a5c3e..0edcc36d09 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.45.0-alpha.4 + image: ghcr.io/apollographql/router:v1.45.0-rc.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 09036ba6f2..fc5a4cefd8 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.45.0-alpha.4 + image: ghcr.io/apollographql/router:v1.45.0-rc.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index 9e13c1cd38..33f44e3f05 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.45.0-alpha.4 + image: ghcr.io/apollographql/router:v1.45.0-rc.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/helm/chart/router/Chart.yaml 
b/helm/chart/router/Chart.yaml index 4a62722e74..1267b08f2b 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.45.0-alpha.4 +version: 1.45.0-rc.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "v1.45.0-alpha.4" +appVersion: "v1.45.0-rc.0" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index 49329ef0c3..2581185c02 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.45.0-alpha.4](https://img.shields.io/badge/Version-1.45.0--alpha.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0-alpha.4](https://img.shields.io/badge/AppVersion-v1.45.0--alpha.4-informational?style=flat-square) +![Version: 1.45.0-rc.0](https://img.shields.io/badge/Version-1.45.0--rc.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0-rc.0](https://img.shields.io/badge/AppVersion-v1.45.0--rc.0-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.4 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-rc.0 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha. **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-alpha.4 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-rc.0 --values my-values.yaml ``` _See [configuration](#configuration) below._ diff --git a/scripts/install.sh b/scripts/install.sh index 30b945c7f0..8562c919fa 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. -PACKAGE_VERSION="v1.45.0-alpha.4" +PACKAGE_VERSION="v1.45.0-rc.0" download_binary() { downloader --check From 8cfb485177dd0196932e70fdd5e27520fb1cb9f1 Mon Sep 17 00:00:00 2001 From: Taylor Ninesling Date: Sat, 20 Apr 2024 06:18:20 -0500 Subject: [PATCH 44/46] chore(deps,security): update rustls to v0.21.11 (#4993) While the Router does use `rustls`, [RUSTSEC-2024-0336] (also known as [CVE-2024-32650] and [GHSA-6g7w-8wpp-frhj]) DOES NOT affect the Router since it uses `tokio-rustls` which is specifically called out in the advisory as unaffected. Despite the lack of impact, we update `rustls` version v0.21.10 to [rustls v0.21.11] which includes a patch. 
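For anyone auditing their own build, one way to confirm which `rustls` version
ends up in the dependency graph (a standard Cargo command, shown here as a
sketch rather than anything taken from this patch):

```console
cargo tree --invert rustls
```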
[RUSTSEC-2024-0336]: https://rustsec.org/advisories/RUSTSEC-2024-0336.html [CVE-2024-32650]: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-32650 [GHSA-6g7w-8wpp-frhj]: https://github.com/advisories/GHSA-6g7w-8wpp-frhj [rustls v0.21.11]: https://github.com/rustls/rustls/releases/tag/v%2F0.21.11 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5828d75808..d9d6a5b0ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5889,9 +5889,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.10" +version = "0.21.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" dependencies = [ "log", "ring 0.17.5", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 18b6eaa01b..91f85dcfb2 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -183,7 +183,7 @@ reqwest.workspace = true # note: this dependency should _always_ be pinned, prefix the version with an `=` router-bridge = "=0.5.18+v2.7.2" rust-embed = "8.2.0" -rustls = "0.21.10" +rustls = "0.21.11" rustls-native-certs = "0.6.3" rustls-pemfile = "1.0.4" schemars.workspace = true From 1e2d8615286d8cc04be7ae6efd3f0b8ef98c5ba8 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Mon, 22 Apr 2024 09:09:14 +0000 Subject: [PATCH 45/46] prep release: v1.45.0-rc.1 --- Cargo.lock | 6 +++--- apollo-router-benchmarks/Cargo.toml | 2 +- apollo-router-scaffold/Cargo.toml | 2 +- apollo-router-scaffold/templates/base/Cargo.toml | 2 +- apollo-router-scaffold/templates/base/xtask/Cargo.toml | 2 +- apollo-router/Cargo.toml | 2 +- dockerfiles/tracing/docker-compose.datadog.yml | 2 +- dockerfiles/tracing/docker-compose.jaeger.yml | 2 +- dockerfiles/tracing/docker-compose.zipkin.yml | 2 +- helm/chart/router/Chart.yaml | 4 ++-- helm/chart/router/README.md | 6 +++--- scripts/install.sh | 2 +- 12 files changed, 17 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d9d6a5b0ca..b2f1e852c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -250,7 +250,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.45.0-rc.0" +version = "1.45.0-rc.1" dependencies = [ "access-json", "anyhow", @@ -411,7 +411,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.45.0-rc.0" +version = "1.45.0-rc.1" dependencies = [ "apollo-parser", "apollo-router", @@ -427,7 +427,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.45.0-rc.0" +version = "1.45.0-rc.1" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index f145459c30..adb00747e7 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.45.0-rc.0" +version = "1.45.0-rc.1" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index a64b85231e..fc7b8fb48f 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.45.0-rc.0" +version = "1.45.0-rc.1" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index b172efc3d0..8e53bd688c 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.45.0-rc.0" +apollo-router = "1.45.0-rc.1" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index 35ea21d458..00ad71d548 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0-rc.0" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0-rc.1" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 91f85dcfb2..5750b53b97 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.45.0-rc.0" +version = "1.45.0-rc.1" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index 0edcc36d09..d1e00e411e 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.45.0-rc.0 + image: ghcr.io/apollographql/router:v1.45.0-rc.1 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index fc5a4cefd8..39fe2ea650 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.45.0-rc.0 + image: ghcr.io/apollographql/router:v1.45.0-rc.1 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index 33f44e3f05..56e0bdf9c9 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.45.0-rc.0 + image: ghcr.io/apollographql/router:v1.45.0-rc.1 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index 
1267b08f2b..892a1c3a29 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.45.0-rc.0 +version: 1.45.0-rc.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "v1.45.0-rc.0" +appVersion: "v1.45.0-rc.1" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index 2581185c02..ecfd55452d 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.45.0-rc.0](https://img.shields.io/badge/Version-1.45.0--rc.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0-rc.0](https://img.shields.io/badge/AppVersion-v1.45.0--rc.0-informational?style=flat-square) +![Version: 1.45.0-rc.1](https://img.shields.io/badge/Version-1.45.0--rc.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0-rc.1](https://img.shields.io/badge/AppVersion-v1.45.0--rc.1-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-rc.0 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-rc.1 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-rc.0 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-rc.0 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-rc.1 --values my-values.yaml ``` _See [configuration](#configuration) below._ diff --git a/scripts/install.sh b/scripts/install.sh index 8562c919fa..6387030707 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. 
-PACKAGE_VERSION="v1.45.0-rc.0" +PACKAGE_VERSION="v1.45.0-rc.1" download_binary() { downloader --check From 3ab962d2511946715bdb02e2c62f73616e8da1e0 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Mon, 22 Apr 2024 15:29:15 +0300 Subject: [PATCH 46/46] prep release: v1.45.0 --- .../docs_press_finish_musket_reindeer.md | 8 - .../exp_carton_ginger_magnet_beacon.md | 20 --- .../exp_experimental_rust_apollo_reporting.md | 7 - .changesets/feat_feat_sha256_in_rhai.md | 18 --- .../feat_garypen_2002_subgraph_batching.md | 41 ----- .../feat_geal_remove_legacy_validation.md | 9 -- .changesets/fix_geal_coprocessor_metrics.md | 7 - .changesets/fix_njm_p_681_pr_tweaks.md | 5 - .changesets/fix_watcher_raccoon_meat_crop.md | 5 - CHANGELOG.md | 148 ++++++++++++++++++ Cargo.lock | 6 +- apollo-router-benchmarks/Cargo.toml | 2 +- apollo-router-scaffold/Cargo.toml | 2 +- .../templates/base/Cargo.toml | 2 +- .../templates/base/xtask/Cargo.toml | 2 +- apollo-router/Cargo.toml | 2 +- .../tracing/docker-compose.datadog.yml | 2 +- dockerfiles/tracing/docker-compose.jaeger.yml | 2 +- dockerfiles/tracing/docker-compose.zipkin.yml | 2 +- docs/source/federation-version-support.mdx | 10 +- helm/chart/router/Chart.yaml | 4 +- helm/chart/router/README.md | 6 +- scripts/install.sh | 2 +- 23 files changed, 174 insertions(+), 138 deletions(-) delete mode 100644 .changesets/docs_press_finish_musket_reindeer.md delete mode 100644 .changesets/exp_carton_ginger_magnet_beacon.md delete mode 100644 .changesets/exp_experimental_rust_apollo_reporting.md delete mode 100644 .changesets/feat_feat_sha256_in_rhai.md delete mode 100644 .changesets/feat_garypen_2002_subgraph_batching.md delete mode 100644 .changesets/feat_geal_remove_legacy_validation.md delete mode 100644 .changesets/fix_geal_coprocessor_metrics.md delete mode 100644 .changesets/fix_njm_p_681_pr_tweaks.md delete mode 100644 .changesets/fix_watcher_raccoon_meat_crop.md diff --git a/.changesets/docs_press_finish_musket_reindeer.md b/.changesets/docs_press_finish_musket_reindeer.md deleted file mode 100644 index c9473ad35c..0000000000 --- a/.changesets/docs_press_finish_musket_reindeer.md +++ /dev/null @@ -1,8 +0,0 @@ -### Documentation updates for caching and metrics instruments ([PR #4872](https://github.com/apollographql/router/pull/4872)) - -Router documentation has been updated for a couple topics: -- [Performance improvements vs. stability concerns](https://www.apollographql.com/docs/router/configuration/in-memory-caching#performance-improvements-vs-stability) when using the router's operation cache -- [Overview of standard and custom metrics instruments](https://www.apollographql.com/docs/router/configuration/telemetry/instrumentation/instruments) - - -By [@smyrick](https://github.com/smyrick) in https://github.com/apollographql/router/pull/4872 diff --git a/.changesets/exp_carton_ginger_magnet_beacon.md b/.changesets/exp_carton_ginger_magnet_beacon.md deleted file mode 100644 index 3a9981ed52..0000000000 --- a/.changesets/exp_carton_ginger_magnet_beacon.md +++ /dev/null @@ -1,20 +0,0 @@ -### Experimental: Introduce a pool of query planners ([PR #4897](https://github.com/apollographql/router/pull/4897)) - -The router supports a new experimental feature: a pool of query planners to parallelize query planning. 
- -You can configure query planner pools with the `supergraph.query_planning.experimental_parallelism` option: - -```yaml -supergraph: - query_planning: - experimental_parallelism: auto # number of available cpus -``` - -Its value is the number of query planners that run in parallel, and its default value is `1`. You can set it to the -special value `auto` to automatically set it equal to the number of available CPUs. - -You can discuss and comment about query planner pools in -this [GitHub discussion](https://github.com/apollographql/router/discussions/4917). - -By [@xuorig](https://github.com/xuorig) and [@o0Ignition0o](https://github.com/o0Ignition0o) -in https://github.com/apollographql/router/pull/4897 diff --git a/.changesets/exp_experimental_rust_apollo_reporting.md b/.changesets/exp_experimental_rust_apollo_reporting.md deleted file mode 100644 index a6702e7800..0000000000 --- a/.changesets/exp_experimental_rust_apollo_reporting.md +++ /dev/null @@ -1,7 +0,0 @@ -### Experimental: Rust implementation of Apollo usage report field generation ([PR 4796](https://github.com/apollographql/router/pull/4796)) - -The router supports a new experimental Rust implementation for generating the stats report keys and referenced fields that are sent in Apollo usage reports. This implementation is one part of the effort to replace the router-bridge with native Rust code. - -The feature is configured with the `experimental_apollo_metrics_generation_mode` setting. We recommend that you use its default value, so we can verify that it generates the same payloads as the previous implementation. - -By [@bonnici](https://github.com/bonnici) in https://github.com/apollographql/router/pull/4796 \ No newline at end of file diff --git a/.changesets/feat_feat_sha256_in_rhai.md b/.changesets/feat_feat_sha256_in_rhai.md deleted file mode 100644 index ec7deb63d1..0000000000 --- a/.changesets/feat_feat_sha256_in_rhai.md +++ /dev/null @@ -1,18 +0,0 @@ -### Add support for SHA256 hashing in Rhai ([Issue #4939](https://github.com/apollographql/router/issues/4939)) - -The router supports a new `sha256` module to create SHA256 hashes in Rhai scripts. The module supports the `sha256::digest` function. - -An example script that uses the module: - -```rs -fn supergraph_service(service){ - service.map_request(|request|{ - log_info("hello world"); - let sha = sha256::digest("hello world"); - log_info(sha); - }); -} -``` - - -By [@lleadbet](https://github.com/lleadbet) in https://github.com/apollographql/router/pull/4940 diff --git a/.changesets/feat_garypen_2002_subgraph_batching.md b/.changesets/feat_garypen_2002_subgraph_batching.md deleted file mode 100644 index c109c29d72..0000000000 --- a/.changesets/feat_garypen_2002_subgraph_batching.md +++ /dev/null @@ -1,41 +0,0 @@ -### Subgraph support for query batching ([Issue #2002](https://github.com/apollographql/router/issues/2002)) - -As an extension to the ongoing work to support [client-side query batching in the router](https://github.com/apollographql/router/issues/126), the router now supports batching of subgraph requests. Each subgraph batch request retains the same external format as a client batch request. This optimization reduces the number of round-trip requests from the router to subgraphs. - -Also, batching in the router is now a generally available feature: the `experimental_batching` router configuration option has been deprecated and is replaced by the `batching` option. 
- -Previously, the router preserved the concept of a batch until a `RouterRequest` finished processing. From that point, the router converted each batch request item into a separate `SupergraphRequest`, and the router planned and executed those requests concurrently within the router, then reassembled them into a batch of `RouterResponse` to return to the client. Now with the implementation in this release, the concept of a batch is extended so that batches are issued to configured subgraphs (all or named). Each batch request item is planned and executed separately, but the queries issued to subgraphs are optimally assembled into batches which observe the query constraints of the various batch items. - -To configure subgraph batching, you can enable `batching.subgraph.all` for all subgraphs. You can also enable batching per subgraph with `batching.subgraph.subgraphs.*`. For example: - -```yaml -batching: - enabled: true - mode: batch_http_link - subgraph: - # Enable batching on all subgraphs - all: - enabled: true -``` - -```yaml -batching: - enabled: true - mode: batch_http_link - subgraph: - # Disable batching on all subgraphs - all: - enabled: false - # Configure (override) batching support per subgraph - subgraphs: - subgraph_1: - enabled: true - subgraph_2: - enabled: true -``` - -Note: `all` can be overridden by `subgraphs`. This applies in general for all router subgraph configuration options. - -To learn more, see [query batching in Apollo docs](https://www.apollographql.com/docs/router/executing-operations/query-batching/). - -By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/4661 diff --git a/.changesets/feat_geal_remove_legacy_validation.md b/.changesets/feat_geal_remove_legacy_validation.md deleted file mode 100644 index 28240e4c71..0000000000 --- a/.changesets/feat_geal_remove_legacy_validation.md +++ /dev/null @@ -1,9 +0,0 @@ -### Query validation process with Rust ([PR #4551](https://github.com/apollographql/router/pull/4551)) - -The router has been updated with a new Rust-based query validation process using `apollo-compiler` from the `apollo-rs` project. It replaces the Javascript implementation in the query planner. It improves query planner performance by moving the validation out of the query planner and into the router service, which frees up space in the query planner cache. - -Because validation now happens earlier in the router service and not in the query planner, error paths in the query planner are no longer encountered. The new error messages should be clearer. - -We've tested the new validation process by running it for months in production, concurrently with the JavaScript implementation, and have now completely transitioned to the Rust-based implementation. - -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4551 diff --git a/.changesets/fix_geal_coprocessor_metrics.md b/.changesets/fix_geal_coprocessor_metrics.md deleted file mode 100644 index f0f3848c02..0000000000 --- a/.changesets/fix_geal_coprocessor_metrics.md +++ /dev/null @@ -1,7 +0,0 @@ -### Fix compatibility of coprocessor metric creation ([PR #4930](https://github.com/apollographql/router/pull/4930)) - -Previously, the router's execution stage created coprocessor metrics differently than other stages. This produced metrics with slight incompatibilities. - -This release fixes the issue by creating coprocessor metrics in the same way as all other stages. 
-
-By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4930
\ No newline at end of file
diff --git a/.changesets/fix_njm_p_681_pr_tweaks.md b/.changesets/fix_njm_p_681_pr_tweaks.md
deleted file mode 100644
index dc0c779747..0000000000
--- a/.changesets/fix_njm_p_681_pr_tweaks.md
+++ /dev/null
@@ -1,5 +0,0 @@
-### Performance improvements for Apollo usage report field generation ([PR 4951](https://github.com/apollographql/router/pull/4951))
-
-The performance of generating Apollo usage report signatures, stats keys, and referenced fields has been improved.
-
-By [@bonnici](https://github.com/bonnici) in https://github.com/apollographql/router/pull/4951
\ No newline at end of file
diff --git a/.changesets/fix_watcher_raccoon_meat_crop.md b/.changesets/fix_watcher_raccoon_meat_crop.md
deleted file mode 100644
index a1b6299ba2..0000000000
--- a/.changesets/fix_watcher_raccoon_meat_crop.md
+++ /dev/null
@@ -1,5 +0,0 @@
-### Apply alias rewrites to arrays ([PR #TODO](https://github.com/apollographql/router/pull/4958))
-
-The automatic aliasing rules introduced in [#2489](https://github.com/apollographql/router/pull/2489) to support `@interfaceObject` are now properly applied to lists.
-
-By [@o0ignition0o](https://github.com/o0ignition0o) in https://github.com/apollographql/router/pull/4958
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 09891b855c..ef575a10b5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,154 @@ All notable changes to Router will be documented in this file.
 
 This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html).
 
+# [1.45.0] - 2024-04-22
+
+## ๐Ÿš€ Features
+
+### Query validation process with Rust ([PR #4551](https://github.com/apollographql/router/pull/4551))
+
+The router has been updated with a new Rust-based query validation process using `apollo-compiler` from the `apollo-rs` project. It replaces the JavaScript implementation in the query planner. The change improves query planner performance by moving validation out of the query planner and into the router service, which frees up space in the query planner cache.
+
+Because validation now happens earlier in the router service and not in the query planner, error paths in the query planner are no longer encountered. Some messages in error responses returned from invalid queries should now be clearer.
+
+We've tested the new validation process by running it for months in production, concurrently with the JavaScript implementation, and have now completely transitioned to the Rust-based implementation.
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4551
+
+### Add support for SHA256 hashing in Rhai ([Issue #4939](https://github.com/apollographql/router/issues/4939))
+
+The router supports a new `sha256` module to create SHA256 hashes in Rhai scripts. The module supports the `sha256::digest` function.
+An example script that uses the module:
+
+```rs
+fn supergraph_service(service){
+    service.map_request(|request|{
+        log_info("hello world");
+        let sha = sha256::digest("hello world");
+        log_info(sha);
+    });
+}
+```
+
+
+By [@lleadbet](https://github.com/lleadbet) in https://github.com/apollographql/router/pull/4940
+
+### Subgraph support for query batching ([Issue #2002](https://github.com/apollographql/router/issues/2002))
+
+As an extension to the ongoing work to support [client-side query batching in the router](https://github.com/apollographql/router/issues/126), the router now supports batching of subgraph requests. Each subgraph batch request retains the same external format as a client batch request. This optimization reduces the number of round-trip requests from the router to subgraphs.
+
+Also, batching in the router is now a generally available feature: the `experimental_batching` router configuration option has been deprecated and is replaced by the `batching` option.
+
+Previously, the router preserved the concept of a batch until a `RouterRequest` finished processing. From that point, the router converted each batch request item into a separate `SupergraphRequest`, planned and executed those requests concurrently, then reassembled them into a batch of `RouterResponse` to return to the client. With the implementation in this release, the concept of a batch is extended so that batches are issued to configured subgraphs (all or named). Each batch request item is planned and executed separately, but the queries issued to subgraphs are optimally assembled into batches that observe the query constraints of the various batch items.
+
+To configure subgraph batching, you can enable `batching.subgraph.all` for all subgraphs. You can also enable batching per subgraph with `batching.subgraph.subgraphs.*`. For example:
+
+```yaml
+batching:
+  enabled: true
+  mode: batch_http_link
+  subgraph:
+    # Enable batching on all subgraphs
+    all:
+      enabled: true
+```
+
+```yaml
+batching:
+  enabled: true
+  mode: batch_http_link
+  subgraph:
+    # Disable batching on all subgraphs
+    all:
+      enabled: false
+    # Configure (override) batching support per subgraph
+    subgraphs:
+      subgraph_1:
+        enabled: true
+      subgraph_2:
+        enabled: true
+```
+
+Note: `all` can be overridden by `subgraphs`. This applies in general to all router subgraph configuration options.
+
+To learn more, see [query batching in Apollo docs](https://www.apollographql.com/docs/router/executing-operations/query-batching/).
+
+By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/4661
+
+## ๐Ÿ› Fixes
+
+### Update `rustls` to v0.21.11, the latest v0.21.x patch ([PR #4993](https://github.com/apollographql/router/pull/4993))
+
+While the Router **does** use `rustls`, [RUSTSEC-2024-0336] (also known as [CVE-2024-32650] and [GHSA-6g7w-8wpp-frhj]) **DOES NOT affect the Router**, since the Router uses `tokio-rustls`, which is specifically called out in the advisory as **unaffected**.
+
+Despite the lack of impact, we have updated `rustls` from v0.21.10 to [rustls v0.21.11], which includes the patch for the advisory.
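To double-check which `rustls` version a router build actually resolves, one option is cargo's inverted dependency tree — a verification sketch, assuming a local checkout of the repository:

```console
# From the repository root: list the crates that depend on rustls,
# along with the rustls version pinned in Cargo.lock.
cargo tree -i rustls
```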
+[RUSTSEC-2024-0336]: https://rustsec.org/advisories/RUSTSEC-2024-0336.html
+[CVE-2024-32650]: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-32650
+[GHSA-6g7w-8wpp-frhj]: https://github.com/advisories/GHSA-6g7w-8wpp-frhj
+[rustls v0.21.11]: https://github.com/rustls/rustls/releases/tag/v%2F0.21.11
+
+By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/4993
+
+### Performance improvements for Apollo usage report field generation ([PR #4951](https://github.com/apollographql/router/pull/4951))
+
+The performance of generating Apollo usage report signatures, stats keys, and referenced fields has been improved.
+
+By [@bonnici](https://github.com/bonnici) in https://github.com/apollographql/router/pull/4951
+
+### Apply alias rewrites to arrays ([PR #4958](https://github.com/apollographql/router/pull/4958))
+
+The automatic aliasing rules introduced in [#2489](https://github.com/apollographql/router/pull/2489) to support `@interfaceObject` are now properly applied to lists.
+
+By [@o0ignition0o](https://github.com/o0ignition0o) in https://github.com/apollographql/router/pull/4958
+
+### Fix compatibility of coprocessor metric creation ([PR #4930](https://github.com/apollographql/router/pull/4930))
+
+Previously, the router's execution stage created coprocessor metrics differently than the other stages did, which produced metrics with slight incompatibilities.
+
+This release fixes the issue by creating coprocessor metrics in the same way as all other stages.
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/4930
+
+## ๐Ÿ“š Documentation
+
+### Documentation updates for caching and metrics instruments ([PR #4872](https://github.com/apollographql/router/pull/4872))
+
+Router documentation has been updated for a couple of topics:
+- [Performance improvements vs. stability concerns](https://www.apollographql.com/docs/router/configuration/in-memory-caching#performance-improvements-vs-stability) when using the router's operation cache
+- [Overview of standard and custom metrics instruments](https://www.apollographql.com/docs/router/configuration/telemetry/instrumentation/instruments)
+
+By [@smyrick](https://github.com/smyrick) in https://github.com/apollographql/router/pull/4872
+
+## ๐Ÿงช Experimental
+
+### Experimental: Introduce a pool of query planners ([PR #4897](https://github.com/apollographql/router/pull/4897))
+
+The router supports a new experimental feature: a pool of query planners to parallelize query planning.
+
+You can configure query planner pools with the `supergraph.query_planning.experimental_parallelism` option:
+
+```yaml
+supergraph:
+  query_planning:
+    experimental_parallelism: auto # number of available CPUs
+```
+
+Its value is the number of query planners that run in parallel, and its default value is `1`. You can set it to the special value `auto` to automatically set it equal to the number of available CPUs.
+
+You can discuss and comment on query planner pools in this [GitHub discussion](https://github.com/apollographql/router/discussions/4917).
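In addition to `auto`, the option accepts an explicit pool size, per the description above. A small sketch with a fixed value — the number `4` is illustrative only, not a recommendation:

```yaml
supergraph:
  query_planning:
    experimental_parallelism: 4 # fixed pool of four query planners
```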
+ +By [@xuorig](https://github.com/xuorig) and [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/4897 + +### Experimental: Rust implementation of Apollo usage report field generation ([PR 4796](https://github.com/apollographql/router/pull/4796)) + +The router supports a new experimental Rust implementation for generating the stats report keys and referenced fields that are sent in Apollo usage reports. This implementation is one part of the effort to replace the router-bridge with native Rust code. + +The feature is configured with the `experimental_apollo_metrics_generation_mode` setting. We recommend that you use its default value, so we can verify that it generates the same payloads as the previous implementation. + +By [@bonnici](https://github.com/bonnici) in https://github.com/apollographql/router/pull/4796 + # [1.44.0] - 2024-04-12 ## ๐Ÿš€ Features diff --git a/Cargo.lock b/Cargo.lock index b2f1e852c7..156bf8d353 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -250,7 +250,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.45.0-rc.1" +version = "1.45.0" dependencies = [ "access-json", "anyhow", @@ -411,7 +411,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.45.0-rc.1" +version = "1.45.0" dependencies = [ "apollo-parser", "apollo-router", @@ -427,7 +427,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.45.0-rc.1" +version = "1.45.0" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index adb00747e7..f33eea0f89 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.45.0-rc.1" +version = "1.45.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index fc7b8fb48f..e7772cd9a1 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.45.0-rc.1" +version = "1.45.0" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index 8e53bd688c..47ff9108a6 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.45.0-rc.1" +apollo-router = "1.45.0" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index 00ad71d548..bdace89219 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0-rc.1" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 5750b53b97..8070eee9ee 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.45.0-rc.1" +version = "1.45.0" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index d1e00e411e..f8558665b5 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.45.0-rc.1 + image: ghcr.io/apollographql/router:v1.45.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 39fe2ea650..6d224643a0 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.45.0-rc.1 + image: ghcr.io/apollographql/router:v1.45.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index 56e0bdf9c9..8dbfeb308f 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.45.0-rc.1 + image: ghcr.io/apollographql/router:v1.45.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/docs/source/federation-version-support.mdx b/docs/source/federation-version-support.mdx index 
dc40a79af5..7e44a9dee6 100644 --- a/docs/source/federation-version-support.mdx +++ b/docs/source/federation-version-support.mdx @@ -35,7 +35,15 @@ The table below shows which version of federation each router release is compile - v1.39.0 and later (see latest releases) + v1.45.0 and later (see latest releases) + + + 2.7.2 + + + + + v1.39.0 - v1.44.0 2.7.1 diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index 892a1c3a29..f5080d2a41 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.45.0-rc.1 +version: 1.45.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "v1.45.0-rc.1" +appVersion: "v1.45.0" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index ecfd55452d..bb2f7a13b7 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.45.0-rc.1](https://img.shields.io/badge/Version-1.45.0--rc.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0-rc.1](https://img.shields.io/badge/AppVersion-v1.45.0--rc.1-informational?style=flat-square) +![Version: 1.45.0](https://img.shields.io/badge/Version-1.45.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0](https://img.shields.io/badge/AppVersion-v1.45.0-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-rc.1 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-rc.1 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0-rc.1 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0 --values my-values.yaml ``` _See [configuration](#configuration) below._ diff --git a/scripts/install.sh b/scripts/install.sh index 6387030707..8f5c6c2fac 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. -PACKAGE_VERSION="v1.45.0-rc.1" +PACKAGE_VERSION="v1.45.0" download_binary() { downloader --check