diff --git a/.cargo/config.toml b/.cargo/config.toml index d9af243cb8..306074c6f0 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,3 +1,7 @@ [alias] xtask = "run --locked --package xtask --manifest-path xtask/Cargo.toml --" fed = "run -p apollo-federation-cli --" + +[profile.profiling] +inherits = "release" +debug = true diff --git a/.changesets/docs_eh_docs_router_renaming_conventions.md b/.changesets/docs_eh_docs_router_renaming_conventions.md deleted file mode 100644 index 6f3390317e..0000000000 --- a/.changesets/docs_eh_docs_router_renaming_conventions.md +++ /dev/null @@ -1,8 +0,0 @@ -### Update router naming conventions ([PR #5400](https://github.com/apollographql/router/pull/5400)) - -Renames our router product to distinguish between our non-commercial and commercial offerings. Instead of referring to the **Apollo Router**, we now refer to the following: -- **Apollo Router Core** is Apollo’s free-and-open (ELv2 licensed) implementation of a routing runtime for supergraphs. -- **GraphOS Router** is based on the Apollo Router Core and fully integrated with GraphOS. GraphOS Routers provide access to GraphOS’s commercial runtime features. - - -By [@shorgi](https://github.com/shorgi) in https://github.com/apollographql/router/pull/5400 diff --git a/.changesets/maint_garypen_modify_batch_for_tracing.md b/.changesets/maint_garypen_modify_batch_for_tracing.md deleted file mode 100644 index 145fb07f40..0000000000 --- a/.changesets/maint_garypen_modify_batch_for_tracing.md +++ /dev/null @@ -1,14 +0,0 @@ -### Improve testing by avoiding cache effects and redacting tracing details ([PR #5638](https://github.com/apollographql/router/pull/5638)) - -We've had some problems with flaky tests and this PR addresses some of them. - -The router executes in parallel and concurrently. Many of our tests use snapshots to try and make assertions that functionality is continuing to work correctly. Unfortunately, concurrent/parallel execution and static snapshots don't co-operate very well. Results may appear in pseudo-random order (compared to snapshot expectations) and so tests become flaky and fail without obvious cause. - -The problem becomes particularly acute with features which are specifically designed for highly concurrent operation, such as batching. - -This set of changes addresses some of the router testing problems by: - -1. Making items in a batch test different enough that caching effects are avoided. -2. Redacting various details so that sequencing is not as much of an issue in the otel traces tests. 
- -By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/5638 \ No newline at end of file diff --git a/.config/nextest.toml b/.config/nextest.toml index cff09c8ef6..9deb55fd49 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -14,28 +14,60 @@ retries = 2 filter = ''' ( binary_id(=apollo-router) & test(=axum_factory::axum_http_server_factory::tests::request_cancel_log) ) or ( binary_id(=apollo-router) & test(=axum_factory::axum_http_server_factory::tests::request_cancel_no_log) ) +or ( binary_id(=apollo-router) & test(=axum_factory::tests::cors_origin_default) ) +or ( binary_id(=apollo-router) & test(=axum_factory::tests::cors_origin_list) ) +or ( binary_id(=apollo-router) & test(=axum_factory::tests::cors_origin_regex) ) +or ( binary_id(=apollo-router) & test(=axum_factory::tests::it_answers_to_custom_endpoint) ) +or ( binary_id(=apollo-router) & test(=axum_factory::tests::it_compress_response_body) ) +or ( binary_id(=apollo-router) & test(=axum_factory::tests::response) ) +or ( binary_id(=apollo-router) & test(=axum_factory::tests::response_with_custom_endpoint) ) +or ( binary_id(=apollo-router) & test(=axum_factory::tests::response_with_custom_endpoint_wildcard) ) +or ( binary_id(=apollo-router) & test(=axum_factory::tests::response_with_custom_prefix_endpoint) ) +or ( binary_id(=apollo-router) & test(=axum_factory::tests::response_with_root_wildcard) ) or ( binary_id(=apollo-router) & test(=notification::tests::it_test_ttl) ) +or ( binary_id(=apollo-router) & test(=plugins::authentication::subgraph::test::test_credentials_provider_refresh_on_stale) ) +or ( binary_id(=apollo-router) & test(=plugins::telemetry::config_new::instruments::tests::test_instruments) ) or ( binary_id(=apollo-router) & test(=plugins::telemetry::metrics::apollo::test::apollo_metrics_enabled) ) or ( binary_id(=apollo-router) & test(=plugins::telemetry::tests::it_test_prometheus_metrics) ) +or ( binary_id(=apollo-router) & test(=router::tests::basic_event_stream_test) ) +or ( binary_id(=apollo-router) & test(=router::tests::schema_update_test) ) or ( binary_id(=apollo-router) & test(=services::subgraph_service::tests::test_subgraph_service_websocket_with_error) ) or ( binary_id(=apollo-router) & test(=uplink::license_stream::test::license_expander_claim_pause_claim) ) or ( binary_id(=apollo-router) & test(=uplink::persisted_queries_manifest_stream::test::integration_test) ) +or ( binary_id(=apollo-router) & test(=uplink::schema_stream::test::integration_test) ) or ( binary_id(=apollo-router-benchmarks) & test(=tests::test) ) +or ( binary_id(=apollo-router::apollo_otel_traces) & test(=non_defer) ) or ( binary_id(=apollo-router::apollo_otel_traces) & test(=test_batch_send_header) ) or ( binary_id(=apollo-router::apollo_otel_traces) & test(=test_batch_trace_id) ) +or ( binary_id(=apollo-router::apollo_otel_traces) & test(=test_client_name) ) +or ( binary_id(=apollo-router::apollo_otel_traces) & test(=test_client_version) ) +or ( binary_id(=apollo-router::apollo_otel_traces) & test(=test_condition_else) ) or ( binary_id(=apollo-router::apollo_otel_traces) & test(=test_condition_if) ) +or ( binary_id(=apollo-router::apollo_otel_traces) & test(=test_send_header) ) +or ( binary_id(=apollo-router::apollo_otel_traces) & test(=test_send_variable_value) ) or ( binary_id(=apollo-router::apollo_otel_traces) & test(=test_trace_id) ) or ( binary_id(=apollo-router::apollo_reports) & test(=non_defer) ) +or ( binary_id(=apollo-router::apollo_reports) & test(=test_batch_send_header) ) 
or ( binary_id(=apollo-router::apollo_reports) & test(=test_batch_stats) ) +or ( binary_id(=apollo-router::apollo_reports) & test(=test_batch_trace_id) ) or ( binary_id(=apollo-router::apollo_reports) & test(=test_client_name) ) or ( binary_id(=apollo-router::apollo_reports) & test(=test_client_version) ) +or ( binary_id(=apollo-router::apollo_reports) & test(=test_condition_else) ) or ( binary_id(=apollo-router::apollo_reports) & test(=test_condition_if) ) +or ( binary_id(=apollo-router::apollo_reports) & test(=test_demand_control_stats) ) +or ( binary_id(=apollo-router::apollo_reports) & test(=test_demand_control_trace) ) +or ( binary_id(=apollo-router::apollo_reports) & test(=test_demand_control_trace_batched) ) +or ( binary_id(=apollo-router::apollo_reports) & test(=test_new_field_stats) ) or ( binary_id(=apollo-router::apollo_reports) & test(=test_send_header) ) +or ( binary_id(=apollo-router::apollo_reports) & test(=test_send_variable_value) ) +or ( binary_id(=apollo-router::apollo_reports) & test(=test_stats) ) or ( binary_id(=apollo-router::apollo_reports) & test(=test_trace_id) ) or ( binary_id(=apollo-router::integration_tests) & test(=api_schema_hides_field) ) or ( binary_id(=apollo-router::integration_tests) & test(=automated_persisted_queries) ) or ( binary_id(=apollo-router::integration_tests) & test(=defer_default_variable) ) +or ( binary_id(=apollo-router::integration_tests) & test(=defer_empty_primary_response) ) or ( binary_id(=apollo-router::integration_tests) & test(=defer_path) ) +or ( binary_id(=apollo-router::integration_tests) & test(=defer_path_in_array) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::batching::it_batches_with_errors_in_multi_graph) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::batching::it_batches_with_errors_in_single_graph) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::batching::it_handles_cancelled_by_coprocessor) ) @@ -55,24 +87,46 @@ or ( binary_id(=apollo-router::integration_tests) & test(=integration::file_uplo or ( binary_id(=apollo-router::integration_tests) & test(=integration::file_upload::it_fails_with_file_count_limits) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::file_upload::it_fails_with_file_size_limit) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::file_upload::it_fails_with_no_boundary_in_multipart) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::file_upload::it_supports_compression) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::file_upload::it_supports_nested_file) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::file_upload::it_uploads_to_multiple_subgraphs) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::lifecycle::test_graceful_shutdown) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::lifecycle::test_happy) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::lifecycle::test_plugin_ordering) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::lifecycle::test_reload_config_valid) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::lifecycle::test_reload_config_with_broken_plugin) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::lifecycle::test_reload_config_with_broken_plugin_recovery) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::apq) ) or ( 
binary_id(=apollo-router::integration_tests) & test(=integration::redis::connection_failure_blocks_startup) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::entity_cache) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::entity_cache_authorization) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::query_planner_redis_update_defer) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::query_planner_redis_update_query_fragments) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::query_planner_redis_update_reuse_query_fragments) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::redis::test::connection_failure_blocks_startup) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::datadog::test_basic) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::datadog::test_resource_mapping_default) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::datadog::test_span_metrics) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::jaeger::test_decimal_trace_id) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::jaeger::test_remote_root) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::logging::test_json) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::logging::test_json_sampler_off) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::logging::test_text) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::logging::test_text_sampler_off) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::metrics::test_bad_queries) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::metrics::test_graphql_metrics) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::metrics::test_metrics_bad_query) ) +or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::metrics::test_metrics_reloading) ) or ( binary_id(=apollo-router::integration_tests) & test(=integration::telemetry::metrics::test_subgraph_auth_metrics) ) +or ( binary_id(=apollo-router::integration_tests) & test(=normal_query_with_defer_accept_header) ) +or ( binary_id(=apollo-router::integration_tests) & test(=persisted_queries) ) +or ( binary_id(=apollo-router::integration_tests) & test(=queries_should_work_over_get) ) +or ( binary_id(=apollo-router::integration_tests) & test(=queries_should_work_over_post) ) +or ( binary_id(=apollo-router::integration_tests) & test(=queries_should_work_with_compression) ) +or ( binary_id(=apollo-router::integration_tests) & test(=query_just_under_recursion_limit) ) +or ( binary_id(=apollo-router::integration_tests) & test(=query_just_under_token_limit) ) +or ( binary_id(=apollo-router::samples) & test(=/basic/query1) ) +or ( binary_id(=apollo-router::samples) & test(=/basic/query2) ) or ( binary_id(=apollo-router::samples) & test(=/enterprise/entity-cache/invalidation) ) or ( binary_id(=apollo-router::samples) & test(=/enterprise/entity-cache/invalidation-subgraph) ) or ( binary_id(=apollo-router::samples) & test(=/enterprise/entity-cache/invalidation-subgraph-type) ) @@ -85,6 +139,10 @@ or ( binary_id(=apollo-router::set_context) & 
test(=test_set_context_no_typename or ( binary_id(=apollo-router::set_context) & test(=test_set_context_type_mismatch) ) or ( binary_id(=apollo-router::set_context) & test(=test_set_context_union) ) or ( binary_id(=apollo-router::set_context) & test(=test_set_context_unrelated_fetch_failure) ) +or ( binary_id(=apollo-router::set_context) & test(=test_set_context_with_null) ) +or ( binary_id(=apollo-router::type_conditions) & test(=test_type_conditions_disabled) ) +or ( binary_id(=apollo-router::type_conditions) & test(=test_type_conditions_enabled) ) +or ( binary_id(=apollo-router::type_conditions) & test(=test_type_conditions_enabled_generate_query_fragments) ) ''' [profile.ci] diff --git a/.gitignore b/.gitignore index 14a36f867f..f2ff697ba0 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,9 @@ # These are backup files generated by rustfmt **/*.rs.bk +# Data file for apollo-federation profiling with `xtask fed-flame` +/profile.json + # IDE .idea *.iml diff --git a/CHANGELOG.md b/CHANGELOG.md index de369468ce..c2db241d92 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,218 @@ All notable changes to Router will be documented in this file. This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html).
+# [1.52.0] - 2024-07-30
+
+## 🚀 Features
+
+### Provide Helm support for when the router's health_check default path is not being used ([Issue #5652](https://github.com/apollographql/router/issues/5652))
+
+When the Helm chart defines the liveness and readiness probes, and the router has been configured to use a non-default health_check path, the chart now uses that path rather than the default (`/health`).
+
+By [Jon Christiansen](https://github.com/theJC) in https://github.com/apollographql/router/pull/5653
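+For illustration, a minimal sketch of a router configuration that sets a non-default health check path, which the chart's probes now follow. The `/healthz` value is a hypothetical example, and the option names assume the router's standard `health_check` configuration:
+
+```yaml
+# Router YAML configuration (sketch): override the default /health path
+health_check:
+  listen: 0.0.0.0:8088
+  path: /healthz # liveness/readiness probes target this path instead of /health
+```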
+### Support new span and metrics formats for entity caching ([PR #5625](https://github.com/apollographql/router/pull/5625))
+
+Metrics of the router's entity cache have been converted to the latest format with support for custom telemetry.
+
+The following example configuration shows the `cache` instrument, the `cache` selector in the subgraph service, and the `cache` attribute of a subgraph span:
+
+```yaml
+telemetry:
+  instrumentation:
+    instruments:
+      default_requirement_level: none
+      cache:
+        apollo.router.operations.entity.cache:
+          attributes:
+            entity.type: true
+            subgraph.name:
+              subgraph_name: true
+            supergraph.operation.name:
+              supergraph_operation_name: string
+      subgraph:
+        only_cache_hit_on_subgraph_products:
+          type: counter
+          value:
+            cache: hit
+          unit: hit
+          description: counter of subgraph request cache hit on subgraph products
+          condition:
+            all:
+            - eq:
+              - subgraph_name: true
+              - products
+            - gt:
+              - cache: hit
+              - 0
+          attributes:
+            subgraph.name: true
+            supergraph.operation.name:
+              supergraph_operation_name: string
+```
+
+To learn more, go to [Entity caching docs](https://www.apollographql.com/docs/router/configuration/entity-caching).
+
+By [@Geal](https://github.com/Geal) and [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5625
+
+### Helm: Support renaming key for retrieving APOLLO_KEY secret ([Issue #5661](https://github.com/apollographql/router/issues/5661))
+
+A user of the router Helm chart can now rename the key used to retrieve the value of the secret key referenced by `APOLLO_KEY`.
+
+Previously, the router Helm chart hardcoded the key name to `managedFederationApiKey`. This didn't support users whose infrastructure required custom key names when getting secrets, such as Kubernetes users who need specific key names to access a `secretStore` or `externalSecret`. This change gives users the ability to control the name of the key used to retrieve that value.
+
+By [Jon Christiansen](https://github.com/theJC) in https://github.com/apollographql/router/pull/5662
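+For illustration, a hypothetical `values.yaml` sketch of such a rename. The value names below are assumptions made for illustration rather than the chart's confirmed API; consult the chart's `values.yaml` for the actual keys:
+
+```yaml
+# Secret that holds the graph API key:
+managedFederationApiKeySecretName: my-platform-secret
+# Key inside that secret to read APOLLO_KEY from
+# (previously hardcoded to `managedFederationApiKey`):
+managedFederationApiKeySecretKeyName: graph-api-key
+```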
+## 🐛 Fixes
+
+### Prevent Datadog timeout errors in logs ([Issue #2058](https://github.com/apollographql/router/issues/2058))
+
+The router's Datadog exporter has been updated to reduce the frequency of logged errors related to connection pools.
+
+Previously, the connection pools used by the Datadog exporter frequently timed out, and each timeout logged an error like the following:
+
+```
+2024-07-19T15:28:22.970360Z ERROR OpenTelemetry trace error occurred: error sending request for url (http://127.0.0.1:8126/v0.5/traces): connection error: Connection reset by peer (os error 54)
+```
+
+Now, the pool timeout for the Datadog exporter has been changed so that timeout errors happen much less frequently.
+
+By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5692
+
+### Allow service version overrides ([PR #5689](https://github.com/apollographql/router/pull/5689))
+
+The router now supports configuration of `service.version` via YAML file configuration. This enables users to produce custom versioned builds of the router.
+
+The following example overrides the version to be `1.0`:
+
+```yaml
+telemetry:
+  exporters:
+    tracing:
+      common:
+        resource:
+          service.version: 1.0
+```
+
+By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5689
+
+### Populate Datadog `span.kind` ([PR #5609](https://github.com/apollographql/router/pull/5609))
+
+Because Datadog traces use `span.kind` to differentiate between different types of spans, the router now ensures that `span.kind` is correctly populated using the OpenTelemetry span kind, which has a one-to-one mapping to the kinds set out in [dd-trace](https://github.com/DataDog/dd-trace-go/blob/main/ddtrace/ext/span_kind.go).
+
+By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5609
+
+### Remove unnecessary internal metric events from traces and spans ([PR #5649](https://github.com/apollographql/router/pull/5649))
+
+The router no longer includes some internal metric events in traces and spans that shouldn't have been included originally.
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5649
+
+### Support Datadog span metrics ([PR #5609](https://github.com/apollographql/router/pull/5609))
+
+When using the APM view in Datadog, the router now displays span metrics for top-level spans or spans with the `_dd.measured` flag set.
+
+The router sets the `_dd.measured` flag by default for the following spans:
+
+* `request`
+* `router`
+* `supergraph`
+* `subgraph`
+* `subgraph_request`
+* `http_request`
+* `query_planning`
+* `execution`
+* `query_parsing`
+
+To enable or disable span metrics for any span, configure `span_metrics` for the Datadog exporter:
+
+```yaml
+telemetry:
+  exporters:
+    tracing:
+      datadog:
+        enabled: true
+        span_metrics:
+          # Disable span metrics for supergraph
+          supergraph: false
+          # Enable span metrics for my_custom_span
+          my_custom_span: true
+```
+
+By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5609 and https://github.com/apollographql/router/pull/5703
+
+### Use spawn_blocking for query parsing and validation ([PR #5235](https://github.com/apollographql/router/pull/5235))
+
+To prevent its executor threads from blocking on large queries, the router now runs query parsing and validation in a Tokio blocking task.
+
+By [@xuorig](https://github.com/xuorig) in https://github.com/apollographql/router/pull/5235
+
+## 🛠 Maintenance
+
+### chore: Update rhai to latest release (1.19.0) ([PR #5655](https://github.com/apollographql/router/pull/5655))
+
+In Rhai 1.18.0, there were changes to how exceptions within functions were created. For details see: https://github.com/rhaiscript/rhai/blob/7e0ac9d3f4da9c892ed35a211f67553a0b451218/CHANGELOG.md?plain=1#L12
+
+We've modified how we handle errors raised by Rhai to comply with this change, which affects error message output: errors in functions will no longer identify the function in which the error occurred. For example:
+
+```diff
+- "rhai execution error: 'Runtime error: I have raised an error (line 223, position 5)\nin call to function 'process_subgraph_response_string''"
++ "rhai execution error: 'Runtime error: I have raised an error (line 223, position 5)'"
+```
+
+Making this change allows us to keep up with the latest version (1.19.0) of Rhai.
+
+By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/5655
+
+### Add version in the entity cache hash ([PR #5701](https://github.com/apollographql/router/pull/5701))
+
+The hashing algorithm of the router's entity cache has been updated to include the entity cache version.
+
+> [!IMPORTANT]
+> If you have previously enabled [entity caching](https://www.apollographql.com/docs/router/configuration/entity-caching), you should expect additional cache regeneration costs when updating to this version of the router while the new hashing algorithm comes into service.
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5701
+
+### Improve testing by avoiding cache effects and redacting tracing details ([PR #5638](https://github.com/apollographql/router/pull/5638))
+
+We've had some problems with flaky tests, and this PR addresses some of them.
+
+The router executes in parallel and concurrently. Many of our tests use snapshots to assert that functionality continues to work correctly. Unfortunately, concurrent/parallel execution and static snapshots don't cooperate very well: results may appear in pseudo-random order (compared to snapshot expectations), so tests become flaky and fail without obvious cause.
+
+The problem becomes particularly acute with features which are specifically designed for highly concurrent operation, such as batching.
+
+This set of changes addresses some of the router testing problems by:
+
+1. Making items in a batch test different enough that caching effects are avoided.
+2. Redacting various details so that sequencing is not as much of an issue in the otel traces tests.
+
+By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/5638
+
+## 📚 Documentation
+
+### Update router naming conventions ([PR #5400](https://github.com/apollographql/router/pull/5400))
+
+Renames our router product to distinguish between our non-commercial and commercial offerings. Instead of referring to the **Apollo Router**, we now refer to the following:
+- **Apollo Router Core** is Apollo’s free-and-open (ELv2 licensed) implementation of a routing runtime for supergraphs.
+- **GraphOS Router** is based on the Apollo Router Core and fully integrated with GraphOS. GraphOS Routers provide access to GraphOS’s commercial runtime features.
+
+By [@shorgi](https://github.com/shorgi) in https://github.com/apollographql/router/pull/5400
+
+## 🧪 Experimental
+
+### Enable Rust-based API schema implementation ([PR #5623](https://github.com/apollographql/router/pull/5623))
+
+The router has transitioned to solely using a Rust-based API schema generation implementation.
+
+Previously, the router used a JavaScript-based implementation. After testing for a few months, we've validated the improved performance and robustness of the new Rust-based implementation, so the router now only uses it.
+
+By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/5623
+
+
+
 # [1.51.0] - 2024-07-16
 ## 🚀 Features
diff --git a/Cargo.lock b/Cargo.lock index 0884397df0..604247d2a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -26,14 +26,14 @@ dependencies = [ [[package]] name = "actix" -version = "0.13.3" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb72882332b6d6282f428b77ba0358cb2687e61a6f6df6a6d3871e8a177c2d4f" +checksum = "de7fa236829ba0841304542f7614c42b80fca007455315c45c785ccfa873a85b" dependencies = [ "actix-macros", "actix-rt", "actix_derive", - "bitflags 2.4.0", + "bitflags 2.6.0", "bytes", "crossbeam-channel", "futures-core", @@ -55,7 +55,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "bytes", "futures-core", "futures-sink", @@ -68,9 +68,9 @@ dependencies = [ [[package]] name = "actix-http" -version = "3.7.0" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eb9843d84c775696c37d9a418bbb01b932629d01870722c0f13eb3f95e2536d" +checksum = "3ae682f693a9cd7b058f2b0b5d9a6d7728a8555779bedbbc35dd88528611d020" dependencies = [ "actix-codec", "actix-rt", @@ -78,7 +78,7 @@ dependencies = [ "actix-utils", "ahash", "base64 0.22.1", - "bitflags 2.4.0", + "bitflags 2.6.0", "brotli 6.0.0", "bytes", "bytestring", @@ -87,7 +87,7 @@ dependencies = [ "flate2", "futures-core", "h2", - "http 0.2.11", + "http 0.2.12", "httparse", "httpdate", "itoa", @@ -111,8 +111,8 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" dependencies = [ - "quote 1.0.35", - "syn 2.0.48", + "quote", + "syn 2.0.71", ] [[package]] @@ -123,7 +123,7 @@ checksum = "13d324164c51f63867b57e73ba5936ea151b8a41a1d23d1031eeb9f70d0236f8" dependencies = [ "bytestring", "cfg-if 1.0.0", - "http 0.2.11", + "http 0.2.12",
"regex", "regex-lite", "serde", @@ -132,9 +132,9 @@ dependencies = [ [[package]] name = "actix-rt" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28f32d40287d3f402ae0028a9d54bef51af15c8769492826a69d28f81893151d" +checksum = "24eda4e2a6e042aa4e55ac438a2ae052d3b5da0ecf83d7411e1a368946925208" dependencies = [ "futures-core", "tokio", @@ -142,9 +142,9 @@ dependencies = [ [[package]] name = "actix-server" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eb13e7eef0423ea6eab0e59f6c72e7cb46d33691ad56a726b3cd07ddec2c2d4" +checksum = "b02303ce8d4e8be5b855af6cf3c3a08f3eff26880faad82bab679c22d3650cb5" dependencies = [ "actix-rt", "actix-service", @@ -152,7 +152,7 @@ dependencies = [ "futures-core", "futures-util", "mio", - "socket2 0.5.5", + "socket2 0.5.7", "tokio", "tracing", ] @@ -214,7 +214,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "smallvec", - "socket2 0.5.5", + "socket2 0.5.7", "time", "url", ] @@ -244,9 +244,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f591380e2e68490b5dfaf1dd1aa0ebe78d84ba7067078512b4ea6e4492d622b8" dependencies = [ "actix-router", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -255,9 +255,9 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c7db3d5a9718568e4cf4a537cfd7070e6e6ff7481510d0237fb529ac850f6d3" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -266,7 +266,7 @@ version = "0.1.0" dependencies = [ "anyhow", "apollo-router", - "http 0.2.11", + "http 0.2.12", "serde_json", "tokio", "tower", @@ -274,9 +274,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.20.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" +checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" dependencies = [ "gimli", ] @@ -295,7 +295,7 @@ checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if 1.0.0", "const-random", - "getrandom 0.2.10", + "getrandom 0.2.15", "once_cell", "serde", "version_check", @@ -304,9 +304,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.4" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6748e8def348ed4d14996fa801f4122cd763fff530258cdc03f64b25f89d3a5a" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -328,9 +328,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "anes" @@ -340,47 +340,48 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.11" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" dependencies = [ 
"anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.1" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] name = "anstyle-parse" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -394,10 +395,11 @@ checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" [[package]] name = "apollo-compiler" -version = "1.0.0-beta.18" +version = "1.0.0-beta.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16a61580d9ee85ec35b892efb1f3eec193c520fc957b612989dc823551e2639d" +checksum = "3b21b81064ebf506f5a4073f5ef7a3a9cfdba29904814fa3f42612b9055b37f2" dependencies = [ + "ahash", "apollo-parser", "ariadne", "indexmap 2.2.6", @@ -406,6 +408,7 @@ dependencies = [ "serde_json_bytes", "thiserror", "triomphe", + "typed-arena", "uuid", ] @@ -421,7 +424,7 @@ dependencies = [ [[package]] name = "apollo-federation" -version = "1.51.0" +version = "1.52.0" dependencies = [ "apollo-compiler", "derive_more", @@ -437,11 +440,12 @@ dependencies = [ "serde_json", "serde_json_bytes", "sha1", - "strum 0.26.2", - "strum_macros 0.26.1", + "strum 0.26.3", + "strum_macros 0.26.4", "tempfile", "thiserror", "time", + "tracing", "url", ] @@ -467,9 +471,10 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.51.0" +version = "1.52.0" dependencies = [ "access-json", + "ahash", "anyhow", "apollo-compiler", "apollo-federation", @@ -495,7 +500,7 @@ dependencies = [ "clap", "console", "console-subscriber", - "cookie 0.18.0", + "cookie 0.18.1", "crossbeam-channel", "dashmap", "derivative", @@ -513,7 +518,7 @@ dependencies = [ "heck 0.4.1", "hex", "hmac", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", "http-serde", "humantime", @@ -524,6 +529,7 @@ dependencies = [ "indexmap 2.2.6", "insta", "itertools 0.12.1", + "itoa", "jsonpath-rust", "jsonpath_lib", "jsonschema", @@ -570,12 +576,14 @@ dependencies = [ "regex", "reqwest", "rhai", + "rmp", "router-bridge", "rstack", "rust-embed", "rustls", "rustls-native-certs", "rustls-pemfile", + "ryu", "schemars", "semver 1.0.23", "serde", @@ -631,7 +639,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.51.0" +version = "1.52.0" dependencies = [ "apollo-parser", "apollo-router", @@ -647,7 +655,7 @@ dependencies = [ [[package]] name = 
"apollo-router-scaffold" -version = "1.51.0" +version = "1.52.0" dependencies = [ "anyhow", "cargo-scaffold", @@ -692,9 +700,9 @@ dependencies = [ [[package]] name = "apollo-smith" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "901bd689b4c67883d0fde26d7af952a4b2a50815c6b92c790d3fae8f6ea46cd3" +checksum = "4ae8c0ec27715028b24a0a98ac53e88ac4a980e6d519cdb37265d2f2c76c864a" dependencies = [ "apollo-compiler", "apollo-parser", @@ -757,18 +765,18 @@ dependencies = [ [[package]] name = "askama_derive" -version = "0.12.1" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22fbe0413545c098358e56966ff22cdd039e10215ae213cfbd65032b119fc94" +checksum = "19fe8d6cb13c4714962c072ea496f3392015f0989b1a2847bb4b2d9effd71d83" dependencies = [ + "askama_parser", "basic-toml", "mime", "mime_guess", - "nom", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "serde", - "syn 2.0.48", + "syn 2.0.71", ] [[package]] @@ -777,6 +785,15 @@ version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "619743e34b5ba4e9703bba34deac3427c72507c7159f5fd030aea8cac0cfe341" +[[package]] +name = "askama_parser" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acb1161c6b64d1c3d83108213c2a2533a342ac225aabd0bda218278c2ddb00c0" +dependencies = [ + "nom", +] + [[package]] name = "assert-json-diff" version = "2.0.2" @@ -794,7 +811,7 @@ dependencies = [ "anyhow", "apollo-router", "async-trait", - "http 0.2.11", + "http 0.2.12", "schemars", "serde", "serde_json", @@ -816,12 +833,11 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.1.1" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca33f4bc4ed1babef42cad36cc1f51fa88be00420404e5b1e80ab1b18f7678c" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" dependencies = [ "concurrent-queue", - "event-listener 4.0.0", "event-listener-strategy", "futures-core", "pin-project-lite", @@ -829,11 +845,11 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.6" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a116f46a969224200a0a97f29cfd4c50e7534e4b4826bd23ea2c3c533039c82c" +checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5" dependencies = [ - "brotli 3.5.0", + "brotli 6.0.0", "flate2", "futures-core", "memchr", @@ -843,30 +859,29 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.8.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" +checksum = "d7ebdfa2ebdab6b1760375fa7d6f382b9f486eac35fc994625a00e89280bdbb7" dependencies = [ - "async-lock 3.1.2", "async-task", "concurrent-queue", - "fastrand 2.0.1", - "futures-lite 2.0.0", + "fastrand 2.1.0", + "futures-lite 2.3.0", "slab", ] [[package]] name = "async-global-executor" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b4353121d5644cdf2beb5726ab752e79a8db1ebb52031770ec47db31d245526" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "async-channel 2.1.1", + "async-channel 2.3.1", "async-executor", - "async-io 2.2.1", - "async-lock 3.1.2", + "async-io 2.3.3", + "async-lock 3.4.0", "blocking", - 
"futures-lite 2.0.0", + "futures-lite 2.3.0", "once_cell", ] @@ -887,7 +902,7 @@ dependencies = [ "fnv", "futures-util", "handlebars 4.5.0", - "http 0.2.11", + "http 0.2.12", "indexmap 1.9.3", "mime", "multer", @@ -931,8 +946,8 @@ dependencies = [ "async-graphql-parser", "darling", "proc-macro-crate", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "syn 1.0.109", "thiserror", ] @@ -977,24 +992,24 @@ dependencies = [ "polling 2.8.0", "rustix 0.37.27", "slab", - "socket2 0.4.9", + "socket2 0.4.10", "waker-fn", ] [[package]] name = "async-io" -version = "2.2.1" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6d3b15875ba253d1110c740755e246537483f152fa334f91abd7fe84c88b3ff" +checksum = "0d6baa8f0178795da0e71bc42c9e5d13261aac7ee549853162e66a241ba17964" dependencies = [ - "async-lock 3.1.2", + "async-lock 3.4.0", "cfg-if 1.0.0", "concurrent-queue", "futures-io", - "futures-lite 2.0.0", + "futures-lite 2.3.0", "parking", - "polling 3.3.1", - "rustix 0.38.31", + "polling 3.7.2", + "rustix 0.38.34", "slab", "tracing", "windows-sys 0.52.0", @@ -1011,11 +1026,11 @@ dependencies = [ [[package]] name = "async-lock" -version = "3.1.2" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea8b3453dd7cc96711834b75400d671b73e3656975fa68d9f277163b7f7e316" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 4.0.0", + "event-listener 5.3.1", "event-listener-strategy", "pin-project-lite", ] @@ -1033,26 +1048,26 @@ dependencies = [ "cfg-if 1.0.0", "event-listener 3.1.0", "futures-lite 1.13.0", - "rustix 0.38.31", + "rustix 0.38.34", "windows-sys 0.48.0", ] [[package]] name = "async-signal" -version = "0.2.5" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e47d90f65a225c4527103a8d747001fc56e375203592b25ad103e1ca13124c5" +checksum = "dfb3634b73397aa844481f814fad23bbf07fdb0eabec10f2eb95e58944b1ec32" dependencies = [ - "async-io 2.2.1", - "async-lock 2.8.0", + "async-io 2.3.3", + "async-lock 3.4.0", "atomic-waker", "cfg-if 1.0.0", "futures-core", "futures-io", - "rustix 0.38.31", + "rustix 0.38.34", "signal-hook-registry", "slab", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -1066,7 +1081,7 @@ dependencies = [ "async-io 1.13.0", "async-lock 2.8.0", "async-process", - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.20", "futures-channel", "futures-core", "futures-io", @@ -1099,26 +1114,26 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] name = "async-task" -version = "4.5.0" +version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4eb2cdb97421e01129ccb49169d8279ed21e829929144f4a22a6e54ac549ca1" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.77" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] 
[[package]] @@ -1140,9 +1155,9 @@ dependencies = [ [[package]] name = "auth-git2" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41e7771d4ab6635cbd685ce8db215b29c78a468098126de77c57f3b2e6eb3757" +checksum = "e51bd0e4592409df8631ca807716dc1e5caafae5d01ce0157c966c71c7e49c3c" dependencies = [ "dirs", "git2", @@ -1151,15 +1166,15 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "aws-config" -version = "1.5.3" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2368fb843e9eec932f7789d64d0e05850f4a79067188c657e572f1f5a7589df0" +checksum = "caf6cfe2881cb1fcbba9ae946fb9a6480d3b7a714ca84c74925014a89ef3387a" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1174,11 +1189,11 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand 2.0.1", + "fastrand 2.1.0", "hex", - "http 0.2.11", + "http 0.2.12", "hyper", - "ring 0.17.5", + "ring", "time", "tokio", "tracing", @@ -1200,9 +1215,9 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a4a5e448145999d7de17bf44a886900ecb834953408dae8aaf90465ce91c1dd" +checksum = "87c5f920ffd1e0526ec9e70e50bf444db50b204395a0fa7016bbf9e31ea1698f" dependencies = [ "aws-credential-types", "aws-sigv4", @@ -1212,8 +1227,8 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand 2.0.1", - "http 0.2.11", + "fastrand 2.1.0", + "http 0.2.12", "http-body 0.4.6", "percent-encoding", "pin-project-lite", @@ -1223,9 +1238,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.33.0" +version = "1.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8aee358b755b2738b3ffb8a5b54ee991b28c8a07483a0ff7d49a58305cc2609" +checksum = "fc3ef4ee9cdd19ec6e8b10d963b79637844bbf41c31177b77a188eaa941e69f7" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1237,7 +1252,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "http 0.2.11", + "http 0.2.12", "once_cell", "regex-lite", "tracing", @@ -1245,9 +1260,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.34.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d5ce026f0ae73e06b20be5932150dd0e9b063417fd7c3acf5ca97018b9cbd64" +checksum = "527f3da450ea1f09f95155dba6153bd0d83fe0923344a12e1944dfa5d0b32064" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1259,7 +1274,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "http 0.2.11", + "http 0.2.12", "once_cell", "regex-lite", "tracing", @@ -1267,9 +1282,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.33.0" +version = "1.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c820248cb02e4ea83630ad2e43d0721cdbccedba5ac902cd0b6fb84d7271f205" +checksum = "94316606a4aa2cb7a302388411b8776b3fbd254e8506e2dc43918286d8212e9b" dependencies = [ "aws-credential-types", "aws-runtime", @@ -1282,7 +1297,7 @@ dependencies = [ "aws-smithy-types", "aws-smithy-xml", "aws-types", - "http 0.2.11", + "http 0.2.12", "once_cell", "regex-lite", "tracing", @@ -1290,9 +1305,9 @@ 
dependencies = [ [[package]] name = "aws-sigv4" -version = "1.2.2" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31eed8d45759b2c5fe7fd304dd70739060e9e0de509209036eabea14d0720cce" +checksum = "5df1b0fa6be58efe9d4ccc257df0a53b89cd8909e86591a13ca54817c87517be" dependencies = [ "aws-credential-types", "aws-smithy-http", @@ -1302,8 +1317,8 @@ dependencies = [ "form_urlencoded", "hex", "hmac", - "http 0.2.11", - "http 1.0.0", + "http 0.2.12", + "http 1.1.0", "once_cell", "percent-encoding", "sha2", @@ -1333,7 +1348,7 @@ dependencies = [ "bytes", "bytes-utils", "futures-core", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", "once_cell", "percent-encoding", @@ -1363,20 +1378,20 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.6.1" +version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3df4217d39fe940066174e6238310167bf466bfbebf3be0661e53cacccde6313" +checksum = "ce87155eba55e11768b8c1afa607f3e864ae82f03caf63258b37455b0ad02537" dependencies = [ "aws-smithy-async", "aws-smithy-http", "aws-smithy-runtime-api", "aws-smithy-types", "bytes", - "fastrand 2.0.1", + "fastrand 2.1.0", "h2", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", - "http-body 1.0.0", + "http-body 1.0.1", "httparse", "hyper", "hyper-rustls", @@ -1397,8 +1412,8 @@ dependencies = [ "aws-smithy-async", "aws-smithy-types", "bytes", - "http 0.2.11", - "http 1.0.0", + "http 0.2.12", + "http 1.1.0", "pin-project-lite", "tokio", "tracing", @@ -1414,10 +1429,10 @@ dependencies = [ "base64-simd", "bytes", "bytes-utils", - "http 0.2.11", - "http 1.0.0", + "http 0.2.12", + "http 1.1.0", "http-body 0.4.6", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "itoa", "num-integer", @@ -1439,9 +1454,9 @@ dependencies = [ [[package]] name = "aws-types" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2009a9733865d0ebf428a314440bbe357cc10d0c16d86a8e15d32e9b47c1e80e" +checksum = "5221b91b3e441e6675310829fd8984801b772cb1546ef6c0e54dec9f1ac13fef" dependencies = [ "aws-credential-types", "aws-smithy-async", @@ -1464,7 +1479,7 @@ dependencies = [ "bytes", "futures-util", "headers", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", "hyper", "itoa", @@ -1496,7 +1511,7 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", "mime", "rustversion", @@ -1506,9 +1521,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.68" +version = "0.3.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" +checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" dependencies = [ "addr2line", "cc", @@ -1597,9 +1612,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "block-buffer" @@ -1612,18 +1627,15 @@ dependencies = [ [[package]] name = "blocking" -version = "1.5.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +checksum = 
"703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" dependencies = [ - "async-channel 2.1.1", - "async-lock 3.1.2", + "async-channel 2.3.1", "async-task", - "fastrand 2.0.1", "futures-io", - "futures-lite 2.0.0", + "futures-lite 2.3.0", "piper", - "tracing", ] [[package]] @@ -1633,7 +1645,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc0bdbcf2078e0ba8a74e1fe0cf36f54054a04485759b61dfd60b174658e9607" dependencies = [ "bit-vec 0.7.0", - "getrandom 0.2.10", + "getrandom 0.2.15", "siphasher", ] @@ -1645,7 +1657,7 @@ checksum = "d640d25bc63c50fb1f0b545ffd80207d2e10a4c965530809b40ba3386825c391" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", - "brotli-decompressor 2.5.0", + "brotli-decompressor 2.5.1", ] [[package]] @@ -1661,9 +1673,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "2.5.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da74e2b81409b1b743f8f0c62cc6254afefb8b8e50bbfe3735550f7aeefa3448" +checksum = "4e2e4afe60d7dd600fdd3de8d0f08c2b7ec039712e3b6137ff98b7004e82de4f" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1681,9 +1693,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.6.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6798148dccfbff0fae41c7574d2fa8f1ef3492fba0face179de5d8d447d67b05" +checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" dependencies = [ "memchr", "serde", @@ -1696,46 +1708,46 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3907aac66c65520545ae3cb3c195306e20d5ed5c90bfbb992e061cf12a104d0" dependencies = [ "lazy_static", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "str_inflector", - "syn 2.0.48", + "syn 2.0.71", "thiserror", "try_match", ] [[package]] name = "bumpalo" -version = "3.13.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytecount" -version = "0.6.3" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c" +checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "a12916984aab3fa6e39d655a33e09c0071eb36d6ab3aea5c2d78551f1df6d952" dependencies = [ "serde", ] [[package]] name = "bytes-utils" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e47d3a8076e283f3acd27400535992edb3ba4b5bb72f8891ad8fbe7932a7d4b9" +checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35" dependencies = [ "bytes", "either", @@ -1765,7 +1777,7 @@ version = "0.1.0" dependencies = [ "anyhow", "apollo-router", - "http 0.2.11", + "http 0.2.12", "serde_json", "tokio", "tower", @@ 
-1802,7 +1814,7 @@ dependencies = [ "dialoguer", "git2", "globset", - "handlebars 5.1.0", + "handlebars 5.1.2", "indicatif", "md5", "serde", @@ -1832,9 +1844,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.83" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "324c74f2155653c90b04f25b2a47a8a631360cb908f92a772695f430c7e31052" dependencies = [ "jobserver", "libc", @@ -1865,9 +1877,9 @@ dependencies = [ [[package]] name = "ciborium" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" dependencies = [ "ciborium-io", "ciborium-ll", @@ -1876,15 +1888,15 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" [[package]] name = "ciborium-ll" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", "half", @@ -1892,9 +1904,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.8" +version = "4.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b3edb18336f4df585bc9aa31dd99c036dfa5dc5e9a2939a722a188f3a8970d" +checksum = "64acc1846d54c1fe936a78dc189c34e28d3f5afc348403f28ecf53660b9b8462" dependencies = [ "clap_builder", "clap_derive", @@ -1902,14 +1914,14 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.8" +version = "4.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1c09dd5ada6c6c78075d6fd0da3f90d8080651e2d6cc8eb2f1aaa4034ced708" +checksum = "6fb8393d67ba2e7bfaf28a23458e4e2b543cc73a99595511eb207fdb8aede942" dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.0", + "strsim 0.11.1", ] [[package]] @@ -1919,16 +1931,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bac35c6dafb060fd4d275d9a4ffae97917c13a6327903a8be2153cd964f7085" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] name = "clap_lex" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70" [[package]] name = "cmake" @@ -1941,9 +1953,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "combine" @@ -1980,11 +1992,11 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.2.0" +version = "2.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.20", ] [[package]] @@ -2021,7 +2033,7 @@ checksum = "7481d4c57092cd1c19dd541b92bdce883de840df30aa5d03fd48a3935c01842e" dependencies = [ "console-api", "crossbeam-channel", - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.20", "futures-task", "hdrhistogram", "humantime", @@ -2039,9 +2051,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const-random" @@ -2058,7 +2070,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.15", "once_cell", "tiny-keccak", ] @@ -2070,7 +2082,7 @@ dependencies = [ "anyhow", "apollo-router", "async-trait", - "http 0.2.11", + "http 0.2.12", "tower", "tracing", ] @@ -2094,9 +2106,9 @@ dependencies = [ [[package]] name = "cookie" -version = "0.18.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd91cf61412820176e137621345ee43b3f4423e589e7ae4e50d601d93e35ef8" +checksum = "4ddef33a339a91ea89fb53151bd0a4689cfce27055c291dfa69945475d22c747" dependencies = [ "time", "version_check", @@ -2104,9 +2116,12 @@ dependencies = [ [[package]] name = "cookie-factory" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "396de984970346b0d9e93d1415082923c679e5ae5c3ee3dcbd104f5610af126b" +checksum = "9885fa71e26b8ab7855e2ec7cae6e9b380edff76cd052e07c683a0319d51b3a2" +dependencies = [ + "futures", +] [[package]] name = "cookies-to-headers" @@ -2114,7 +2129,7 @@ version = "0.1.0" dependencies = [ "anyhow", "apollo-router", - "http 0.2.11", + "http 0.2.12", "serde_json", "tokio", "tower", @@ -2131,9 +2146,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -2141,9 +2156,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "countme" @@ -2153,9 +2168,9 @@ checksum = "7704b5fdd17b18ae31c4c1da5a2e0305a2bf17b5249300a9ee9ed7b72114c636" [[package]] name = "cpufeatures" -version = "0.2.9" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -2168,9 +2183,9 @@ checksum = "338089f42c427b86394a5ee60ff321da23a5c89c9d89514c829687b26359fcff" [[package]] 
name = "crc32fast" -version = "1.3.2" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if 1.0.0", ] @@ -2215,23 +2230,21 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.20", ] [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-epoch 0.9.15", - "crossbeam-utils 0.8.16", + "crossbeam-epoch 0.9.18", + "crossbeam-utils 0.8.20", ] [[package]] @@ -2251,15 +2264,11 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if 1.0.0", - "crossbeam-utils 0.8.16", - "memoffset 0.9.0", - "scopeguard", + "crossbeam-utils 0.8.20", ] [[package]] @@ -2275,12 +2284,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crunchy" @@ -2316,7 +2322,7 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ - "quote 1.0.35", + "quote", "syn 1.0.109", ] @@ -2348,8 +2354,8 @@ checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "strsim 0.10.0", "syn 1.0.109", ] @@ -2361,7 +2367,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ "darling_core", - "quote 1.0.35", + "quote", "syn 1.0.109", ] @@ -2381,9 +2387,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "deadpool" @@ -2400,9 +2406,9 @@ dependencies = [ [[package]] name = "deadpool-runtime" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaa37046cc0f6c3cc6090fbdbf73ef0b8ef4cfcc37f6befc0020f63e8cf121e1" +checksum = "092966b41edc516079bdf31ec78a2e0588d1d0c08f78b91d8307215928642b2b" 
[[package]] name = "debugid" @@ -2421,8 +2427,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c65c2ffdafc1564565200967edc4851c7b55422d3913466688907efd05ea26f" dependencies = [ "deno-proc-macro-rules-macros", - "proc-macro2 1.0.76", - "syn 2.0.48", + "proc-macro2", + "syn 2.0.71", ] [[package]] @@ -2432,9 +2438,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3047b312b7451e3190865713a4dd6e1f821aed614ada219766ebc3024a690435" dependencies = [ "once_cell", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -2483,13 +2489,13 @@ dependencies = [ "once_cell", "pmutil", "proc-macro-crate", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "regex", "strum 0.25.0", "strum_macros 0.25.3", "syn 1.0.109", - "syn 2.0.48", + "syn 2.0.71", "thiserror", ] @@ -2532,9 +2538,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "pem-rfc7468", @@ -2543,9 +2549,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", "serde", @@ -2557,8 +2563,8 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -2568,22 +2574,22 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] name = "derive_more" -version = "0.99.17" +version = "0.99.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "convert_case", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "rustc_version 0.4.0", - "syn 1.0.109", + "syn 2.0.71", ] [[package]] @@ -2683,13 +2689,13 @@ dependencies = [ [[package]] name = "displaydoc" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -2727,9 +2733,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.13" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbfc4744c1b8f2a09adc0e55242f60b1af195d88596bd8700be74418c056c555" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" [[package]] name = "ecdsa" @@ -2747,9 +2753,9 
@@ dependencies = [ [[package]] name = "either" -version = "1.9.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "elliptic-curve" @@ -2793,9 +2799,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -2842,18 +2848,18 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "erased-serde" -version = "0.3.29" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc978899517288e3ebbd1a3bfc1d9537dbb87eeab149e53ea490e63bcdff561a" +checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" dependencies = [ "serde", ] [[package]] name = "errno" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", @@ -2870,12 +2876,9 @@ dependencies = [ [[package]] name = "escape8259" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4911e3666fcd7826997b4745c8224295a6f3072f1418c3067b97a67557ee" -dependencies = [ - "rustversion", -] +checksum = "5692dd7b5a1978a5aeb0ce83b7655c58ca8efdcb79d21036ea249da95afec2c6" [[package]] name = "event-listener" @@ -2896,9 +2899,9 @@ dependencies = [ [[package]] name = "event-listener" -version = "4.0.0" +version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770d968249b5d99410d61f5bf89057f3199a077a04d087092f58e7d10692baae" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" dependencies = [ "concurrent-queue", "parking", @@ -2907,11 +2910,11 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ - "event-listener 4.0.0", + "event-listener 5.3.1", "pin-project-lite", ] @@ -2940,7 +2943,7 @@ dependencies = [ "apollo-router", "async-trait", "futures", - "http 0.2.11", + "http 0.2.12", "hyper", "multimap 0.9.1", "schemars", @@ -2981,9 +2984,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "ff" @@ -2997,14 +3000,14 @@ dependencies = [ [[package]] name = "filetime" -version = "0.2.22" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4029edd3e734da6fe05b6cd7bd2960760a616bd2ddd0d59a0124746d6272af0" +checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 
0.3.5", - "windows-sys 0.48.0", + "redox_syscall 0.4.1", + "windows-sys 0.52.0", ] [[package]] @@ -3046,7 +3049,7 @@ dependencies = [ "anyhow", "apollo-router", "async-trait", - "http 0.2.11", + "http 0.2.12", "serde_json", "tokio", "tower", @@ -3059,7 +3062,7 @@ version = "0.1.0" dependencies = [ "anyhow", "apollo-router", - "http 0.2.11", + "http 0.2.12", "serde_json", "tokio", "tower", @@ -3081,9 +3084,9 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -3138,7 +3141,7 @@ dependencies = [ "rustls-native-certs", "rustls-webpki", "semver 1.0.23", - "socket2 0.5.5", + "socket2 0.5.7", "tokio", "tokio-rustls", "tokio-stream", @@ -3232,17 +3235,15 @@ dependencies = [ [[package]] name = "futures-lite" -version = "2.0.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c1155db57329dca6d018b61e76b1488ce9a2e5e44028cac420a5898f4fcef63" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand 2.0.1", + "fastrand 2.1.0", "futures-core", "futures-io", - "memchr", "parking", "pin-project-lite", - "waker-fn", ] [[package]] @@ -3251,9 +3252,9 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -3287,9 +3288,9 @@ dependencies = [ [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" @@ -3333,9 +3334,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3346,28 +3347,28 @@ dependencies = [ [[package]] name = "ghost" -version = "0.1.14" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba330b70a5341d3bc730b8e205aaee97ddab5d9c448c4f51a7c2d924266fa8f9" +checksum = "b0e085ded9f1267c32176b40921b9754c474f7dd96f7e808d4a982e48aa1e854" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] name = "gimli" -version = "0.27.3" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" +checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" [[package]] name = "git2" -version = "0.18.2" +version = "0.18.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b3ba52851e73b46a4c3df1d89343741112003f0f6f13beb0dfac9e457c3fdcd" +checksum = "232e6a7bfe35766bf715e55a88b39a700596c0ccfd88cd3680b4cdb40d66ef70" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "libc", "libgit2-sys", 
"log", @@ -3384,15 +3385,15 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "globset" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759c97c1e17c55525b57192c06a267cda0ac5210b222d6b82189a2338fa1c13d" +checksum = "57da3b9b5b85bd66f31093f8c408b90a74431672542466497dcbdfdc02034be1" dependencies = [ "aho-corasick", "bstr", - "fnv", "log", - "regex", + "regex-automata 0.4.7", + "regex-syntax 0.8.4", ] [[package]] @@ -3447,8 +3448,8 @@ dependencies = [ "graphql-parser", "heck 0.4.1", "lazy_static", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "serde", "serde_json", "syn 1.0.109", @@ -3461,7 +3462,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00bda454f3d313f909298f626115092d348bc231025699f557b27e248475f48c" dependencies = [ "graphql_client_codegen", - "proc-macro2 1.0.76", + "proc-macro2", "syn 1.0.109", ] @@ -3487,7 +3488,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http 0.2.11", + "http 0.2.12", "indexmap 2.2.6", "slab", "tokio", @@ -3497,9 +3498,13 @@ dependencies = [ [[package]] name = "half" -version = "1.8.2" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if 1.0.0", + "crunchy", +] [[package]] name = "handlebars" @@ -3517,9 +3522,9 @@ dependencies = [ [[package]] name = "handlebars" -version = "5.1.0" +version = "5.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab283476b99e66691dee3f1640fea91487a8d81f50fb5ecc75538f8f8879a1e4" +checksum = "d08485b96a0e6393e9e4d1b8d48cf74ad6c063cd905eb33f42c1ce3f0377539b" dependencies = [ "log", "pest", @@ -3560,15 +3565,14 @@ dependencies = [ [[package]] name = "headers" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" +checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ - "base64 0.13.1", - "bitflags 1.3.2", + "base64 0.21.7", "bytes", "headers-core", - "http 0.2.11", + "http 0.2.12", "httpdate", "mime", "sha1", @@ -3580,7 +3584,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http 0.2.11", + "http 0.2.12", ] [[package]] @@ -3621,9 +3625,15 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" [[package]] name = "hex" @@ -3665,9 +3675,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" 
dependencies = [ "bytes", "fnv", @@ -3676,9 +3686,9 @@ dependencies = [ [[package]] name = "http" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -3692,18 +3702,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http 0.2.11", + "http 0.2.12", "pin-project-lite", ] [[package]] name = "http-body" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.0.0", + "http 1.1.0", ] [[package]] @@ -3714,8 +3724,8 @@ checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", "futures-util", - "http 1.0.0", - "http-body 1.0.0", + "http 1.1.0", + "http-body 1.0.1", "pin-project-lite", ] @@ -3731,7 +3741,7 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f560b665ad9f1572cfcaf034f7fb84338a7ce945216d64a90fd81f046a3caee" dependencies = [ - "http 0.2.11", + "http 0.2.12", "serde", ] @@ -3745,7 +3755,7 @@ dependencies = [ "async-channel 1.9.0", "base64 0.13.1", "futures-lite 1.13.0", - "http 0.2.11", + "http 0.2.12", "infer", "pin-project-lite", "rand 0.7.3", @@ -3758,9 +3768,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.8.0" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" [[package]] name = "httpdate" @@ -3795,22 +3805,22 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.28" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", "h2", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -3824,7 +3834,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http 0.2.11", + "http 0.2.12", "hyper", "log", "rustls", @@ -3968,9 +3978,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if 1.0.0", ] @@ -3997,7 +4007,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.9", "libc", "windows-sys 0.48.0", ] @@ -4008,7 +4018,7 @@ version 
= "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.5", + "socket2 0.5.7", "widestring", "windows-sys 0.48.0", "winreg", @@ -4016,21 +4026,27 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "is-terminal" -version = "0.4.9" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" dependencies = [ - "hermit-abi 0.3.2", - "rustix 0.38.31", - "windows-sys 0.48.0", + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.52.0", ] +[[package]] +name = "is_terminal_polyfill" +version = "1.70.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" + [[package]] name = "iso8601" version = "0.6.1" @@ -4069,24 +4085,24 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.26" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -4127,7 +4143,7 @@ dependencies = [ "bytecount", "fancy-regex", "fraction", - "getrandom 0.2.10", + "getrandom 0.2.15", "iso8601", "itoa", "memchr", @@ -4152,7 +4168,7 @@ dependencies = [ "base64 0.21.7", "js-sys", "pem", - "ring 0.17.5", + "ring", "serde", "serde_json", "simple_asn1", @@ -4164,7 +4180,7 @@ version = "0.1.0" dependencies = [ "anyhow", "apollo-router", - "http 0.2.11", + "http 0.2.12", "serde_json", "tokio", "tower", @@ -4222,8 +4238,8 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8edfc11b8f56ce85e207e62ea21557cfa09bb24a8f6b04ae181b086ff8611c22" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "regex", "syn 1.0.109", ] @@ -4267,9 +4283,19 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.6.0", + 
"libc", +] [[package]] name = "libssh2-sys" @@ -4309,9 +4335,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.12" +version = "1.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" dependencies = [ "cc", "libc", @@ -4343,9 +4369,9 @@ version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8dccda732e04fa3baf2e17cf835bfe2601c7c2edafd64417c627dabae3a8cda" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -4356,9 +4382,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "local-channel" @@ -4379,9 +4405,9 @@ checksum = "4d873d7c67ce09b42110d801813efbc9364414e356be9935700d368351657487" [[package]] name = "lock_api" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -4390,9 +4416,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" dependencies = [ "value-bag", ] @@ -4447,9 +4473,9 @@ dependencies = [ [[package]] name = "matchit" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed1202b2a6f884ae56f04cff409ab315c5ce26b5e58d7412e484f01fd52f52ef" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "maybe-uninit" @@ -4486,18 +4512,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" -dependencies = [ - "autocfg", -] - -[[package]] -name = "memoffset" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" dependencies = [ "autocfg", ] @@ -4520,9 +4537,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.4" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" dependencies = [ "mime", "unicase", @@ -4536,22 +4553,18 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.2" +version = "0.7.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" dependencies = [ "adler", ] [[package]] name = "mintex" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd7c5ba1c3b5a23418d7bbf98c71c3d4946a0125002129231da8d6b723d559cb" -dependencies = [ - "once_cell", - "sys-info", -] +checksum = "9bec4598fddb13cc7b528819e697852653252b760f1228b7642679bf2ff2cd07" [[package]] name = "mio" @@ -4587,8 +4600,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if 1.0.0", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -4602,7 +4615,7 @@ dependencies = [ "async-lock 2.8.0", "crossbeam-channel", "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.20", "futures-util", "num_cpus", "once_cell", @@ -4626,12 +4639,12 @@ dependencies = [ "bytes", "encoding_rs", "futures-util", - "http 0.2.11", + "http 0.2.12", "httparse", "log", "memchr", "mime", - "spin 0.9.8", + "spin", "version_check", ] @@ -4681,7 +4694,7 @@ version = "6.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "filetime", "inotify", "kqueue", @@ -4713,9 +4726,9 @@ dependencies = [ [[package]] name = "num" -version = "0.4.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" dependencies = [ "num-bigint", "num-complex", @@ -4727,11 +4740,10 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.3" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ - "autocfg", "num-integer", "num-traits", "rand 0.8.5", @@ -4745,9 +4757,9 @@ checksum = "63335b2e2c34fae2fb0aa2cecfd9f0832a1e24b3b32ecec612c3426d46dc8aaa" [[package]] name = "num-complex" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" dependencies = [ "num-traits", ] @@ -4760,19 +4772,18 @@ checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-iter" -version = "0.1.43" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", @@ 
-4781,11 +4792,10 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" dependencies = [ - "autocfg", "num-bigint", "num-integer", "num-traits", @@ -4806,7 +4816,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.9", "libc", ] @@ -4827,9 +4837,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.31.1" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" +checksum = "081b846d1d56ddfc18fdf1a922e4f6e07a11768ea1b92dec44e42b72712ccfce" dependencies = [ "memchr", ] @@ -4842,9 +4852,9 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" -version = "11.1.3" +version = "11.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" +checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-name-to-header" @@ -4852,7 +4862,7 @@ version = "0.1.0" dependencies = [ "anyhow", "apollo-router", - "http 0.2.11", + "http 0.2.12", "serde_json", "tokio", "tower", @@ -4866,18 +4876,18 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "111.27.0+1.1.1v" +version = "300.3.1+3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06e8f197c82d7511c5b014030c9b1efeda40d7d5f99d23b4ceed3524a5e63f02" +checksum = "7259953d42a81bf137fbbd73bd30a8e1914d6dce43c2b90ed575783a22608b91" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.91" +version = "0.9.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "866b5f16f90776b9bb8dc1e1802ac6f0513de3a7a7465867bfbc563dc737faac" +checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" dependencies = [ "cc", "libc", @@ -4928,7 +4938,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5f4ecf595095d3b641dd2761a0c3d1f175d3d6c28f38e65418d8004ea3255dd" dependencies = [ "futures-core", - "http 0.2.11", + "http 0.2.12", "indexmap 1.9.3", "itertools 0.10.5", "once_cell", @@ -4949,7 +4959,7 @@ checksum = "c7594ec0e11d8e33faf03530a4c49af7064ebba81c1480e01be67d90b356508b" dependencies = [ "async-trait", "bytes", - "http 0.2.11", + "http 0.2.12", "opentelemetry_api", "reqwest", ] @@ -4964,7 +4974,7 @@ dependencies = [ "futures-core", "futures-util", "headers", - "http 0.2.11", + "http 0.2.12", "opentelemetry 0.20.0", "opentelemetry-http", "opentelemetry-semantic-conventions", @@ -4981,7 +4991,7 @@ checksum = "7e5e5a5c4135864099f3faafbe939eb4d7f9b80ebf68a8448da961b32a7c1275" dependencies = [ "async-trait", "futures-core", - "http 0.2.11", + "http 0.2.12", "opentelemetry-http", "opentelemetry-proto 0.3.0", "opentelemetry-semantic-conventions", @@ -5064,7 +5074,7 @@ checksum = "eb966f01235207a6933c0aec98374fe9782df1c1d2b3d1db35c458451d138143" dependencies = [ "async-trait", "futures-core", - "http 
0.2.11", + "http 0.2.12", "once_cell", "opentelemetry 0.20.0", "opentelemetry-http", @@ -5130,7 +5140,7 @@ dependencies = [ "glob", "once_cell", "opentelemetry 0.22.0", - "ordered-float 4.2.0", + "ordered-float 4.2.1", "percent-encoding", "rand 0.8.5", "thiserror", @@ -5144,9 +5154,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "ordered-float" -version = "2.10.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7940cf2ca942593318d07fcf2596cdca60a85c9e7fab408a5e21a4f9dcd40d87" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" dependencies = [ "num-traits", ] @@ -5162,9 +5172,9 @@ dependencies = [ [[package]] name = "ordered-float" -version = "4.2.0" +version = "4.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76df7075c7d4d01fdcb46c912dd17fba5b60c78ea480b475f2b6ab6f666584e" +checksum = "19ff2cf528c6c03d9ed653d6c4ce1dc0582dc4af309790ad92f07c1cd551b0be" dependencies = [ "num-traits", ] @@ -5195,9 +5205,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" @@ -5211,15 +5221,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.8" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.3.5", + "redox_syscall 0.5.3", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -5230,11 +5240,11 @@ checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pem" -version = "3.0.3" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "serde", ] @@ -5255,19 +5265,20 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.2" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1acb4a4365a13f749a93f1a094a7805e5cfa0955373a9de860d962eaa3a5fe5a" +checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" dependencies = [ + "memchr", "thiserror", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.7.2" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "666d00490d4ac815001da55838c500eafb0320019bbaa44444137c48b443a853" +checksum = "2a548d2beca6773b1c244554d36fcf8548a8a58e74156968211567250e48e49a" dependencies = [ "pest", "pest_generator", @@ -5275,22 +5286,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.2" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929" +checksum = "3c93a82e8d145725dcbaf44e5ea887c8a869efdcc28706df2d08c69e17077183" dependencies = [ "pest", 
"pest_meta", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] name = "pest_meta" -version = "2.7.2" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56af0a30af74d0445c0bf6d9d051c979b516a1a5af790d251daee76005420a48" +checksum = "a941429fea7e08bedec25e4f6785b6ffaacc6b755da98df5ef3e7dcf4a124c4f" dependencies = [ "once_cell", "pest", @@ -5299,9 +5310,9 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", "indexmap 2.2.6", @@ -5311,22 +5322,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -5343,12 +5354,12 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "piper" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +checksum = "ae1d5c74c9876f070d3e8fd503d748c7d974c3e48da8f41350fa5222ef9b4391" dependencies = [ "atomic-waker", - "fastrand 2.0.1", + "fastrand 2.1.0", "futures-io", ] @@ -5364,15 +5375,15 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "plotters" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" dependencies = [ "num-traits", "plotters-backend", @@ -5383,15 +5394,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" +checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" [[package]] name = "plotters-svg" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" dependencies = [ "plotters-backend", ] @@ -5402,9 +5413,9 @@ version = "0.6.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "52a40bc70c2c58040d2d8b167ba9a5ff59fc9dab7ad44771cfde3dcfde7a09c6" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -5425,14 +5436,15 @@ dependencies = [ [[package]] name = "polling" -version = "3.3.1" +version = "3.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf63fa624ab313c11656b4cda960bfc46c410187ad493c41f6ba2d8c1e991c9e" +checksum = "a3ed00ed3fbf728b5816498ecd316d1716eecaced9c0c8d2c5a6740ca214985b" dependencies = [ "cfg-if 1.0.0", "concurrent-queue", + "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.31", + "rustix 0.38.34", "tracing", "windows-sys 0.52.0", ] @@ -5491,7 +5503,7 @@ version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ - "proc-macro2 1.0.76", + "proc-macro2", "syn 1.0.109", ] @@ -5516,27 +5528,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "proc-macro2" -version = "1.0.76" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] [[package]] name = "prometheus" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" +checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1" dependencies = [ "cfg-if 1.0.0", "fnv", @@ -5554,7 +5557,7 @@ dependencies = [ "anyhow", "apollo-router", "async-trait", - "http 0.2.11", + "http 0.2.12", "schemars", "serde", "serde_json", @@ -5612,8 +5615,8 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -5625,9 +5628,9 @@ checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", "itertools 0.12.1", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -5674,7 +5677,7 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57206b407293d2bcd3af849ce869d52068623f19e1b5ff8e8778e3309439682b" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "memchr", "unicase", ] @@ -5685,7 +5688,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27" dependencies = [ - "crossbeam-utils 0.8.16", + "crossbeam-utils 0.8.20", "libc", "mach", "once_cell", @@ -5703,20 +5706,11 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "0.6.13" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" +checksum = 
"0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ - "proc-macro2 0.4.30", -] - -[[package]] -name = "quote" -version = "1.0.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" -dependencies = [ - "proc-macro2 1.0.76", + "proc-macro2", ] [[package]] @@ -5778,7 +5772,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.15", ] [[package]] @@ -5801,9 +5795,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.7.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -5811,14 +5805,12 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.11.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ - "crossbeam-channel", "crossbeam-deque", - "crossbeam-utils 0.8.16", - "num_cpus", + "crossbeam-utils 0.8.20", ] [[package]] @@ -5837,30 +5829,30 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.16" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_syscall" -version = "0.3.5" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", ] [[package]] name = "redox_users" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ - "getrandom 0.2.10", - "redox_syscall 0.2.16", + "getrandom 0.2.15", + "libredox", "thiserror", ] @@ -5872,8 +5864,8 @@ checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", - "regex-syntax 0.8.2", + "regex-automata 0.4.7", + "regex-syntax 0.8.4", ] [[package]] @@ -5887,20 +5879,20 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.4", ] [[package]] name = "regex-lite" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b661b2f27137bdbc16f00eda72866a92bb28af1753ffbd56744fb6e2e9cd8e" +checksum = 
"53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" [[package]] name = "regex-syntax" @@ -5910,9 +5902,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = "reqwest" @@ -5927,7 +5919,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", "hyper", "hyper-rustls", @@ -5988,12 +5980,12 @@ dependencies = [ [[package]] name = "rhai" -version = "1.17.1" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6273372244d04a8a4b0bec080ea1e710403e88c5d9d83f9808b2bfa64f0982a" +checksum = "61797318be89b1a268a018a92a7657096d83f3ecb31418b9e9c16dcbb043b702" dependencies = [ "ahash", - "bitflags 2.4.0", + "bitflags 2.6.0", "instant", "num-traits", "once_cell", @@ -6010,7 +6002,7 @@ version = "0.1.0" dependencies = [ "anyhow", "apollo-router", - "http 0.2.11", + "http 0.2.12", "serde_json", "tokio", "tower", @@ -6022,7 +6014,7 @@ version = "0.1.0" dependencies = [ "anyhow", "apollo-router", - "http 0.2.11", + "http 0.2.12", "serde_json", "tokio", "tower", @@ -6034,7 +6026,7 @@ version = "0.1.0" dependencies = [ "anyhow", "apollo-router", - "http 0.2.11", + "http 0.2.12", "serde_json", "tokio", "tower", @@ -6046,7 +6038,7 @@ version = "0.1.0" dependencies = [ "anyhow", "apollo-router", - "http 0.2.11", + "http 0.2.12", "serde_json", "tokio", "tower", @@ -6058,7 +6050,7 @@ version = "0.1.0" dependencies = [ "anyhow", "apollo-router", - "http 0.2.11", + "http 0.2.12", "serde_json", "tokio", "tower", @@ -6070,45 +6062,31 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5a11a05ee1ce44058fa3d5961d05194fdbe3ad6b40f904af764d81b86450e6b" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", -] - -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] name = "ring" -version = "0.17.5" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", - "getrandom 0.2.10", + "cfg-if 1.0.0", + "getrandom 0.2.15", "libc", - "spin 0.9.8", - "untrusted 0.9.0", - "windows-sys 0.48.0", + "spin", + "untrusted", + "windows-sys 0.52.0", ] [[package]] name = "rmp" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9860a6cc38ed1da53456442089b4dfa35e7cedaa326df63017af88385e6b20" +checksum = "228ed7c16fa39782c3b3468e974aec2795e9089153cd08ee2e9aefb3613334c4" dependencies = [ "byteorder", "num-traits", @@ -6147,10 +6125,10 @@ dependencies = [ "apollo-compiler", "apollo-parser", "apollo-router", - "apollo-smith 0.8.0", + "apollo-smith 0.9.0", "async-trait", "env_logger 0.10.2", - "http 0.2.11", + "http 0.2.12", "libfuzzer-sys", "log", "reqwest", 
@@ -6165,13 +6143,13 @@ dependencies = [ [[package]] name = "rowan" -version = "0.15.11" +version = "0.15.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64449cfef9483a475ed56ae30e2da5ee96448789fb2aa240a04beb6a055078bf" +checksum = "32a58fa8a7ccff2aec4f39cc45bf5f985cec7125ab271cf681c279fd00192b49" dependencies = [ "countme", - "hashbrown 0.12.3", - "memoffset 0.8.0", + "hashbrown 0.14.5", + "memoffset 0.9.1", "rustc-hash", "text-size", ] @@ -6191,9 +6169,9 @@ dependencies = [ [[package]] name = "rust-embed" -version = "8.4.0" +version = "8.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19549741604902eb99a7ed0ee177a0663ee1eda51a29f71401f166e47e77806a" +checksum = "fa66af4a4fdd5e7ebc276f115e895611a34739a9c1c01028383d612d550953c0" dependencies = [ "rust-embed-impl", "rust-embed-utils", @@ -6202,22 +6180,22 @@ dependencies = [ [[package]] name = "rust-embed-impl" -version = "8.4.0" +version = "8.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb9f96e283ec64401f30d3df8ee2aaeb2561f34c824381efa24a35f79bf40ee4" +checksum = "6125dbc8867951125eec87294137f4e9c2c96566e61bf72c45095a7c77761478" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "rust-embed-utils", - "syn 2.0.48", + "syn 2.0.71", "walkdir", ] [[package]] name = "rust-embed-utils" -version = "8.4.0" +version = "8.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38c74a686185620830701348de757fd36bef4aa9680fd23c49fc539ddcc1af32" +checksum = "2e5347777e9aacb56039b0e1f28785929a8a3b709e87482e7442c72e7c12529d" dependencies = [ "globset", "sha2", @@ -6226,9 +6204,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -6270,14 +6248,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.13", + "linux-raw-sys 0.4.14", "windows-sys 0.52.0", ] @@ -6288,7 +6266,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", - "ring 0.17.5", + "ring", "rustls-webpki", "sct", ] @@ -6320,21 +6298,21 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.5", - "untrusted 0.9.0", + "ring", + "untrusted", ] [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "same-file" @@ -6347,20 +6325,20 @@ dependencies = [ [[package]] name = "scc" -version = "2.1.1" +version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76ad2bbb0ae5100a07b7a6f2ed7ab5fd0045551a4c507989b7a620046ea3efdc" +checksum = "a4465c22496331e20eb047ff46e7366455bc01c0c02015c4a376de0b2cd3a1af" dependencies = [ "sdd", ] [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -6391,10 +6369,10 @@ version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "serde_derive_internals", - "syn 2.0.48", + "syn 2.0.71", ] [[package]] @@ -6405,19 +6383,19 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", + "ring", + "untrusted", ] [[package]] name = "sdd" -version = "0.2.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84345e4c9bd703274a082fb80caaa99b7612be48dfaa1dd9266577ec412309d" +checksum = "8eb0dde0ccd15e337a3cf738a9a38115c6d8e74795d074e73973dad3d229a897" [[package]] name = "sec1" @@ -6435,11 +6413,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", @@ -6448,9 +6426,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" dependencies = [ "core-foundation-sys", "libc", @@ -6482,54 +6460,54 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" dependencies = [ "serde_derive", ] [[package]] name = "serde_bytes" -version = "0.11.12" +version = "0.11.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab33ec92f677585af6d88c65593ae2375adde54efdbf16d597f2cbc7a6d368ff" +checksum = 
"387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] name = "serde_derive_default" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a99485caec5e9d587a3780cd6ed06a546f9924071315ab7280e148536e7ab148" +checksum = "afb2522c2a87137bf6c2b3493127fed12877ef1b9476f074d6664edc98acd8a7" dependencies = [ - "quote 0.6.13", + "quote", "regex", - "syn 0.15.44", + "syn 2.0.71", "thiserror", ] [[package]] name = "serde_derive_internals" -version = "0.29.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "330f01ce65a3a5fe59a60c82f3c9a024b573b8a6e875bd233fe5f934e71d54e3" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -6561,9 +6539,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.14" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" dependencies = [ "itoa", "serde", @@ -6582,9 +6560,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" dependencies = [ "serde", ] @@ -6649,9 +6627,9 @@ version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -6678,9 +6656,9 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] @@ -6702,9 +6680,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -6760,18 +6738,18 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "smallvec" -version = 
"1.11.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" dependencies = [ "serde", ] @@ -6790,9 +6768,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi", @@ -6800,12 +6778,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -6824,12 +6802,6 @@ dependencies = [ "url", ] -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - [[package]] name = "spin" version = "0.9.8" @@ -6838,9 +6810,9 @@ checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "spki" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", "der", @@ -6876,9 +6848,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strsim" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" @@ -6891,9 +6863,9 @@ dependencies = [ [[package]] name = "strum" -version = "0.26.2" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" [[package]] name = "strum_macros" @@ -6902,30 +6874,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "rustversion", - "syn 2.0.48", + "syn 2.0.71", ] [[package]] name = "strum_macros" -version = "0.26.1" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a3417fc93d76740d974a01654a09777cb500428cc874ca9f45edfe0c4d4cd18" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck 0.4.1", - "proc-macro2 1.0.76", - "quote 1.0.35", + "heck 0.5.0", + "proc-macro2", + "quote", "rustversion", - "syn 2.0.48", + "syn 2.0.71", ] [[package]] name = "subtle" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" 
+checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "supergraph_sdl" @@ -6939,36 +6911,25 @@ dependencies = [ "tracing", ] -[[package]] -name = "syn" -version = "0.15.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" -dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "unicode-xid", -] - [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.48" +version = "2.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "b146dcf730474b4bcd16c311627b31ede9ab149045db4d6088b3becaea046462" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "unicode-ident", ] @@ -7022,16 +6983,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if 1.0.0", - "fastrand 2.0.1", - "rustix 0.38.31", + "fastrand 2.1.0", + "rustix 0.38.34", "windows-sys 0.52.0", ] [[package]] name = "termcolor" -version = "1.2.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] @@ -7068,9 +7029,9 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -7100,8 +7061,8 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f972445f2c781bb6d47ee4a715db3a0e404a79d977f751fd4eb2b0d44c6eb9d" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -7122,22 +7083,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.61" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -7148,9 +7109,9 @@ checksum = "3bf63baf9f5039dadc247375c29eb13706706cfde997d0330d05aa63a77d8820" [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = 
"8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if 1.0.0", "once_cell", @@ -7174,7 +7135,7 @@ dependencies = [ "byteorder", "integer-encoding", "log", - "ordered-float 2.10.0", + "ordered-float 2.10.1", "threadpool", ] @@ -7184,7 +7145,7 @@ version = "0.1.0" dependencies = [ "anyhow", "apollo-router", - "http 0.2.11", + "http 0.2.12", "hyper", "serde_json", "tokio", @@ -7265,9 +7226,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -7280,9 +7241,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.38.0" +version = "1.38.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +checksum = "eb2caba9f80616f438e09748d5acda951967e1ea58508ef53d9c6402485a46df" dependencies = [ "backtrace", "bytes", @@ -7292,7 +7253,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.5", + "socket2 0.5.7", "tokio-macros", "tracing", "windows-sys 0.48.0", @@ -7314,9 +7275,9 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -7343,9 +7304,9 @@ dependencies = [ [[package]] name = "tokio-test" -version = "0.4.2" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53474327ae5e166530d17f2d956afcb4f8a004de581b3cae10f12006bc8163e3" +checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7" dependencies = [ "async-stream", "bytes", @@ -7385,21 +7346,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.10" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a9aad4a3066010876e8dcf5a8a06e70a558751117a145c6ce2b82c2e2054290" +checksum = "ac2caab0bf757388c6c0ae23b3293fdb463fee59434529014f85e3263b995c28" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.4", + "toml_edit 0.22.16", ] [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" dependencies = [ "serde", ] @@ -7412,20 +7373,20 @@ checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ "indexmap 2.2.6", "toml_datetime", - "winnow", + "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.4" +version = "0.22.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9ffdf896f8daaabf9b66ba8e77ea1ed5ed0f72821b398aba62352e95062951" +checksum = "278f3d518e152219c994ce877758516bca5e118eaed6996192a774fb9fbf0788" dependencies = [ "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.6.13", ] [[package]] @@ -7443,7 +7404,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http 0.2.11", + "http 
0.2.12", "http-body 0.4.6", "hyper", "hyper-timeout", @@ -7473,7 +7434,7 @@ dependencies = [ "base64 0.21.7", "bytes", "h2", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", "hyper", "hyper-timeout", @@ -7497,7 +7458,7 @@ dependencies = [ "async-trait", "base64 0.21.7", "bytes", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", "percent-encoding", "pin-project", @@ -7516,9 +7477,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" dependencies = [ "prettyplease", - "proc-macro2 1.0.76", + "proc-macro2", "prost-build", - "quote 1.0.35", + "quote", "syn 1.0.109", ] @@ -7550,11 +7511,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ "async-compression", - "bitflags 2.4.0", + "bitflags 2.6.0", "bytes", "futures-core", "futures-util", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", "http-range-header", "pin-project-lite", @@ -7609,9 +7570,9 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -7722,8 +7683,8 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ - "quote 1.0.35", - "syn 2.0.48", + "quote", + "syn 2.0.71", ] [[package]] @@ -7784,9 +7745,9 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "try_match" @@ -7803,9 +7764,9 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0a91713132798caecb23c977488945566875e7b61b902fb111979871cbff34e" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] @@ -7817,7 +7778,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http 0.2.11", + "http 0.2.12", "httparse", "log", "rand 0.8.5", @@ -7828,22 +7789,28 @@ dependencies = [ "utf-8", ] +[[package]] +name = "typed-arena" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6af6ae20167a9ece4bcb41af5b80f8a1f1df981f6391189ce00fd257af04126a" + [[package]] name = "typed-builder" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6179333b981641242a768f30f371c9baccbfcc03749627000c500ab88bf4528b" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "syn 1.0.109", ] [[package]] name = "typenum" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "typetag" @@ -7864,8 +7831,8 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e60147782cc30833c05fba3bab1d9b5771b2685a2557672ac96fa5d154099c0e" dependencies = [ - 
"proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2", + "quote", "syn 1.0.109", ] @@ -7936,9 +7903,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-id" @@ -7948,30 +7915,24 @@ checksum = "b1b6def86329695390197b82c1e244a54a131ceb66c996f2088a3876e2ae083f" [[package]] name = "unicode-ident" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] [[package]] name = "unicode-width" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" - -[[package]] -name = "unicode-xid" -version = "0.1.0" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" +checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" [[package]] name = "unreachable" @@ -7982,12 +7943,6 @@ dependencies = [ "void", ] -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - [[package]] name = "untrusted" version = "0.9.0" @@ -8033,17 +7988,17 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "utf8parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.9.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.15", "serde", "wasm-bindgen", ] @@ -8068,9 +8023,9 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "value-bag" -version = "1.4.2" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a72e1902dde2bd6441347de2b70b7f5d59bf157c6c62f0c44572607a1d55bbe" +checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" [[package]] name = "vcpkg" @@ -8098,9 +8053,9 @@ checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" [[package]] name = "waker-fn" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" +checksum = 
"317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" [[package]] name = "walkdir" @@ -8141,9 +8096,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -8151,24 +8106,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -8178,32 +8133,32 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ - "quote 1.0.35", + "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wasm-streams" @@ -8220,9 +8175,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -8230,9 +8185,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.2" +version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "which" @@ -8243,14 +8198,14 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.31", + "rustix 0.38.34", ] [[package]] name = "widestring" -version = 
"1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "winapi" @@ -8270,11 +8225,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -8307,7 +8262,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.6", ] [[package]] @@ -8342,17 +8297,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -8369,9 +8325,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -8387,9 +8343,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -8405,9 +8361,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -8423,9 +8385,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version 
= "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -8441,9 +8403,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -8459,9 +8421,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -8477,15 +8439,24 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "winnow" +version = "0.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +dependencies = [ + "memchr", +] [[package]] name = "winnow" -version = "0.5.14" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d09770118a7eb1ccaf4a594a221334119a44a814fcb0d31c5b85e83e97227a97" +checksum = "59b5e5f6c299a3c7890b876a2a587f3115162487e704907d9b6cd29473052ba1" dependencies = [ "memchr", ] @@ -8530,9 +8501,9 @@ checksum = "f8dab7ac864710bdea6594becbea5b5050333cf34fefb0dc319567eb347950d4" [[package]] name = "xmlparser" -version = "0.13.5" +version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d25c75bf9ea12c4040a97f829154768bbbce366287e2dc044af160cd79a13fd" +checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" [[package]] name = "yaml-rust" @@ -8551,53 +8522,53 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "zerocopy" -version = "0.7.31" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.31" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.48", + "proc-macro2", + "quote", + "syn 2.0.71", ] [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" [[package]] name = "zstd" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "7.1.0" +version = "7.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" +checksum = "fa556e971e7b568dc775c136fc9de8c779b1c2fc3a63defaafadffdbd3181afa" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.11+zstd.1.5.6" +version = "2.0.12+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75652c55c0b6f3e6f12eb786fe1bc960396bf05a1eb3bf1f3691c3610ac2e6d4" +checksum = "0a4e40c320c3cb459d9a9ff6de98cff88f4751ee9275d140e2be94a2b74e4c13" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index e3ab292476..760349ac52 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ members = [ "examples/coprocessor-subgraph/rust", "examples/throw-error/rhai", "fuzz", - "fuzz/subgraph" + "fuzz/subgraph", # Note that xtask is not in the workspace member because it relies on dependencies that are incompatible with the router. Notably hyperx but there are others. ] @@ -49,7 +49,7 @@ debug = 1 # Dependencies used in more than one place are specified here in order to keep versions in sync: # https://doc.rust-lang.org/cargo/reference/workspaces.html#the-dependencies-table [workspace.dependencies] -apollo-compiler = "=1.0.0-beta.18" +apollo-compiler = "=1.0.0-beta.19" apollo-parser = "0.7.6" apollo-smith = { version = "0.5.0", features = ["parser-impl"] } async-trait = "0.1.77" diff --git a/README.md b/README.md index a4a8ad8f8c..0f87d6d343 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,13 @@ [![CircleCI](https://circleci.com/gh/apollographql/router/tree/main.svg?style=shield)](https://circleci.com/gh/apollographql/router/tree/main) +--- + +**Announcement:** +Join 1000+ engineers at GraphQL Summit for talks, workshops, and office hours, Oct 8-10 in NYC. [Get your pass here ->](https://summit.graphql.com/?utm_campaign=github_federation_readme) + +--- + # Apollo Router Core The **Apollo Router Core** is a configurable, high-performance **graph router** written in Rust to run a [federated supergraph](https://www.apollographql.com/docs/federation/) that uses [Apollo Federation 2](https://www.apollographql.com/docs/federation/v2/federation-2/new-in-federation-2). @@ -61,11 +68,11 @@ Options: ## Who is Apollo? -[Apollo](https://apollographql.com/) is building software and a graph platform to unify GraphQL across your apps and services. We help you ship faster with: +[Apollo](https://apollographql.com/) builds open-source tools and commercial services to make application development easier, better, and accessible to more people. We help you ship faster with: -* [Apollo GraphOS Studio](https://www.apollographql.com/graphos) – A free, end-to-end platform for managing your GraphQL lifecycle. Track your GraphQL schemas in a hosted registry to create a source of truth for everything in your graph. 
GraphOS Studio provides an IDE (Apollo Explorer) so you can explore data, collaborate on queries, observe usage, and safely make schema changes. GraphOS Router is a scalable runtime for supergraphs that's fully integrated with GraphOS Studio and based on the Apollo Router Core.
-* [Apollo Federation](https://www.apollographql.com/docs/federation/) – The industry-standard open architecture for building a distributed graph. Compose and manage your graphs using [Rover](https://www.apollographql.com/docs/rover) and then use Apollo Router to query plan and route requests across multiple subgraphs.
-* [Apollo Client](https://www.apollographql.com/docs/react/) – The most popular GraphQL client for the web. Apollo also builds and maintains [Apollo iOS](https://github.com/apollographql/apollo-ios) and [Apollo Android](https://github.com/apollographql/apollo-android).
+* [GraphOS](https://www.apollographql.com/graphos) - The platform for building, managing, and scaling a supergraph: a unified network of your organization's microservices and their data sources—all composed into a single distributed API.
+* [Apollo Federation](https://www.apollographql.com/federation) – The industry-standard open architecture for building a distributed graph. Use Apollo’s gateway to compose a unified graph from multiple subgraphs, determine a query plan, and route requests across your services.
+* [Apollo Client](https://github.com/apollographql/apollo-client) – The most popular GraphQL client for the web. Apollo also builds and maintains [Apollo iOS](https://github.com/apollographql/apollo-ios) and [Apollo Kotlin](https://github.com/apollographql/apollo-kotlin).
 * [Apollo Server](https://www.apollographql.com/docs/apollo-server/) – A production-ready JavaScript GraphQL server that connects to any microservice, API, or database. Compatible with all popular JavaScript frameworks and deployable in serverless environments.
 
 ## Learn how to build with Apollo
diff --git a/apollo-federation/Cargo.toml b/apollo-federation/Cargo.toml
index 49868b2989..a43735f123 100644
--- a/apollo-federation/Cargo.toml
+++ b/apollo-federation/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "apollo-federation"
-version = "1.51.0"
+version = "1.52.0"
 authors = ["The Apollo GraphQL Contributors"]
 edition = "2021"
 description = "Apollo Federation"
@@ -9,18 +9,25 @@ repository = "https://github.com/apollographql/router"
 license = "Elastic-2.0"
 autotests = false # Integration tests are modules of tests/main.rs
 
+[features]
+# This feature adds the `#[instrument]` macro to many functions, as well as
+# logging statements that capture serialized versions of key data structures.
+# This logging is gated behind a feature to avoid any unnecessary (even if
+# small) runtime costs where this data will not be desired.
+snapshot_tracing = []
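The feature gate above is worth a concrete illustration. Below is a hypothetical sketch of the pattern such a gate enables; the function and struct names are invented for this sketch and do not appear anywhere in this changeset:

```rust
// Hypothetical illustration only; `advance_query_graph` and `GraphSnapshot`
// are invented names, not code from this PR.
use serde::Serialize;

#[derive(Serialize, Debug)]
struct GraphSnapshot {
    node_count: usize,
}

// Built with `--features snapshot_tracing`, this emits a span plus a serialized
// snapshot of the data structure; built without it, both attributes compile
// away, so the default build pays no runtime cost.
#[cfg_attr(feature = "snapshot_tracing", tracing::instrument(skip_all))]
fn advance_query_graph(snapshot: &GraphSnapshot) {
    #[cfg(feature = "snapshot_tracing")]
    tracing::trace!(
        snapshot = %serde_json::to_string(snapshot).unwrap_or_default(),
        "query graph state"
    );
}
```

The `serde` derives added elsewhere in this diff (on `OperationConditional` and friends) feed exactly this kind of serialized-snapshot logging.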
+
 [dependencies]
 apollo-compiler.workspace = true
 time = { version = "0.3.34", default-features = false, features = [
     "local-offset",
 ] }
 derive_more = "0.99.17"
-indexmap = "2.2.6"
+indexmap = { version = "2.2.6", features = ["serde"] }
 itertools = "0.13.0"
 lazy_static = "1.4.0"
 multimap = "0.10.0"
 nom = "7.1.3"
-petgraph = "0.6.4"
+petgraph = { version = "0.6.4", features = ["serde-1"] }
 serde.workspace = true
 serde_json.workspace = true
 serde_json_bytes.workspace = true
@@ -28,6 +35,7 @@ strum = "0.26.0"
 strum_macros = "0.26.0"
 thiserror = "1.0"
 url = "2"
+tracing = "0.1.40"
 
 [dev-dependencies]
 hex.workspace = true
diff --git a/apollo-federation/src/compat.rs b/apollo-federation/src/compat.rs
index a4e7242629..4af8e90010 100644
--- a/apollo-federation/src/compat.rs
+++ b/apollo-federation/src/compat.rs
@@ -8,6 +8,7 @@
 //! graphql-js schema would.
 
 use apollo_compiler::ast::Value;
+use apollo_compiler::collections::IndexMap;
 use apollo_compiler::schema::Directive;
 use apollo_compiler::schema::ExtendedType;
 use apollo_compiler::schema::InputValueDefinition;
@@ -15,7 +16,6 @@ use apollo_compiler::schema::Type;
 use apollo_compiler::Name;
 use apollo_compiler::Node;
 use apollo_compiler::Schema;
-use indexmap::IndexMap;
 
 /// Return true if a directive application is "semantic", meaning it's observable in introspection.
 fn is_semantic_directive_application(directive: &Directive) -> bool {
diff --git a/apollo-federation/src/display_helpers.rs b/apollo-federation/src/display_helpers.rs
new file mode 100644
index 0000000000..f89693123a
--- /dev/null
+++ b/apollo-federation/src/display_helpers.rs
@@ -0,0 +1,145 @@
+use std::fmt;
+use std::fmt::Debug;
+use std::fmt::Display;
+use std::ops::Deref;
+
+use serde::Serializer;
+
+pub(crate) struct State<'fmt, 'fmt2> {
+    indent_level: usize,
+    output: &'fmt mut fmt::Formatter<'fmt2>,
+}
+
+impl<'a, 'b> State<'a, 'b> {
+    pub(crate) fn new(output: &'a mut fmt::Formatter<'b>) -> State<'a, 'b> {
+        Self {
+            indent_level: 0,
+            output,
+        }
+    }
+
+    pub(crate) fn indent_level(&self) -> usize {
+        self.indent_level
+    }
+
+    pub(crate) fn write<T: Display>(&mut self, value: T) -> fmt::Result {
+        write!(self.output, "{}", value)
+    }
+
+    pub(crate) fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
+        self.output.write_fmt(args)
+    }
+
+    pub(crate) fn new_line(&mut self) -> fmt::Result {
+        self.write("\n")?;
+        for _ in 0..self.indent_level {
+            self.write(" ")?
+        }
+        Ok(())
+    }
+
+    pub(crate) fn indent_no_new_line(&mut self) {
+        self.indent_level += 1;
+    }
+
+    pub(crate) fn indent(&mut self) -> fmt::Result {
+        self.indent_no_new_line();
+        self.new_line()
+    }
+
+    pub(crate) fn dedent(&mut self) -> fmt::Result {
+        self.indent_level -= 1;
+        self.new_line()
+    }
+}
+
+pub(crate) fn write_indented_lines<T>(
+    state: &mut State<'_, '_>,
+    values: &[T],
+    mut write_line: impl FnMut(&mut State<'_, '_>, &T) -> fmt::Result,
+) -> fmt::Result {
+    if !values.is_empty() {
+        state.indent_no_new_line();
+        for value in values {
+            state.new_line()?;
+            write_line(state, value)?;
+        }
+        state.dedent()?;
+    }
+    Ok(())
+}
+
+pub(crate) struct DisplaySlice<'a, T>(pub(crate) &'a [T]);
+
+impl<'a, T: Display> Display for DisplaySlice<'a, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "[")?;
+        let mut iter = self.0.iter();
+        if let Some(item) = iter.next() {
+            write!(f, "{item}")?;
+        }
+        iter.try_for_each(|item| write!(f, ", {item}"))?;
+        write!(f, "]")
+    }
+}
+
+pub(crate) struct DisplayOption<T>(pub(crate) Option<T>);
+
+impl<T> DisplayOption<T> {
+    pub(crate) fn new(inner: &Option<T>) -> DisplayOption<&T> {
+        DisplayOption(inner.as_ref())
+    }
+}
+
+impl<T: Display> Display for DisplayOption<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match &self.0 {
+            Some(item) => write!(f, "Some({item})"),
+            None => write!(f, "None"),
+        }
+    }
+}
+
+pub(crate) fn serialize_as_debug_string<T, S>(data: &T, ser: S) -> Result<S::Ok, S::Error>
+where
+    T: Debug,
+    S: Serializer,
+{
+    ser.serialize_str(&format!("{data:?}"))
+}
+
+pub(crate) fn serialize_as_string<T, S>(data: &T, ser: S) -> Result<S::Ok, S::Error>
+where
+    T: ToString,
+    S: Serializer,
+{
+    ser.serialize_str(&data.to_string())
+}
+
+pub(crate) fn serialize_option_as_string<T, S>(data: &Option<T>, ser: S) -> Result<S::Ok, S::Error>
+where
+    T: Display,
+    S: Serializer,
+{
+    serialize_as_string(&DisplayOption(data.as_ref()), ser)
+}
+
+pub(crate) fn serialize_vec_as_string<T, P, S>(data: &P, ser: S) -> Result<S::Ok, S::Error>
+where
+    P: Deref<Target = Vec<T>>,
+    T: Display,
+    S: Serializer,
+{
+    serialize_as_string(&DisplaySlice(data), ser)
+}
+
+pub(crate) fn serialize_optional_vec_as_string<T, S>(
+    data: &Option<Vec<T>>,
+    ser: S,
+) -> Result<S::Ok, S::Error>
+where
+    T: Display,
+    S: Serializer,
+{
+    serialize_as_string(&DisplayOption(data.as_deref().map(DisplaySlice)), ser)
+}
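These helpers exist to plug `Display`/`Debug`-based formatting into serde. A hedged usage sketch follows; only the `display_helpers` functions come from the file above, while the `Label`/`Snapshot` types are invented for illustration:

```rust
use serde::Serialize;

// Stand-in for a type that implements Display but not Serialize.
struct Label(u32);

impl std::fmt::Display for Label {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "label-{}", self.0)
    }
}

#[derive(Serialize)]
struct Snapshot {
    // Serialized as a plain string, e.g. "label-7".
    #[serde(serialize_with = "crate::display_helpers::serialize_as_string")]
    head: Label,
    // Serialized via DisplayOption/DisplaySlice, e.g. "Some([label-1, label-2])".
    #[serde(serialize_with = "crate::display_helpers::serialize_optional_vec_as_string")]
    tail: Option<Vec<Label>>,
}
```

Since the helpers are `pub(crate)`, this pattern only works inside the `apollo-federation` crate, which matches their intended use for snapshot logging.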
diff --git a/apollo-federation/src/error/mod.rs b/apollo-federation/src/error/mod.rs
index edbcd43951..f13ec59757 100644
--- a/apollo-federation/src/error/mod.rs
+++ b/apollo-federation/src/error/mod.rs
@@ -38,8 +38,12 @@ pub enum SingleFederationError {
     #[error("An internal error has occurred, please report this bug to Apollo. Details: {0}")]
     #[allow(private_interfaces)] // users should not inspect this.
     InternalRebaseError(#[from] crate::operation::RebaseError),
-    #[error("{message}")]
-    InvalidGraphQL { message: String },
+    #[error("{diagnostics}")]
+    InvalidGraphQL { diagnostics: DiagnosticList },
+    #[error(transparent)]
+    InvalidGraphQLName(#[from] InvalidNameError),
+    #[error("Subgraph invalid: {message}")]
+    InvalidSubgraph { message: String },
     #[error("{message}")]
     DirectiveDefinitionInvalid { message: String },
     #[error("{message}")]
@@ -203,7 +207,9 @@ impl SingleFederationError {
         match self {
             SingleFederationError::Internal { .. } => ErrorCode::Internal,
             SingleFederationError::InternalRebaseError { .. } => ErrorCode::Internal,
-            SingleFederationError::InvalidGraphQL { .. } => ErrorCode::InvalidGraphQL,
+            SingleFederationError::InvalidGraphQL { .. }
+            | SingleFederationError::InvalidGraphQLName(_) => ErrorCode::InvalidGraphQL,
+            SingleFederationError::InvalidSubgraph { .. } => ErrorCode::InvalidGraphQL,
             SingleFederationError::DirectiveDefinitionInvalid { .. } => {
                 ErrorCode::DirectiveDefinitionInvalid
             }
@@ -379,14 +385,6 @@ impl SingleFederationError {
     }
 }
 
-impl From<InvalidNameError> for SingleFederationError {
-    fn from(err: InvalidNameError) -> Self {
-        SingleFederationError::InvalidGraphQL {
-            message: format!("Invalid GraphQL name \"{}\"", err.name),
-        }
-    }
-}
-
 impl From<InvalidNameError> for FederationError {
     fn from(err: InvalidNameError) -> Self {
         SingleFederationError::from(err).into()
@@ -405,9 +403,7 @@ impl From<FederationSpecError> for FederationError {
             FederationSpecError::UnsupportedFederationDirective { .. } => {
                 SingleFederationError::UnsupportedFederationDirective { message }.into()
             }
-            FederationSpecError::InvalidGraphQLName(message) => {
-                SingleFederationError::InvalidGraphQL { message }.into()
-            }
+            FederationSpecError::InvalidGraphQLName(message) => message.into(),
         }
     }
 }
@@ -450,16 +446,9 @@ impl Display for MultipleFederationErrors {
     }
 }
 
-impl From<DiagnosticList> for MultipleFederationErrors {
-    fn from(value: DiagnosticList) -> Self {
-        Self {
-            errors: value
-                .iter()
-                .map(|e| SingleFederationError::InvalidGraphQL {
-                    message: e.error.to_string(),
-                })
-                .collect(),
-        }
+impl From<DiagnosticList> for SingleFederationError {
+    fn from(diagnostics: DiagnosticList) -> Self {
+        SingleFederationError::InvalidGraphQL { diagnostics }
     }
 }
 
@@ -538,8 +527,7 @@ impl From<MultipleFederationErrors> for FederationError {
 
 impl From<DiagnosticList> for FederationError {
     fn from(value: DiagnosticList) -> Self {
-        let value: MultipleFederationErrors = value.into();
-        value.into()
+        SingleFederationError::from(value).into()
     }
 }
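The practical effect of the reworked `From` impls above: a parse or validation failure now travels as a single `InvalidGraphQL` error that keeps the whole `DiagnosticList` (including source locations) instead of being flattened into one stringly-typed error per diagnostic. A sketch of the resulting flow at a parse boundary; this function is illustrative, not part of the diff:

```rust
use apollo_compiler::validation::Valid;
use apollo_compiler::Schema;

use crate::error::FederationError;

// Illustrative only: shows how the new From<DiagnosticList> conversions are
// meant to be used where apollo-compiler hands back validation failures.
fn parse_and_validate(sdl: &str) -> Result<Valid<Schema>, FederationError> {
    Schema::parse_and_validate(sdl, "schema.graphql")
        // WithErrors<Schema> carries the DiagnosticList; the conversion above
        // wraps it wholesale as SingleFederationError::InvalidGraphQL.
        .map_err(|with_errors| FederationError::from(with_errors.errors))
}
```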
diff --git a/apollo-federation/src/indented_display.rs b/apollo-federation/src/indented_display.rs
deleted file mode 100644
index 920b21dbee..0000000000
--- a/apollo-federation/src/indented_display.rs
+++ /dev/null
@@ -1,65 +0,0 @@
-use std::fmt;
-
-pub(crate) struct State<'fmt, 'fmt2> {
-    indent_level: usize,
-    output: &'fmt mut fmt::Formatter<'fmt2>,
-}
-
-impl<'a, 'b> State<'a, 'b> {
-    pub(crate) fn new(output: &'a mut fmt::Formatter<'b>) -> State<'a, 'b> {
-        Self {
-            indent_level: 0,
-            output,
-        }
-    }
-
-    pub(crate) fn indent_level(&self) -> usize {
-        self.indent_level
-    }
-
-    pub(crate) fn write<T: fmt::Display>(&mut self, value: T) -> fmt::Result {
-        write!(self.output, "{}", value)
-    }
-
-    pub(crate) fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
-        self.output.write_fmt(args)
-    }
-
-    pub(crate) fn new_line(&mut self) -> fmt::Result {
-        self.write("\n")?;
-        for _ in 0..self.indent_level {
-            self.write(" ")?
-        }
-        Ok(())
-    }
-
-    pub(crate) fn indent_no_new_line(&mut self) {
-        self.indent_level += 1;
-    }
-
-    pub(crate) fn indent(&mut self) -> fmt::Result {
-        self.indent_no_new_line();
-        self.new_line()
-    }
-
-    pub(crate) fn dedent(&mut self) -> fmt::Result {
-        self.indent_level -= 1;
-        self.new_line()
-    }
-}
-
-pub(crate) fn write_indented_lines<T>(
-    state: &mut State<'_, '_>,
-    values: &[T],
-    mut write_line: impl FnMut(&mut State<'_, '_>, &T) -> fmt::Result,
-) -> fmt::Result {
-    if !values.is_empty() {
-        state.indent_no_new_line();
-        for value in values {
-            state.new_line()?;
-            write_line(state, value)?;
-        }
-        state.dedent()?;
-    }
-    Ok(())
-}
diff --git a/apollo-federation/src/lib.rs b/apollo-federation/src/lib.rs
index 4a92c92f4a..4eb4afa5be 100644
--- a/apollo-federation/src/lib.rs
+++ b/apollo-federation/src/lib.rs
@@ -13,20 +13,23 @@
 //! See [Router documentation](https://www.apollographql.com/docs/router/federation-version-support/)
 //! for which Federation versions are supported by which Router versions.
 
-#![allow(dead_code)] // TODO: This is fine while we're iterating, but should be removed later.
+// TODO: This is fine while we're iterating, but should be removed later.
+#![allow(dead_code)]
+// TODO: silence false positives (apollo_compiler::Name) and investigate the rest
+#![allow(clippy::mutable_key_type)]
 
 mod api_schema;
 mod compat;
+mod display_helpers;
 pub mod error;
-mod indented_display;
 pub mod link;
 pub mod merge;
 pub(crate) mod operation;
 pub mod query_graph;
 pub mod query_plan;
 pub mod schema;
-pub mod sources;
 pub mod subgraph;
+pub(crate) mod utils;
 
 use apollo_compiler::ast::NamedType;
 use apollo_compiler::validation::Valid;
diff --git a/apollo-federation/src/link/database.rs b/apollo-federation/src/link/database.rs
index e15b80f10d..d96c8ecc4b 100644
--- a/apollo-federation/src/link/database.rs
+++ b/apollo-federation/src/link/database.rs
@@ -62,7 +62,7 @@ pub fn links_metadata(schema: &Schema) -> Result<Option<LinksMetadata>, LinkError>
             .insert(link.url.identity.clone(), Arc::clone(&link))
             .is_some()
         {
-            // TODO: we may want to lessen that limitation at some point. Including the same feature for 2 different major versions should be ok.
+            // XXX(Sylvain): We may want to loosen this limitation at some point. Including the same feature for 2 different major versions should be ok.
             return Err(LinkError::BootstrapError(format!(
                 "duplicate @link inclusion of specification \"{}\"",
                 link.url.identity
diff --git a/apollo-federation/src/link/graphql_definition.rs b/apollo-federation/src/link/graphql_definition.rs
index 260bec67b3..31226d6a7d 100644
--- a/apollo-federation/src/link/graphql_definition.rs
+++ b/apollo-federation/src/link/graphql_definition.rs
@@ -5,6 +5,7 @@ use apollo_compiler::executable::Directive;
 use apollo_compiler::name;
 use apollo_compiler::Name;
 use apollo_compiler::Node;
+use serde::Serialize;
 
 use crate::error::FederationError;
 use crate::link::argument::directive_optional_string_argument;
@@ -31,7 +32,7 @@ pub(crate) fn defer_directive_arguments(
 /// a `Vec`, and superfluous struct instances aren't elided; `Conditions` is the more appropriate
 /// struct when trying to evaluate `@skip`/`@include` conditions (e.g. merging and short-circuiting
 /// logic).
-#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)]
 pub(crate) struct OperationConditional {
     pub(crate) kind: OperationConditionalKind,
     pub(crate) value: BooleanOrVariable,
@@ -46,6 +47,7 @@ pub(crate) struct OperationConditional {
     strum_macros::Display,
     strum_macros::EnumIter,
     strum_macros::IntoStaticStr,
+    Serialize,
 )]
 pub(crate) enum OperationConditionalKind {
     #[strum(to_string = "include")]
@@ -63,7 +65,7 @@ impl OperationConditionalKind {
     }
 }
 
-#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)]
 pub(crate) enum BooleanOrVariable {
     Boolean(bool),
     Variable(Name),
diff --git a/apollo-federation/src/link/inaccessible_spec_definition.rs b/apollo-federation/src/link/inaccessible_spec_definition.rs
index 28887af686..c6e0fbf149 100644
--- a/apollo-federation/src/link/inaccessible_spec_definition.rs
+++ b/apollo-federation/src/link/inaccessible_spec_definition.rs
@@ -1,5 +1,7 @@
 use std::fmt;
 
+use apollo_compiler::collections::IndexMap;
+use apollo_compiler::collections::IndexSet;
 use apollo_compiler::name;
 use apollo_compiler::schema::Component;
 use apollo_compiler::schema::ComponentName;
@@ -12,8 +14,6 @@ use apollo_compiler::schema::InputValueDefinition;
 use apollo_compiler::schema::Value;
 use apollo_compiler::Name;
 use apollo_compiler::Node;
-use indexmap::IndexMap;
-use indexmap::IndexSet;
 use lazy_static::lazy_static;
 
 use crate::error::FederationError;
diff --git a/apollo-federation/src/merge.rs b/apollo-federation/src/merge.rs
index 2a71aa92c7..0aed048c60 100644
--- a/apollo-federation/src/merge.rs
+++ b/apollo-federation/src/merge.rs
@@ -13,6 +13,8 @@ use apollo_compiler::ast::EnumValueDefinition;
 use apollo_compiler::ast::FieldDefinition;
 use apollo_compiler::ast::NamedType;
 use apollo_compiler::ast::Value;
+use apollo_compiler::collections::IndexMap;
+use apollo_compiler::collections::IndexSet;
 use apollo_compiler::name;
 use apollo_compiler::schema::Component;
 use apollo_compiler::schema::EnumType;
@@ -31,8 +33,6 @@ use apollo_compiler::Schema;
 use indexmap::map::Entry::Occupied;
 use indexmap::map::Entry::Vacant;
 use indexmap::map::Iter;
-use indexmap::IndexMap;
-use indexmap::IndexSet;
 use itertools::Itertools;
 
 use crate::error::FederationError;
@@ -45,8 +45,12 @@ use crate::link::federation_spec_definition::FEDERATION_OVERRIDE_DIRECTIVE_NAME_
 use crate::link::federation_spec_definition::FEDERATION_OVERRIDE_LABEL_ARGUMENT_NAME;
 use crate::link::federation_spec_definition::FEDERATION_PROVIDES_DIRECTIVE_NAME_IN_SPEC;
 use crate::link::federation_spec_definition::FEDERATION_REQUIRES_DIRECTIVE_NAME_IN_SPEC;
+use crate::link::inaccessible_spec_definition::InaccessibleSpecDefinition;
+use crate::link::inaccessible_spec_definition::INACCESSIBLE_DIRECTIVE_NAME_IN_SPEC;
 use crate::link::join_spec_definition::JOIN_OVERRIDE_LABEL_ARGUMENT_NAME;
 use crate::link::spec::Identity;
+use crate::link::spec::Version;
+use crate::link::spec_definition::SpecDefinition;
 use crate::link::LinksMetadata;
 use crate::schema::ValidFederationSchema;
 use crate::subgraph::ValidSubgraph;
@@ -59,6 +63,7 @@ type MergeError = String;
 
 struct Merger {
     errors: Vec<MergeError>,
    composition_hints: Vec<String>,
+    needs_inaccessible: bool,
 }
 
 pub struct MergeSuccess {
@@ -120,6 +125,7 @@ impl Merger {
         Merger {
             composition_hints: Vec::new(),
             errors: Vec::new(),
+            needs_inaccessible: false,
         }
     }
     fn merge(&mut self, subgraphs: ValidFederationSubgraphs) -> Result<MergeSuccess, MergeFailure> {
@@ -167,6 +173,7 @@ impl Merger {
             // TODO merge directives
             let metadata = subgraph.schema.metadata();
+            let relevant_directives = DirectiveNames::for_metadata(&metadata);
 
             for (type_name, ty) in &subgraph.schema.schema().types {
                 if ty.is_built_in() || !is_mergeable_type(type_name) {
@@ -177,32 +184,35 @@
                 match ty {
                     ExtendedType::Enum(value) => self.merge_enum_type(
                         &mut supergraph.types,
+                        &relevant_directives,
                         subgraph_name.clone(),
                         type_name.clone(),
                         value,
                     ),
                     ExtendedType::InputObject(value) => self.merge_input_object_type(
                         &mut supergraph.types,
+                        &relevant_directives,
                         subgraph_name.clone(),
                         type_name.clone(),
                         value,
                     ),
                     ExtendedType::Interface(value) => self.merge_interface_type(
                         &mut supergraph.types,
-                        &metadata,
+                        &relevant_directives,
                         subgraph_name.clone(),
                         type_name.clone(),
                         value,
                     ),
                     ExtendedType::Object(value) => self.merge_object_type(
                         &mut supergraph.types,
-                        &metadata,
+                        &relevant_directives,
                         subgraph_name.clone(),
                         type_name.clone(),
                         value,
                     ),
                     ExtendedType::Union(value) => self.merge_union_type(
                         &mut supergraph.types,
+                        &relevant_directives,
                         subgraph_name.clone(),
                         type_name.clone(),
                         value,
@@ -211,6 +221,7 @@
                         if !value.is_built_in() {
                             self.merge_scalar_type(
                                 &mut supergraph.types,
+                                &relevant_directives,
                                 subgraph_name.clone(),
                                 type_name.clone(),
                                 value,
@@ -228,6 +239,10 @@
             }
         }
 
+        if self.needs_inaccessible {
+            add_core_feature_inaccessible(&mut supergraph);
+        }
+
         if self.errors.is_empty() {
             // TODO: validate here and extend `MergeFailure` to propagate validation errors
             let supergraph = Valid::assume_valid(supergraph);
@@ -282,6 +297,7 @@ fn merge_enum_type(
         &mut self,
         types: &mut IndexMap<NamedType, ExtendedType>,
+        metadata: &DirectiveNames,
         subgraph_name: Name,
         enum_name: NamedType,
         enum_type: &Node<EnumType>,
     ) {
         let existing_type = types
             .entry(enum_name.clone())
             .or_insert(copy_enum_type(enum_name, enum_type));
+
         if let ExtendedType::Enum(e) = existing_type {
             let join_type_directives =
                 join_type_applied_directive(subgraph_name.clone(), iter::empty(), false);
             e.make_mut().directives.extend(join_type_directives);
 
+            self.add_inaccessible(
+                metadata,
+                &mut e.make_mut().directives,
+                &enum_type.directives,
+            );
+
             self.merge_descriptions(&mut e.make_mut().description, &enum_type.description);
 
             // TODO we need to merge those fields LAST so we know whether enum is used as input/output/both as different merge rules will apply
@@ -309,6 +332,13 @@
                     directives: Default::default(),
                 }));
                 self.merge_descriptions(&mut ev.make_mut().description, &enum_value.description);
+
+                self.add_inaccessible(
+                    metadata,
+                    &mut ev.make_mut().directives,
+                    &enum_value.directives,
+                );
+
                 ev.make_mut().directives.push(Node::new(Directive {
                     name: name!("join__enumValue"),
                     arguments: vec![
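`merge_enum_type` above is the first of several call sites for a new `self.add_inaccessible(...)` helper whose definition falls outside this excerpt. Its contract is inferable from the call sites; a simplified sketch under that assumption, written for the `ast::DirectiveList` case (the type-level lists are `Component`-based, so the real helper must be more general):

```rust
// Assumed shape of the helper, reconstructed from its call sites in this diff;
// not the actual implementation from this PR.
fn add_inaccessible(
    &mut self,
    directive_names: &DirectiveNames,
    merged: &mut apollo_compiler::ast::DirectiveList,
    source: &apollo_compiler::ast::DirectiveList,
) {
    if source.has(&directive_names.inaccessible) && !merged.has(&directive_names.inaccessible) {
        merged.push(apollo_compiler::Node::new(apollo_compiler::ast::Directive {
            name: directive_names.inaccessible.clone(),
            arguments: Vec::new(),
        }));
        // Checked at the end of merge() to decide whether to call
        // add_core_feature_inaccessible() on the supergraph.
        self.needs_inaccessible = true;
    }
}
```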
@@ -327,6 +357,7 @@ fn merge_input_object_type(
         &mut self,
         types: &mut IndexMap<NamedType, ExtendedType>,
+        directive_names: &DirectiveNames,
         subgraph_name: Name,
         input_object_name: NamedType,
         input_object: &Node<InputObjectType>,
     ) {
         let existing_type = types
             .entry(input_object_name.clone())
             .or_insert(copy_input_object_type(input_object_name, input_object));
+
         if let ExtendedType::InputObject(obj) = existing_type {
             let join_type_directives =
                 join_type_applied_directive(subgraph_name, iter::empty(), false);
             let mutable_object = obj.make_mut();
             mutable_object.directives.extend(join_type_directives);
 
-            for (field_name, _field) in input_object.fields.iter() {
+            self.add_inaccessible(
+                directive_names,
+                &mut mutable_object.directives,
+                &input_object.directives,
+            );
+
+            for (field_name, field) in input_object.fields.iter() {
                 let existing_field = mutable_object.fields.entry(field_name.clone());
+
                 match existing_field {
                     Vacant(_i) => {
                         // TODO warning - mismatch on input fields
                     }
-                    Occupied(_i) => {
+                    Occupied(mut i) => {
+                        self.add_inaccessible(
+                            directive_names,
+                            &mut i.get_mut().make_mut().directives,
+                            &field.directives,
+                        );
                         // merge_options(&i.get_mut().description, &field.description);
                         // TODO check description
                         // TODO check type
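A note on the pervasive `make_mut()` calls in these hunks: `Node<T>` and `Component<T>` are `Arc`-backed smart pointers in apollo-compiler, so mutation is copy-on-write. A minimal sketch of the pattern:

```rust
use apollo_compiler::ast::FieldDefinition;
use apollo_compiler::name;
use apollo_compiler::Node;

fn rename_field(field: &mut Node<FieldDefinition>) {
    // make_mut() clones the underlying FieldDefinition only if other Node
    // handles still share it, then hands back a mutable reference.
    field.make_mut().name = name!("renamed");
}
```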
input_object.fields.iter() { let existing_field = mutable_object.fields.entry(field_name.clone()); + match existing_field { Vacant(_i) => { // TODO warning - mismatch on input fields } - Occupied(_i) => { + Occupied(mut i) => { + self.add_inaccessible( + directive_names, + &mut i.get_mut().make_mut().directives, + &field.directives, + ); // merge_options(&i.get_mut().description, &field.description); // TODO check description // TODO check type @@ -363,40 +407,46 @@ impl Merger { fn merge_interface_type( &mut self, types: &mut IndexMap, - metadata: &Option<&LinksMetadata>, + directive_names: &DirectiveNames, subgraph_name: Name, interface_name: NamedType, interface: &Node, ) { - let federation_identity = - metadata.and_then(|m| m.by_identity.get(&Identity::federation_identity())); - - let key_directive_name = federation_identity - .map(|link| link.directive_name_in_schema(&FEDERATION_KEY_DIRECTIVE_NAME_IN_SPEC)) - .unwrap_or(FEDERATION_KEY_DIRECTIVE_NAME_IN_SPEC); - let existing_type = types .entry(interface_name.clone()) .or_insert(copy_interface_type(interface_name, interface)); + if let ExtendedType::Interface(intf) = existing_type { - let key_directives = interface.directives.get_all(&key_directive_name); + let key_directives = interface.directives.get_all(&directive_names.key); let join_type_directives = join_type_applied_directive(subgraph_name, key_directives, false); let mutable_intf = intf.make_mut(); mutable_intf.directives.extend(join_type_directives); + self.add_inaccessible( + directive_names, + &mut mutable_intf.directives, + &interface.directives, + ); + for (field_name, field) in interface.fields.iter() { let existing_field = mutable_intf.fields.entry(field_name.clone()); match existing_field { Vacant(i) => { // TODO warning mismatch missing fields - i.insert(Component::new(FieldDefinition { + let f = i.insert(Component::new(FieldDefinition { name: field.name.clone(), description: field.description.clone(), arguments: vec![], ty: field.ty.clone(), directives: Default::default(), })); + + self.add_inaccessible( + directive_names, + &mut f.make_mut().directives, + &field.directives, + ); } Occupied(_i) => { // TODO check description @@ -414,41 +464,12 @@ impl Merger { fn merge_object_type( &mut self, types: &mut IndexMap, - metadata: &Option<&LinksMetadata>, + directive_names: &DirectiveNames, subgraph_name: Name, object_name: NamedType, object: &Node, ) { - let federation_identity = - metadata.and_then(|m| m.by_identity.get(&Identity::federation_identity())); - - let key_directive_name = federation_identity - .map(|link| link.directive_name_in_schema(&FEDERATION_KEY_DIRECTIVE_NAME_IN_SPEC)) - .unwrap_or(FEDERATION_KEY_DIRECTIVE_NAME_IN_SPEC); - - let requires_directive_name = federation_identity - .map(|link| link.directive_name_in_schema(&FEDERATION_REQUIRES_DIRECTIVE_NAME_IN_SPEC)) - .unwrap_or(FEDERATION_REQUIRES_DIRECTIVE_NAME_IN_SPEC); - - let provides_directive_name = federation_identity - .map(|link| link.directive_name_in_schema(&FEDERATION_PROVIDES_DIRECTIVE_NAME_IN_SPEC)) - .unwrap_or(FEDERATION_PROVIDES_DIRECTIVE_NAME_IN_SPEC); - - let external_directive_name = federation_identity - .map(|link| link.directive_name_in_schema(&FEDERATION_EXTERNAL_DIRECTIVE_NAME_IN_SPEC)) - .unwrap_or(FEDERATION_EXTERNAL_DIRECTIVE_NAME_IN_SPEC); - - let interface_object_directive_name = federation_identity - .map(|link| { - link.directive_name_in_schema(&FEDERATION_INTERFACEOBJECT_DIRECTIVE_NAME_IN_SPEC) - }) - .unwrap_or(FEDERATION_INTERFACEOBJECT_DIRECTIVE_NAME_IN_SPEC); - - let 
override_directive_name = federation_identity - .map(|link| link.directive_name_in_schema(&FEDERATION_OVERRIDE_DIRECTIVE_NAME_IN_SPEC)) - .unwrap_or(FEDERATION_OVERRIDE_DIRECTIVE_NAME_IN_SPEC); - - let is_interface_object = object.directives.has(&interface_object_directive_name); + let is_interface_object = object.directives.has(&directive_names.interface_object); let existing_type = types .entry(object_name.clone()) .or_insert(copy_object_type_stub( @@ -456,13 +477,19 @@ impl Merger { object, is_interface_object, )); + if let ExtendedType::Object(obj) = existing_type { - let key_directives = object.directives.get_all(&key_directive_name); + let key_directives = object.directives.get_all(&directive_names.key); let join_type_directives = join_type_applied_directive(subgraph_name.clone(), key_directives, false); let mutable_object = obj.make_mut(); mutable_object.directives.extend(join_type_directives); self.merge_descriptions(&mut mutable_object.description, &object.description); + self.add_inaccessible( + directive_names, + &mut mutable_object.directives, + &object.directives, + ); object.implements_interfaces.iter().for_each(|intf_name| { // IndexSet::insert deduplicates mutable_object @@ -499,29 +526,45 @@ impl Merger { &mut supergraph_field.make_mut().description, &field.description, ); + + self.add_inaccessible( + directive_names, + &mut supergraph_field.make_mut().directives, + &field.directives, + ); + for arg in field.arguments.iter() { - if let Some(_existing_arg) = supergraph_field.argument_by_name(&arg.name) { - // TODO add args - } else { - // TODO mismatch no args + let arguments = &mut supergraph_field.make_mut().arguments; + if let Some(index) = arguments.iter().position(|a| a.name == arg.name) { + if let Some(existing_arg) = arguments.get_mut(index) { + // TODO add args + let mutable_arg = existing_arg.make_mut(); + self.add_inaccessible( + directive_names, + &mut mutable_arg.directives, + &arg.directives, + ); + } else { + // TODO mismatch no args + } } } let requires_directive_option = field .directives - .get_all(&requires_directive_name) + .get_all(&directive_names.requires) .next() .and_then(|p| directive_string_arg_value(p, &FEDERATION_FIELDS_ARGUMENT_NAME)); let provides_directive_option = field .directives - .get_all(&provides_directive_name) + .get_all(&directive_names.provides) .next() .and_then(|p| directive_string_arg_value(p, &FEDERATION_FIELDS_ARGUMENT_NAME)); let overrides_directive_option = field .directives - .get_all(&override_directive_name) + .get_all(&directive_names.r#override) .next() .and_then(|p| { let overrides_from = @@ -533,7 +576,7 @@ impl Merger { let external_field = field .directives - .get_all(&external_directive_name) + .get_all(&directive_names.external) .next() .is_some(); @@ -555,7 +598,7 @@ impl Merger { } } else if let ExtendedType::Interface(intf) = existing_type { // TODO support interface object - let key_directives = object.directives.get_all(&key_directive_name); + let key_directives = object.directives.get_all(&directive_names.key); let join_type_directives = join_type_applied_directive(subgraph_name, key_directives, true); intf.make_mut().directives.extend(join_type_directives); @@ -566,6 +609,7 @@ impl Merger { fn merge_union_type( &mut self, types: &mut IndexMap, + directive_names: &DirectiveNames, subgraph_name: Name, union_name: NamedType, union: &Node, @@ -574,10 +618,16 @@ impl Merger { union_name.clone(), union.description.clone(), )); + if let ExtendedType::Union(u) = existing_type { let join_type_directives = 
join_type_applied_directive(subgraph_name.clone(), iter::empty(), false); u.make_mut().directives.extend(join_type_directives); + self.add_inaccessible( + directive_names, + &mut u.make_mut().directives, + &union.directives, + ); for union_member in union.members.iter() { // IndexSet::insert deduplicates @@ -600,8 +650,9 @@ impl Merger { } fn merge_scalar_type( - &self, + &mut self, types: &mut IndexMap, + directive_names: &DirectiveNames, subgraph_name: Name, scalar_name: NamedType, ty: &Node, @@ -609,14 +660,118 @@ impl Merger { let existing_type = types .entry(scalar_name.clone()) .or_insert(copy_scalar_type(scalar_name, ty)); + if let ExtendedType::Scalar(s) = existing_type { let join_type_directives = join_type_applied_directive(subgraph_name.clone(), iter::empty(), false); s.make_mut().directives.extend(join_type_directives); + self.add_inaccessible( + directive_names, + &mut s.make_mut().directives, + &ty.directives, + ); } else { // conflict? } } + + // generic so it handles ast::DirectiveList and schema::DirectiveList + fn add_inaccessible( + &mut self, + directive_names: &DirectiveNames, + new_directives: &mut Vec, + original_directives: &[I], + ) where + I: AsRef + From + Clone, + { + if original_directives + .iter() + .any(|d| d.as_ref().name == directive_names.inaccessible) + && !new_directives + .iter() + .any(|d| d.as_ref().name == INACCESSIBLE_DIRECTIVE_NAME_IN_SPEC) + { + self.needs_inaccessible = true; + + new_directives.push( + Directive { + name: INACCESSIBLE_DIRECTIVE_NAME_IN_SPEC, + arguments: vec![], + } + .into(), + ); + } + } +} + +fn filter_directives<'a, D, I, O>(deny_list: &IndexSet, directives: D) -> O +where + D: IntoIterator, + I: 'a + AsRef + Clone, + O: FromIterator, +{ + directives + .into_iter() + .filter(|d| !deny_list.contains(&d.as_ref().name)) + .cloned() + .collect() +} + +struct DirectiveNames { + key: Name, + requires: Name, + provides: Name, + external: Name, + interface_object: Name, + r#override: Name, + inaccessible: Name, +} + +impl DirectiveNames { + fn for_metadata(metadata: &Option<&LinksMetadata>) -> Self { + let federation_identity = + metadata.and_then(|m| m.by_identity.get(&Identity::federation_identity())); + + let key = federation_identity + .map(|link| link.directive_name_in_schema(&FEDERATION_KEY_DIRECTIVE_NAME_IN_SPEC)) + .unwrap_or(FEDERATION_KEY_DIRECTIVE_NAME_IN_SPEC); + + let requires = federation_identity + .map(|link| link.directive_name_in_schema(&FEDERATION_REQUIRES_DIRECTIVE_NAME_IN_SPEC)) + .unwrap_or(FEDERATION_REQUIRES_DIRECTIVE_NAME_IN_SPEC); + + let provides = federation_identity + .map(|link| link.directive_name_in_schema(&FEDERATION_PROVIDES_DIRECTIVE_NAME_IN_SPEC)) + .unwrap_or(FEDERATION_PROVIDES_DIRECTIVE_NAME_IN_SPEC); + + let external = federation_identity + .map(|link| link.directive_name_in_schema(&FEDERATION_EXTERNAL_DIRECTIVE_NAME_IN_SPEC)) + .unwrap_or(FEDERATION_EXTERNAL_DIRECTIVE_NAME_IN_SPEC); + + let interface_object = federation_identity + .map(|link| { + link.directive_name_in_schema(&FEDERATION_INTERFACEOBJECT_DIRECTIVE_NAME_IN_SPEC) + }) + .unwrap_or(FEDERATION_INTERFACEOBJECT_DIRECTIVE_NAME_IN_SPEC); + + let r#override = federation_identity + .map(|link| link.directive_name_in_schema(&FEDERATION_OVERRIDE_DIRECTIVE_NAME_IN_SPEC)) + .unwrap_or(FEDERATION_OVERRIDE_DIRECTIVE_NAME_IN_SPEC); + + let inaccessible = federation_identity + .map(|link| link.directive_name_in_schema(&INACCESSIBLE_DIRECTIVE_NAME_IN_SPEC)) + .unwrap_or(INACCESSIBLE_DIRECTIVE_NAME_IN_SPEC); + + Self { + key, + requires, + 
provides, + external, + interface_object, + r#override, + inaccessible, + } + } } const EXECUTABLE_DIRECTIVE_LOCATIONS: [DirectiveLocation; 8] = [ @@ -659,7 +814,7 @@ fn copy_enum_type(enum_name: Name, enum_type: &Node) -> ExtendedType { description: enum_type.description.clone(), name: enum_name, directives: Default::default(), - values: IndexMap::new(), + values: IndexMap::default(), })) } @@ -671,7 +826,7 @@ fn copy_input_object_type( description: input_object.description.clone(), name: input_object_name, directives: Default::default(), - fields: IndexMap::new(), + fields: IndexMap::default(), }; for (field_name, input_field) in input_object.fields.iter() { @@ -730,7 +885,7 @@ fn copy_object_type_stub( fn copy_fields( fields_to_copy: Iter>, ) -> IndexMap> { - let mut new_fields: IndexMap> = IndexMap::new(); + let mut new_fields: IndexMap> = IndexMap::default(); for (field_name, field) in fields_to_copy { // skip federation built-in queries if field_name == "_service" || field_name == "_entities" { @@ -767,7 +922,7 @@ fn copy_union_type(union_name: Name, description: Option>) -> Extended description, name: union_name, directives: Default::default(), - members: IndexSet::new(), + members: IndexSet::default(), })) } @@ -955,7 +1110,7 @@ fn link_purpose_enum_type() -> (Name, EnumType) { description: None, name: link_purpose_name.clone(), directives: Default::default(), - values: IndexMap::new(), + values: IndexMap::default(), }; let link_purpose_security_value = EnumValueDefinition { description: Some( @@ -1350,7 +1505,7 @@ fn join_graph_enum_type( description: None, name: join_graph_enum_name.clone(), directives: Default::default(), - values: IndexMap::new(), + values: IndexMap::default(), }; for (s, subgraph_name) in subgraphs_and_enum_values { let join_graph_applied_directive = Directive { @@ -1378,6 +1533,51 @@ fn join_graph_enum_type( (join_graph_enum_name, join_graph_enum_type) } +fn add_core_feature_inaccessible(supergraph: &mut Schema) { + // @link(url: "https://specs.apollo.dev/inaccessible/v0.2") + let spec = InaccessibleSpecDefinition::new(Version { major: 0, minor: 2 }, None); + + supergraph + .schema_definition + .make_mut() + .directives + .push(Component::new(Directive { + name: name!("link"), + arguments: vec![ + Node::new(Argument { + name: name!("url"), + value: spec.to_string().into(), + }), + Node::new(Argument { + name: name!("for"), + value: Node::new(Value::Enum(name!("SECURITY"))), + }), + ], + })); + + supergraph.directive_definitions.insert( + INACCESSIBLE_DIRECTIVE_NAME_IN_SPEC, + Node::new(DirectiveDefinition { + name: INACCESSIBLE_DIRECTIVE_NAME_IN_SPEC, + description: None, + arguments: vec![], + locations: vec![ + DirectiveLocation::FieldDefinition, + DirectiveLocation::Object, + DirectiveLocation::Interface, + DirectiveLocation::Union, + DirectiveLocation::ArgumentDefinition, + DirectiveLocation::Scalar, + DirectiveLocation::Enum, + DirectiveLocation::EnumValue, + DirectiveLocation::InputObject, + DirectiveLocation::InputFieldDefinition, + ], + repeatable: false, + }), + ); +} + // TODO use apollo_compiler::executable::FieldSet fn parse_keys<'a>( directives: impl Iterator> + Sized, @@ -1507,4 +1707,40 @@ mod tests { assert_snapshot!(schema.serialize()); } + + #[test] + fn test_inaccessible() { + let one_sdl = include_str!("./sources/connect/expand/merge/inaccessible.graphql"); + let two_sdl = include_str!("./sources/connect/expand/merge/inaccessible_2.graphql"); + + let mut subgraphs = ValidFederationSubgraphs::new(); + subgraphs + 
.add(ValidFederationSubgraph { + name: "inaccessible".to_string(), + url: "".to_string(), + schema: ValidFederationSchema::new( + Schema::parse_and_validate(one_sdl, "./inaccessible.graphql").unwrap(), + ) + .unwrap(), + }) + .unwrap(); + subgraphs + .add(ValidFederationSubgraph { + name: "inaccessible_2".to_string(), + url: "".to_string(), + schema: ValidFederationSchema::new( + Schema::parse_and_validate(two_sdl, "./inaccessible_2.graphql").unwrap(), + ) + .unwrap(), + }) + .unwrap(); + + let result = merge_federation_subgraphs(subgraphs).unwrap(); + + let schema = result.schema.into_inner(); + let validation = schema.clone().validate(); + assert!(validation.is_ok(), "{:?}", validation); + + assert_snapshot!(schema.serialize()); + } } diff --git a/apollo-federation/src/operation/mod.rs b/apollo-federation/src/operation/mod.rs index d9dea7294b..468d1e88d4 100644 --- a/apollo-federation/src/operation/mod.rs +++ b/apollo-federation/src/operation/mod.rs @@ -4,9 +4,9 @@ //! Each "conceptual" type consists of up to three actual types: a data type, an "element" //! type, and a selection type. //! - The data type records the data about the type. Things like a field name or fragment type -//! condition are in the data type. These types can be constructed and modified with plain rust. +//! condition are in the data type. These types can be constructed and modified with plain rust. //! - The element type contains the data type and maintains a key for the data. These types provide -//! APIs for modifications that keep the key up-to-date. +//! APIs for modifications that keep the key up-to-date. //! - The selection type contains the element type and, for composite fields, a subselection. //! //! For example, for fields, the data type is [`FieldData`], the element type is @@ -23,13 +23,14 @@ use std::sync::atomic; use std::sync::Arc; use std::sync::OnceLock; +use apollo_compiler::collections::IndexMap; +use apollo_compiler::collections::IndexSet; use apollo_compiler::executable; use apollo_compiler::name; use apollo_compiler::validation::Valid; use apollo_compiler::Name; use apollo_compiler::Node; -use indexmap::IndexMap; -use indexmap::IndexSet; +use serde::Serialize; use crate::error::FederationError; use crate::error::SingleFederationError; @@ -66,7 +67,7 @@ static NEXT_ID: atomic::AtomicUsize = atomic::AtomicUsize::new(1); /// /// Note that we shouldn't add `derive(Serialize, Deserialize)` to this without changing the types /// to be something like UUIDs. -#[derive(Clone, Debug, Eq, PartialEq, Hash)] +#[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize)] pub(crate) struct SelectionId(usize); impl SelectionId { @@ -122,7 +123,7 @@ impl Operation { document: &Valid, operation_name: Option<&str>, ) -> Result { - let operation = document.get_operation(operation_name).map_err(|_| { + let operation = document.operations.get(operation_name).map_err(|_| { FederationError::internal(format!("No operation named {operation_name:?}")) })?; let named_fragments = NamedFragments::new(&document.fragments, &schema); @@ -146,7 +147,7 @@ impl Operation { operation: self, has_defers: false, assigned_defer_labels: HashSet::new(), - defer_conditions: IndexMap::new(), + defer_conditions: IndexMap::default(), } // TODO(@TylerBloom): Once defer is implement, the above statement needs to be replaced // with the commented-out one below. 
This is part of FED-95 @@ -158,7 +159,7 @@ impl Operation { operation: self, has_defers: false, assigned_defer_labels: HashSet::new(), - defer_conditions: IndexMap::new(), + defer_conditions: IndexMap::default(), } } */ @@ -187,8 +188,9 @@ impl Operation { /// - For the type, stores the schema and the position in that schema instead of just the /// `NamedType`. /// - Stores selections in a map so they can be normalized efficiently. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub(crate) struct SelectionSet { + #[serde(skip)] pub(crate) schema: ValidFederationSchema, pub(crate) type_position: CompositeTypeDefinitionPosition, pub(crate) selections: Arc, @@ -208,8 +210,9 @@ mod selection_map { use std::ops::Deref; use std::sync::Arc; + use apollo_compiler::collections::IndexMap; use apollo_compiler::executable; - use indexmap::IndexMap; + use serde::Serialize; use crate::error::FederationError; use crate::error::SingleFederationError::Internal; @@ -232,7 +235,7 @@ mod selection_map { /// `IndexSet` since key computation is expensive (it involves sorting). This type is in its own /// module to prevent code from accidentally mutating the underlying map outside the mutation /// API. - #[derive(Debug, Clone, PartialEq, Eq, Default)] + #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize)] pub(crate) struct SelectionMap(IndexMap); impl Deref for SelectionMap { @@ -245,7 +248,7 @@ mod selection_map { impl SelectionMap { pub(crate) fn new() -> Self { - SelectionMap(IndexMap::new()) + SelectionMap(IndexMap::default()) } pub(crate) fn clear(&mut self) { @@ -603,24 +606,27 @@ pub(crate) use selection_map::SelectionValue; /// * directives have to be applied in the same order /// * directive arguments order does not matter (they get automatically sorted by their names). /// * selection cannot specify @defer directive -#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)] pub(crate) enum SelectionKey { Field { /// The field alias (if specified) or field name in the resulting selection set. response_name: Name, /// directives applied on the field + #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] directives: Arc, }, FragmentSpread { /// The name of the fragment. fragment_name: Name, /// Directives applied on the fragment spread (does not contain @defer). + #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] directives: Arc, }, InlineFragment { /// The optional type condition of the fragment. type_condition: Option, /// Directives applied on the fragment spread (does not contain @defer). + #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] directives: Arc, }, Defer { @@ -630,8 +636,21 @@ pub(crate) enum SelectionKey { } impl SelectionKey { + /// Returns true if the selection key is `__typename` *without directives*. pub(crate) fn is_typename_field(&self) -> bool { - matches!(self, SelectionKey::Field { response_name, .. } if *response_name == TYPENAME_FIELD) + matches!(self, SelectionKey::Field { response_name, directives } if *response_name == TYPENAME_FIELD && directives.is_empty()) + } + + /// Create a selection key for a specific field name. + /// + /// This is available for tests only as selection keys should not normally be created outside of + /// `HasSelectionKey::key`. 
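A usage sketch for the test-only helper defined just below (the `used_variables` test added near the end of this diff uses it the same way). Because `field_name` leaves the directive list empty, the resulting key only matches fields selected without directives; `set` here is a hypothetical `SelectionSet`:

    #[cfg(test)]
    fn get_plain_field<'a>(set: &'a SelectionSet, name: &str) -> Option<&'a Selection> {
        // SelectionMap derefs to IndexMap, so lookup is by SelectionKey.
        // A field selected as `subquery @skip(if: $x)` would NOT match here,
        // since its key carries a non-empty directive list.
        set.selections.get(&SelectionKey::field_name(name))
    }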
+ #[cfg(test)] + pub(crate) fn field_name(name: &str) -> Self { + SelectionKey::Field { + response_name: Name::new(name).unwrap(), + directives: Default::default(), + } } } @@ -641,7 +660,7 @@ pub(crate) trait HasSelectionKey { /// An analogue of the apollo-compiler type `Selection` that stores our other selection analogues /// instead of the apollo-compiler types. -#[derive(Debug, Clone, PartialEq, Eq, derive_more::IsVariant)] +#[derive(Debug, Clone, PartialEq, Eq, derive_more::IsVariant, Serialize)] pub(crate) enum Selection { Field(Arc), FragmentSpread(Arc), @@ -764,17 +783,7 @@ impl Selection { } // Note: Fragment spreads can be present in optimized operations. - pub(crate) fn selection_set(&self) -> Result, FederationError> { - match self { - Selection::Field(field_selection) => Ok(field_selection.selection_set.as_ref()), - Selection::FragmentSpread(_) => Ok(None), - Selection::InlineFragment(inline_fragment_selection) => { - Ok(Some(&inline_fragment_selection.selection_set)) - } - } - } - - pub(crate) fn try_selection_set(&self) -> Option<&SelectionSet> { + pub(crate) fn selection_set(&self) -> Option<&SelectionSet> { match self { Selection::Field(field_selection) => field_selection.selection_set.as_ref(), Selection::FragmentSpread(_) => None, @@ -785,7 +794,7 @@ impl Selection { } fn sub_selection_type_position(&self) -> Option { - Some(self.try_selection_set()?.type_position.clone()) + Some(self.selection_set()?.type_position.clone()) } pub(crate) fn conditions(&self) -> Result { @@ -814,21 +823,6 @@ impl Selection { } } - pub(crate) fn collect_variables<'selection>( - &'selection self, - variables: &mut HashSet<&'selection Name>, - ) -> Result<(), FederationError> { - match self { - Selection::Field(field) => field.collect_variables(variables), - Selection::InlineFragment(inline_fragment) => { - inline_fragment.collect_variables(variables) - } - Selection::FragmentSpread(_) => { - Err(FederationError::internal("unexpected fragment spread")) - } - } - } - pub(crate) fn has_defer(&self) -> bool { match self { Selection::Field(field_selection) => field_selection.has_defer(), @@ -919,7 +913,7 @@ impl Selection { &self, mapper: impl FnOnce(&SelectionSet) -> Result, FederationError>, ) -> Result { - if let Some(selection_set) = self.selection_set()? { + if let Some(selection_set) = self.selection_set() { self.with_updated_selection_set(mapper(selection_set)?) } else { // selection has no (sub-)selection set. @@ -1046,7 +1040,6 @@ impl Fragment { } mod field_selection { - use std::collections::HashSet; use std::hash::Hash; use std::hash::Hasher; use std::ops::Deref; @@ -1056,6 +1049,7 @@ mod field_selection { use apollo_compiler::executable; use apollo_compiler::Name; use apollo_compiler::Node; + use serde::Serialize; use crate::error::FederationError; use crate::operation::sort_arguments; @@ -1079,7 +1073,7 @@ mod field_selection { /// - For the field definition, stores the schema and the position in that schema instead of just /// the `FieldDefinition` (which contains no references to the parent type or schema). /// - Encloses collection types in `Arc`s to facilitate cheaper cloning. 
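Stepping back to the accessor change above: the old `selection_set()` returned a `Result` whose every arm was `Ok`, purely so callers could use `?`; with `try_selection_set` renamed over it, recursive walks lose that noise. A sketch of the new shape (the helper name is invented; compare `get_value_at_path` in the tests):

    // Sketch: nesting depth of a selection via the Option-returning accessor.
    fn depth(selection: &Selection) -> usize {
        match selection.selection_set() {
            // Leaf field or fragment spread: no sub-selection set.
            None => 1,
            // Recurse into the deepest sub-selection.
            Some(set) => 1 + set.selections.values().map(depth).max().unwrap_or(0),
        }
    }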
- #[derive(Debug, Clone, PartialEq, Eq)] + #[derive(Debug, Clone, PartialEq, Eq, Serialize)] pub(crate) struct FieldSelection { pub(crate) field: Field, pub(crate) selection_set: Option, @@ -1121,25 +1115,15 @@ mod field_selection { data.alias = Some(alias); Field::new(data) } - - pub(crate) fn collect_variables<'selection>( - &'selection self, - variables: &mut HashSet<&'selection Name>, - ) -> Result<(), FederationError> { - self.field.collect_variables(variables); - if let Some(set) = &self.selection_set { - set.collect_variables(variables)? - } - Ok(()) - } } /// The non-selection-set data of `FieldSelection`, used with operation paths and graph /// paths. - #[derive(Clone)] + #[derive(Clone, Serialize)] pub(crate) struct Field { data: FieldData, key: SelectionKey, + #[serde(serialize_with = "crate::display_helpers::serialize_as_debug_string")] sorted_arguments: Arc>>, } @@ -1285,36 +1269,6 @@ mod field_selection { pub(crate) fn as_path_element(&self) -> FetchDataPathElement { FetchDataPathElement::Key(self.response_name()) } - - pub(crate) fn collect_variables<'selection>( - &'selection self, - variables: &mut HashSet<&'selection Name>, - ) { - for arg in self.arguments.iter() { - collect_variables_from_argument(arg, variables) - } - for dir in self.directives.iter() { - collect_variables_from_directive(dir, variables) - } - } - } - - pub(crate) fn collect_variables_from_argument<'selection>( - argument: &'selection executable::Argument, - variables: &mut HashSet<&'selection Name>, - ) { - if let Some(v) = argument.value.as_variable() { - variables.insert(v); - } - } - - pub(crate) fn collect_variables_from_directive<'selection>( - directive: &'selection executable::Directive, - variables: &mut HashSet<&'selection Name>, - ) { - for arg in directive.arguments.iter() { - collect_variables_from_argument(arg, variables) - } } impl HasSelectionKey for Field { @@ -1326,7 +1280,7 @@ mod field_selection { // SiblingTypename indicates how the sibling __typename field should be restored. // PORT_NOTE: The JS version used the empty string to indicate unaliased sibling typenames. // Here we use an enum to make the distinction explicit. 
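The `serialize_with` paths used in these derives point at a `display_helpers` module that this diff doesn't touch, so the exact bodies are an assumption; with serde's `collect_str`, each helper is plausibly a one-liner:

    use serde::Serializer;

    // Assumed shape of crate::display_helpers::serialize_as_string: serialize
    // any Display type (e.g. a DirectiveList) as a single string value.
    fn serialize_as_string<T, S>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
    where
        T: std::fmt::Display,
        S: Serializer,
    {
        serializer.collect_str(value)
    }

    // Assumed shape of serialize_as_debug_string: same idea via Debug, for
    // values like Arc<Vec<Node<Argument>>> that have no Display impl.
    fn serialize_as_debug_string<T, S>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
    where
        T: std::fmt::Debug,
        S: Serializer,
    {
        serializer.collect_str(&format_args!("{value:?}"))
    }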
- #[derive(Debug, Clone)] + #[derive(Debug, Clone, Serialize)] pub(crate) enum SiblingTypename { Unaliased, Aliased(Name), // the sibling __typename has been aliased @@ -1341,12 +1295,15 @@ mod field_selection { } } - #[derive(Debug, Clone)] + #[derive(Debug, Clone, Serialize)] pub(crate) struct FieldData { + #[serde(skip)] pub(crate) schema: ValidFederationSchema, pub(crate) field_position: FieldDefinitionPosition, pub(crate) alias: Option, + #[serde(serialize_with = "crate::display_helpers::serialize_as_debug_string")] pub(crate) arguments: Arc>>, + #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] pub(crate) directives: Arc, pub(crate) sibling_typename: Option, } @@ -1417,6 +1374,7 @@ mod fragment_spread_selection { use apollo_compiler::executable; use apollo_compiler::Name; + use serde::Serialize; use crate::operation::is_deferred_selection; use crate::operation::sort_directives; @@ -1427,7 +1385,7 @@ mod fragment_spread_selection { use crate::schema::position::CompositeTypeDefinitionPosition; use crate::schema::ValidFederationSchema; - #[derive(Debug, Clone, PartialEq, Eq)] + #[derive(Debug, Clone, PartialEq, Eq, Serialize)] pub(crate) struct FragmentSpreadSelection { pub(crate) spread: FragmentSpread, pub(crate) selection_set: SelectionSet, @@ -1442,7 +1400,7 @@ mod fragment_spread_selection { /// An analogue of the apollo-compiler type `FragmentSpread` with these changes: /// - Stores the schema (may be useful for directives). /// - Encloses collection types in `Arc`s to facilitate cheaper cloning. - #[derive(Clone)] + #[derive(Clone, Serialize)] pub(crate) struct FragmentSpread { data: FragmentSpreadData, key: SelectionKey, @@ -1493,18 +1451,21 @@ mod fragment_spread_selection { } } - #[derive(Debug, Clone)] + #[derive(Debug, Clone, Serialize)] pub(crate) struct FragmentSpreadData { + #[serde(skip)] pub(crate) schema: ValidFederationSchema, pub(crate) fragment_name: Name, pub(crate) type_condition_position: CompositeTypeDefinitionPosition, // directives applied on the fragment spread selection + #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] pub(crate) directives: Arc, // directives applied within the fragment definition // // PORT_NOTE: The JS codebase combined the fragment spread's directives with the fragment // definition's directives. This was invalid GraphQL as those directives may not be applicable // on different locations. While we now keep track of those references, they are currently ignored. + #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] pub(crate) fragment_directives: Arc, pub(crate) selection_id: SelectionId, } @@ -1642,16 +1603,14 @@ impl FragmentSpreadData { } mod inline_fragment_selection { - use std::collections::HashSet; use std::hash::Hash; use std::hash::Hasher; use std::ops::Deref; use std::sync::Arc; use apollo_compiler::executable; - use apollo_compiler::Name; + use serde::Serialize; - use super::field_selection::collect_variables_from_directive; use crate::error::FederationError; use crate::link::graphql_definition::defer_directive_arguments; use crate::link::graphql_definition::DeferDirectiveArguments; @@ -1673,7 +1632,7 @@ mod inline_fragment_selection { /// - Stores the parent type explicitly, which means storing the position (in apollo-compiler, this /// is in the parent selection set). /// - Encloses collection types in `Arc`s to facilitate cheaper cloning. 
- #[derive(Debug, Clone, PartialEq, Eq)] + #[derive(Debug, Clone, PartialEq, Eq, Serialize)] pub(crate) struct InlineFragmentSelection { pub(crate) inline_fragment: InlineFragment, pub(crate) selection_set: SelectionSet, @@ -1696,14 +1655,6 @@ mod inline_fragment_selection { selection_set: self.selection_set.clone(), } } - - pub(crate) fn collect_variables<'selection>( - &'selection self, - variables: &mut HashSet<&'selection Name>, - ) -> Result<(), FederationError> { - self.inline_fragment.collect_variables(variables); - self.selection_set.collect_variables(variables) - } } impl HasSelectionKey for InlineFragmentSelection { @@ -1714,7 +1665,7 @@ mod inline_fragment_selection { /// The non-selection-set data of `InlineFragmentSelection`, used with operation paths and /// graph paths. - #[derive(Clone)] + #[derive(Clone, Serialize)] pub(crate) struct InlineFragment { data: InlineFragmentData, key: SelectionKey, @@ -1792,15 +1743,6 @@ mod inline_fragment_selection { condition.type_name().clone(), )) } - - pub(crate) fn collect_variables<'selection>( - &'selection self, - variables: &mut HashSet<&'selection Name>, - ) { - for dir in self.data.directives.iter() { - collect_variables_from_directive(dir, variables) - } - } } impl HasSelectionKey for InlineFragment { @@ -1809,11 +1751,13 @@ mod inline_fragment_selection { } } - #[derive(Debug, Clone)] + #[derive(Debug, Clone, Serialize)] pub(crate) struct InlineFragmentData { + #[serde(skip)] pub(crate) schema: ValidFederationSchema, pub(crate) parent_type_position: CompositeTypeDefinitionPosition, pub(crate) type_condition_position: Option, + #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] pub(crate) directives: Arc, pub(crate) selection_id: SelectionId, } @@ -1880,7 +1824,7 @@ where K: Eq + Hash, { fn new() -> Self { - Self(IndexMap::new()) + Self(IndexMap::default()) } fn insert(&mut self, key: K, value: V) { @@ -1942,8 +1886,6 @@ impl SelectionSet { }; Box::new( sel.selection_set() - .ok() - .flatten() .cloned() .into_iter() .flat_map(SelectionSet::split_top_level_fields) @@ -2013,7 +1955,7 @@ impl SelectionSet { type_position.type_name().clone(), source_text, )?; - let named_fragments = NamedFragments::new(&IndexMap::new(), &schema); + let named_fragments = NamedFragments::new(&IndexMap::default(), &schema); SelectionSet::from_selection_set(&selection_set, &named_fragments, &schema) } @@ -2192,9 +2134,9 @@ impl SelectionSet { &mut self, others: impl Iterator, ) -> Result<(), FederationError> { - let mut fields = IndexMap::new(); - let mut fragment_spreads = IndexMap::new(); - let mut inline_fragments = IndexMap::new(); + let mut fields = IndexMap::default(); + let mut fragment_spreads = IndexMap::default(); + let mut inline_fragments = IndexMap::default(); let target = Arc::make_mut(&mut self.selections); for other_selection in others { let other_key = other_selection.key(); @@ -2557,7 +2499,7 @@ impl SelectionSet { let mut sub_selection_updates: MultiIndexMap = MultiIndexMap::new(); for selection in [first, second].into_iter().chain(iter) { - if let Some(sub_selection_set) = selection.selection_set()? { + if let Some(sub_selection_set) = selection.selection_set() { sub_selection_updates.extend( sub_selection_set .selections @@ -2699,7 +2641,7 @@ impl SelectionSet { } } for selection in self.selections.values() { - selection_map.insert(if let Some(selection_set) = selection.selection_set()? 
{ + selection_map.insert(if let Some(selection_set) = selection.selection_set() { let type_if_abstract = selection .sub_selection_type_position() .and_then(|ty| ty.try_into().ok()); @@ -3007,7 +2949,7 @@ impl SelectionSet { } }) .collect::>(); - let selection_set = selection.selection_set()?; + let selection_set = selection.selection_set(); let updated_selection_set = selection_set .map(|selection_set| selection_set.with_field_aliased(&subselection_aliases)) .transpose()?; @@ -3100,24 +3042,6 @@ impl SelectionSet { fields } - pub(crate) fn used_variables(&self) -> Result, FederationError> { - let mut variables = HashSet::new(); - self.collect_variables(&mut variables)?; - let mut res: Vec = variables.into_iter().cloned().collect(); - res.sort(); - Ok(res) - } - - pub(crate) fn collect_variables<'selection>( - &'selection self, - variables: &mut HashSet<&'selection Name>, - ) -> Result<(), FederationError> { - for selection in self.selections.values() { - selection.collect_variables(variables)? - } - Ok(()) - } - pub(crate) fn validate( &self, _variable_definitions: &[Node], @@ -3126,7 +3050,7 @@ impl SelectionSet { Err(FederationError::internal("Invalid empty selection set")) } else { for selection in self.selections.values() { - if let Some(s) = selection.selection_set()? { + if let Some(s) = selection.selection_set() { s.validate(_variable_definitions)?; } } @@ -3934,7 +3858,7 @@ impl NamedFragments { // Note: We use IndexMap to stabilize the ordering of the result, which influences // the outcome of `map_to_expanded_selection_sets`. - let mut fragments_map: IndexMap = IndexMap::new(); + let mut fragments_map: IndexMap = IndexMap::default(); for fragment in fragments.values() { let mut fragment_usages: HashMap = HashMap::new(); NamedFragments::collect_fragment_usages(&fragment.selection_set, &mut fragment_usages); @@ -4003,11 +3927,11 @@ impl NamedFragments { /// want to consider to ignore the fragment for that subgraph, and that is when: /// 1. the subset that apply is actually empty. The fragment wouldn't be valid in this case anyway. /// 2. the subset is a single leaf field: in that case, using the one field directly is just shorter than using - /// the fragment, so we consider the fragment don't really apply to that subgraph. Technically, using the - /// fragment could still be of value if the fragment name is a lot smaller than the one field name, but it's - /// enough of a niche case that we ignore it. Note in particular that one sub-case of this rule that is likely - /// to be common is when the subset ends up being just `__typename`: this would basically mean the fragment - /// don't really apply to the subgraph, and that this will ensure this is the case. + /// the fragment, so we consider the fragment don't really apply to that subgraph. Technically, using the + /// fragment could still be of value if the fragment name is a lot smaller than the one field name, but it's + /// enough of a niche case that we ignore it. Note in particular that one sub-case of this rule that is likely + /// to be common is when the subset ends up being just `__typename`: this would basically mean the fragment + /// don't really apply to the subgraph, and that this will ensure this is the case. pub(crate) fn is_selection_set_worth_using(selection_set: &SelectionSet) -> bool { if selection_set.selections.len() == 0 { return false; @@ -4075,6 +3999,130 @@ impl RebasedFragments { } } +// Collect used variables from operation types. 
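A consumer sketch for this section's API (the `used_variables` test at the bottom of this diff does the same dance): the returned set is unordered, so callers sort before asserting. The helper name is illustrative only:

    // Sketch: stable, sorted names of all variables used by a selection set.
    #[cfg(test)]
    fn sorted_variable_names(set: &SelectionSet) -> Result<Vec<Name>, FederationError> {
        // used_variables() errors on named fragment spreads, hence the `?`.
        let mut names: Vec<Name> = set.used_variables()?.into_iter().cloned().collect();
        names.sort();
        Ok(names)
    }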
+ +fn collect_variables_from_value<'selection>( + value: &'selection executable::Value, + variables: &mut HashSet<&'selection Name>, +) { + match value { + executable::Value::Variable(v) => { + variables.insert(v); + } + executable::Value::List(list) => { + for value in list { + collect_variables_from_value(value, variables); + } + } + executable::Value::Object(object) => { + for (_key, value) in object { + collect_variables_from_value(value, variables); + } + } + _ => {} + } +} + +fn collect_variables_from_directive<'selection>( + directive: &'selection executable::Directive, + variables: &mut HashSet<&'selection Name>, +) { + for arg in directive.arguments.iter() { + collect_variables_from_value(&arg.value, variables) + } +} + +impl Field { + fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { + for arg in self.arguments.iter() { + collect_variables_from_value(&arg.value, variables) + } + for dir in self.directives.iter() { + collect_variables_from_directive(dir, variables) + } + } +} + +impl FieldSelection { + /// # Errors + /// Returns an error if the selection contains a named fragment spread. + fn collect_variables<'selection>( + &'selection self, + variables: &mut HashSet<&'selection Name>, + ) -> Result<(), FederationError> { + self.field.collect_variables(variables); + if let Some(set) = &self.selection_set { + set.collect_variables(variables)? + } + Ok(()) + } +} + +impl InlineFragment { + fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { + for dir in self.directives.iter() { + collect_variables_from_directive(dir, variables) + } + } +} + +impl InlineFragmentSelection { + /// # Errors + /// Returns an error if the selection contains a named fragment spread. + fn collect_variables<'selection>( + &'selection self, + variables: &mut HashSet<&'selection Name>, + ) -> Result<(), FederationError> { + self.inline_fragment.collect_variables(variables); + self.selection_set.collect_variables(variables) + } +} + +impl Selection { + /// # Errors + /// Returns an error if the selection contains a named fragment spread. + fn collect_variables<'selection>( + &'selection self, + variables: &mut HashSet<&'selection Name>, + ) -> Result<(), FederationError> { + match self { + Selection::Field(field) => field.collect_variables(variables), + Selection::InlineFragment(inline_fragment) => { + inline_fragment.collect_variables(variables) + } + Selection::FragmentSpread(_) => Err(FederationError::internal( + "collect_variables(): unexpected fragment spread", + )), + } + } +} + +impl SelectionSet { + /// Returns the variable names that are used by this selection set. + /// + /// # Errors + /// Returns an error if the selection set contains a named fragment spread. + pub(crate) fn used_variables(&self) -> Result, FederationError> { + let mut variables = HashSet::new(); + self.collect_variables(&mut variables)?; + Ok(variables) + } + + /// # Errors + /// Returns an error if the selection set contains a named fragment spread. + fn collect_variables<'selection>( + &'selection self, + variables: &mut HashSet<&'selection Name>, + ) -> Result<(), FederationError> { + for selection in self.selections.values() { + selection.collect_variables(variables)? + } + Ok(()) + } +} + +// Conversion between apollo-rs and apollo-federation types. 
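A usage sketch for the conversions this section introduces; the `Display` impls added below build on the same `TryFrom` path, so printing an operation is just formatting it. The wrapper function is hypothetical:

    // Sketch: hand an optimized operation back to apollo-compiler.
    #[cfg(test)]
    fn as_compiler_operation(op: &Operation) -> Result<executable::Operation, FederationError> {
        executable::Operation::try_from(op)
    }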
+ impl TryFrom<&Operation> for executable::Operation { type Error = FederationError; @@ -4250,11 +4298,13 @@ impl TryFrom for Valid { let mut document = executable::ExecutableDocument::new(); document.fragments = fragments; - document.insert_operation(operation); + document.operations.insert(operation); Ok(document.validate(value.schema.schema())?) } } +// Display implementations for the operation types. + impl Display for Operation { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let operation: executable::Operation = match self.try_into() { diff --git a/apollo-federation/src/operation/optimize.rs b/apollo-federation/src/operation/optimize.rs index 36cd4a7bbe..12d1642158 100644 --- a/apollo-federation/src/operation/optimize.rs +++ b/apollo-federation/src/operation/optimize.rs @@ -194,7 +194,7 @@ impl Selection { /// empty). Otherwise, we have no diff. fn minus(&self, other: &Selection) -> Result, FederationError> { if let (Some(self_sub_selection), Some(other_sub_selection)) = - (self.selection_set()?, other.selection_set()?) + (self.selection_set(), other.selection_set()) { let diff = self_sub_selection.minus(other_sub_selection)?; if !diff.is_empty() { @@ -215,7 +215,7 @@ impl Selection { /// - Otherwise, the intersection is same as `self`. fn intersection(&self, other: &Selection) -> Result, FederationError> { if let (Some(self_sub_selection), Some(other_sub_selection)) = - (self.selection_set()?, other.selection_set()?) + (self.selection_set(), other.selection_set()) { let common = self_sub_selection.intersection(other_sub_selection)?; if !common.is_empty() { @@ -1394,7 +1394,7 @@ impl SelectionSet { self.iter().any(|selection| { matches!(selection, Selection::FragmentSpread(_)) || selection - .try_selection_set() + .selection_set() .map(|subselection| subselection.contains_fragment_spread()) .unwrap_or(false) }) @@ -1536,6 +1536,8 @@ impl Operation { #[cfg(test)] mod tests { + use apollo_compiler::ExecutableDocument; + use super::*; use crate::operation::tests::*; @@ -3092,6 +3094,60 @@ mod tests { "###); } + #[test] + fn reuse_fragments_with_directive_on_typename() { + let schema = r#" + type Query { + t1: T + t2: T + t3: T + } + + type T { + a: Int + b: Int + c: Int + d: Int + } + "#; + let query = r#" + query A ($if: Boolean!) { + t1 { b a ...x } + t2 { ...x } + } + query B { + # Because this inline fragment is exactly the same shape as `x`, + # except for a `__typename` field, it may be tempting to reuse it. + # But `x.__typename` has a directive with a variable, and this query + # does not have that variable declared, so it can't be used. + t3 { ... 
on T { a c } } + } + fragment x on T { + __typename @include(if: $if) + a + c + } + "#; + let schema = parse_schema(schema); + let query = ExecutableDocument::parse_and_validate(schema.schema(), query, "query.graphql") + .unwrap(); + + let operation_a = + Operation::from_operation_document(schema.clone(), &query, Some("A")).unwrap(); + let operation_b = + Operation::from_operation_document(schema.clone(), &query, Some("B")).unwrap(); + let expanded_b = operation_b.expand_all_fragments_and_normalize().unwrap(); + + assert_optimized!(expanded_b, operation_a.named_fragments, @r###" + query B { + t3 { + a + c + } + } + "###); + } + /// /// empty branches removal /// diff --git a/apollo-federation/src/operation/rebase.rs b/apollo-federation/src/operation/rebase.rs index 1c9da5dc0d..858d931b3f 100644 --- a/apollo-federation/src/operation/rebase.rs +++ b/apollo-federation/src/operation/rebase.rs @@ -217,12 +217,12 @@ impl Field { /// /// There are 2 valid cases we want to allow: /// 1. either `parent_type` and `field_parent_type` are the same underlying type (same name) but from different underlying schema. Typically, - /// happens when we're building subgraph queries but using selections from the original query which is against the supergraph API schema. + /// happens when we're building subgraph queries but using selections from the original query which is against the supergraph API schema. /// 2. or they are not the same underlying type, but the field parent type is from an interface (or an interface object, which is the same - /// here), in which case we may be rebasing an interface field on one of the implementation type, which is ok. Note that we don't verify - /// that `parent_type` is indeed an implementation of `field_parent_type` because it's possible that this implementation relationship exists - /// in the supergraph, but not in any of the subgraph schema involved here. So we just let it be. Not that `rebase_on` will complain anyway - /// if the field name simply does not exist in `parent_type`. + /// here), in which case we may be rebasing an interface field on one of the implementation type, which is ok. Note that we don't verify + /// that `parent_type` is indeed an implementation of `field_parent_type` because it's possible that this implementation relationship exists + /// in the supergraph, but not in any of the subgraph schema involved here. So we just let it be. Not that `rebase_on` will complain anyway + /// if the field name simply does not exist in `parent_type`. 
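A condensed restatement of those two cases as a sketch (not the real body below, which works on `CompositeTypeDefinitionPosition`s and schemas rather than bare names):

    // Sketch only: rebasing is allowed when the type names agree (case 1) or
    // when the field's parent type is an interface / interface object (case 2).
    fn can_rebase_sketch(
        parent_type_name: &str,
        field_parent_type_name: &str,
        field_parent_is_interface: bool,
    ) -> bool {
        parent_type_name == field_parent_type_name || field_parent_is_interface
    }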
fn can_rebase_on( &self, parent_type: &CompositeTypeDefinitionPosition, @@ -579,6 +579,7 @@ impl InlineFragment { .into()) } else { let mut rebased_fragment_data = self.data().clone(); + rebased_fragment_data.parent_type_position = parent_type.clone(); rebased_fragment_data.type_condition_position = rebased_condition; rebased_fragment_data.schema = schema.clone(); Ok(InlineFragment::new(rebased_fragment_data)) @@ -836,8 +837,8 @@ impl NamedFragments { #[cfg(test)] mod tests { + use apollo_compiler::collections::IndexSet; use apollo_compiler::name; - use indexmap::IndexSet; use crate::operation::normalize_operation; use crate::operation::tests::parse_schema_and_operation; @@ -893,12 +894,12 @@ type U { "operation should have some fragments" ); - if let Some(operation) = executable_document.named_operations.get_mut("TestQuery") { + if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); @@ -979,12 +980,12 @@ type U { ); assert_eq!(2, executable_document.fragments.len()); - if let Some(operation) = executable_document.named_operations.get_mut("TestQuery") { + if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); @@ -1061,12 +1062,12 @@ type T2 implements I { "operation should have some fragments" ); - if let Some(operation) = executable_document.named_operations.get_mut("TestQuery") { + if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); @@ -1138,8 +1139,9 @@ type T implements I { "operation should have some fragments" ); - if let Some(operation) = executable_document.named_operations.get_mut("TestQuery") { - let mut interface_objects: IndexSet = IndexSet::new(); + if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { + let mut interface_objects: IndexSet = + IndexSet::default(); interface_objects.insert(InterfaceTypeDefinitionPosition { type_name: name!("I"), }); @@ -1236,12 +1238,12 @@ type T { "operation should have some fragments" ); - if let Some(operation) = executable_document.named_operations.get_mut("TestQuery") { + if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); @@ -1314,12 +1316,12 @@ type U { "operation should have some fragments" ); - if let Some(operation) = executable_document.named_operations.get_mut("TestQuery") { + if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); @@ -1387,12 +1389,12 @@ type T implements I { "operation should have some fragments" ); - if let Some(operation) = executable_document.named_operations.get_mut("TestQuery") { + if let Some(operation) = 
executable_document.operations.named.get_mut("TestQuery") { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); diff --git a/apollo-federation/src/operation/tests/mod.rs b/apollo-federation/src/operation/tests/mod.rs index aac5dabbb6..b2081d0bcc 100644 --- a/apollo-federation/src/operation/tests/mod.rs +++ b/apollo-federation/src/operation/tests/mod.rs @@ -1,9 +1,9 @@ use std::sync::Arc; +use apollo_compiler::collections::IndexSet; use apollo_compiler::name; use apollo_compiler::schema::Schema; use apollo_compiler::ExecutableDocument; -use indexmap::IndexSet; use super::normalize_operation; use super::Name; @@ -46,7 +46,7 @@ pub(super) fn parse_operation(schema: &ValidFederationSchema, query: &str) -> Op "query.graphql", ) .unwrap(); - let operation = executable_document.get_operation(None).unwrap(); + let operation = executable_document.operations.get(None).unwrap(); let named_fragments = NamedFragments::new(&executable_document.fragments, schema); let selection_set = SelectionSet::from_selection_set(&operation.selection_set, &named_fragments, schema) @@ -102,14 +102,15 @@ type Foo { let (schema, mut executable_document) = parse_schema_and_operation(operation_with_named_fragment); if let Some(operation) = executable_document - .named_operations + .operations + .named .get_mut("NamedFragmentQuery") { let mut normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); normalized_operation.named_fragments = Default::default(); @@ -158,12 +159,12 @@ type Foo { "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_with_named_fragment); - if let Some((_, operation)) = executable_document.named_operations.first_mut() { + if let Some((_, operation)) = executable_document.operations.named.first_mut() { let mut normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); normalized_operation.named_fragments = Default::default(); @@ -197,14 +198,15 @@ type Query { let (schema, mut executable_document) = parse_schema_and_operation(operation_with_introspection); if let Some(operation) = executable_document - .named_operations + .operations + .named .get_mut("TestIntrospectionQuery") { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); @@ -234,12 +236,12 @@ type T { } "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_string); - if let Some((_, operation)) = executable_document.named_operations.first_mut() { + if let Some((_, operation)) = executable_document.operations.named.first_mut() { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query Test { @@ -277,12 +279,12 @@ type T { } "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_with_directives); - if let Some((_, operation)) = executable_document.named_operations.first_mut() { + if let Some((_, operation)) = executable_document.operations.named.first_mut() { let normalized_operation = 
normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query Test($skipIf: Boolean!) { @@ -323,12 +325,12 @@ type T { "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_with_directives_different_arg_order); - if let Some((_, operation)) = executable_document.named_operations.first_mut() { + if let Some((_, operation)) = executable_document.operations.named.first_mut() { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query Test($skipIf: Boolean!) { @@ -367,12 +369,12 @@ type T { "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_one_field_with_directives); - if let Some((_, operation)) = executable_document.named_operations.first_mut() { + if let Some((_, operation)) = executable_document.operations.named.first_mut() { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query Test($skipIf: Boolean!) { @@ -413,12 +415,12 @@ type T { "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_different_directives); - if let Some((_, operation)) = executable_document.named_operations.first_mut() { + if let Some((_, operation)) = executable_document.operations.named.first_mut() { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query Test($skip1: Boolean!, $skip2: Boolean!) 
{ @@ -464,12 +466,12 @@ type T { } "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_defer_fields); - if let Some((_, operation)) = executable_document.named_operations.first_mut() { + if let Some((_, operation)) = executable_document.operations.named.first_mut() { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query Test { @@ -530,12 +532,12 @@ type V { } "#; let (schema, mut executable_document) = parse_schema_and_operation(nested_operation); - if let Some((_, operation)) = executable_document.named_operations.first_mut() { + if let Some((_, operation)) = executable_document.operations.named.first_mut() { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query Test { @@ -589,12 +591,12 @@ type T { } "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_with_fragments); - if let Some((_, operation)) = executable_document.named_operations.first_mut() { + if let Some((_, operation)) = executable_document.operations.named.first_mut() { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query Test { @@ -635,12 +637,12 @@ type T { "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_fragments_with_directives); - if let Some((_, operation)) = executable_document.named_operations.first_mut() { + if let Some((_, operation)) = executable_document.operations.named.first_mut() { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query Test($skipIf: Boolean!) { @@ -685,12 +687,12 @@ type T { "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_fragments_with_directives_args_order); - if let Some((_, operation)) = executable_document.named_operations.first_mut() { + if let Some((_, operation)) = executable_document.operations.named.first_mut() { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query Test($skipIf: Boolean!) { @@ -733,12 +735,12 @@ type T { "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_one_fragment_with_directive); - if let Some((_, operation)) = executable_document.named_operations.first_mut() { + if let Some((_, operation)) = executable_document.operations.named.first_mut() { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query Test($skipIf: Boolean!) 
{ @@ -781,12 +783,12 @@ type T { "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_fragments_with_different_directive); - if let Some((_, operation)) = executable_document.named_operations.first_mut() { + if let Some((_, operation)) = executable_document.operations.named.first_mut() { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query Test($skip1: Boolean!, $skip2: Boolean!) { @@ -833,12 +835,12 @@ type T { "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_fragments_with_defer); - if let Some((_, operation)) = executable_document.named_operations.first_mut() { + if let Some((_, operation)) = executable_document.operations.named.first_mut() { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query Test { @@ -901,12 +903,12 @@ type V { } "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_nested_fragments); - if let Some((_, operation)) = executable_document.named_operations.first_mut() { + if let Some((_, operation)) = executable_document.operations.named.first_mut() { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query Test { @@ -947,12 +949,12 @@ type Foo { } "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_with_typename); - if let Some(operation) = executable_document.named_operations.get_mut("TestQuery") { + if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query TestQuery { @@ -986,12 +988,12 @@ type Foo { "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_with_single_typename); - if let Some(operation) = executable_document.named_operations.get_mut("TestQuery") { + if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { let normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); let expected = r#"query TestQuery { @@ -1031,8 +1033,8 @@ scalar FieldSet "#; let (schema, mut executable_document) = parse_schema_and_operation(operation_with_intf_object_typename); - if let Some(operation) = executable_document.named_operations.get_mut("TestQuery") { - let mut interface_objects: IndexSet<InterfaceTypeDefinitionPosition> = IndexSet::new(); + if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { + let mut interface_objects: IndexSet<InterfaceTypeDefinitionPosition> = IndexSet::default(); interface_objects.insert(InterfaceTypeDefinitionPosition { type_name: name!("Foo"), }); @@ -1147,7 +1149,7 @@ fn get_value_at_path<'a>(ss: &'a SelectionSet, path: &[Name]) -> Option<&'a Sele Some(value) } else { // Recursive case - match value.selection_set().unwrap() { + match value.selection_set() { None => None, // Error: Sub-selection expected, but not found.
Some(ss) => get_value_at_path(ss, rest), } @@ -1184,7 +1186,7 @@ mod make_selection_tests { fn test_make_selection_order() { let (schema, executable_document) = parse_schema_and_operation(SAMPLE_OPERATION_DOC); let normalized_operation = normalize_operation( - executable_document.get_operation(None).unwrap(), + executable_document.operations.get(None).unwrap(), Default::default(), &schema, &Default::default(), @@ -1197,7 +1199,7 @@ mod make_selection_tests { // Create a new foo with a different selection order using `make_selection`. let clone_selection_at_path = |base: &Selection, path: &[Name]| { - let base_selection_set = base.selection_set().unwrap().unwrap(); + let base_selection_set = base.selection_set().unwrap(); let selection = get_value_at_path(base_selection_set, path).expect("path should exist"); let subselections = SelectionSet::from_selection( base_selection_set.type_position.clone(), @@ -1235,7 +1237,7 @@ mod lazy_map_tests { if !pred(s) { return Ok(SelectionMapperReturn::None); } - match s.selection_set()? { + match s.selection_set() { // Base case: leaf field None => Ok(s.clone().into()), @@ -1283,7 +1285,7 @@ mod lazy_map_tests { fn test_lazy_map() { let (schema, executable_document) = parse_schema_and_operation(SAMPLE_OPERATION_DOC); let normalized_operation = normalize_operation( - executable_document.get_operation(None).unwrap(), + executable_document.operations.get(None).unwrap(), Default::default(), &schema, &Default::default(), @@ -1341,7 +1343,7 @@ mod lazy_map_tests { fn test_lazy_map2() { let (schema, executable_document) = parse_schema_and_operation(SAMPLE_OPERATION_DOC); let normalized_operation = normalize_operation( - executable_document.get_operation(None).unwrap(), + executable_document.operations.get(None).unwrap(), Default::default(), &schema, &Default::default(), @@ -1551,12 +1553,12 @@ fn test_expand_all_fragments1() { } "#; let (schema, executable_document) = parse_schema_and_operation(operation_with_named_fragment); - if let Ok(operation) = executable_document.get_operation(None) { + if let Ok(operation) = executable_document.operations.get(None) { let mut normalized_operation = normalize_operation( operation, NamedFragments::new(&executable_document.fragments, &schema), &schema, - &IndexSet::new(), + &IndexSet::default(), ) .unwrap(); normalized_operation.named_fragments = Default::default(); @@ -1576,3 +1578,64 @@ fn test_expand_all_fragments1() { "###); } } + +#[test] +fn used_variables() { + let schema = r#" + input Ints { a: Int } + input LInts { a: [Int], b: LInts } + type Query { + f(ints: [Int]): Int + g(ints: Ints): Int + h(ints: LInts): Int + subquery: Query + } + "#; + let query = r#" + query ($a: Int, $b: Int, $c: Int, $d: Int) { + f(ints: [1, $a, 2]) + g(ints: { a: $b }) + subquery { + h(ints: { + b: { + a: [$d, $d] + b: { + a: [$c, 3, 4] + } + } + }) + } + } + "#; + + let valid = parse_schema(schema); + let operation = Operation::parse(valid, query, "used_variables.graphql", None).unwrap(); + + let mut variables = operation + .selection_set + .used_variables() + .unwrap() + .into_iter() + .collect::<Vec<_>>(); + variables.sort(); + assert_eq!(variables, ["a", "b", "c", "d"]); + + let Selection::Field(subquery) = operation + .selection_set + .selections + .get(&SelectionKey::field_name("subquery")) + .unwrap() + else { + unreachable!(); + }; + let mut variables = subquery + .selection_set + .as_ref() + .unwrap() + .used_variables() + .unwrap() + .into_iter() + .collect::<Vec<_>>(); + variables.sort(); + assert_eq!(variables, ["c", "d"], "works for a
subset of the query"); +} diff --git a/apollo-federation/src/query_graph/build_query_graph.rs b/apollo-federation/src/query_graph/build_query_graph.rs index 972a2046a0..929e4b8d0e 100644 --- a/apollo-federation/src/query_graph/build_query_graph.rs +++ b/apollo-federation/src/query_graph/build_query_graph.rs @@ -1,12 +1,12 @@ use std::sync::Arc; +use apollo_compiler::collections::IndexMap; +use apollo_compiler::collections::IndexSet; use apollo_compiler::schema::DirectiveList as ComponentDirectiveList; use apollo_compiler::schema::ExtendedType; use apollo_compiler::validation::Valid; use apollo_compiler::Name; use apollo_compiler::Schema; -use indexmap::IndexMap; -use indexmap::IndexSet; use petgraph::graph::EdgeIndex; use petgraph::graph::NodeIndex; use petgraph::visit::EdgeRef; @@ -116,10 +116,10 @@ impl BaseQueryGraphBuilder { query_graph.sources.insert(source.clone(), schema); query_graph .types_to_nodes_by_source - .insert(source.clone(), IndexMap::new()); + .insert(source.clone(), IndexMap::default()); query_graph .root_kinds_to_nodes_by_source - .insert(source.clone(), IndexMap::new()); + .insert(source.clone(), IndexMap::default()); Self { query_graph } } @@ -196,7 +196,7 @@ impl BaseQueryGraphBuilder { self.query_graph .types_to_nodes_mut()? .entry(pos.type_name().clone()) - .or_insert_with(IndexSet::new) + .or_default() .insert(node); } Ok(node) @@ -291,7 +291,7 @@ impl SchemaQueryGraphBuilder { if let Some(subgraph_metadata) = self.base.query_graph.schema()?.subgraph_metadata() { Ok(subgraph_metadata .external_metadata() - .is_external(field_definition_position)?) + .is_external(field_definition_position)) } else { Ok(false) } @@ -1007,7 +1007,7 @@ impl FederatedQueryGraphBuilder { } fn add_federated_root_nodes(&mut self) -> Result<(), FederationError> { - let mut root_kinds = IndexSet::new(); + let mut root_kinds = IndexSet::default(); for (source, root_kinds_to_nodes) in &self.base.query_graph.root_kinds_to_nodes_by_source { if *source == self.base.query_graph.current_source { continue; @@ -1023,15 +1023,15 @@ impl FederatedQueryGraphBuilder { } fn copy_types_to_nodes(&mut self) -> Result<(), FederationError> { - let mut federated_type_to_nodes = IndexMap::new(); + let mut federated_type_to_nodes = IndexMap::default(); for (source, types_to_nodes) in &self.base.query_graph.types_to_nodes_by_source { if *source == self.base.query_graph.current_source { continue; } for (type_name, nodes) in types_to_nodes { - let federated_nodes = federated_type_to_nodes + let federated_nodes: &mut IndexSet<_> = federated_type_to_nodes .entry(type_name.clone()) - .or_insert_with(IndexSet::new); + .or_default(); for node in nodes { federated_nodes.insert(*node); } @@ -1867,7 +1867,7 @@ impl FederatedQueryGraphBuilder { for edge in self.base.query_graph.graph.edge_indices() { let edge_weight = self.base.query_graph.edge_weight(edge)?; let (_, tail) = self.base.query_graph.edge_endpoints(edge)?; - let mut non_trivial_followups = IndexSet::new(); + let mut non_trivial_followups = IndexSet::default(); for followup_edge_ref in self .base .query_graph @@ -1953,7 +1953,7 @@ struct FederatedQueryGraphBuilderSubgraphs { impl FederatedQueryGraphBuilderSubgraphs { fn new(base: &BaseQueryGraphBuilder) -> Result { let mut subgraphs = FederatedQueryGraphBuilderSubgraphs { - map: IndexMap::new(), + map: IndexMap::default(), }; for (source, schema) in &base.query_graph.sources { if *source == base.query_graph.current_source { @@ -2053,11 +2053,11 @@ fn resolvable_key_applications<'doc>( #[cfg(test)] mod tests { + 
use apollo_compiler::collections::IndexMap; + use apollo_compiler::collections::IndexSet; use apollo_compiler::name; use apollo_compiler::Name; use apollo_compiler::Schema; - use indexmap::IndexMap; - use indexmap::IndexSet; use petgraph::graph::NodeIndex; use petgraph::visit::EdgeRef; use petgraph::Direction; @@ -2107,7 +2107,7 @@ mod tests { head: NodeIndex, field_names: IndexSet<Name>, ) -> Result<IndexMap<Name, NodeIndex>, FederationError> { - let mut result = IndexMap::new(); + let mut result = IndexMap::default(); for field_name in field_names { // PORT_NOTE: In the JS codebase, there were a lot of asserts here, but they were all // duplicated with single_edge() (or they tested the JS codebase's graph representation, @@ -2181,7 +2181,7 @@ mod tests { .keys() .cloned() .collect::<IndexSet<_>>(), - IndexSet::from([SchemaRootDefinitionKind::Query]) + IndexSet::from_iter([SchemaRootDefinitionKind::Query]) ); let root_node = query_graph @@ -2207,7 +2207,7 @@ mod tests { let root_fields = named_edges( &query_graph, *root_node, - IndexSet::from([name!("__typename"), name!("t1")]), + IndexSet::from_iter([name!("__typename"), name!("t1")]), )?; let root_typename_tail = root_fields.get("__typename").unwrap(); @@ -2241,7 +2241,7 @@ mod tests { let t1_fields = named_edges( &query_graph, *t1_node, - IndexSet::from([name!("__typename"), name!("f1"), name!("f2"), name!("f3")]), + IndexSet::from_iter([name!("__typename"), name!("f1"), name!("f2"), name!("f3")]), )?; let t1_typename_tail = t1_fields.get("__typename").unwrap(); @@ -2311,7 +2311,7 @@ mod tests { let t2_fields = named_edges( &query_graph, *t2_node, - IndexSet::from([name!("__typename"), name!("t")]), + IndexSet::from_iter([name!("__typename"), name!("t")]), )?; let t2_typename_tail = t2_fields.get("__typename").unwrap(); diff --git a/apollo-federation/src/query_graph/condition_resolver.rs b/apollo-federation/src/query_graph/condition_resolver.rs index 0b53423604..02f709c618 100644 --- a/apollo-federation/src/query_graph/condition_resolver.rs +++ b/apollo-federation/src/query_graph/condition_resolver.rs @@ -3,7 +3,7 @@ // trait directly using `ConditionResolverCache`.
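// A minimal sketch (not code from this PR) of why the constructor calls change throughout
// these hunks: `apollo_compiler::collections::IndexMap`/`IndexSet` are assumed here to be
// the `indexmap` types re-exported with a non-std default hasher. `IndexSet::new()` and
// `IndexSet::from([..])` are only provided for the std `RandomState` hasher, so the code
// moves to `Default` and `FromIterator`:
use apollo_compiler::collections::IndexSet;

fn index_set_construction_sketch() {
    // `IndexSet::new()` would not compile against a custom default hasher.
    let mut set: IndexSet<&str> = IndexSet::default();
    set.insert("a");
    // `FromIterator` is hasher-agnostic, unlike `From<[T; N]>`.
    let superset: IndexSet<&str> = IndexSet::from_iter(["a", "b"]);
    assert!(superset.is_superset(&set));
}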
use std::sync::Arc; -use indexmap::IndexMap; +use apollo_compiler::collections::IndexMap; use petgraph::graph::EdgeIndex; use crate::error::FederationError; diff --git a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs b/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs index 9a4f6f72f0..ef354eaef1 100644 --- a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs +++ b/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs @@ -5,6 +5,8 @@ use std::ops::Deref; use std::sync::Arc; use apollo_compiler::ast::FieldDefinition; +use apollo_compiler::collections::IndexMap; +use apollo_compiler::collections::IndexSet; use apollo_compiler::executable; use apollo_compiler::name; use apollo_compiler::schema::Component; @@ -29,8 +31,6 @@ use apollo_compiler::schema::UnionType; use apollo_compiler::validation::Valid; use apollo_compiler::Name; use apollo_compiler::Node; -use indexmap::IndexMap; -use indexmap::IndexSet; use lazy_static::lazy_static; use time::OffsetDateTime; @@ -179,8 +179,8 @@ fn collect_empty_subgraphs( let graph_directive_definition = join_spec_definition.graph_directive_definition(supergraph_schema)?; let graph_enum = join_spec_definition.graph_enum_definition(supergraph_schema)?; - let mut federation_spec_definitions = IndexMap::new(); - let mut graph_enum_value_name_to_subgraph_name = IndexMap::new(); + let mut federation_spec_definitions = IndexMap::default(); + let mut graph_enum_value_name_to_subgraph_name = IndexMap::default(); for (enum_value_name, enum_value_definition) in graph_enum.values.iter() { let graph_application = enum_value_definition .directives @@ -505,7 +505,7 @@ fn add_empty_type( } let mut type_info = TypeInfo { name: type_definition_position.type_name().clone(), - subgraph_info: IndexMap::new(), + subgraph_info: IndexMap::default(), }; for type_directive_application in type_directive_applications { let subgraph = get_subgraph( @@ -1416,12 +1416,7 @@ fn add_subgraph_input_field( /// Parse a string encoding a type reference. fn decode_type(type_: &str) -> Result<Type, FederationError> { - Type::parse(type_, "").map_err(|_| { - SingleFederationError::InvalidGraphQL { - message: format!("Cannot parse type \"{}\"", type_), - } - .into() - }) + Ok(Type::parse(type_, "")?) } fn get_subgraph<'subgraph>( @@ -1549,7 +1544,7 @@ impl IntoIterator for ValidFederationSubgraphs { lazy_static! { static ref EXECUTABLE_DIRECTIVE_LOCATIONS: IndexSet<DirectiveLocation> = { - IndexSet::from([ + [ DirectiveLocation::Query, DirectiveLocation::Mutation, DirectiveLocation::Subscription, @@ -1558,7 +1553,9 @@ lazy_static!
{ DirectiveLocation::FragmentSpread, DirectiveLocation::InlineFragment, DirectiveLocation::VariableDefinition, - ]) + ] + .into_iter() + .collect() }; } diff --git a/apollo-federation/src/query_graph/graph_path.rs b/apollo-federation/src/query_graph/graph_path.rs index 7be5026d17..b65736311d 100644 --- a/apollo-federation/src/query_graph/graph_path.rs +++ b/apollo-federation/src/query_graph/graph_path.rs @@ -9,16 +9,21 @@ use std::sync::atomic; use std::sync::Arc; use apollo_compiler::ast::Value; +use apollo_compiler::collections::IndexMap; +use apollo_compiler::collections::IndexSet; use apollo_compiler::executable::DirectiveList; -use indexmap::IndexMap; -use indexmap::IndexSet; +use itertools::Itertools; use petgraph::graph::EdgeIndex; use petgraph::graph::NodeIndex; use petgraph::visit::EdgeRef; +use tracing::debug; +use tracing::debug_span; +use crate::display_helpers::write_indented_lines; +use crate::display_helpers::DisplayOption; +use crate::display_helpers::DisplaySlice; +use crate::display_helpers::State as IndentedFormatter; use crate::error::FederationError; -use crate::indented_display::write_indented_lines; -use crate::indented_display::State as IndentedFormatter; use crate::is_leaf_type; use crate::link::federation_spec_definition::get_federation_spec_definition_from_subgraph; use crate::link::graphql_definition::BooleanOrVariable; @@ -94,7 +99,7 @@ use crate::schema::ValidFederationSchema; // in the Rust code we don't have a distinguished type for that case. We instead check this at // runtime (at the callsites that require root nodes). This means the `RootPath` type in the // JS codebase is replaced with this one. -#[derive(Clone)] +#[derive(Clone, serde::Serialize)] pub(crate) struct GraphPath<TTrigger, TEdge> where TTrigger: Eq + Hash, @@ -103,6 +108,7 @@ where EdgeIndex: Into<TEdge>, { /// The query graph of which this is a path. + #[serde(skip)] graph: Arc<QueryGraph>, /// The node at which the path starts. This should be the head of the first non-`None` edge in /// the path if such edge exists, but if there are only `None` edges (or if there are zero @@ -133,9 +139,13 @@ where /// to be confused with the `@override` directive, which is completely separate). /// /// This array stores the IDs associated with this path. + // TODO: There is a note on `OverrideId` to not add a (de)serialize derive. Once that is + // addressed, remove this skip + #[serde(skip)] own_path_ids: Arc<IndexSet<OverrideId>>, /// This array stores the IDs of paths that override this one. (See docs for `own_path_ids` for /// more info). + #[serde(skip)] overriding_path_ids: Arc<IndexSet<OverrideId>>, /// Names of all the possible runtime types the tail of the path can be. runtime_types_of_tail: Arc<IndexSet<ObjectTypeDefinitionPosition>>, @@ -144,6 +154,8 @@ where runtime_types_before_tail_if_last_is_cast: Option<Arc<IndexSet<ObjectTypeDefinitionPosition>>>, /// If the trigger of the last edge in the `edges` array was an operation element with a /// `@defer` application, then the arguments of that application. + // TODO(@TylerBloom): Add in once defer is supported. + #[serde(skip)] defer_on_tail: Option<DeferDirectiveArguments>, } @@ -201,7 +213,7 @@ pub(crate) enum GraphPathTrigger { Transition(Arc<QueryGraphEdgeTransition>), } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, serde::Serialize)] pub(crate) struct SubgraphEnteringEdgeInfo { /// The index within the `edges` array. index: usize, @@ -238,7 +250,7 @@ pub(crate) type GraphPathItem<'path, TTrigger, TEdge> = // codebase is replaced with this one.
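// A hedged sketch of the `tracing` span-guard idiom that the instrumentation added
// throughout this file below relies on; the span name and messages here are
// illustrative, not from this file. Entering a span scopes subsequent events to it,
// and dropping the guard early (as several call sites below do before returning)
// closes the scope at the intended boundary.
use tracing::{debug, debug_span};

fn span_guard_sketch() {
    let span = debug_span!("testing candidate edge");
    let guard = span.enter();
    debug!("recorded inside the span");
    drop(guard); // explicitly leave the span before the code that follows
    debug!("recorded outside the span");
}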
pub(crate) type OpGraphPath = GraphPath<OpGraphPathTrigger, Option<EdgeIndex>>; -#[derive(Debug, Clone, PartialEq, Eq, Hash, derive_more::From)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, derive_more::From, serde::Serialize)] pub(crate) enum OpGraphPathTrigger { OpPathElement(OpPathElement), Context(OpGraphPathContext), @@ -254,7 +266,7 @@ impl Display for OpGraphPathTrigger { } /// A path of operation elements within a GraphQL operation. -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, serde::Serialize)] pub(crate) struct OpPath(pub(crate) Vec<Arc<OpPathElement>>); impl Deref for OpPath { @@ -279,7 +291,7 @@ impl std::fmt::Display for OpPath { } } -#[derive(Debug, Clone, PartialEq, Eq, Hash, derive_more::From)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, derive_more::From, serde::Serialize)] pub(crate) enum OpPathElement { Field(Field), InlineFragment(InlineFragment), @@ -471,7 +483,7 @@ impl From for OpGraphPathTrigger { /// Records, as we walk a path within a GraphQL operation, important directives encountered /// (currently `@include` and `@skip` with their conditions). -#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Default, serde::Serialize)] pub(crate) struct OpGraphPathContext { /// A list of conditionals (e.g. `[{ kind: Include, value: true}, { kind: Skip, value: $foo }]`) /// in the reverse order in which they were applied (so the first element is the inner-most @@ -523,7 +535,7 @@ impl Display for OpGraphPathContext { /// for this by splitting a path into multiple paths (one for each possible outcome). The common /// example is abstract types, where we may end up taking a different edge depending on the runtime /// type (e.g. during type explosion). -#[derive(Clone)] +#[derive(Clone, serde::Serialize)] pub(crate) struct SimultaneousPaths(pub(crate) Vec<Arc<OpGraphPath>>); impl SimultaneousPaths { @@ -562,7 +574,7 @@ impl std::fmt::Display for SimultaneousPaths { // PORT_NOTE: The JS codebase stored a `ConditionResolver` callback here, but it was the same for // a given traversal (and cached resolution across the traversal), so we accordingly store it in // `QueryPlanTraversal` and pass it down when needed instead. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, serde::Serialize)] pub(crate) struct SimultaneousPathsWithLazyIndirectPaths { pub(crate) paths: SimultaneousPaths, pub(crate) context: OpGraphPathContext, @@ -576,7 +588,7 @@ pub(crate) struct SimultaneousPathsWithLazyIndirectPaths { /// basically always be tiny (it's bounded by the number of distinct keys on a given type, so usually /// 2-3 max; even in completely unrealistic cases, it's hard-bounded by the number of subgraphs), so /// a `Vec` is going to perform a lot better than `IndexSet` in practice.
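// A minimal sketch of the trade-off argued in the comment above, with a hypothetical
// `is_excluded_sketch` standing in for the lookup on the type below: at 2-3 elements,
// a linear scan over a `Vec` is a handful of comparisons and avoids hashing entirely.
use std::sync::Arc;

fn is_excluded_sketch(excluded: &[Arc<str>], destination: &str) -> bool {
    excluded.iter().any(|d| d.as_ref() == destination)
}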
-#[derive(Debug, Clone)] +#[derive(Debug, Clone, serde::Serialize)] pub(crate) struct ExcludedDestinations(Arc>>); impl ExcludedDestinations { @@ -610,7 +622,7 @@ impl Default for ExcludedDestinations { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, serde::Serialize)] pub(crate) struct ExcludedConditions(Arc>>); impl ExcludedConditions { @@ -639,7 +651,7 @@ impl Default for ExcludedConditions { } } -#[derive(Clone)] +#[derive(Clone, serde::Serialize)] pub(crate) struct IndirectPaths where TTrigger: Eq + Hash, @@ -714,7 +726,7 @@ impl OpIndirectPaths { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, serde::Serialize)] struct Unadvanceables(Vec); impl Display for Unadvanceables { @@ -732,7 +744,7 @@ impl Display for Unadvanceables { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, serde::Serialize)] struct Unadvanceable { reason: UnadvanceableReason, from_subgraph: Arc, @@ -750,7 +762,7 @@ impl Display for Unadvanceable { } } -#[derive(Debug, Clone, strum_macros::Display)] +#[derive(Debug, Clone, strum_macros::Display, serde::Serialize)] enum UnadvanceableReason { UnsatisfiableKeyCondition, UnsatisfiableRequiresCondition, @@ -766,7 +778,7 @@ enum UnadvanceableReason { /// set, and the `SimultaneousPaths` ends at the node at which that query is made instead of a node /// for the leaf field. The selection set gets copied "as-is" into the `FetchNode`, and also avoids /// extra `GraphPath` creation and work during `PathTree` merging. -#[derive(Debug)] +#[derive(Debug, serde::Serialize)] pub(crate) struct ClosedPath { pub(crate) paths: SimultaneousPaths, pub(crate) selection_set: Option>, @@ -795,19 +807,19 @@ impl std::fmt::Display for ClosedPath { /// A list of the options generated during query planning for a specific "closed branch", which is a /// full/closed path in a GraphQL operation (i.e. one that ends in a leaf field). -#[derive(Debug)] +#[derive(Debug, serde::Serialize)] pub(crate) struct ClosedBranch(pub(crate) Vec>); /// A list of the options generated during query planning for a specific "open branch", which is a /// partial/open path in a GraphQL operation (i.e. one that does not end in a leaf field). -#[derive(Debug)] +#[derive(Debug, serde::Serialize)] pub(crate) struct OpenBranch(pub(crate) Vec); impl GraphPath where - TTrigger: Eq + Hash, + TTrigger: Eq + Hash + std::fmt::Debug, Arc: Into, - TEdge: Copy + Into>, + TEdge: Copy + Into> + std::fmt::Debug, EdgeIndex: Into, { pub(crate) fn new(graph: Arc, head: NodeIndex) -> Result { @@ -819,9 +831,9 @@ where edge_triggers: vec![], edge_conditions: vec![], last_subgraph_entering_edge_info: None, - own_path_ids: Arc::new(IndexSet::new()), - overriding_path_ids: Arc::new(IndexSet::new()), - runtime_types_of_tail: Arc::new(IndexSet::new()), + own_path_ids: Arc::new(IndexSet::default()), + overriding_path_ids: Arc::new(IndexSet::default()), + runtime_types_of_tail: Arc::new(IndexSet::default()), runtime_types_before_tail_if_last_is_cast: None, defer_on_tail: None, }; @@ -841,7 +853,7 @@ where .schema_by_source(&head_weight.source)? .possible_runtime_types(head_type_pos)? 
} - QueryGraphNodeType::FederatedRootType(_) => IndexSet::new(), + QueryGraphNodeType::FederatedRootType(_) => IndexSet::default(), }) } @@ -974,6 +986,7 @@ where if !new_runtime_types_of_tail.is_empty() && new_runtime_types_of_tail.is_subset(&self.runtime_types_of_tail) { + debug!("Previous cast {last_operation_element:?} is made obsolete by new cast {trigger:?}, removing from path."); // Note that `edge` starts at the node we wish to eliminate from the // path. So we need to replace it with the edge going directly from // the previous node to the new tail for this path. @@ -1309,6 +1322,10 @@ where == Some(self.edges.len() - 2)) } + #[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument(skip_all, level = "trace", name = "GraphPath::can_satisfy_conditions") + )] fn can_satisfy_conditions( &self, edge: EdgeIndex, @@ -1321,6 +1338,7 @@ where if edge_weight.conditions.is_none() { return Ok(ConditionResolution::no_conditions()); } + debug_span!("Checking conditions {conditions} on edge {edge_weight}"); let resolution = condition_resolver.resolve( edge, context, @@ -1347,8 +1365,12 @@ where true }; if in_same_subgraph { + debug!( + "@requires conditions are satisfied, but validating post-require key." + ); let (edge_head, _) = self.graph.edge_endpoints(edge)?; if self.graph.get_locally_satisfiable_key(edge_head)?.is_none() { + debug!("Post-require conditions cannot be satisfied"); return Ok(ConditionResolution::Unsatisfied { reason: Some(UnsatisfiedConditionReason::NoPostRequireKey), }); @@ -1373,6 +1395,7 @@ where } } } + debug!("Condition resolution: {resolution:?}"); Ok(resolution) } @@ -1380,6 +1403,10 @@ where // composition, but we'll need to port that code when we port composition. // PORT_NOTE: In the JS codebase, this was named // `advancePathWithNonCollectingAndTypePreservingTransitions`. + #[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument(skip_all, level = "trace") + )] fn advance_with_non_collecting_and_type_preserving_transitions( self: &Arc<Self>, context: &OpGraphPathContext, @@ -1421,7 +1448,7 @@ where type BestPathInfo = Option<(Arc>, QueryPlanCost)>; let mut best_path_by_source: IndexMap, BestPathInfo> = - IndexMap::new(); + IndexMap::default(); let dead_ends = vec![]; // Note that through `excluded` we avoid taking the same edge from multiple options. But // that means it's important we try the smallest paths first. That is, if we could in theory // not A -> C -> B -> D. let mut heap: BinaryHeap> = BinaryHeap::new(); heap.push(HeapElement(self.clone())); + while let Some(HeapElement(to_advance)) = heap.pop() { + let span = debug_span!("From {to_advance:?}"); + let _guard = span.enter(); for edge in to_advance.next_edges()? { + let span = debug_span!("Testing edge {edge:?}"); + let _guard = span.enter(); let edge_weight = self.graph.edge_weight(edge)?; if edge_weight.transition.collect_operation_elements() { continue; @@ -1439,6 +1471,7 @@ where let edge_tail_weight = self.graph.node_weight(edge_tail)?; if excluded_destinations.is_excluded(&edge_tail_weight.source) { + debug!("Ignored: edge is excluded"); continue; } @@ -1448,6 +1481,7 @@ where // re-entering the current subgraph is actually useful. if edge_tail_weight.source == original_source && to_advance.defer_on_tail.is_none() { + debug!("Ignored: edge gets us back to our original source"); continue; } @@ -1463,13 +1497,17 @@ where && !(to_advance.defer_on_tail.is_some() && self.graph.is_self_key_or_root_edge(edge)?)
{ + debug!(r#"Ignored: edge is a top-level "RootTypeResolution""#); continue; } let prev_for_source = best_path_by_source.get(&edge_tail_weight.source); let prev_for_source = match prev_for_source { Some(Some(prev_for_source)) => Some(prev_for_source), - Some(None) => continue, + Some(None) => { + debug!("Ignored: we've shown before than going to {original_source:?} is not productive"); + continue; + } None => None, }; @@ -1478,6 +1516,9 @@ where || (prev_for_source.0.edges.len() == to_advance.edges.len() + 1 && prev_for_source.1 <= 1.0) { + debug!( + "Ignored: a better (shorter) path to the same subgraph already added" + ); // We've already found another path that gets us to the same subgraph rather // than the edge we're about to check. If that previous path is strictly // shorter than the path we'd obtain with the new edge, then we don't @@ -1495,9 +1536,12 @@ where } if excluded_conditions.is_excluded(edge_weight.conditions.as_ref()) { + debug!("Ignored: edge condition is excluded"); continue; } + let span = debug_span!("Validating conditions {edge_weight}"); + let guard = span.enter(); // As we validate the condition for this edge, it might be necessary to jump to // another subgraph, but if for that we need to jump to the same subgraph we're // trying to get to, then it means there is another, shorter way to go to our @@ -1511,6 +1555,8 @@ where excluded_conditions, )?; if let ConditionResolution::Satisfied { path_tree, cost } = condition_resolution { + debug!("Condition satisfied"); + drop(guard); // We can get to `edge_tail_weight.source` with that edge. But if we had already // found another path to the same subgraph, we want to replace it with this one // only if either 1) it is shorter or 2) if it's of equal size, only if the @@ -1519,6 +1565,7 @@ where if prev_for_source.0.edges.len() == to_advance.edges.len() + 1 && prev_for_source.1 <= cost { + debug!("Ignored: a better (less costly) path to the same subgraph already added"); continue; } } @@ -1647,6 +1694,23 @@ where direct_key_edge_max_cost, )? { + debug!("Ignored: edge correspond to a detour by subgraph {} from subgraph {:?}: ", edge_tail_weight.source, self.graph.node_weight(last_subgraph_entering_edge_head)?.source); + debug!( + "we have a direct path from {} to {} in {}.", + self.graph + .node_weight(last_subgraph_entering_edge_head)? + .type_, + edge_tail_weight.type_, + self.graph + .node_weight(last_subgraph_entering_edge_head)? + .source + ); + if !is_edge_to_previous_subgraph { + debug!( + "And, it can move to {} from there", + edge_tail_weight.source + ); + } // We just found that going to the previous subgraph is useless // because there is a more direct path. But we additionally // record that this previous subgraph should be avoided @@ -1683,6 +1747,7 @@ where edge_tail_weight.source.clone(), Some((updated_path.clone(), cost)), ); + debug!("Using edge, advance path: {updated_path:?}"); // It can be necessary to "chain" keys, because different subgraphs may have // different keys exposed, and so we when we took a key, we want to check if // there is a new key we can now use that takes us to other subgraphs. For other @@ -1702,6 +1767,8 @@ where heap.push(HeapElement(updated_path)); } } + } else { + debug!("Condition unsatisfiable: {condition_resolution:?}"); } } } @@ -2321,6 +2388,12 @@ impl OpGraphPath { /// have also created multiple options). /// /// For the second element, it is true if the result only has type-exploded results. 
+ #[cfg_attr(feature = "snapshot_tracing", tracing::instrument( + skip_all, + level = "trace", + name = "GraphPath::advance_with_operation_element", + fields(label = operation_element.to_string()) + ))] fn advance_with_operation_element( &self, supergraph_schema: ValidFederationSchema, @@ -2328,10 +2401,13 @@ impl OpGraphPath { context: &OpGraphPathContext, condition_resolver: &mut impl ConditionResolver, ) -> Result<(Option<Vec<SimultaneousPaths>>, Option<bool>), FederationError> { + let span = debug_span!("Trying to advance {self} directly with {operation_element}"); + let _guard = span.enter(); let tail_weight = self.graph.node_weight(self.tail)?; let QueryGraphNodeType::SchemaType(tail_type_pos) = &tail_weight.type_ else { // We cannot advance any operation from here. We need to take the initial non-collecting // edges first. + debug!("Cannot advance federated graph root with direct operations"); return Ok((None, None)); }; match operation_element { @@ -2341,6 +2417,9 @@ impl OpGraphPath { // Just take the edge corresponding to the field, if it exists and can be // used. let Some(edge) = self.next_edge_for_field(operation_field) else { + debug!( + "No edge for field {operation_field} on object type {tail_weight}" + ); return Ok((None, None)); }; @@ -2387,6 +2466,12 @@ impl OpGraphPath { condition_resolver, context, )?; + match &field_path { + Some(_) => debug!("Collected field on object type {tail_weight}"), + None => debug!( + "Cannot satisfy @requires on field for object type {tail_weight}" + ), + } Ok((field_path.map(|p| vec![p.into()]), None)) } OutputTypeDefinitionPosition::Interface(tail_type_pos) => { @@ -2503,8 +2588,10 @@ impl OpGraphPath { "Unexpectedly missing interface path", )); }; + debug!("Collecting (leaf) field on interface {tail_weight} without type-exploding"); return Ok((Some(vec![interface_path.into()]), None)); } + debug!("Collecting field on interface {tail_weight} as 1st option"); } // There are 2 main cases to handle here: @@ -2538,8 +2625,14 @@ impl OpGraphPath { ) )); } - Arc::new(IndexSet::from([field_parent_pos.clone()])) + debug!("Casting into requested type {field_parent_pos}"); + Arc::new(IndexSet::from_iter([field_parent_pos.clone()])) } else { + if interface_path.is_some() { + debug!("No direct edge: type exploding interface {tail_weight} into possible runtime types {:?}", self.runtime_types_of_tail); + } else { + debug!("Type exploding interface {tail_weight} into possible runtime types {:?} as 2nd option", self.runtime_types_of_tail); + } self.runtime_types_of_tail.clone() }; @@ -2548,6 +2641,9 @@ impl OpGraphPath { // any gives us empty options, we bail. let mut options_for_each_implementation = vec![]; for implementation_type_pos in implementations.as_ref() { + let span = + debug_span!("Handling implementation {implementation_type_pos}"); + let guard = span.enter(); let implementation_inline_fragment = InlineFragment::new(InlineFragmentData { schema: self . ... @@ -2576,18 +2672,26 @@ impl OpGraphPath { // If we find no options for that implementation, we bail (as we need to // simultaneously advance all implementations). let Some(mut implementation_options) = implementation_options else { + drop(guard); + debug!("Cannot collect field from {implementation_type_pos}: stopping with options [{interface_path:?}]"); return Ok((interface_path.map(|p| vec![p.into()]), None)); }; // If the new inline fragment makes it so that we're on an unsatisfiable // branch, we just ignore that implementation.
if implementation_options.is_empty() { + debug!("Cannot ever get {implementation_type_pos} from this branch, ignoring it"); continue; } // For each option, we call `advance_with_operation_element()` again on // our own operation element (the field), which gives us some options // (or not and we bail). let mut field_options = vec![]; + debug!( + "Trying to collect field from options {implementation_options:?}" + ); for implementation_option in &mut implementation_options { + let span = debug_span!("For {implementation_option}"); + let _guard = span.enter(); let field_options_for_implementation = implementation_option .advance_with_operation_element( supergraph_schema.clone(), @@ -2597,6 +2701,7 @@ impl OpGraphPath { let Some(field_options_for_implementation) = field_options_for_implementation else { + debug!("Cannot collect field"); continue; }; // Advancing a field should never get us into an unsatisfiable // branch... if ... { return Err(FederationError::internal(format!( ... operation_field ))); } + debug!( + "Collected field: adding {field_options_for_implementation:?}" + ); field_options.extend( field_options_for_implementation .into_iter() @@ -2616,8 +2724,11 @@ impl OpGraphPath { // If we find no options to advance that implementation, we bail (as we // need to simultaneously advance all implementations). if field_options.is_empty() { + drop(guard); + debug!("Cannot collect field from {implementation_type_pos}: stopping with options [{}]", DisplayOption::new(&interface_path)); return Ok((interface_path.map(|p| vec![p.into()]), None)); }; + debug!("Collected field from {implementation_type_pos}"); options_for_each_implementation.push(field_options); } let all_options = SimultaneousPaths::flat_cartesian_product( @@ -2630,16 +2741,14 @@ impl OpGraphPath { } else { (interface_path, all_options) }; - Ok(( - Some( - vec![interface_path.into()] - .into_iter() - .chain(all_options) - .collect(), - ), - None, - )) + let options = vec![interface_path.into()] + .into_iter() + .chain(all_options) + .collect::<Vec<_>>(); + debug!("With type-exploded options: {}", DisplaySlice(&options)); + Ok((Some(options), None)) } else { + debug!("With type-exploded options: {}", DisplaySlice(&all_options)); // TODO: This appears to be the only place returning non-None for the // 2nd argument, so this could be Option<(Vec<SimultaneousPaths>, bool)> // instead. @@ -2658,6 +2767,7 @@ impl OpGraphPath { condition_resolver, context, )?; + debug!("Trivial collection of __typename for union"); Ok((field_path.map(|p| vec![p.into()]), None)) } _ => { @@ -2683,6 +2793,7 @@ impl OpGraphPath { // on), it means we're essentially just applying some directives (could be a // `@skip`/`@include` for instance). This doesn't make us take any edge, but if // the operation element does have directives, we record it.
+ debug!("No edge to take for condition {operation_inline_fragment} from current type"); let fragment_path = if operation_inline_fragment.directives.is_empty() { self.clone() } else { @@ -2717,6 +2828,7 @@ impl OpGraphPath { ConditionResolution::no_conditions(), operation_inline_fragment.defer_directive_arguments()?, )?; + debug!("Using type-casting edge for {type_condition_name} from current type"); return Ok((Some(vec![fragment_path.into()]), None)); } @@ -2730,8 +2842,11 @@ impl OpGraphPath { .try_into()?, )?; let intersection = from_types.intersection(&to_types); + debug!("Trying to type-explode into intersection between current type and {type_condition_name} = [{}]", intersection.clone().format(",")); let mut options_for_each_implementation = vec![]; for implementation_type_pos in intersection { + let span = debug_span!("Trying {implementation_type_pos}"); + let guard = span.enter(); let implementation_inline_fragment = InlineFragment::new(InlineFragmentData { schema: self @@ -2758,11 +2873,14 @@ impl OpGraphPath { condition_resolver, )?; let Some(implementation_options) = implementation_options else { + drop(guard); + debug!("Cannot advance into {implementation_type_pos} from current type: no options for operation."); return Ok((None, None)); }; // If the new inline fragment makes it so that we're on an unsatisfiable // branch, we just ignore that implementation. if implementation_options.is_empty() { + debug!("Cannot ever get type name from this branch, ignoring it"); continue; } options_for_each_implementation.push( @@ -2770,11 +2888,13 @@ impl OpGraphPath { .into_iter() .map(|s| s.paths) .collect(), - ) + ); + debug!("Advanced into type from current type: {options_for_each_implementation:?}"); } let all_options = SimultaneousPaths::flat_cartesian_product( options_for_each_implementation, )?; + debug!("Type-exploded options: {}", DisplaySlice(&all_options)); Ok((Some(all_options), None)) } OutputTypeDefinitionPosition::Object(tail_type_pos) => { @@ -2801,6 +2921,7 @@ impl OpGraphPath { .possible_runtime_types(type_condition_pos.clone().into())? .contains(tail_type_pos) { + debug!("Type is a super-type of the current type. No edge to take"); // Type condition is applicable on the tail type, so the types are // already exploded but the condition can reference types from the // supergraph that are not present in the local subgraph. @@ -2874,6 +2995,7 @@ impl OpGraphPath { } } + debug!("Cannot ever get type from current type: returning empty branch"); // The operation element we're dealing with can never return results (the // type conditions applied have no intersection). This means we can fulfill // this operation element (by doing nothing and returning an empty result), @@ -3224,6 +3346,8 @@ impl SimultaneousPathsWithLazyIndirectPaths { operation_element: &OpPathElement, condition_resolver: &mut impl ConditionResolver, ) -> Result>, FederationError> { + let span = debug_span!("Trying to advance paths for operation", paths = %self.paths, operation = %operation_element); + let _gaurd = span.enter(); let updated_context = self.context.with_context_of(operation_element)?; let mut options_for_each_path = vec![]; @@ -3231,10 +3355,14 @@ impl SimultaneousPathsWithLazyIndirectPaths { // references to `self`, which means cloning these paths when iterating. 
let paths = self.paths.0.clone(); for (path_index, path) in paths.iter().enumerate() { + let span = debug_span!("Computing options for {path}"); + let guard = span.enter(); let mut options = None; let should_reenter_subgraph = path.defer_on_tail.is_some() && matches!(operation_element, OpPathElement::Field(_)); if !should_reenter_subgraph { + let span = debug_span!("Direct options"); + let guard = span.enter(); let (advance_options, has_only_type_exploded_results) = path .advance_with_operation_element( supergraph_schema.clone(), @@ -3242,6 +3370,8 @@ impl SimultaneousPathsWithLazyIndirectPaths { &updated_context, condition_resolver, )?; + debug!("{advance_options:?}"); + drop(guard); // If we've got some options, there are a number of cases where there is no point // looking for indirect paths: // - If the operation element is terminal: this means we just found a direct edge // - ... @@ -3267,6 +3397,7 @@ impl SimultaneousPathsWithLazyIndirectPaths { && !has_only_type_exploded_results.unwrap_or(false)) || matches!(operation_element, OpPathElement::InlineFragment(_)) { + debug!("Final options for {path}: {advance_options:?}"); // Note that if options is empty, that means this particular "branch" is // unsatisfiable, so we should just ignore it. if !advance_options.is_empty() { @@ -3283,14 +3414,25 @@ impl SimultaneousPathsWithLazyIndirectPaths { // defer), that's ok, we'll just try with non-collecting edges. let mut options = options.unwrap_or_else(Vec::new); if let OpPathElement::Field(operation_field) = operation_element { + let span = debug_span!("Computing indirect paths:"); + let _guard = span.enter(); // Add whatever options can be obtained by taking some non-collecting edges first. let paths_with_non_collecting_edges = self .indirect_options(&updated_context, path_index, condition_resolver)? .filter_non_collecting_paths_for_field(operation_field)?; if !paths_with_non_collecting_edges.paths.is_empty() { + debug!( + "{} indirect paths", + paths_with_non_collecting_edges.paths.len() + ); + let span = debug_span!("Validating indirect options:"); + let _guard = span.enter(); for paths_with_non_collecting_edges in paths_with_non_collecting_edges.paths.iter() { + let span = + debug_span!("For indirect path {paths_with_non_collecting_edges}:"); + let _guard = span.enter(); let (advance_options, _) = paths_with_non_collecting_edges .advance_with_operation_element( supergraph_schema.clone(), @@ -3301,8 +3443,10 @@ impl SimultaneousPathsWithLazyIndirectPaths { // If we can't advance the operation element after that path, ignore it, // it's just not an option. let Some(advance_options) = advance_options else { + debug!("Ignoring: cannot be advanced with {operation_element}"); continue; }; + debug!("Adding valid option: {advance_options:?}"); // `advance_with_operation_element()` can return an empty `Vec` only if the // operation element is a fragment with a type condition that, on top of the // "current" type is unsatisfiable. But as we've only taken type-preserving
if options.is_empty() && should_reenter_subgraph { + let span = debug_span!( + "Cannot defer (no indirect options); falling back to direct options" + ); + let _guard = span.enter(); let (advance_options, _) = path.advance_with_operation_element( supergraph_schema.clone(), operation_element, @@ -3378,11 +3528,14 @@ impl SimultaneousPathsWithLazyIndirectPaths { condition_resolver, )?; options = advance_options.unwrap_or_else(Vec::new); + debug!("{options:?}"); } // At this point, if options is empty, it means we found no ways to advance the // operation element for this path, so we should return `None`. if options.is_empty() { + drop(guard); + debug!("No valid options for {operation_element}, aborting."); return Ok(None); } else { options_for_each_path.push(options); @@ -3390,6 +3543,7 @@ } let all_options = SimultaneousPaths::flat_cartesian_product(options_for_each_path)?; + debug!("{all_options:?}"); Ok(Some(self.create_lazy_options(all_options, updated_context))) } } diff --git a/apollo-federation/src/query_graph/mod.rs b/apollo-federation/src/query_graph/mod.rs index 29db17c129..e77d191efa 100644 --- a/apollo-federation/src/query_graph/mod.rs +++ b/apollo-federation/src/query_graph/mod.rs @@ -3,10 +3,10 @@ use std::fmt::Formatter; use std::hash::Hash; use std::sync::Arc; +use apollo_compiler::collections::IndexMap; +use apollo_compiler::collections::IndexSet; use apollo_compiler::schema::NamedType; use apollo_compiler::Name; -use indexmap::IndexMap; -use indexmap::IndexSet; use petgraph::graph::DiGraph; use petgraph::graph::EdgeIndex; use petgraph::graph::EdgeReference; @@ -630,7 +630,7 @@ impl QueryGraph { composite_type_position.type_name().clone(), key_value.fields, )?; - if !external_metadata.selects_any_external_field(&selection)? { + if !external_metadata.selects_any_external_field(&selection) { return Ok(Some(selection)); } } @@ -759,10 +759,10 @@ impl QueryGraph { let Ok(_): Result = tail_type_pos.clone().try_into() else { - return Ok(IndexSet::new()); + return Ok(IndexSet::default()); }; let schema = self.schema_by_source(source)?; - let mut new_possible_runtime_types = IndexSet::new(); + let mut new_possible_runtime_types = IndexSet::default(); for possible_runtime_type in possible_runtime_types { let field_pos = possible_runtime_type.field(field_definition_position.field_name().clone()); @@ -801,7 +801,7 @@ impl QueryGraph { "Unexpectedly encountered non-object root operation type.", )); }; - Ok(IndexSet::from([tail_type_pos])) + Ok(IndexSet::from_iter([tail_type_pos])) } QueryGraphEdgeTransition::SubgraphEnteringTransition => { let OutputTypeDefinitionPosition::Object(tail_type_pos) = tail_type_pos.clone() @@ -810,7 +810,7 @@ impl QueryGraph { "Unexpectedly encountered non-object root operation type.", )); }; - Ok(IndexSet::from([tail_type_pos])) + Ok(IndexSet::from_iter([tail_type_pos])) } QueryGraphEdgeTransition::InterfaceObjectFakeDownCast { ..
} => { Ok(possible_runtime_types.clone()) @@ -857,7 +857,7 @@ impl QueryGraph { let selection = parse_field_set(schema, ty.name().clone(), value)?; let has_external = metadata .external_metadata() - .selects_any_external_field(&selection)?; + .selects_any_external_field(&selection); if !has_external { return Ok(Some(selection)); } diff --git a/apollo-federation/src/query_graph/path_tree.rs b/apollo-federation/src/query_graph/path_tree.rs index 02fbcf0ca7..3411458f89 100644 --- a/apollo-federation/src/query_graph/path_tree.rs +++ b/apollo-federation/src/query_graph/path_tree.rs @@ -3,10 +3,11 @@ use std::fmt::Formatter; use std::hash::Hash; use std::sync::Arc; +use apollo_compiler::collections::IndexMap; use indexmap::map::Entry; -use indexmap::IndexMap; use petgraph::graph::EdgeIndex; use petgraph::graph::NodeIndex; +use serde::Serialize; use crate::error::FederationError; use crate::operation::SelectionSet; @@ -23,13 +24,16 @@ use crate::query_graph::QueryGraphNode; // Typescript doesn't have a native way of associating equality/hash functions with types, so they // were passed around manually. This isn't the case with Rust, where we instead implement trigger // equality via `PartialEq` and `Hash`. -#[derive(Clone)] +#[derive(Serialize)] pub(crate) struct PathTree where TTrigger: Eq + Hash, TEdge: Copy + Into>, { /// The query graph of which this is a path tree. + // TODO: This is probably useful information for snapshot logging, but it can probably be + // inferred by the visualizer + #[serde(skip)] pub(crate) graph: Arc, /// The query graph node at which the path tree starts. pub(crate) node: NodeIndex, @@ -45,7 +49,35 @@ where pub(crate) childs: Vec>>, } -#[derive(Debug)] +impl Clone for PathTree +where + TTrigger: Eq + Hash, + TEdge: Copy + Into>, +{ + fn clone(&self) -> Self { + Self { + graph: self.graph.clone(), + node: self.node, + local_selection_sets: self.local_selection_sets.clone(), + childs: self.childs.clone(), + } + } +} + +impl PartialEq for PathTree +where + TTrigger: Eq + Hash, + TEdge: Copy + PartialEq + Into>, +{ + fn eq(&self, other: &Self) -> bool { + Arc::ptr_eq(&self.graph, &other.graph) + && self.node == other.node + && self.local_selection_sets == other.local_selection_sets + && self.childs == other.childs + } +} + +#[derive(Debug, Serialize)] pub(crate) struct PathTreeChild where TTrigger: Eq + Hash, @@ -61,6 +93,19 @@ where pub(crate) tree: Arc>, } +impl PartialEq for PathTreeChild +where + TTrigger: Eq + Hash, + TEdge: Copy + PartialEq + Into>, +{ + fn eq(&self, other: &Self) -> bool { + self.edge == other.edge + && self.trigger == other.trigger + && self.conditions == other.conditions + && self.tree == other.tree + } +} + /// A `PathTree` whose triggers are operation elements (essentially meaning that the constituent /// `GraphPath`s were guided by a GraphQL operation). pub(crate) type OpPathTree = PathTree>; @@ -178,7 +223,8 @@ where TEdge: 'inputs, { // Group by and order by unique edge ID, and among those by unique trigger - let mut merged = IndexMap::>::new(); + let mut merged = + IndexMap::>::default(); struct ByUniqueEdge<'inputs, TTrigger, GraphPathIter> { target_node: NodeIndex, @@ -212,7 +258,7 @@ where // For a "None" edge, stay on the same node node }, - by_unique_trigger: IndexMap::new(), + by_unique_trigger: IndexMap::default(), }) } }; @@ -283,6 +329,39 @@ where }) } + /// Appends the children of the other `OpTree` onto the children of this tree. 
+ /// + /// ## Panics + /// Like `Self::merge`, this method will panic if the graphs of the two `OpTree`s belong to + /// different allocations (i.e. they don't belong to the same graph) or if they belong to + /// different root nodes. + pub(crate) fn extend(&mut self, other: &Self) { + assert!( + Arc::ptr_eq(&self.graph, &other.graph), + "Cannot merge path tree built on another graph" + ); + assert_eq!( + self.node, other.node, + "Cannot merge path trees rooted at different nodes" + ); + if self == other { + return; + } + if other.childs.is_empty() { + return; + } + if self.childs.is_empty() { + self.clone_from(other); + return; + } + self.childs.extend_from_slice(&other.childs); + self.local_selection_sets + .extend_from_slice(&other.local_selection_sets); + } + + /// ## Panics + /// This method will panic if the graphs of the two `OpTree`s belong to different allocations + /// (i.e. they don't belong to the same graph) or if they belong to different root nodes. pub(crate) fn merge(self: &Arc<Self>, other: &Arc<Self>) -> Arc<Self> { if Arc::ptr_eq(self, other) { return self.clone(); @@ -503,7 +582,7 @@ mod tests { "#; let (schema, mut executable_document) = parse_schema_and_operation(src); - let (op_name, operation) = executable_document.named_operations.first_mut().unwrap(); + let (op_name, operation) = executable_document.operations.named.first_mut().unwrap(); let query_graph = Arc::new(build_query_graph(op_name.to_string().into(), schema.clone()).unwrap()); diff --git a/apollo-federation/src/query_plan/conditions.rs b/apollo-federation/src/query_plan/conditions.rs index 00b4b44543..ab84d2d8ca 100644 --- a/apollo-federation/src/query_plan/conditions.rs +++ b/apollo-federation/src/query_plan/conditions.rs @@ -1,12 +1,13 @@ use std::sync::Arc; use apollo_compiler::ast::Directive; +use apollo_compiler::collections::IndexMap; use apollo_compiler::executable::DirectiveList; use apollo_compiler::executable::Value; use apollo_compiler::Name; use apollo_compiler::Node; use indexmap::map::Entry; -use indexmap::IndexMap; +use serde::Serialize; use crate::error::FederationError; use crate::operation::Selection; @@ -19,7 +20,7 @@ use crate::query_graph::graph_path::OpPathElement; /// Accordingly, there is much logic around merging and short-circuiting; `OperationConditional` is /// the more appropriate struct when trying to record the original structure/intent of those /// `@skip`/`@include` applications. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub(crate) enum Conditions { Variables(VariableConditions), Boolean(bool), @@ -34,7 +35,7 @@ pub(crate) enum Condition { /// A list of variable conditions, represented as a map from variable names to whether that variable /// is negated in the condition. We maintain the invariant that there's at least one condition (i.e. /// the map is non-empty), and that there's at most one condition per variable name.
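// A hedged illustration of the invariants documented above, using a bare map in place
// of the wrapped type: at least one entry, at most one entry per variable, and the
// boolean recording negation (`@skip(if: $v)` negated, `@include(if: $v)` not).
use apollo_compiler::collections::IndexMap;

fn variable_conditions_sketch() -> IndexMap<&'static str, bool> {
    let mut conditions = IndexMap::default();
    conditions.insert("includeFoo", false); // from @include(if: $includeFoo)
    conditions.insert("skipBar", true); // from @skip(if: $skipBar), hence negated
    assert!(!conditions.is_empty()); // the non-empty invariant
    conditions
}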
-#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub(crate) struct VariableConditions(Arc>); impl VariableConditions { @@ -91,7 +92,7 @@ impl Conditions { } pub(crate) fn from_directives(directives: &DirectiveList) -> Result { - let mut variables = IndexMap::new(); + let mut variables = IndexMap::default(); for directive in directives { let negated = match directive.name.as_str() { "include" => false, @@ -133,7 +134,7 @@ impl Conditions { match (new_conditions, self) { (Conditions::Boolean(_), _) | (_, Conditions::Boolean(_)) => new_conditions.clone(), (Conditions::Variables(new_conditions), Conditions::Variables(handled_conditions)) => { - let mut filtered = IndexMap::new(); + let mut filtered = IndexMap::default(); for (cond_name, &cond_negated) in new_conditions.0.iter() { match handled_conditions.is_negated(cond_name) { Some(handled_cond) if cond_negated != handled_cond => { @@ -212,7 +213,7 @@ pub(crate) fn remove_conditions_from_selection_set( // We remove any of the conditions on the element and recurse. let updated_element = remove_conditions_of_element(element.clone(), variable_conditions); - let new_selection = if let Ok(Some(selection_set)) = selection.selection_set() { + let new_selection = if let Some(selection_set) = selection.selection_set() { let updated_selection_set = remove_conditions_from_selection_set(selection_set, conditions)?; if updated_element == element { diff --git a/apollo-federation/src/query_plan/display.rs b/apollo-federation/src/query_plan/display.rs index c48b0add77..9141b87747 100644 --- a/apollo-federation/src/query_plan/display.rs +++ b/apollo-federation/src/query_plan/display.rs @@ -3,8 +3,8 @@ use std::fmt; use apollo_compiler::executable; use super::*; -use crate::indented_display::write_indented_lines; -use crate::indented_display::State; +use crate::display_helpers::write_indented_lines; +use crate::display_helpers::State; impl QueryPlan { fn write_indented(&self, state: &mut State<'_, '_>) -> fmt::Result { @@ -314,7 +314,8 @@ fn write_operation( operation_document: &ExecutableDocument, ) -> fmt::Result { let operation = operation_document - .get_operation(None) + .operations + .get(None) .expect("expected a single-operation document"); write_selections(state, &operation.selection_set.selections)?; for fragment in operation_document.fragments.values() { diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index afb34c8b75..d5425e2187 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -11,14 +11,14 @@ use apollo_compiler::ast::Argument; use apollo_compiler::ast::Directive; use apollo_compiler::ast::OperationType; use apollo_compiler::ast::Type; +use apollo_compiler::collections::IndexMap; +use apollo_compiler::collections::IndexSet; use apollo_compiler::executable; use apollo_compiler::executable::VariableDefinition; use apollo_compiler::name; use apollo_compiler::schema; use apollo_compiler::Name; use apollo_compiler::Node; -use indexmap::IndexMap; -use indexmap::IndexSet; use itertools::Itertools; use multimap::MultiMap; use petgraph::stable_graph::EdgeIndex; @@ -26,6 +26,7 @@ use petgraph::stable_graph::NodeIndex; use petgraph::stable_graph::StableDiGraph; use petgraph::visit::EdgeRef; use petgraph::visit::IntoNodeReferences; +use serde::Serialize; use crate::error::FederationError; use crate::error::SingleFederationError; @@ -73,6 +74,7 @@ use 
crate::schema::position::TypeDefinitionPosition; use crate::schema::ValidFederationSchema; use crate::subgraph::spec::ANY_SCALAR_NAME; use crate::subgraph::spec::ENTITIES_QUERY; +use crate::utils::logging::snapshot; /// Represents the value of a `@defer(label:)` argument. type DeferRef = String; @@ -86,7 +88,7 @@ type DeferredNodes = multimap::MultiMap>; // // The JS codebase additionally has a property named `subgraphAndMergeAtKey` that was used as a // precomputed map key, but this isn't necessary in Rust since we can use `PartialEq`/`Eq`/`Hash`. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub(crate) struct FetchDependencyGraphNode { /// The subgraph this fetch is queried against. pub(crate) subgraph_name: Arc, @@ -111,6 +113,7 @@ pub(crate) struct FetchDependencyGraphNode { /// The fetch ID generation, if one is necessary (used when handling `@defer`). /// /// This can be treated as an Option using `OnceLock::get()`. + #[serde(skip)] id: OnceLock, /// The label of the `@defer` block this fetch appears in, if any. defer_ref: Option, @@ -151,7 +154,7 @@ impl Clone for FetchIdGenerator { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub(crate) struct FetchSelectionSet { /// The selection set to be fetched from the subgraph. pub(crate) selection_set: Arc, @@ -163,17 +166,18 @@ pub(crate) struct FetchSelectionSet { // PORT_NOTE: The JS codebase additionally has a property `onUpdateCallback`. This was only ever // used to update `isKnownUseful` in `FetchGroup`, and it's easier to handle this there than try // to pass in a callback in Rust. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize)] pub(crate) struct FetchInputs { /// The selection sets to be used as input to `_entities`, separated per parent type. selection_sets_per_parent_type: IndexMap>, /// The supergraph schema (primarily used for validation of added selection sets). + #[serde(skip)] supergraph_schema: ValidFederationSchema, } /// Represents a dependency between two subgraph fetches, namely that the tail/child depends on the /// head/parent executing first. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub(crate) struct FetchDependencyGraphEdge { /// The operation path of the tail/child _relative_ to the head/parent. This information is /// maintained in case we want/need to merge nodes into each other. This can roughly be thought @@ -194,12 +198,14 @@ type FetchDependencyGraphPetgraph = /// /// In the graph, two fetches are connected if one of them (the parent/head) must be performed /// strictly before the other one (the child/tail). -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize)] pub(crate) struct FetchDependencyGraph { /// The supergraph schema that generated the federated query graph. + #[serde(skip)] supergraph_schema: ValidFederationSchema, /// The federated query graph that generated the fetches. (This also contains the subgraph /// schemas.) + #[serde(skip)] federated_query_graph: Arc, /// The nodes/edges of the fetch dependency graph. Note that this must be a stable graph since /// we remove nodes/edges during optimizations. @@ -208,10 +214,14 @@ pub(crate) struct FetchDependencyGraph { /// the subgraphs. root_nodes_by_subgraph: IndexMap, NodeIndex>, /// Tracks metadata about deferred blocks and their dependencies on one another. + // TODO(@TylerBloom): Skipped since defer is not supported yet. Once it is, this field will + // need to be included in the serialized output.
+ #[serde(skip)] pub(crate) defer_tracking: DeferTracking, /// The initial fetch ID generation (used when handling `@defer`). starting_id_generation: u64, /// The current fetch ID generation (used when handling `@defer`). + #[serde(skip)] fetch_id_generation: FetchIdGenerator, /// Whether this fetch dependency graph has undergone a transitive reduction. is_reduced: bool, @@ -658,8 +668,8 @@ impl FetchDependencyGraph { // which is important for some case of @requires). for existing_id in self.children_of(parent.parent_node_id) { let existing = self.node_weight(existing_id)?; - if existing.subgraph_name == *subgraph_name - && existing.merge_at.as_deref() == Some(merge_at) + // we compare the subgraph names last because on average it improves performance + if existing.merge_at.as_deref() == Some(merge_at) && existing .selection_set .selection_set @@ -673,12 +683,13 @@ impl FetchDependencyGraph { ) }) && !self.is_in_nodes_or_their_ancestors(existing_id, conditions_nodes) - && existing.defer_ref.as_ref() == defer_ref && self .parents_relations_of(existing_id) .find(|rel| rel.parent_node_id == parent.parent_node_id) .and_then(|rel| rel.path_in_parent) == parent.path_in_parent + && existing.defer_ref.as_ref() == defer_ref + && existing.subgraph_name == *subgraph_name { return Ok(existing_id); } @@ -1155,7 +1166,7 @@ impl FetchDependencyGraph { let get_subgraph_schema = |subgraph_name: &Arc| { self.federated_query_graph .schema_by_source(subgraph_name) - .map(|schema| schema.clone()) + .cloned() }; // For nodes that fetch from an @interfaceObject, we can sometimes have something like @@ -1285,7 +1296,7 @@ impl FetchDependencyGraph { } } - let Some(sub_selection_set) = selection.selection_set()? else { + let Some(sub_selection_set) = selection.selection_set() else { // we're only here if `conditionInSupergraphIfInterfaceObject` returned something, // which implies that the selection is a fragment selection and so has a sub-selectionSet. return Err(FederationError::internal(format!( @@ -1300,14 +1311,14 @@ impl FetchDependencyGraph { // case as a "safe" default).
if !interface_input_selections.is_empty() { Ok(interface_input_selections.iter().any(|input| { - let Ok(Some(input_selection_set)) = input.selection_set() else { + let Some(input_selection_set) = input.selection_set() else { return false; }; input_selection_set.contains(sub_selection_set) })) } else if !implementation_input_selections.is_empty() { Ok(implementation_input_selections.iter().all(|input| { - let Ok(Some(input_selection_set)) = input.selection_set() else { + let Some(input_selection_set) = input.selection_set() else { return false; }; input_selection_set.contains(sub_selection_set) @@ -1803,9 +1814,10 @@ impl FetchDependencyGraph { let child = self.node_weight(child_id)?; let parent_relation = self.parent_relation(child_id, node_id); - Ok(node.subgraph_name == child.subgraph_name + // we compare the subgraph names last because on average it improves performance + Ok(parent_relation.is_some_and(|r| r.path_in_parent.is_some()) && node.defer_ref == child.defer_ref - && parent_relation.is_some_and(|r| r.path_in_parent.is_some())) + && node.subgraph_name == child.subgraph_name) } /// We only allow merging siblings on the same subgraph, same "merge_at" and when the common parent is their only parent: @@ -1841,10 +1853,11 @@ impl FetchDependencyGraph { return Ok(false); }; - Ok(node.defer_ref == sibling.defer_ref - && node.subgraph_name == sibling.subgraph_name - && node.merge_at == sibling.merge_at - && own_parent_id == sibling_parent_id) + // we compare the subgraph names last because on average it improves performance + Ok(node.merge_at == sibling.merge_at + && own_parent_id == sibling_parent_id + && node.defer_ref == sibling.defer_ref + && node.subgraph_name == sibling.subgraph_name) } fn can_merge_grand_child_in( @@ -1868,12 +1881,13 @@ impl FetchDependencyGraph { return Ok(false); }; - Ok(node.subgraph_name == grand_child.subgraph_name - && node.defer_ref == grand_child.defer_ref - && grand_child_parent_relations[0].path_in_parent.is_some() + // we compare the subgraph names last because on average it improves performance + Ok(grand_child_parent_relations[0].path_in_parent.is_some() && grand_child_parent_parent_relation.is_some_and(|r| r.path_in_parent.is_some()) && node.merge_at == grand_child.merge_at - && node_inputs.contains(grand_child_inputs)) + && node_inputs.contains(grand_child_inputs) + && node.defer_ref == grand_child.defer_ref + && node.subgraph_name == grand_child.subgraph_name) } /// Merges a child of a parent node into it.
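The reordered `&&` chains above all apply the same idea: Rust's `&&` short-circuits left to right, so checks that are cheap or likely to fail go first, and the comparatively expensive subgraph-name (string) comparison goes last. A minimal sketch of that rationale, using hypothetical inputs rather than the crate's real types:

fn can_merge(merge_at_eq: bool, same_parent: bool, defer_ref_eq: bool, a: &str, b: &str) -> bool {
    // Evaluated left to right; later operands are skipped as soon as one is false.
    merge_at_eq // cheap, and often false in practice
        && same_parent // cheap
        && defer_ref_eq // cheap
        && a == b // string comparison, deliberately last
}

fn main() {
    // The string comparison never runs here because `merge_at_eq` is false.
    assert!(!can_merge(false, true, true, "reviews", "reviews"));
    assert!(can_merge(true, true, true, "reviews", "reviews"));
}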
@@ -2331,7 +2345,14 @@ impl FetchDependencyGraphNode { }) .transpose()?; let subgraph_schema = query_graph.schema_by_source(&self.subgraph_name)?; - let variable_usages = selection.used_variables()?; + + let variable_usages = { + let set = selection.used_variables()?; + let mut list = set.into_iter().cloned().collect::>(); + list.sort(); + list + }; + let mut operation = if self.is_entity_fetch { operation_for_entities_fetch( subgraph_schema, @@ -2520,8 +2541,7 @@ fn operation_for_entities_fetch( let mut variable_definitions: Vec> = Vec::with_capacity(all_variable_definitions.len() + 1); variable_definitions.push(representations_variable_definition(subgraph_schema)?); - let mut used_variables = HashSet::new(); - selection_set.collect_variables(&mut used_variables)?; + let used_variables = selection_set.used_variables()?; variable_definitions.extend( all_variable_definitions .iter() @@ -2530,14 +2550,14 @@ fn operation_for_entities_fetch( ); let query_type_name = subgraph_schema.schema().root_operation(OperationType::Query).ok_or_else(|| - SingleFederationError::InvalidGraphQL { + SingleFederationError::InvalidSubgraph { message: "Subgraphs should always have a query root (they should at least provide _entities)".to_string() })?; let query_type = match subgraph_schema.get_type(query_type_name.clone())? { crate::schema::position::TypeDefinitionPosition::Object(o) => o, _ => { - return Err(SingleFederationError::InvalidGraphQL { + return Err(SingleFederationError::InvalidSubgraph { message: "the root query type must be an object".to_string(), } .into()) @@ -2549,7 +2569,7 @@ fn operation_for_entities_fetch( .fields .contains_key(&ENTITIES_QUERY) { - return Err(SingleFederationError::InvalidGraphQL { + return Err(SingleFederationError::InvalidSubgraph { message: "Subgraphs should always have the _entities field".to_string(), } .into()); @@ -2604,8 +2624,7 @@ fn operation_for_query_fetch( variable_definitions: &[Node], operation_name: &Option, ) -> Result { - let mut used_variables = HashSet::new(); - selection_set.collect_variables(&mut used_variables)?; + let used_variables = selection_set.used_variables()?; let variable_definitions = variable_definitions .iter() .filter(|definition| used_variables.contains(&definition.name)) @@ -2950,6 +2969,10 @@ struct ComputeNodesStackItem<'a> { defer_context: DeferContext, } +#[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument(skip_all, level = "trace") +)] pub(crate) fn compute_nodes_for_tree( dependency_graph: &mut FetchDependencyGraph, initial_tree: &OpPathTree, @@ -2958,6 +2981,11 @@ pub(crate) fn compute_nodes_for_tree( initial_defer_context: DeferContext, initial_conditions: &OpGraphPathContext, ) -> Result, FederationError> { + snapshot!( + "OpPathTree", + serde_json_bytes::json!(initial_tree.to_string()).to_string(), + "path_tree" + ); let mut stack = vec![ComputeNodesStackItem { tree: initial_tree, node_id: initial_node_id, @@ -2965,7 +2993,7 @@ pub(crate) fn compute_nodes_for_tree( context: initial_conditions, defer_context: initial_defer_context, }]; - let mut created_nodes = IndexSet::new(); + let mut created_nodes = IndexSet::default(); while let Some(stack_item) = stack.pop() { let node = FetchDependencyGraph::node_weight_mut(&mut dependency_graph.graph, stack_item.node_id)?; @@ -3044,9 +3072,14 @@ pub(crate) fn compute_nodes_for_tree( } } } + snapshot!(dependency_graph, "updated_dependency_graph"); Ok(created_nodes) } +#[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument(skip_all, level = "trace") +)] fn
compute_nodes_for_key_resolution<'a>( dependency_graph: &mut FetchDependencyGraph, stack_item: &ComputeNodesStackItem<'a>, @@ -3197,6 +3230,10 @@ fn compute_nodes_for_key_resolution<'a>( }) } +#[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument(skip_all, level = "trace") +)] fn compute_nodes_for_root_type_resolution<'a>( dependency_graph: &mut FetchDependencyGraph, stack_item: &ComputeNodesStackItem<'_>, @@ -3294,6 +3331,7 @@ fn compute_nodes_for_root_type_resolution<'a>( }) } +#[cfg_attr(feature = "snapshot_tracing", tracing::instrument(skip_all, level = "trace", fields(label = operation.to_string())))] fn compute_nodes_for_op_path_element<'a>( dependency_graph: &mut FetchDependencyGraph, stack_item: &ComputeNodesStackItem<'a>, diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs b/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs index 4fa0250f46..4ee9b57da0 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs @@ -43,7 +43,6 @@ const FETCH_COST: QueryPlanCost = 1000.0; /// The exact number is a tad arbitrary however. const PIPELINING_COST: QueryPlanCost = 100.0; -#[derive(Clone)] pub(crate) struct FetchDependencyGraphToQueryPlanProcessor { variable_definitions: Vec>, fragments: Option, @@ -57,14 +56,14 @@ pub(crate) struct FetchDependencyGraphToQueryPlanProcessor { /// A plan is essentially some mix of sequences and parallels of fetches. And the plan cost /// is about minimizing both: /// 1. The expected total latency of executing the plan. Typically, doing 2 fetches in -/// parallel will most likely have much better latency then executing those exact same -/// fetches in sequence, and so the cost of the latter must be greater than that of -/// the former. +/// parallel will most likely have much better latency than executing those exact same +/// fetches in sequence, and so the cost of the latter must be greater than that of +/// the former. /// 2. The underlying use of resources. For instance, if we query 2 fields and we have -/// the choice between getting those 2 fields from a single subgraph in 1 fetch, or -/// get each from a different subgraph with 2 fetches in parallel, then we want to -/// favor the former as just doing a fetch in and of itself has a cost in terms of -/// resources consumed. +/// the choice between getting those 2 fields from a single subgraph in 1 fetch, or +/// get each from a different subgraph with 2 fetches in parallel, then we want to +/// favor the former as just doing a fetch in and of itself has a cost in terms of +/// resources consumed. /// /// Do note that at the moment, this cost is solely based on the "shape" of the plan and has /// to make some conservative assumption regarding concrete runtime behaviour. In particular, diff --git a/apollo-federation/src/query_plan/generate.rs b/apollo-federation/src/query_plan/generate.rs index 68d2d798df..0511deb498 100644 --- a/apollo-federation/src/query_plan/generate.rs +++ b/apollo-federation/src/query_plan/generate.rs @@ -98,6 +98,10 @@ struct Extracted { /// The `Option`s inside `type Choices = Vec>` are for internal use /// and should all be `Some` when calling this function. /// * `plan_builder`: a struct that implements the `PlanBuilder` trait.
+#[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument(skip_all, level = "trace") +)] pub fn generate_all_plans_and_find_best( mut initial: Plan, to_add: Vec>, diff --git a/apollo-federation/src/query_plan/mod.rs b/apollo-federation/src/query_plan/mod.rs index 162e563b3d..f620e208b3 100644 --- a/apollo-federation/src/query_plan/mod.rs +++ b/apollo-federation/src/query_plan/mod.rs @@ -4,6 +4,7 @@ use apollo_compiler::executable; use apollo_compiler::validation::Valid; use apollo_compiler::ExecutableDocument; use apollo_compiler::Name; +use serde::Serialize; use crate::query_plan::query_planner::QueryPlanningStatistics; @@ -17,13 +18,13 @@ pub(crate) mod query_planning_traversal; pub type QueryPlanCost = f64; -#[derive(Debug, Default, PartialEq)] +#[derive(Debug, Default, PartialEq, Serialize)] pub struct QueryPlan { pub node: Option, pub statistics: QueryPlanningStatistics, } -#[derive(Debug, PartialEq, derive_more::From)] +#[derive(Debug, PartialEq, derive_more::From, Serialize)] pub enum TopLevelPlanNode { Subscription(SubscriptionNode), #[from(types(FetchNode))] @@ -36,14 +37,14 @@ pub enum TopLevelPlanNode { Condition(Box), } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct SubscriptionNode { pub primary: Box, // XXX(@goto-bus-stop) Is this not just always a SequenceNode? pub rest: Option>, } -#[derive(Debug, Clone, PartialEq, derive_more::From)] +#[derive(Debug, Clone, PartialEq, derive_more::From, Serialize)] pub enum PlanNode { #[from(types(FetchNode))] Fetch(Box), @@ -55,7 +56,7 @@ pub enum PlanNode { Condition(Box), } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct FetchNode { pub subgraph_name: Arc, /// Optional identifier for the fetch for defer support. All fetches of a given plan will be @@ -67,12 +68,15 @@ pub struct FetchNode { /// `FragmentSpread`. // PORT_NOTE: This was its own type in the JS codebase, but it's likely simpler to just have the // constraint be implicit for router instead of creating a new type. + #[serde(serialize_with = "crate::display_helpers::serialize_optional_vec_as_string")] pub requires: Option>, // PORT_NOTE: We don't serialize the "operation" string in this struct, as these query plan // nodes are meant for direct consumption by router (without any serdes), so we leave the // question of whether it needs to be serialized to router. + #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] pub operation_document: Valid, pub operation_name: Option, + #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] pub operation_kind: executable::OperationType, /// Optionally describe a number of "rewrites" that query plan executors should apply to the /// data that is sent as the input of this fetch. Note that such rewrites should only impact the @@ -88,17 +92,17 @@ pub struct FetchNode { pub context_rewrites: Vec>, } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct SequenceNode { pub nodes: Vec, } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct ParallelNode { pub nodes: Vec, } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct FlattenNode { pub path: Vec, pub node: Box, @@ -120,7 +124,7 @@ pub struct FlattenNode { /// we implement more advanced server-side heuristics to decide if deferring is judicious or not. 
/// This allows the executor of the plan to consistently send a defer-abiding multipart response to /// the client. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct DeferNode { /// The "primary" part of a defer, that is the non-deferred part (though could be deferred /// itself for a nested defer). @@ -132,7 +136,7 @@ } /// The primary block of a `DeferNode`. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct PrimaryDeferBlock { /// The part of the original query that "selects" the data to send in that primary response /// (once the plan in `node` completes). Note that if the parent `DeferNode` is nested, then its /// sub-selection will start at that parent `DeferredNode.query_path`. Note that this can be /// `None` in the rare case that everything in the original query is deferred (which is not very /// useful in practice, but not disallowed by the @defer spec at the moment). + #[serde(skip)] pub sub_selection: Option, /// The plan to get all the data for the primary block. Same notes as for subselection: usually /// defined, but can be undefined in some corner cases where nothing is to be done in the @@ -148,7 +153,7 @@ } /// A deferred block of a `DeferNode`. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct DeferredDeferBlock { /// References one or more fetch node(s) (by `id`) within `DeferNode.primary.node`. The plan of /// this deferred part should not be started until all such fetches return. @@ -160,6 +165,7 @@ pub query_path: Vec, /// The part of the original query that "selects" the data to send in the deferred response /// (once the plan in `node` completes). Will be set _unless_ `node` is a `DeferNode` itself. + #[serde(serialize_with = "crate::display_helpers::serialize_as_debug_string")] pub sub_selection: Option, /// The plan to get all the data for this deferred block. Usually set, but can be `None` for a /// `@defer` application where everything has been fetched in the "primary block" (i.e. when @@ -170,13 +176,13 @@ pub node: Option>, } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct DeferredDependency { /// A `FetchNode` ID. pub id: String, } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct ConditionNode { pub condition_variable: Name, pub if_clause: Option>, @@ -187,14 +193,14 @@ /// /// A rewrite usually identifies some sub-part of the data and some action to perform on that /// sub-part. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub enum FetchDataRewrite { ValueSetter(FetchDataValueSetter), KeyRenamer(FetchDataKeyRenamer), } /// A rewrite that sets a value at the provided path of the data it is applied to. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct FetchDataValueSetter { /// Path to the value that is set by this "rewrite". pub path: Vec, @@ -204,7 +210,7 @@ } /// A rewrite that renames the key at the provided path of the data it is applied to. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize)] pub struct FetchDataKeyRenamer { /// Path to the key that is renamed by this "rewrite".
pub path: Vec, @@ -227,7 +233,7 @@ pub struct FetchDataKeyRenamer { /// Note that the `@` is currently optional in some contexts, as query plan execution may assume /// upon encountering array data in a path that it should match the remaining path to the array's /// elements. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)] pub enum FetchDataPathElement { Key(Name), AnyIndex, @@ -236,9 +242,11 @@ pub enum FetchDataPathElement { /// Vectors of this element match a path in a query. Each element is (1) a field in a query, or (2) /// an inline fragment in a query. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, serde::Serialize)] pub enum QueryPathElement { + #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] Field(executable::Field), + #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] InlineFragment(executable::InlineFragment), } diff --git a/apollo-federation/src/query_plan/query_planner.rs b/apollo-federation/src/query_plan/query_planner.rs index f0df9fab42..012dd65392 100644 --- a/apollo-federation/src/query_plan/query_planner.rs +++ b/apollo-federation/src/query_plan/query_planner.rs @@ -2,14 +2,13 @@ use std::cell::Cell; use std::num::NonZeroU32; use std::sync::Arc; +use apollo_compiler::collections::IndexMap; +use apollo_compiler::collections::IndexSet; use apollo_compiler::validation::Valid; use apollo_compiler::ExecutableDocument; use apollo_compiler::Name; -use indexmap::IndexMap; -use indexmap::IndexSet; use itertools::Itertools; -use petgraph::csr::NodeIndex; -use petgraph::stable_graph::IndexType; +use serde::Serialize; use crate::error::FederationError; use crate::error::SingleFederationError; @@ -43,6 +42,7 @@ use crate::schema::position::OutputTypeDefinitionPosition; use crate::schema::position::SchemaRootDefinitionKind; use crate::schema::position::TypeDefinitionPosition; use crate::schema::ValidFederationSchema; +use crate::utils::logging::snapshot; use crate::ApiSchemaOptions; use crate::Supergraph; @@ -167,7 +167,7 @@ impl Default for QueryPlannerDebugConfig { } // PORT_NOTE: renamed from PlanningStatistics in the JS codebase. -#[derive(Debug, PartialEq, Default)] +#[derive(Debug, PartialEq, Default, Serialize)] pub struct QueryPlanningStatistics { pub evaluated_plan_count: Cell, } @@ -199,6 +199,10 @@ pub struct QueryPlanner { } impl QueryPlanner { + #[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument(level = "trace", skip_all, name = "QueryPlanner::new") + )] pub fn new( supergraph: &Supergraph, config: QueryPlannerConfig, @@ -311,22 +315,27 @@ impl QueryPlanner { } // PORT_NOTE: this receives an `Operation` object in JS which is a concept that doesn't exist in apollo-rs. + #[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument(level = "trace", skip_all, name = "QueryPlanner::build_query_plan") + )] pub fn build_query_plan( &self, document: &Valid, operation_name: Option, ) -> Result { let operation = document - .get_operation(operation_name.as_ref().map(|name| name.as_str())) + .operations + .get(operation_name.as_ref().map(|name| name.as_str())) // TODO(@goto-bus-stop) this is not an internal error, but a user error .map_err(|_| FederationError::internal("requested operation does not exist"))?; if operation.selection_set.selections.is_empty() { // This should never happen because `operation` comes from a known-valid document. 
- return Err(SingleFederationError::InvalidGraphQL { - message: "Invalid operation: empty selection set".to_string(), - } - .into()); + // TODO(@goto-bus-stop) it's probably fair to panic here :) + return Err(FederationError::internal( + "Invalid operation: empty selection set", + )); } let is_subscription = operation.is_subscription(); @@ -403,6 +412,16 @@ impl QueryPlanner { return Ok(QueryPlan::default()); } + snapshot!( + "NormalizedOperation", + serde_json_bytes::json!({ + "original": &operation.serialize().to_string(), + "normalized": &normalized_operation.to_string() + }) + .to_string(), + "normalized operation" + ); + let Some(root) = self .federated_query_graph .root_kinds_to_nodes()? @@ -427,7 +446,7 @@ impl QueryPlanner { } else { None }; - let processor = FetchDependencyGraphToQueryPlanProcessor::new( + let mut processor = FetchDependencyGraphToQueryPlanProcessor::new( operation.variables.clone(), rebased_fragments, operation.name.clone(), @@ -437,7 +456,6 @@ impl QueryPlanner { supergraph_schema: self.supergraph_schema.clone(), federated_query_graph: self.federated_query_graph.clone(), operation: Arc::new(normalized_operation), - processor, head: *root, // PORT_NOTE(@goto-bus-stop): In JS, `root` is a `RootVertex`, which is dynamically // checked at various points in query planning. This is our Rust equivalent of that. @@ -455,7 +473,7 @@ impl QueryPlanner { Some(defer_conditions) if !defer_conditions.is_empty() => { compute_plan_for_defer_conditionals(&mut parameters, defer_conditions)? } - _ => compute_plan_internal(&mut parameters, has_defers)?, + _ => compute_plan_internal(&mut parameters, &mut processor, has_defers)?, }; let root_node = match root_node { @@ -499,10 +517,14 @@ impl QueryPlanner { None => None, }; - Ok(QueryPlan { + let plan = QueryPlan { node: root_node, statistics, - }) + }; + + snapshot!(plan, "query plan"); + + Ok(plan) } /// Get Query Planner's API Schema. @@ -560,7 +582,7 @@ fn compute_root_serial_dependency_graph( // } // then we should _not_ merge the 2 `mut1` fields (contrary to what happens on queried fields). - prev_path = OpPathTree::merge(&prev_path, &new_path); + Arc::make_mut(&mut prev_path).extend(&new_path); fetch_dependency_graph = FetchDependencyGraph::new( supergraph_schema.clone(), federated_query_graph.clone(), @@ -590,16 +612,20 @@ fn compute_root_serial_dependency_graph( Ok(digest) } -fn only_root_subgraph(graph: &FetchDependencyGraph) -> Result { +fn only_root_subgraph(graph: &FetchDependencyGraph) -> Result, FederationError> { let mut iter = graph.root_node_by_subgraph_iter(); - let (Some((_, index)), None) = (iter.next(), iter.next()) else { + let (Some((name, _)), None) = (iter.next(), iter.next()) else { return Err(FederationError::internal(format!( "{graph} should have only one root."
))); }; - Ok(index.index() as u32) + Ok(name.clone()) } +#[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument(level = "trace", skip_all, name = "compute_root_fetch_groups") +)] pub(crate) fn compute_root_fetch_groups( root_kind: SchemaRootDefinitionKind, dependency_graph: &mut FetchDependencyGraph, @@ -628,6 +654,7 @@ pub(crate) fn compute_root_fetch_groups( }; let fetch_dependency_node = dependency_graph.get_or_create_root_node(subgraph_name, root_kind, root_type)?; + snapshot!(dependency_graph, "tree_with_root_node"); compute_nodes_for_tree( dependency_graph, &child.tree, @@ -644,8 +671,17 @@ fn compute_root_parallel_dependency_graph( parameters: &QueryPlanningParameters, has_defers: bool, ) -> Result { + snapshot!( + "FetchDependencyGraph", + "Empty", + "Starting process to construct a parallel fetch dependency graph" + ); let selection_set = parameters.operation.selection_set.clone(); let best_plan = compute_root_parallel_best_plan(parameters, selection_set, has_defers)?; + snapshot!( + best_plan.fetch_dependency_graph, + "Plan returned from compute_root_parallel_best_plan" + ); Ok(best_plan.fetch_dependency_graph) } @@ -671,6 +707,7 @@ fn compute_root_parallel_best_plan( fn compute_plan_internal( parameters: &mut QueryPlanningParameters, + processor: &mut FetchDependencyGraphToQueryPlanProcessor, has_defers: bool, ) -> Result, FederationError> { let root_kind = parameters.operation.root_kind; @@ -682,11 +719,9 @@ fn compute_plan_internal( let mut primary_selection = None::; for mut dependency_graph in dependency_graphs { let (local_main, local_deferred) = - dependency_graph.process(&mut parameters.processor, root_kind)?; + dependency_graph.process(&mut *processor, root_kind)?; main = match main { - Some(unlocal_main) => parameters - .processor - .reduce_sequence([Some(unlocal_main), local_main]), + Some(unlocal_main) => processor.reduce_sequence([Some(unlocal_main), local_main]), None => local_main, }; deferred.extend(local_deferred); @@ -704,7 +739,11 @@ fn compute_plan_internal( } else { let mut dependency_graph = compute_root_parallel_dependency_graph(parameters, has_defers)?; - let (main, deferred) = dependency_graph.process(&mut parameters.processor, root_kind)?; + let (main, deferred) = dependency_graph.process(&mut *processor, root_kind)?; + snapshot!( + dependency_graph, + "Plan after calling FetchDependencyGraph::process" + ); // XXX(@goto-bus-stop) Maybe `.defer_tracking` should be on the return value of `process()`..? 
let primary_selection = dependency_graph.defer_tracking.primary_selection; @@ -717,9 +756,7 @@ fn compute_plan_internal( let Some(primary_selection) = primary_selection else { unreachable!("Should have had a primary selection created"); }; - parameters - .processor - .reduce_defer(main, &primary_selection, deferred) + processor.reduce_defer(main, &primary_selection, deferred) } } diff --git a/apollo-federation/src/query_plan/query_planning_traversal.rs b/apollo-federation/src/query_plan/query_planning_traversal.rs index 07072a94c4..edfdca8fa8 100644 --- a/apollo-federation/src/query_plan/query_planning_traversal.rs +++ b/apollo-federation/src/query_plan/query_planning_traversal.rs @@ -1,8 +1,10 @@ use std::sync::Arc; -use indexmap::IndexSet; +use apollo_compiler::collections::IndexSet; use petgraph::graph::EdgeIndex; use petgraph::graph::NodeIndex; +use serde::Serialize; +use tracing::trace; use crate::error::FederationError; use crate::operation::Operation; @@ -30,7 +32,6 @@ use crate::query_plan::fetch_dependency_graph::compute_nodes_for_tree; use crate::query_plan::fetch_dependency_graph::FetchDependencyGraph; use crate::query_plan::fetch_dependency_graph_processor::FetchDependencyGraphProcessor; use crate::query_plan::fetch_dependency_graph_processor::FetchDependencyGraphToCostProcessor; -use crate::query_plan::fetch_dependency_graph_processor::FetchDependencyGraphToQueryPlanProcessor; use crate::query_plan::generate::generate_all_plans_and_find_best; use crate::query_plan::generate::PlanBuilder; use crate::query_plan::query_planner::compute_root_fetch_groups; @@ -42,6 +43,7 @@ use crate::schema::position::CompositeTypeDefinitionPosition; use crate::schema::position::ObjectTypeDefinitionPosition; use crate::schema::position::SchemaRootDefinitionKind; use crate::schema::ValidFederationSchema; +use crate::utils::logging::snapshot; // PORT_NOTE: Named `PlanningParameters` in the JS codebase, but there was no particular reason to // leave out the `Query` prefix, so it's been added for consistency. Similar to `GraphPath`, we @@ -56,8 +58,6 @@ pub(crate) struct QueryPlanningParameters<'a> { pub(crate) federated_query_graph: Arc, /// The operation to be query planned. pub(crate) operation: Arc, - /// A processor for converting fetch dependency graphs to query plans. - pub(crate) processor: FetchDependencyGraphToQueryPlanProcessor, /// The query graph node at which query planning begins. pub(crate) head: NodeIndex, /// Whether the head must be a root node for query planning. @@ -106,7 +106,7 @@ pub(crate) struct QueryPlanningTraversal<'a, 'b> { resolver_cache: ConditionResolverCache, } -#[derive(Debug)] +#[derive(Debug, Serialize)] struct OpenBranchAndSelections { /// The options for this open branch. open_branch: OpenBranch, @@ -125,6 +125,7 @@ impl std::fmt::Debug for PlanInfo { } } +#[derive(Serialize)] pub(crate) struct BestQueryPlanInfo { /// The fetch dependency graph for this query plan. pub fetch_dependency_graph: FetchDependencyGraph, @@ -152,6 +153,10 @@ impl BestQueryPlanInfo { } impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { + #[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument(level = "trace", skip_all, name = "QueryPlanningTraversal::new") + )] pub fn new( // TODO(@goto-bus-stop): This probably needs a mutable reference for some of the // yet-unimplemented methods, and storing a mutable ref in `Self` here smells bad. @@ -178,6 +183,10 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { // Many arguments are okay for a private constructor function.
#[allow(clippy::too_many_arguments)] + #[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument(level = "trace", skip_all, name = "QueryPlanningTraversal::new_inner") + )] fn new_inner( parameters: &'a QueryPlanningParameters, selection_set: SelectionSet, @@ -208,6 +217,7 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { parameters.head, ) .unwrap(); + // In JS this is done *inside* create_initial_options, which would require awareness of the // query graph. let tail = parameters @@ -245,11 +255,27 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { // PORT_NOTE: In JS, the traversal is still usable after finding the best plan. Here we consume // the struct so we do not need to return a reference, which is very unergonomic. + #[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument( + level = "trace", + skip_all, + name = "QueryPlanningTraversal::find_best_plan" + ) + )] pub fn find_best_plan(mut self) -> Result, FederationError> { self.find_best_plan_inner()?; Ok(self.best_plan) } + #[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument( + level = "trace", + skip_all, + name = "QueryPlanningTraversal::find_best_plan_inner" + ) + )] fn find_best_plan_inner(&mut self) -> Result, FederationError> { while let Some(mut current_branch) = self.open_branches.pop() { let Some(current_selection) = current_branch.selections.pop() else { @@ -260,6 +286,7 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { let (terminate_planning, new_branch) = self.handle_open_branch(&current_selection, &mut current_branch.open_branch.0)?; if terminate_planning { + trace!("Planning terminated!"); // We clear both open branches and closed ones as a means to terminate the plan // computation with no plan. self.open_branches = vec![]; @@ -279,6 +306,14 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { /// Returns whether to terminate planning immediately, and any new open branches to push onto /// the stack. + #[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument( + level = "trace", + skip_all, + name = "QueryPlanningTraversal::handle_open_branch" + ) + )] fn handle_open_branch( &mut self, selection: &Selection, @@ -287,6 +322,15 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { let operation_element = selection.element()?; let mut new_options = vec![]; let mut no_followups: bool = false; + + snapshot!(name = "Options", options, "options"); + + snapshot!( + "OperationElement", + operation_element.to_string(), + "operation_element" + ); + for option in options.iter_mut() { let followups_for_option = option.advance_with_operation_element( self.parameters.supergraph_schema.clone(), @@ -316,6 +360,8 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { } } + snapshot!(new_options, "new_options"); + if no_followups { // This operation element is valid from this option, but is guaranteed to yield no result // (e.g. it's a type condition with no intersection with a prior type condition). Given @@ -386,8 +432,8 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { }; } - if let Some(selection_set) = selection.selection_set()?
{ - let mut all_tail_nodes = IndexSet::new(); + if let Some(selection_set) = selection.selection_set() { + let mut all_tail_nodes = IndexSet::default(); for option in &new_options { for path in &option.paths.0 { all_tail_nodes.insert(path.tail); @@ -544,7 +590,21 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { } } + #[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument( + level = "trace", + skip_all, + name = "QueryPlanningTraversal::compute_best_plan_from_closed_branches" + ) + )] fn compute_best_plan_from_closed_branches(&mut self) -> Result<(), FederationError> { + snapshot!( + name = "ClosedBranches", + self.closed_branches, + "closed_branches" + ); + if self.closed_branches.is_empty() { return Ok(()); } @@ -552,6 +612,12 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { self.sort_options_in_closed_branches()?; self.reduce_options_if_needed(); + snapshot!( + name = "ClosedBranches", + self.closed_branches, + "closed_branches_after_reduce" + ); + // debug log // self.closed_branches // .iter() @@ -578,6 +644,7 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { let (first_group, second_group) = self.closed_branches.split_at(sole_path_branch_index); let initial_tree; + snapshot!("FetchDependencyGraph", "", "Generating initial dep graph"); let mut initial_dependency_graph = self.new_dependency_graph(); let federated_query_graph = &self.parameters.federated_query_graph; let root = &self.parameters.head; @@ -597,6 +664,10 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { &single_choice_branches, )?; self.updated_dependency_graph(&mut initial_dependency_graph, &initial_tree)?; + snapshot!( + initial_dependency_graph, + "Updated dep graph with initial tree" + ); if first_group.is_empty() { // Well, we have the only possible plan; it's also the best. 
let cost = self.cost(&mut initial_dependency_graph)?; @@ -606,6 +677,9 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { cost, } .into(); + + snapshot!(self.best_plan, "best_plan"); + return Ok(()); } } @@ -642,6 +716,8 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { cost, } .into(); + + snapshot!(self.best_plan, "best_plan"); Ok(()) } @@ -875,6 +951,14 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { ) } + #[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument( + level = "trace", + skip_all, + name = "QueryPlanningTraversal::updated_dependency_graph" + ) + )] fn updated_dependency_graph( &self, dependency_graph: &mut FetchDependencyGraph, @@ -911,9 +995,19 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { &Default::default(), )?; } + + snapshot!(dependency_graph, "updated_dependency_graph"); Ok(()) } + #[cfg_attr( + feature = "snapshot_tracing", + tracing::instrument( + level = "trace", + skip_all, + name = "QueryPlanningTraversal::resolve_condition_plan" + ) + )] fn resolve_condition_plan( &self, edge: EdgeIndex, @@ -939,7 +1033,6 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { supergraph_schema: self.parameters.supergraph_schema.clone(), federated_query_graph: graph.clone(), operation: self.parameters.operation.clone(), - processor: self.parameters.processor.clone(), abstract_types_with_inconsistent_runtime_types: self .parameters .abstract_types_with_inconsistent_runtime_types diff --git a/apollo-federation/src/schema/field_set.rs b/apollo-federation/src/schema/field_set.rs index bdc9f5d299..077f33c7d5 100644 --- a/apollo-federation/src/schema/field_set.rs +++ b/apollo-federation/src/schema/field_set.rs @@ -1,10 +1,10 @@ +use apollo_compiler::collections::IndexMap; use apollo_compiler::executable; use apollo_compiler::executable::FieldSet; use apollo_compiler::schema::ExtendedType; use apollo_compiler::schema::NamedType; use apollo_compiler::validation::Valid; use apollo_compiler::Schema; -use indexmap::IndexMap; use crate::error::FederationError; use crate::error::MultipleFederationErrors; @@ -64,7 +64,7 @@ pub(crate) fn parse_field_set( )?; // field set should not contain any named fragments - let named_fragments = NamedFragments::new(&IndexMap::new(), schema); + let named_fragments = NamedFragments::new(&IndexMap::default(), schema); let selection_set = SelectionSet::from_selection_set(&field_set.selection_set, &named_fragments, schema)?; diff --git a/apollo-federation/src/schema/mod.rs b/apollo-federation/src/schema/mod.rs index 70f51dd335..14ae55e56b 100644 --- a/apollo-federation/src/schema/mod.rs +++ b/apollo-federation/src/schema/mod.rs @@ -3,11 +3,11 @@ use std::hash::Hasher; use std::ops::Deref; use std::sync::Arc; +use apollo_compiler::collections::IndexSet; use apollo_compiler::schema::ExtendedType; use apollo_compiler::validation::Valid; use apollo_compiler::Name; use apollo_compiler::Schema; -use indexmap::IndexSet; use referencer::Referencers; use crate::error::FederationError; @@ -146,7 +146,7 @@ impl FederationSchema { composite_type_definition_position: CompositeTypeDefinitionPosition, ) -> Result, FederationError> { Ok(match composite_type_definition_position { - CompositeTypeDefinitionPosition::Object(pos) => IndexSet::from([pos]), + CompositeTypeDefinitionPosition::Object(pos) => IndexSet::from_iter([pos]), CompositeTypeDefinitionPosition::Interface(pos) => self .referencers() .get_interface_type(&pos.type_name)? 
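A side note on the recurring change from `IndexMap::new()` to `IndexMap::default()` (and `IndexSet::from(...)` to `IndexSet::from_iter(...)`) throughout this diff: `indexmap` only provides `new()` and the array `From` impl when the hasher is the standard library's `RandomState`, so a type alias with a custom hasher, which `apollo_compiler::collections::IndexMap` is presumed to be here, has to be constructed with `default()` or `with_hasher()` instead. A standalone sketch with a stand-in hasher:

use std::collections::hash_map::DefaultHasher;
use std::hash::BuildHasherDefault;

use indexmap::IndexMap;

// Stand-in for an alias with a non-default hasher, like the one assumed to
// back `apollo_compiler::collections::IndexMap`.
type FastIndexMap<K, V> = IndexMap<K, V, BuildHasherDefault<DefaultHasher>>;

fn main() {
    // `FastIndexMap::new()` would not compile: `new()` is only defined when
    // the hasher is `std::collections::hash_map::RandomState`.
    let mut map = FastIndexMap::default();
    map.insert("Query", 1);
    assert_eq!(map.get("Query"), Some(&1));
}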
diff --git a/apollo-federation/src/schema/position.rs b/apollo-federation/src/schema/position.rs index 5ede64c640..ad0a81f944 100644 --- a/apollo-federation/src/schema/position.rs +++ b/apollo-federation/src/schema/position.rs @@ -4,6 +4,7 @@ use std::fmt::Formatter; use std::ops::Deref; use apollo_compiler::ast; +use apollo_compiler::collections::IndexSet; use apollo_compiler::name; use apollo_compiler::schema::Component; use apollo_compiler::schema::ComponentName; @@ -23,8 +24,8 @@ use apollo_compiler::schema::UnionType; use apollo_compiler::Name; use apollo_compiler::Node; use apollo_compiler::Schema; -use indexmap::IndexSet; use lazy_static::lazy_static; +use serde::Serialize; use strum::IntoEnumIterator; use crate::error::FederationError; @@ -337,7 +338,7 @@ infallible_conversions!(CompositeTypeDefinitionPosition::{Object, Interface, Uni infallible_conversions!(AbstractTypeDefinitionPosition::{Interface, Union} -> OutputTypeDefinitionPosition); infallible_conversions!(ObjectOrInterfaceTypeDefinitionPosition::{Object, Interface} -> OutputTypeDefinitionPosition); -#[derive(Clone, PartialEq, Eq, Hash, derive_more::From, derive_more::Display)] +#[derive(Clone, PartialEq, Eq, Hash, derive_more::From, derive_more::Display, Serialize)] pub(crate) enum CompositeTypeDefinitionPosition { Object(ObjectTypeDefinitionPosition), Interface(InterfaceTypeDefinitionPosition), @@ -675,7 +676,7 @@ fallible_conversions!(OutputTypeDefinitionPosition::{Object, Interface} -> Objec fallible_conversions!(CompositeTypeDefinitionPosition::{Object, Interface} -> ObjectOrInterfaceTypeDefinitionPosition); fallible_conversions!(AbstractTypeDefinitionPosition::{Interface} -> ObjectOrInterfaceTypeDefinitionPosition); -#[derive(Clone, PartialEq, Eq, Hash, derive_more::From, derive_more::Display)] +#[derive(Clone, PartialEq, Eq, Hash, derive_more::From, derive_more::Display, Serialize)] pub(crate) enum FieldDefinitionPosition { Object(ObjectFieldDefinitionPosition), Interface(InterfaceFieldDefinitionPosition), @@ -992,7 +993,15 @@ impl SchemaDefinitionPosition { } #[derive( - Debug, Copy, Clone, PartialEq, Eq, Hash, strum_macros::Display, strum_macros::EnumIter, + Debug, + Copy, + Clone, + PartialEq, + Eq, + Hash, + strum_macros::Display, + strum_macros::EnumIter, + Serialize, )] pub(crate) enum SchemaRootDefinitionKind { #[strum(to_string = "query")] @@ -1532,7 +1541,7 @@ impl Display for ScalarTypeDefinitionPosition { } } -#[derive(Clone, PartialEq, Eq, Hash)] +#[derive(Clone, PartialEq, Eq, Hash, Serialize)] pub(crate) struct ObjectTypeDefinitionPosition { pub(crate) type_name: Name, } @@ -2035,7 +2044,7 @@ impl Debug for ObjectTypeDefinitionPosition { } } -#[derive(Clone, PartialEq, Eq, Hash)] +#[derive(Clone, PartialEq, Eq, Hash, Serialize)] pub(crate) struct ObjectFieldDefinitionPosition { pub(crate) type_name: Name, pub(crate) field_name: Name, @@ -2713,7 +2722,7 @@ impl Debug for ObjectFieldArgumentDefinitionPosition { } } -#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)] pub(crate) struct InterfaceTypeDefinitionPosition { pub(crate) type_name: Name, } @@ -3138,7 +3147,7 @@ impl Display for InterfaceTypeDefinitionPosition { } } -#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)] pub(crate) struct InterfaceFieldDefinitionPosition { pub(crate) type_name: Name, pub(crate) field_name: Name, @@ -3819,7 +3828,7 @@ impl Display for InterfaceFieldArgumentDefinitionPosition { } } -#[derive(Debug, Clone, PartialEq, Eq, 
Hash)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)] pub(crate) struct UnionTypeDefinitionPosition { pub(crate) type_name: Name, } @@ -4182,7 +4191,7 @@ impl Display for UnionTypeDefinitionPosition { } } -#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)] pub(crate) struct UnionTypenameFieldDefinitionPosition { pub(crate) type_name: Name, } @@ -5981,7 +5990,7 @@ pub(crate) fn is_graphql_reserved_name(name: &str) -> bool { lazy_static! { static ref GRAPHQL_BUILTIN_SCALAR_NAMES: IndexSet = { - IndexSet::from([ + IndexSet::from_iter([ name!("Int"), name!("Float"), name!("String"), @@ -5990,7 +5999,7 @@ lazy_static! { ]) }; static ref GRAPHQL_BUILTIN_DIRECTIVE_NAMES: IndexSet = { - IndexSet::from([ + IndexSet::from_iter([ name!("include"), name!("skip"), name!("deprecated"), @@ -6067,10 +6076,12 @@ fn validate_arguments(arguments: &[Node]) -> Result<(), Fe impl FederationSchema { /// Note that the input schema must be partially valid, in that: + /// /// 1. All schema element references must point to an existing schema element of the appropriate /// kind (e.g. object type fields must return an existing output type). /// 2. If the schema uses the core/link spec, then usages of the @core/@link directive must be /// valid. + /// /// The input schema may be otherwise invalid GraphQL (e.g. it may not contain a Query type). If /// you want a ValidFederationSchema, use ValidFederationSchema::new() instead. pub(crate) fn new(schema: Schema) -> Result { diff --git a/apollo-federation/src/schema/referencer.rs b/apollo-federation/src/schema/referencer.rs index eee5c395ba..e4cf9973c3 100644 --- a/apollo-federation/src/schema/referencer.rs +++ b/apollo-federation/src/schema/referencer.rs @@ -1,6 +1,6 @@ +use apollo_compiler::collections::IndexMap; +use apollo_compiler::collections::IndexSet; use apollo_compiler::Name; -use indexmap::IndexMap; -use indexmap::IndexSet; use crate::error::FederationError; use crate::error::SingleFederationError; diff --git a/apollo-federation/src/schema/subgraph_metadata.rs b/apollo-federation/src/schema/subgraph_metadata.rs index a0352cac48..936b0b6e9c 100644 --- a/apollo-federation/src/schema/subgraph_metadata.rs +++ b/apollo-federation/src/schema/subgraph_metadata.rs @@ -1,6 +1,6 @@ +use apollo_compiler::collections::IndexSet; use apollo_compiler::validation::Valid; use apollo_compiler::Schema; -use indexmap::IndexSet; use crate::error::FederationError; use crate::link::federation_spec_definition::FederationSpecDefinition; @@ -120,7 +120,7 @@ impl ExternalMetadata { .referencers .get_directive(&external_directive_definition.name)?; - let mut external_fields = IndexSet::new(); + let mut external_fields = IndexSet::default(); external_fields.extend( external_directive_referencers @@ -143,7 +143,7 @@ impl ExternalMetadata { federation_spec_definition: &'static FederationSpecDefinition, schema: &Valid, ) -> Result, FederationError> { - let mut fake_external_fields = IndexSet::new(); + let mut fake_external_fields = IndexSet::default(); let extends_directive_definition = federation_spec_definition.extends_directive_definition(schema)?; let key_directive_definition = @@ -192,7 +192,7 @@ impl ExternalMetadata { federation_spec_definition: &'static FederationSpecDefinition, schema: &Valid, ) -> Result, FederationError> { - let mut provided_fields = IndexSet::new(); + let mut provided_fields = IndexSet::default(); let provides_directive_definition = federation_spec_definition.provides_directive_definition(schema)?; let 
provides_directive_referencers = schema @@ -241,7 +241,7 @@ impl ExternalMetadata { .referencers .get_directive(&external_directive_definition.name)?; - let mut fields_on_external_types = IndexSet::new(); + let mut fields_on_external_types = IndexSet::default(); for object_type_position in &external_directive_referencers.object_types { let object_type = object_type_position.get(schema.schema())?; // PORT_NOTE: The JS codebase does not differentiate fields at a definition/extension @@ -256,15 +256,12 @@ impl ExternalMetadata { Ok(fields_on_external_types) } - pub(crate) fn is_external( - &self, - field_definition_position: &FieldDefinitionPosition, - ) -> Result { - Ok((self.external_fields.contains(field_definition_position) + pub(crate) fn is_external(&self, field_definition_position: &FieldDefinitionPosition) -> bool { + (self.external_fields.contains(field_definition_position) || self .fields_on_external_types .contains(field_definition_position)) - && !self.is_fake_external(field_definition_position)) + && !self.is_fake_external(field_definition_position) } pub(crate) fn is_fake_external( @@ -275,38 +272,35 @@ impl ExternalMetadata { .contains(field_definition_position) } - pub(crate) fn selects_any_external_field( - &self, - selection_set: &SelectionSet, - ) -> Result { + pub(crate) fn selects_any_external_field(&self, selection_set: &SelectionSet) -> bool { for selection in selection_set.selections.values() { if let Selection::Field(field_selection) = selection { - if self.is_external(&field_selection.field.field_position)? { - return Ok(true); + if self.is_external(&field_selection.field.field_position) { + return true; } } - if let Some(selection_set) = selection.selection_set()? { - if self.selects_any_external_field(selection_set)? { - return Ok(true); + if let Some(selection_set) = selection.selection_set() { + if self.selects_any_external_field(selection_set) { + return true; } } } - Ok(false) + false } pub(crate) fn is_partially_external( &self, field_definition_position: &FieldDefinitionPosition, - ) -> Result { - Ok(self.is_external(field_definition_position)? - && self.provided_fields.contains(field_definition_position)) + ) -> bool { + self.is_external(field_definition_position) + && self.provided_fields.contains(field_definition_position) } pub(crate) fn is_fully_external( &self, field_definition_position: &FieldDefinitionPosition, - ) -> Result { - Ok(self.is_external(field_definition_position)? 
- && !self.provided_fields.contains(field_definition_position)) + ) -> bool { + self.is_external(field_definition_position) + && !self.provided_fields.contains(field_definition_position) } } diff --git a/apollo-federation/src/schema/type_and_directive_specification.rs b/apollo-federation/src/schema/type_and_directive_specification.rs index 397e713aed..e1078b0415 100644 --- a/apollo-federation/src/schema/type_and_directive_specification.rs +++ b/apollo-federation/src/schema/type_and_directive_specification.rs @@ -1,6 +1,8 @@ use apollo_compiler::ast::DirectiveLocation; use apollo_compiler::ast::FieldDefinition; use apollo_compiler::ast::Value; +use apollo_compiler::collections::IndexMap; +use apollo_compiler::collections::IndexSet; use apollo_compiler::schema::Component; use apollo_compiler::schema::ComponentName; use apollo_compiler::schema::DirectiveDefinition; @@ -14,8 +16,6 @@ use apollo_compiler::schema::Type; use apollo_compiler::schema::UnionType; use apollo_compiler::Name; use apollo_compiler::Node; -use indexmap::IndexMap; -use indexmap::IndexSet; use crate::error::FederationError; use crate::error::MultipleFederationErrors; @@ -147,7 +147,7 @@ impl TypeAndDirectiveSpecification for ObjectTypeSpecification { return MultipleFederationErrors::from_iter(errors).into_result(); } - let mut field_map = IndexMap::new(); + let mut field_map = IndexMap::default(); for ref field_spec in field_specs { let field_def: FieldDefinition = field_spec.into(); field_map.insert(field_spec.name.clone(), Component::new(field_def)); diff --git a/apollo-federation/src/snapshots/apollo_federation__merge__tests__inaccessible.snap b/apollo-federation/src/snapshots/apollo_federation__merge__tests__inaccessible.snap new file mode 100644 index 0000000000..29501545a7 --- /dev/null +++ b/apollo-federation/src/snapshots/apollo_federation__merge__tests__inaccessible.snap @@ -0,0 +1,78 @@ +--- +source: apollo-federation/src/merge.rs +expression: schema.serialize() +--- +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) { + query: Query +} + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on ENUM | INPUT_OBJECT | INTERFACE | OBJECT | SCALAR | UNION + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, overrideLabel: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on INTERFACE | OBJECT + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +enum link__Purpose { + """ + SECURITY features provide metadata necessary to securely resolve fields. 
+ """ + SECURITY + """EXECUTION features provide metadata necessary for operation execution.""" + EXECUTION +} + +scalar link__Import + +scalar join__FieldSet + +enum join__Graph { + INACCESSIBLE @join__graph(name: "inaccessible", url: "") + INACCESSIBLE_2 @join__graph(name: "inaccessible_2", url: "") +} + +type Query @join__type(graph: INACCESSIBLE) @join__type(graph: INACCESSIBLE_2) { + a( + input: Input @inaccessible, + ): A @join__field(graph: INACCESSIBLE) + b: B @inaccessible @join__field(graph: INACCESSIBLE) + as: [A] @inaccessible @join__field(graph: INACCESSIBLE_2) +} + +type A @join__type(graph: INACCESSIBLE, key: "id") @join__type(graph: INACCESSIBLE_2, key: "id") { + id: ID! @join__field(graph: INACCESSIBLE) @join__field(graph: INACCESSIBLE_2) + c: Int @inaccessible @join__field(graph: INACCESSIBLE) @join__field(graph: INACCESSIBLE_2) + d: Enum @inaccessible @join__field(graph: INACCESSIBLE) +} + +type B implements Interface @join__type(graph: INACCESSIBLE) @inaccessible @join__implements(graph: INACCESSIBLE, interface: "Interface") { + b: Scalar @join__field(graph: INACCESSIBLE) +} + +enum Enum @join__type(graph: INACCESSIBLE) @inaccessible { + A @join__enumValue(graph: INACCESSIBLE) + B @join__enumValue(graph: INACCESSIBLE) + C @inaccessible @join__enumValue(graph: INACCESSIBLE) +} + +input Input @join__type(graph: INACCESSIBLE) @inaccessible { + a: Int @inaccessible + b: String +} + +scalar Scalar @join__type(graph: INACCESSIBLE) @inaccessible + +interface Interface @join__type(graph: INACCESSIBLE) @inaccessible { + b: Scalar +} + +union Union @join__type(graph: INACCESSIBLE) @inaccessible @join__unionMember(graph: INACCESSIBLE, member: "A") @join__unionMember(graph: INACCESSIBLE, member: "B") = A | B diff --git a/apollo-federation/src/snapshots/apollo_federation__merge__tests__steel_thread.snap b/apollo-federation/src/snapshots/apollo_federation__merge__tests__steel_thread.snap index c2efe7b3f4..8ace8b2b86 100644 --- a/apollo-federation/src/snapshots/apollo_federation__merge__tests__steel_thread.snap +++ b/apollo-federation/src/snapshots/apollo_federation__merge__tests__steel_thread.snap @@ -2,7 +2,7 @@ source: apollo-federation/src/merge.rs expression: schema.serialize() --- -schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) { +schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) { query: Query } @@ -20,6 +20,8 @@ directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE +directive @inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + enum link__Purpose { """ SECURITY features provide metadata necessary to securely resolve fields. 
@@ -51,5 +53,5 @@ type User @join__type(graph: CONNECTOR_QUERY_USER_0, key: "id") @join__type(grap type Query @join__type(graph: CONNECTOR_QUERY_USER_0) @join__type(graph: CONNECTOR_QUERY_USERS_0) @join__type(graph: CONNECTOR_USER_D_1) @join__type(graph: GRAPHQL) { user(id: ID!): User @join__field(graph: CONNECTOR_QUERY_USER_0) users: [User] @join__field(graph: CONNECTOR_QUERY_USERS_0) - _: ID @join__field(graph: CONNECTOR_USER_D_1) + _: ID @inaccessible @join__field(graph: CONNECTOR_USER_D_1) } diff --git a/apollo-federation/src/sources/connect/expand/merge/inaccessible.graphql b/apollo-federation/src/sources/connect/expand/merge/inaccessible.graphql new file mode 100644 index 0000000000..6d088605e7 --- /dev/null +++ b/apollo-federation/src/sources/connect/expand/merge/inaccessible.graphql @@ -0,0 +1,102 @@ +schema { + query: Query +} + +extend schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/federation/v2.5") + +directive @link( + url: String + as: String + for: link__Purpose + import: [link__Import] +) repeatable on SCHEMA + +directive @federation__key( + fields: federation__FieldSet! + resolvable: Boolean = true +) repeatable on OBJECT | INTERFACE + +directive @federation__requires( + fields: federation__FieldSet! +) on FIELD_DEFINITION + +directive @federation__provides( + fields: federation__FieldSet! +) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag( + name: String! +) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes( + scopes: [[federation__Scope!]!]! +) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +type Query { + a(input: Input @federation__inaccessible): A + b: B @federation__inaccessible +} + +type A @federation__key(fields: "id") { + id: ID! 
+ c: Int @federation__inaccessible + d: Enum @federation__inaccessible +} + +type B implements Interface @federation__inaccessible { + b: Scalar +} + +enum Enum @federation__inaccessible { + A + B + C @federation__inaccessible +} + +input Input @federation__inaccessible { + a: Int @federation__inaccessible + b: String +} + +scalar Scalar @federation__inaccessible + +interface Interface @federation__inaccessible { + b: Scalar @federation__inaccessible +} + +union Union @federation__inaccessible = A | B diff --git a/apollo-federation/src/sources/connect/expand/merge/inaccessible_2.graphql b/apollo-federation/src/sources/connect/expand/merge/inaccessible_2.graphql new file mode 100644 index 0000000000..2f8641a81b --- /dev/null +++ b/apollo-federation/src/sources/connect/expand/merge/inaccessible_2.graphql @@ -0,0 +1,77 @@ +schema { + query: Query +} + +extend schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/federation/v2.5") + +directive @link( + url: String + as: String + for: link__Purpose + import: [link__Import] +) repeatable on SCHEMA + +directive @federation__key( + fields: federation__FieldSet! + resolvable: Boolean = true +) repeatable on OBJECT | INTERFACE + +directive @federation__requires( + fields: federation__FieldSet! +) on FIELD_DEFINITION + +directive @federation__provides( + fields: federation__FieldSet! +) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag( + name: String! +) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes( + scopes: [[federation__Scope!]!]! +) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +type Query { + as: [A] @federation__inaccessible +} + +type A @federation__key(fields: "id") { + id: ID! 
+ c: Int @federation__inaccessible +} diff --git a/apollo-federation/src/sources/connect/json_selection/apply_to.rs b/apollo-federation/src/sources/connect/json_selection/apply_to.rs index 80bf5e1a77..08cb5d0fcb 100644 --- a/apollo-federation/src/sources/connect/json_selection/apply_to.rs +++ b/apollo-federation/src/sources/connect/json_selection/apply_to.rs @@ -3,8 +3,8 @@ use std::hash::Hash; use std::hash::Hasher; -use indexmap::IndexMap; -use indexmap::IndexSet; +use apollo_compiler::collections::IndexMap; +use apollo_compiler::collections::IndexSet; use itertools::Itertools; use serde_json_bytes::json; use serde_json_bytes::Map; @@ -20,7 +20,7 @@ pub trait ApplyTo { // explicitly support), which are distinct from null values (which it does // support). fn apply_to(&self, data: &JSON) -> (Option, Vec) { - self.apply_with_vars(data, &IndexMap::new()) + self.apply_with_vars(data, &IndexMap::default()) } fn apply_with_vars( @@ -30,7 +30,7 @@ pub trait ApplyTo { ) -> (Option, Vec) { let mut input_path = vec![]; // Using IndexSet over HashSet to preserve the order of the errors. - let mut errors = IndexSet::new(); + let mut errors = IndexSet::default(); let value = self.apply_to_path(data, vars, &mut input_path, &mut errors); (value, errors.into_iter().collect()) } @@ -343,7 +343,7 @@ impl ApplyTo for SubSelection { }; let mut output = Map::new(); - let mut input_names = IndexSet::new(); + let mut input_names = IndexSet::default(); for named_selection in &self.selections { let value = named_selection.apply_to_path(data, vars, input_path, errors); @@ -1184,7 +1184,7 @@ mod tests { ), ); - let mut vars = IndexMap::new(); + let mut vars = IndexMap::default(); vars.insert("$args".to_string(), json!({ "id": "id from args" })); assert_eq!( selection!("id: $args.id name").apply_with_vars(&data, &vars), @@ -1208,7 +1208,7 @@ mod tests { }))], ), ); - let mut vars_without_args_id = IndexMap::new(); + let mut vars_without_args_id = IndexMap::default(); vars_without_args_id.insert("$args".to_string(), json!({ "unused": "ignored" })); assert_eq!( selection!("id: $args.id name").apply_with_vars(&data, &vars_without_args_id), diff --git a/apollo-federation/src/sources/connect/url_path_template.rs b/apollo-federation/src/sources/connect/url_path_template.rs index 86f390e3c7..dda02f89cb 100644 --- a/apollo-federation/src/sources/connect/url_path_template.rs +++ b/apollo-federation/src/sources/connect/url_path_template.rs @@ -1,7 +1,7 @@ use std::collections::HashSet; use std::fmt::Display; -use indexmap::IndexMap; +use apollo_compiler::collections::IndexMap; use itertools::Itertools; use nom::branch::alt; use nom::bytes::complete::tag; @@ -80,7 +80,7 @@ impl URLPathTemplate { } } - let mut query = IndexMap::new(); + let mut query = IndexMap::default(); if let Some(query_suffix) = query_suffix { for query_part in query_suffix.split('&') { diff --git a/apollo-federation/src/subgraph/mod.rs b/apollo-federation/src/subgraph/mod.rs index 9f3cc35ad0..959d731ec3 100644 --- a/apollo-federation/src/subgraph/mod.rs +++ b/apollo-federation/src/subgraph/mod.rs @@ -2,6 +2,8 @@ use std::collections::BTreeMap; use std::fmt::Formatter; use std::sync::Arc; +use apollo_compiler::collections::IndexMap; +use apollo_compiler::collections::IndexSet; use apollo_compiler::name; use apollo_compiler::schema::ComponentName; use apollo_compiler::schema::ExtendedType; @@ -10,8 +12,6 @@ use apollo_compiler::validation::Valid; use apollo_compiler::Node; use apollo_compiler::Schema; use indexmap::map::Entry; -use indexmap::IndexMap; -use 
indexmap::IndexSet; use crate::error::FederationError; use crate::link::spec::Identity; @@ -235,8 +235,8 @@ impl Subgraph { description: None, name: query_type_name.name.clone(), directives: Default::default(), - fields: IndexMap::new(), - implements_interfaces: IndexSet::new(), + fields: IndexMap::default(), + implements_interfaces: IndexSet::default(), }))) { let query_type = query_type.make_mut(); diff --git a/apollo-federation/src/subgraph/spec.rs b/apollo-federation/src/subgraph/spec.rs index 75dc226a4b..9c3aa1ebd7 100644 --- a/apollo-federation/src/subgraph/spec.rs +++ b/apollo-federation/src/subgraph/spec.rs @@ -9,6 +9,8 @@ use apollo_compiler::ast::FieldDefinition; use apollo_compiler::ast::InputValueDefinition; use apollo_compiler::ast::Type; use apollo_compiler::ast::Value; +use apollo_compiler::collections::IndexMap; +use apollo_compiler::collections::IndexSet; use apollo_compiler::name; use apollo_compiler::schema::Component; use apollo_compiler::schema::ComponentName; @@ -21,8 +23,6 @@ use apollo_compiler::ty; use apollo_compiler::InvalidNameError; use apollo_compiler::Name; use apollo_compiler::Node; -use indexmap::IndexMap; -use indexmap::IndexSet; use lazy_static::lazy_static; use thiserror::Error; @@ -98,7 +98,7 @@ enum FederationDirectiveName { lazy_static! { static ref FEDERATION_DIRECTIVE_NAMES_TO_ENUM: IndexMap = { - IndexMap::from([ + IndexMap::from_iter([ (COMPOSE_DIRECTIVE_NAME, FederationDirectiveName::Compose), (KEY_DIRECTIVE_NAME, FederationDirectiveName::Key), (EXTENDS_DIRECTIVE_NAME, FederationDirectiveName::Extends), @@ -135,13 +135,13 @@ pub enum FederationSpecError { }, #[error("Unsupported federation directive import {0}")] UnsupportedFederationDirective(String), - #[error("Invalid GraphQL name {0}")] - InvalidGraphQLName(String), + #[error(transparent)] + InvalidGraphQLName(InvalidNameError), } impl From for FederationSpecError { fn from(err: InvalidNameError) -> Self { - FederationSpecError::InvalidGraphQLName(format!("Invalid GraphQL name \"{}\"", err.name)) + FederationSpecError::InvalidGraphQLName(err) } } @@ -554,8 +554,8 @@ impl FederationSpecDefinitions { description: None, name: SERVICE_TYPE, directives: Default::default(), - fields: IndexMap::new(), - implements_interfaces: IndexSet::new(), + fields: IndexMap::default(), + implements_interfaces: IndexSet::default(), }; service_type.fields.insert( name!("_sdl"), @@ -648,7 +648,8 @@ impl LinkSpecDefinitions { .into(), ), ] - .into(), + .into_iter() + .collect(), } } diff --git a/apollo-federation/src/utils/logging.rs b/apollo-federation/src/utils/logging.rs new file mode 100644 index 0000000000..fd7bb4b3d2 --- /dev/null +++ b/apollo-federation/src/utils/logging.rs @@ -0,0 +1,52 @@ +/// This macro is a wrapper around `tracing::trace!` and should not be confused with our snapshot +/// testing. The primary goal of this macro is to add the necessary context to logging statements +/// so that external tools (like the snapshot log visualizer) can show how various key data +/// structures evolve over the course of planning a query. +/// +/// There are two ways of creating a snapshot. The easiest is by passing the macro an identifier +/// for the value you'd like to take a snapshot of. This will tag the snapshot with the type +/// name of the value, serialize the value to a JSON string using serde_json, and add the message +/// literal that you pass in.
EX: +/// ```no_test +/// snapshot!(dependency_graph, "updated dependency graph"); +/// // Generates: +/// // trace!(snapshot = "FetchDependencyGraph", data = "{ .. }", "updated dependency graph"); +/// ``` +/// If you do not want to serialize the data, you can pass the snapshot's name tag and the data +/// in directly. Note that the data needs to implement the tracing crate's `Value` trait. Ideally, +/// this is a string representation of the data you're snapshotting. EX: +/// ```no_test +/// snapshot!("FetchDependencyGraph", dependency_graph.to_string(), "updated dependency graph"); +/// // Generates: +/// // trace!(snapshot = "FetchDependencyGraph", data = dependency_graph.to_string(), "updated dependency graph"); +/// ``` +macro_rules! snapshot { + ($value:expr, $msg:literal) => { + #[cfg(feature = "snapshot_tracing")] + tracing::trace!( + snapshot = std::any::type_name_of_val(&$value), + data = serde_json::to_string(&$value).expect(concat!( + "Could not serialize value for a snapshot with message: ", + $msg + )), + $msg + ); + }; + (name = $name:literal, $value:expr, $msg:literal) => { + #[cfg(feature = "snapshot_tracing")] + tracing::trace!( + snapshot = $name, + data = serde_json::to_string(&$value).expect(concat!( + "Could not serialize value for a snapshot with message: ", + $msg + )), + $msg + ); + }; + ($name:literal, $value:expr, $msg:literal) => { + #[cfg(feature = "snapshot_tracing")] + tracing::trace!(snapshot = $name, data = $value, $msg); + }; +} + +pub(crate) use snapshot; diff --git a/apollo-federation/src/utils/mod.rs b/apollo-federation/src/utils/mod.rs new file mode 100644 index 0000000000..31348d2fda --- /dev/null +++ b/apollo-federation/src/utils/mod.rs @@ -0,0 +1 @@ +pub mod logging;
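For illustration, here is a minimal sketch of how planner code might call `snapshot!` with the `snapshot_tracing` feature enabled. The `FetchDependencyGraph` struct and `update_graph` function below are hypothetical stand-ins, not part of this diff; the only assumption is a value that implements `serde::Serialize`:

```rust
use serde::Serialize;

use crate::utils::logging::snapshot;

// Hypothetical planner state; any `Serialize` type works the same way.
#[derive(Serialize)]
struct FetchDependencyGraph {
    node_count: usize,
}

fn update_graph(graph: &mut FetchDependencyGraph) {
    graph.node_count += 1;
    // First form: tags the snapshot with the value's type name and
    // serializes the value to a JSON string via serde_json.
    snapshot!(graph, "updated dependency graph");
    // Explicit-name form: a literal name tag plus pre-rendered string data,
    // mirroring the second example in the macro's doc comment.
    snapshot!(
        "FetchDependencyGraph",
        graph.node_count.to_string(),
        "updated dependency graph"
    );
}
```

Because each expansion is gated behind `#[cfg(feature = "snapshot_tracing")]`, both calls compile away entirely in default builds, so they should be safe to leave in hot query-planning paths.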
diff --git a/apollo-federation/tests/api_schema.rs b/apollo-federation/tests/api_schema.rs index 57d6961cfb..9b039ef3f8 100644 --- a/apollo-federation/tests/api_schema.rs +++ b/apollo-federation/tests/api_schema.rs @@ -2038,8 +2038,13 @@ fn inaccessible_on_builtins() { // Note this is different from the JS implementation insta::assert_snapshot!(errors, @r###" - The following errors occurred: - - built-in scalar definitions must be omitted + Error: built-in scalar definitions must be omitted + ╭─[schema.graphql:26:7] + │ + 26 │ scalar String @inaccessible + │ ─────────────┬───────────── + │ ╰─────────────── remove this scalar definition + ────╯ "###); } diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests.rs b/apollo-federation/tests/query_plan/build_query_plan_tests.rs index cb2e31a52d..e849bf74c2 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests.rs @@ -41,6 +41,7 @@ mod interface_object; mod interface_type_explosion; mod introspection_typename_handling; mod merged_abstract_types_handling; +mod mutations; mod named_fragments; mod named_fragments_preservation; mod provides; diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/fetch_operation_names.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/fetch_operation_names.rs index 5ef9d53de5..a18565aed0 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/fetch_operation_names.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/fetch_operation_names.rs @@ -265,7 +265,7 @@ fn correctly_handle_case_where_there_is_too_many_plans_to_consider() { assert_eq!(fetch.subgraph_name.as_ref(), "S1"); assert!(fetch.requires.is_none()); assert!(fetch.operation_document.fragments.is_empty()); - let mut operations = fetch.operation_document.all_operations(); + let mut operations = fetch.operation_document.operations.iter(); let operation = operations.next().unwrap(); assert!(operations.next().is_none()); // operation is essentially: diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/interface_type_explosion.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/interface_type_explosion.rs index 9707e396e2..22a12f1615 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/interface_type_explosion.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/interface_type_explosion.rs @@ -5,16 +5,16 @@ fn handles_non_matching_value_types_under_interface_field() { type Query { i: I } - + interface I { s: S } - + type T implements I @key(fields: "id") { id: ID! s: S @shareable } - + type S @shareable { x: Int } @@ -24,7 +24,7 @@ fn handles_non_matching_value_types_under_interface_field() { id: ID! s: S @shareable } - + type S @shareable { x: Int y: Int @@ -90,16 +90,16 @@ fn skip_type_explosion_early_if_unnecessary() { type Query { i: I } - + interface I { s: S } - + type T implements I @key(fields: "id") { id: ID! s: S @shareable } - + type S @shareable { x: Int y: Int @@ -110,7 +110,7 @@ fn skip_type_explosion_early_if_unnecessary() { id: ID! s: S @shareable } - + type S @shareable { x: Int y: Int diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/mutations.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/mutations.rs new file mode 100644 index 0000000000..6b20362aca --- /dev/null +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/mutations.rs @@ -0,0 +1,135 @@ +const SUBGRAPH_A: &str = r#" + type Foo @key(fields: "id") { + id: ID! + bar: String + } + + type Query { + foo: Foo + } + + type Mutation { + updateFooInA: Foo + } +"#; + +const SUBGRAPH_B: &str = r#" + type Mutation { + updateFooInB: Foo + } + + type Foo @key(fields: "id") { + id: ID!
+ baz: Int + } +"#; + +#[test] +fn adjacent_mutations_get_merged() { + let planner = planner!( + SubgraphA: SUBGRAPH_A, + SubgraphB: SUBGRAPH_B, + ); + assert_plan!( + &planner, + r#" + mutation TestMutation { + updateInAOne: updateFooInA { + id + bar + } + updateInATwo: updateFooInA { + id + bar + } + updateInBOne: updateFooInB { + id + baz + } + } + "#, + @r###" + QueryPlan { + Sequence { + Fetch(service: "SubgraphA") { + { + updateInAOne: updateFooInA { + id + bar + } + updateInATwo: updateFooInA { + id + bar + } + } + }, + Fetch(service: "SubgraphB") { + { + updateInBOne: updateFooInB { + id + baz + } + } + }, + }, + } + "### + ); +} + +#[test] +fn non_adjacent_mutations_do_not_get_merged() { + let planner = planner!( + SubgraphA: SUBGRAPH_A, + SubgraphB: SUBGRAPH_B, + ); + assert_plan!( + &planner, + r#" + mutation TestMutation { + updateInAOne: updateFooInA { + id + bar + } + updateInBOne: updateFooInB { + id + baz + } + updateInATwo: updateFooInA { + id + bar + } + } + "#, + @r###" + QueryPlan { + Sequence { + Fetch(service: "SubgraphA") { + { + updateInAOne: updateFooInA { + id + bar + } + } + }, + Fetch(service: "SubgraphB") { + { + updateInBOne: updateFooInB { + id + baz + } + } + }, + Fetch(service: "SubgraphA") { + { + updateInATwo: updateFooInA { + id + bar + } + } + }, + }, + } + "### + ); +} diff --git a/apollo-federation/tests/query_plan/supergraphs/adjacent_mutations_get_merged.graphql b/apollo-federation/tests/query_plan/supergraphs/adjacent_mutations_get_merged.graphql new file mode 100644 index 0000000000..e99930397b --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/adjacent_mutations_get_merged.graphql @@ -0,0 +1,67 @@ +# Composed from subgraphs with hash: 2655e7da6754e73955fece01d7cbb5f21085bdbb +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query + mutation: Mutation +} + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +type Foo + @join__type(graph: SUBGRAPHA, key: "id") + @join__type(graph: SUBGRAPHB, key: "id") +{ + id: ID! + bar: String @join__field(graph: SUBGRAPHA) + baz: Int @join__field(graph: SUBGRAPHB) +} + +scalar join__FieldSet + +enum join__Graph { + SUBGRAPHA @join__graph(name: "SubgraphA", url: "none") + SUBGRAPHB @join__graph(name: "SubgraphB", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. 
+ """ + EXECUTION +} + +type Mutation + @join__type(graph: SUBGRAPHA) + @join__type(graph: SUBGRAPHB) +{ + updateFooInA: Foo @join__field(graph: SUBGRAPHA) + updateFooInB: Foo @join__field(graph: SUBGRAPHB) +} + +type Query + @join__type(graph: SUBGRAPHA) + @join__type(graph: SUBGRAPHB) +{ + foo: Foo @join__field(graph: SUBGRAPHA) +} diff --git a/apollo-federation/tests/query_plan/supergraphs/handles_non_matching_value_types_under_interface_field.graphql b/apollo-federation/tests/query_plan/supergraphs/handles_non_matching_value_types_under_interface_field.graphql index e0031f8458..27487542e0 100644 --- a/apollo-federation/tests/query_plan/supergraphs/handles_non_matching_value_types_under_interface_field.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/handles_non_matching_value_types_under_interface_field.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: ea9d061045c5d69c7ac50bff2b63f8b35e9494a5 +# Composed from subgraphs with hash: 688f2dad6c47c75df28f6cdd47bb6cc242311192 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/non_adjacent_mutations_do_not_get_merged.graphql b/apollo-federation/tests/query_plan/supergraphs/non_adjacent_mutations_do_not_get_merged.graphql new file mode 100644 index 0000000000..e99930397b --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/non_adjacent_mutations_do_not_get_merged.graphql @@ -0,0 +1,67 @@ +# Composed from subgraphs with hash: 2655e7da6754e73955fece01d7cbb5f21085bdbb +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query + mutation: Mutation +} + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +type Foo + @join__type(graph: SUBGRAPHA, key: "id") + @join__type(graph: SUBGRAPHB, key: "id") +{ + id: ID! + bar: String @join__field(graph: SUBGRAPHA) + baz: Int @join__field(graph: SUBGRAPHB) +} + +scalar join__FieldSet + +enum join__Graph { + SUBGRAPHA @join__graph(name: "SubgraphA", url: "none") + SUBGRAPHB @join__graph(name: "SubgraphB", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. 
+ """ + EXECUTION +} + +type Mutation + @join__type(graph: SUBGRAPHA) + @join__type(graph: SUBGRAPHB) +{ + updateFooInA: Foo @join__field(graph: SUBGRAPHA) + updateFooInB: Foo @join__field(graph: SUBGRAPHB) +} + +type Query + @join__type(graph: SUBGRAPHA) + @join__type(graph: SUBGRAPHB) +{ + foo: Foo @join__field(graph: SUBGRAPHA) +} diff --git a/apollo-federation/tests/query_plan/supergraphs/skip_type_explosion_early_if_unnecessary.graphql b/apollo-federation/tests/query_plan/supergraphs/skip_type_explosion_early_if_unnecessary.graphql index 4b24332cd3..2626014b39 100644 --- a/apollo-federation/tests/query_plan/supergraphs/skip_type_explosion_early_if_unnecessary.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/skip_type_explosion_early_if_unnecessary.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: b04cc15c476a83174e7ed381948ac73fdb2c4ea3 +# Composed from subgraphs with hash: 92a7f0921bb3da20d67bc49b77d12eecef437e91 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index c2acf8a5c8..cc1e73902a 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.51.0" +version = "1.52.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index aa62f1b73d..7bd39a628b 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.51.0" +version = "1.52.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.template.toml b/apollo-router-scaffold/templates/base/Cargo.template.toml index 4733b6c87d..21f679e602 100644 --- a/apollo-router-scaffold/templates/base/Cargo.template.toml +++ b/apollo-router-scaffold/templates/base/Cargo.template.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.51.0" +apollo-router = "1.52.0" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml index d5c86be60b..5194c11c10 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.51.0" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.52.0" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index c0875539eb..26618a16a6 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.51.0" +version = "1.52.0" authors = ["Apollo Graph, Inc. 
"] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" @@ -8,7 +8,7 @@ description = "A configurable, high-performance routing runtime for Apollo Feder license = "Elastic-2.0" # renovate-automation: rustc version -rust-version = "1.72.0" +rust-version = "1.76.0" edition = "2021" build = "build/main.rs" @@ -68,7 +68,7 @@ askama = "0.12.1" access-json = "0.1.0" anyhow = "1.0.86" apollo-compiler.workspace = true -apollo-federation = { path = "../apollo-federation", version = "=1.51.0" } +apollo-federation = { path = "../apollo-federation", version = "=1.52.0" } arc-swap = "1.6.0" async-channel = "1.9.0" async-compression = { version = "0.4.6", features = [ @@ -156,7 +156,11 @@ opentelemetry_sdk = { version = "0.20.0", default-features = false, features = [ ] } opentelemetry_api = "0.20.0" opentelemetry-aws = "0.8.0" -opentelemetry-datadog = { version = "0.8.0", features = ["reqwest-client"] } +# START TEMP DATADOG Temporarily remove until we upgrade otel to the latest version +# This means including the rmp library +# opentelemetry-datadog = { version = "0.8.0", features = ["reqwest-client"] } +rmp = "0.8" +# END TEMP DATADOG opentelemetry-http = "0.9.0" opentelemetry-jaeger = { version = "0.19.0", features = [ "collector_client", @@ -171,7 +175,7 @@ opentelemetry-otlp = { version = "0.13.0", default-features = false, features = "http-proto", "metrics", "reqwest-client", - "trace" + "trace", ] } opentelemetry-semantic-conventions = "0.12.0" opentelemetry-zipkin = { version = "0.18.0", default-features = false, features = [ @@ -186,7 +190,7 @@ prost = "0.12.6" prost-types = "0.12.6" proteus = "0.5.0" rand = "0.8.5" -rhai = { version = "=1.17.1", features = ["sync", "serde", "internals"] } +rhai = { version = "1.19.0", features = ["sync", "serde", "internals"] } regex = "1.10.5" reqwest.workspace = true @@ -268,6 +272,9 @@ time = { version = "0.3.36", features = ["serde"] } similar = { version = "2.5.0", features = ["inline"] } console = "0.15.8" bytesize = { version = "1.3.0", features = ["serde"] } +ahash = "0.8.11" +itoa = "1.0.9" +ryu = "1.0.15" [target.'cfg(macos)'.dependencies] uname = "0.1.1" @@ -305,6 +312,7 @@ opentelemetry-proto = { version = "0.5.0", features = [ "gen-tonic-messages", "with-serde", ] } +opentelemetry-datadog = { version = "0.8.0", features = ["reqwest-client"] } p256 = "0.13.2" rand_core = "0.6.4" reqwest = { version = "0.11.27", default-features = false, features = [ diff --git a/apollo-router/src/apollo_studio_interop/mod.rs b/apollo-router/src/apollo_studio_interop/mod.rs index 4909a7af42..e1571fa672 100644 --- a/apollo-router/src/apollo_studio_interop/mod.rs +++ b/apollo-router/src/apollo_studio_interop/mod.rs @@ -420,7 +420,8 @@ impl UsageGenerator<'_> { match self .signature_doc - .get_operation(self.operation_name.as_deref()) + .operations + .get(self.operation_name.as_deref()) .ok() { None => "".to_string(), @@ -494,7 +495,8 @@ impl UsageGenerator<'_> { match self .references_doc - .get_operation(self.operation_name.as_deref()) + .operations + .get(self.operation_name.as_deref()) .ok() { None => HashMap::new(), @@ -583,7 +585,8 @@ impl UsageGenerator<'_> { if let Ok(operation) = self .references_doc - .get_operation(self.operation_name.as_deref()) + .operations + .get(self.operation_name.as_deref()) { self.process_extended_refs_for_selection_set(&operation.selection_set); } diff --git a/apollo-router/src/apollo_studio_interop/tests.rs b/apollo-router/src/apollo_studio_interop/tests.rs index 
9547a411cb..b6c1e03d22 100644 --- a/apollo-router/src/apollo_studio_interop/tests.rs +++ b/apollo-router/src/apollo_studio_interop/tests.rs @@ -126,17 +126,14 @@ fn enums_from_response( response_body_str: &str, ) -> ReferencedEnums { let config = Configuration::default(); - let compiler_schema = Schema::parse_and_validate(schema_str, "schema.graphql").unwrap(); - let spec_schema = crate::spec::Schema::parse(schema_str, &config) - .unwrap() - .with_api_schema(compiler_schema.clone()); - let query = Query::parse(query_str, operation_name, &spec_schema, &config).unwrap(); + let schema = crate::spec::Schema::parse(schema_str, &config).unwrap(); + let query = Query::parse(query_str, operation_name, &schema, &config).unwrap(); let response_body: Object = serde_json::from_str(response_body_str).unwrap(); extract_enums_from_response( Arc::new(query), operation_name, - &compiler_schema, + schema.supergraph_schema(), &response_body, ) } diff --git a/apollo-router/src/axum_factory/compression/mod.rs b/apollo-router/src/axum_factory/compression/mod.rs index 40d5ef94d2..feef4f5a0a 100644 --- a/apollo-router/src/axum_factory/compression/mod.rs +++ b/apollo-router/src/axum_factory/compression/mod.rs @@ -31,9 +31,10 @@ pub(crate) enum Compressor { } impl Compressor { - pub(crate) fn new<'a, It: 'a>(it: It) -> Option + pub(crate) fn new<'a, It>(it: It) -> Option where It: Iterator, + It: 'a, { for s in it { match s { diff --git a/apollo-router/src/axum_factory/tests.rs b/apollo-router/src/axum_factory/tests.rs index b5946284ac..dde3852032 100644 --- a/apollo-router/src/axum_factory/tests.rs +++ b/apollo-router/src/axum_factory/tests.rs @@ -376,7 +376,7 @@ async fn it_displays_sandbox() { // Regular studio redirect let response = client - .get(&format!( + .get(format!( "{}/", server.graphql_listen_address().as_ref().unwrap() )) @@ -422,7 +422,7 @@ async fn it_displays_sandbox_with_different_supergraph_path() { // Regular studio redirect let response = client - .get(&format!( + .get(format!( "{}/custom", server.graphql_listen_address().as_ref().unwrap() )) @@ -739,7 +739,7 @@ async fn response_with_root_wildcard() -> Result<(), ApolloRouterError> { // Post query without path let response = client .post( - &server + server .graphql_listen_address() .as_ref() .unwrap() @@ -1046,7 +1046,7 @@ async fn cors_preflight() -> Result<(), ApolloRouterError> { let response = client .request( Method::OPTIONS, - &format!( + format!( "{}/graphql", server.graphql_listen_address().as_ref().unwrap() ), @@ -1197,7 +1197,7 @@ async fn it_displays_homepage() { .await .unwrap(); let response = client - .get(&format!( + .get(format!( "{}/", server.graphql_listen_address().as_ref().unwrap() )) @@ -1244,7 +1244,7 @@ async fn it_doesnt_display_disabled_homepage() { .await .unwrap(); let response = client - .get(&format!( + .get(format!( "{}/", server.graphql_listen_address().as_ref().unwrap() )) @@ -1303,7 +1303,7 @@ async fn it_answers_to_custom_endpoint() -> Result<(), ApolloRouterError> { for path in &["/a-custom-path", "/an-other-custom-path"] { let response = client - .get(&format!( + .get(format!( "{}{}", server.graphql_listen_address().as_ref().unwrap(), path @@ -1318,7 +1318,7 @@ async fn it_answers_to_custom_endpoint() -> Result<(), ApolloRouterError> { for path in &["/a-custom-path", "/an-other-custom-path"] { let response = client - .post(&format!( + .post(format!( "{}{}", server.graphql_listen_address().as_ref().unwrap(), path diff --git a/apollo-router/src/batching.rs b/apollo-router/src/batching.rs index 
eb6611e06e..b570587b6f 100644 --- a/apollo-router/src/batching.rs +++ b/apollo-router/src/batching.rs @@ -533,7 +533,7 @@ mod tests { // We should see the aggregation of all of the requests let actual: Vec = serde_json::from_str( - &String::from_utf8(request.into_body().to_bytes().await.unwrap().to_vec()).unwrap(), + std::str::from_utf8(&request.into_body().to_bytes().await.unwrap()).unwrap(), ) .unwrap(); @@ -561,6 +561,7 @@ mod tests { .body(graphql::Response::builder().data(data.clone()).build()) .unwrap(), context: Context::new(), + subgraph_name: None, }; tx.send(Ok(response)).unwrap(); diff --git a/apollo-router/src/configuration/cors.rs b/apollo-router/src/configuration/cors.rs index f099479ee1..313d863214 100644 --- a/apollo-router/src/configuration/cors.rs +++ b/apollo-router/src/configuration/cors.rs @@ -35,7 +35,7 @@ pub(crate) struct Cors { /// and make sure you either: /// - accept `x-apollo-operation-name` AND / OR `apollo-require-preflight` /// - defined `csrf` required headers in your yml configuration, as shown in the - /// `examples/cors-and-csrf/custom-headers.router.yaml` files. + /// `examples/cors-and-csrf/custom-headers.router.yaml` files. pub(crate) allow_headers: Vec, /// Which response headers should be made available to scripts running in the browser, diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs index cc10c70e6b..8cd6b56381 100644 --- a/apollo-router/src/configuration/metrics.rs +++ b/apollo-router/src/configuration/metrics.rs @@ -394,9 +394,8 @@ impl InstrumentData { // We need to update the entry we just made because the selected strategy is a named object in the config. // The jsonpath spec doesn't include a utility for getting the keys out of an object, so we do it manually. - if let Some((_, demand_control_attributes)) = self - .data - .get_mut(&"apollo.router.config.demand_control".to_string()) + if let Some((_, demand_control_attributes)) = + self.data.get_mut("apollo.router.config.demand_control") { Self::get_first_key_from_path( demand_control_attributes, diff --git a/apollo-router/src/configuration/migrations/0026-to_api_schema.yaml b/apollo-router/src/configuration/migrations/0026-to_api_schema.yaml new file mode 100644 index 0000000000..36b783e31c --- /dev/null +++ b/apollo-router/src/configuration/migrations/0026-to_api_schema.yaml @@ -0,0 +1,4 @@ +description: experimental_api_schema_generation_mode is no longer supported +actions: + - type: delete + path: experimental_api_schema_generation_mode diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index a9e6d3155c..e3f5ee071e 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -161,10 +161,6 @@ pub struct Configuration { #[serde(default)] pub(crate) experimental_chaos: Chaos, - /// Set the API schema generation implementation to use. - #[serde(default)] - pub(crate) experimental_api_schema_generation_mode: ApiSchemaMode, - /// Set the Apollo usage report signature and referenced field generation implementation to use. #[serde(default)] pub(crate) experimental_apollo_metrics_generation_mode: ApolloMetricsGenerationMode, @@ -204,21 +200,6 @@ impl PartialEq for Configuration { } } -/// API schema generation modes. -#[derive(Clone, PartialEq, Eq, Default, Derivative, Serialize, Deserialize, JsonSchema)] -#[derivative(Debug)] -#[serde(rename_all = "lowercase")] -pub(crate) enum ApiSchemaMode { - /// Use the new Rust-based implementation. 
- New, - /// Use the old JavaScript-based implementation. - Legacy, - /// Use Rust-based and Javascript-based implementations side by side, logging warnings if the - /// implementations disagree. - #[default] - Both, -} - /// Apollo usage report signature and referenced field generation modes. #[derive(Clone, PartialEq, Eq, Default, Derivative, Serialize, Deserialize, JsonSchema)] #[derivative(Debug)] @@ -275,7 +256,6 @@ impl<'de> serde::Deserialize<'de> for Configuration { batching: Batching, experimental_type_conditioned_fetching: bool, experimental_apollo_metrics_generation_mode: ApolloMetricsGenerationMode, - experimental_api_schema_generation_mode: ApiSchemaMode, experimental_query_planner_mode: QueryPlannerMode, } let ad_hoc: AdHocConfiguration = serde::Deserialize::deserialize(deserializer)?; @@ -295,7 +275,6 @@ impl<'de> serde::Deserialize<'de> for Configuration { persisted_queries: ad_hoc.persisted_queries, limits: ad_hoc.limits, experimental_chaos: ad_hoc.experimental_chaos, - experimental_api_schema_generation_mode: ad_hoc.experimental_api_schema_generation_mode, experimental_apollo_metrics_generation_mode: ad_hoc .experimental_apollo_metrics_generation_mode, experimental_type_conditioned_fetching: ad_hoc.experimental_type_conditioned_fetching, @@ -343,7 +322,6 @@ impl Configuration { operation_limits: Option, chaos: Option, uplink: Option, - experimental_api_schema_generation_mode: Option, experimental_type_conditioned_fetching: Option, batching: Option, experimental_apollo_metrics_generation_mode: Option, @@ -362,8 +340,6 @@ impl Configuration { persisted_queries: persisted_query.unwrap_or_default(), limits: operation_limits.unwrap_or_default(), experimental_chaos: chaos.unwrap_or_default(), - experimental_api_schema_generation_mode: experimental_api_schema_generation_mode - .unwrap_or_default(), experimental_apollo_metrics_generation_mode: experimental_apollo_metrics_generation_mode.unwrap_or_default(), experimental_query_planner_mode: experimental_query_planner_mode.unwrap_or_default(), @@ -467,7 +443,6 @@ impl Configuration { chaos: Option, uplink: Option, batching: Option, - experimental_api_schema_generation_mode: Option, experimental_type_conditioned_fetching: Option, experimental_apollo_metrics_generation_mode: Option, experimental_query_planner_mode: Option, @@ -481,8 +456,6 @@ impl Configuration { cors: cors.unwrap_or_default(), limits: operation_limits.unwrap_or_default(), experimental_chaos: chaos.unwrap_or_default(), - experimental_api_schema_generation_mode: experimental_api_schema_generation_mode - .unwrap_or_default(), experimental_apollo_metrics_generation_mode: experimental_apollo_metrics_generation_mode.unwrap_or_default(), experimental_query_planner_mode: experimental_query_planner_mode.unwrap_or_default(), diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index cfd488b7c6..1f66e536ba 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -107,31 +107,11 @@ expression: "&schema" }, "type": "object" }, - "ApiSchemaMode": { - "description": "API schema generation modes.", - "oneOf": [ - { - "description": "Use the new Rust-based implementation.", - "enum": [ - "new" - ], - "type": "string" - }, - { - "description": "Use the 
old JavaScript-based implementation.", - "enum": [ - "legacy" - ], - "type": "string" - }, - { - "description": "Use Rust-based and Javascript-based implementations side by side, logging warnings if the implementations disagree.", - "enum": [ - "both" - ], - "type": "string" - } - ] + "All": { + "enum": [ + "all" + ], + "type": "string" }, "ApolloMetricsGenerationMode": { "description": "Apollo usage report signature and referenced field generation modes.", @@ -513,6 +493,35 @@ expression: "&schema" }, "type": "object" }, + "CacheAttributes": { + "additionalProperties": false, + "properties": { + "entity.type": { + "default": null, + "description": "Entity type", + "nullable": true, + "type": "boolean" + } + }, + "type": "object" + }, + "CacheInstrumentsConfig": { + "additionalProperties": false, + "properties": { + "apollo.router.operations.entity.cache": { + "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::selectors::SubgraphSelector", + "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::selectors::SubgraphSelector" + } + }, + "type": "object" + }, + "CacheKind": { + "enum": [ + "hit", + "miss" + ], + "type": "string" + }, "CallbackMode": { "additionalProperties": false, "description": "Using a callback url", @@ -1459,6 +1468,24 @@ expression: "&schema" "default": {}, "description": "Custom mapping to be used as the resource field in spans, defaults to: router -> http.route supergraph -> graphql.operation.name query_planning -> graphql.operation.name subgraph -> subgraph.name subgraph_request -> subgraph.name http_request -> http.route", "type": "object" + }, + "span_metrics": { + "additionalProperties": { + "type": "boolean" + }, + "default": { + "execution": true, + "http_request": true, + "parse_query": true, + "query_planning": true, + "request": true, + "router": true, + "subgraph": true, + "subgraph_request": true, + "supergraph": true + }, + "description": "Which spans will be eligible for span stats to be collected for viewing in the APM view. 
Defaults to true for `request`, `router`, `query_parsing`, `supergraph`, `execution`, `query_planning`, `subgraph`, `subgraph_request` and `http_request`.", + "type": "object" } }, "required": [ @@ -1987,6 +2014,29 @@ expression: "&schema" } ] }, + "DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::selectors::SubgraphSelector": { + "anyOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "additionalProperties": false, + "properties": { + "attributes": { + "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::selectors::SubgraphSelector", + "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::selectors::SubgraphSelector" + } + }, + "required": [ + "attributes" + ], + "type": "object" + } + ] + }, "DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes_apollo_router::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector": { "anyOf": [ { @@ -2070,6 +2120,17 @@ expression: "&schema" ], "type": "string" }, + "EntityType": { + "anyOf": [ + { + "$ref": "#/definitions/All", + "description": "#/definitions/All" + }, + { + "type": "string" + } + ] + }, "ErrorConfig": { "properties": { "log": { @@ -3228,6 +3289,42 @@ expression: "&schema" } ] }, + "Instrument_for_CacheAttributes_and_SubgraphSelector_and_SubgraphValue": { + "additionalProperties": false, + "properties": { + "attributes": { + "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::selectors::SubgraphSelector", + "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::selectors::SubgraphSelector" + }, + "condition": { + "$ref": "#/definitions/Condition_for_SubgraphSelector", + "description": "#/definitions/Condition_for_SubgraphSelector" + }, + "description": { + "description": "The description of the instrument.", + "type": "string" + }, + "type": { + "$ref": "#/definitions/InstrumentType", + "description": "#/definitions/InstrumentType" + }, + "unit": { + "description": "The units of the instrument, e.g. 
\"ms\", \"bytes\", \"requests\".", + "type": "string" + }, + "value": { + "$ref": "#/definitions/SubgraphValue", + "description": "#/definitions/SubgraphValue" + } + }, + "required": [ + "description", + "type", + "unit", + "value" + ], + "type": "object" + }, "Instrument_for_GraphQLAttributes_and_GraphQLSelector_and_GraphQLValue": { "additionalProperties": false, "properties": { @@ -3394,6 +3491,10 @@ expression: "&schema" "InstrumentsConfig": { "additionalProperties": false, "properties": { + "cache": { + "$ref": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::CacheInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument", + "description": "#/definitions/extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::CacheInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument" + }, "default_requirement_level": { "$ref": "#/definitions/DefaultAttributeRequirementLevel", "description": "#/definitions/DefaultAttributeRequirementLevel" @@ -4575,97 +4676,97 @@ expression: "&schema" }, "error.type": { "default": null, - "description": "Describes a class of error the operation ended with. Examples: * timeout * name_resolution_error * 500 Requirement level: Conditionally Required: If request has ended with an error.", + "description": "Describes a class of error the operation ended with. Examples:\n\n* timeout * name_resolution_error * 500\n\nRequirement level: Conditionally Required: If request has ended with an error.", "nullable": true, "type": "boolean" }, "http.request.body.size": { "default": null, - "description": "The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the Content-Length header. For requests using transport encoding, this should be the compressed size. Examples: * 3495 Requirement level: Recommended", + "description": "The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the Content-Length header. For requests using transport encoding, this should be the compressed size. Examples:\n\n* 3495\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "http.request.method": { "default": null, - "description": "HTTP request method. Examples: * GET * POST * HEAD Requirement level: Required", + "description": "HTTP request method. Examples:\n\n* GET * POST * HEAD\n\nRequirement level: Required", "nullable": true, "type": "boolean" }, "http.response.body.size": { "default": null, - "description": "The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the Content-Length header. For requests using transport encoding, this should be the compressed size. Examples: * 3495 Requirement level: Recommended", + "description": "The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the Content-Length header. For requests using transport encoding, this should be the compressed size. Examples:\n\n* 3495\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "http.response.status_code": { "default": null, - "description": "HTTP response status code. 
Examples: * 200 Requirement level: Conditionally Required: If and only if one was received/sent.", + "description": "HTTP response status code. Examples:\n\n* 200\n\nRequirement level: Conditionally Required: If and only if one was received/sent.", "nullable": true, "type": "boolean" }, "http.route": { "default": null, - "description": "The matched route (path template in the format used by the respective server framework). Examples: * /graphql Requirement level: Conditionally Required: If and only if it’s available", + "description": "The matched route (path template in the format used by the respective server framework). Examples:\n\n* /graphql\n\nRequirement level: Conditionally Required: If and only if it’s available", "nullable": true, "type": "boolean" }, "network.local.address": { "default": null, - "description": "Local socket address. Useful in case of a multi-IP host. Examples: * 10.1.2.80 * /tmp/my.sock Requirement level: Opt-In", + "description": "Local socket address. Useful in case of a multi-IP host. Examples:\n\n* 10.1.2.80 * /tmp/my.sock\n\nRequirement level: Opt-In", "nullable": true, "type": "boolean" }, "network.local.port": { "default": null, - "description": "Local socket port. Useful in case of a multi-port host. Examples: * 65123 Requirement level: Opt-In", + "description": "Local socket port. Useful in case of a multi-port host. Examples:\n\n* 65123\n\nRequirement level: Opt-In", "nullable": true, "type": "boolean" }, "network.peer.address": { "default": null, - "description": "Peer address of the network connection - IP address or Unix domain socket name. Examples: * 10.1.2.80 * /tmp/my.sock Requirement level: Recommended", + "description": "Peer address of the network connection - IP address or Unix domain socket name. Examples:\n\n* 10.1.2.80 * /tmp/my.sock\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "network.peer.port": { "default": null, - "description": "Peer port number of the network connection. Examples: * 65123 Requirement level: Recommended", + "description": "Peer port number of the network connection. Examples:\n\n* 65123\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "network.protocol.name": { "default": null, - "description": "OSI application layer or non-OSI equivalent. Examples: * http * spdy Requirement level: Recommended: if not default (http).", + "description": "OSI application layer or non-OSI equivalent. Examples:\n\n* http * spdy\n\nRequirement level: Recommended: if not default (http).", "nullable": true, "type": "boolean" }, "network.protocol.version": { "default": null, - "description": "Version of the protocol specified in network.protocol.name. Examples: * 1.0 * 1.1 * 2 * 3 Requirement level: Recommended", + "description": "Version of the protocol specified in network.protocol.name. Examples:\n\n* 1.0 * 1.1 * 2 * 3\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "network.transport": { "default": null, - "description": "OSI transport layer. Examples: * tcp * udp Requirement level: Conditionally Required", + "description": "OSI transport layer. Examples:\n\n* tcp * udp\n\nRequirement level: Conditionally Required", "nullable": true, "type": "boolean" }, "network.type": { "default": null, - "description": "OSI network layer or non-OSI equivalent. Examples: * ipv4 * ipv6 Requirement level: Recommended", + "description": "OSI network layer or non-OSI equivalent. 
Examples:\n\n* ipv4 * ipv6\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "server.address": { "default": null, - "description": "Name of the local HTTP server that received the request. Examples: * example.com * 10.1.2.80 * /tmp/my.sock Requirement level: Recommended", + "description": "Name of the local HTTP server that received the request. Examples:\n\n* example.com * 10.1.2.80 * /tmp/my.sock\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "server.port": { "default": null, - "description": "Port of the local HTTP server that received the request. Examples: * 80 * 8080 * 443 Requirement level: Recommended", + "description": "Port of the local HTTP server that received the request. Examples:\n\n* 80 * 8080 * 443\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, @@ -4677,25 +4778,25 @@ expression: "&schema" }, "url.path": { "default": null, - "description": "The URI path component Examples: * /search Requirement level: Required", + "description": "The URI path component Examples:\n\n* /search\n\nRequirement level: Required", "nullable": true, "type": "boolean" }, "url.query": { "default": null, - "description": "The URI query component Examples: * q=OpenTelemetry Requirement level: Conditionally Required: If and only if one was received/sent.", + "description": "The URI query component Examples:\n\n* q=OpenTelemetry\n\nRequirement level: Conditionally Required: If and only if one was received/sent.", "nullable": true, "type": "boolean" }, "url.scheme": { "default": null, - "description": "The URI scheme component identifying the used protocol. Examples: * http * https Requirement level: Required", + "description": "The URI scheme component identifying the used protocol. Examples:\n\n* http * https\n\nRequirement level: Required", "nullable": true, "type": "boolean" }, "user_agent.original": { "default": null, - "description": "Value of the HTTP User-Agent header sent by the client. Examples: * CERN-LineMode/2.15 * libwww/2.17b3 Requirement level: Recommended", + "description": "Value of the HTTP User-Agent header sent by the client. Examples:\n\n* CERN-LineMode/2.15 * libwww/2.17b3\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" } @@ -5504,25 +5605,25 @@ expression: "&schema" "properties": { "subgraph.graphql.document": { "default": null, - "description": "The GraphQL document being executed. Examples: * query findBookById { bookById(id: ?) { name } } Requirement level: Recommended", + "description": "The GraphQL document being executed. Examples:\n\n* `query findBookById { bookById(id: ?) { name } }`\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "subgraph.graphql.operation.name": { "default": null, - "description": "The name of the operation being executed. Examples: * findBookById Requirement level: Recommended", + "description": "The name of the operation being executed. Examples:\n\n* findBookById\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "subgraph.graphql.operation.type": { "default": null, - "description": "The type of the operation being executed. Examples: * query * subscription * mutation Requirement level: Recommended", + "description": "The type of the operation being executed. 
Examples:\n\n* query * subscription * mutation\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "subgraph.name": { "default": null, - "description": "The name of the subgraph Examples: * products Requirement level: Required", + "description": "The name of the subgraph Examples:\n\n* products\n\nRequirement level: Required", "nullable": true, "type": "boolean" } @@ -5821,6 +5922,19 @@ expression: "&schema" ], "type": "object" }, + { + "additionalProperties": false, + "properties": { + "subgraph_name": { + "description": "The subgraph name", + "type": "boolean" + } + }, + "required": [ + "subgraph_name" + ], + "type": "object" + }, { "additionalProperties": false, "properties": { @@ -6160,6 +6274,24 @@ expression: "&schema" "error" ], "type": "object" + }, + { + "additionalProperties": false, + "properties": { + "cache": { + "$ref": "#/definitions/CacheKind", + "description": "#/definitions/CacheKind" + }, + "entity_type": { + "$ref": "#/definitions/EntityType", + "description": "#/definitions/EntityType", + "nullable": true + } + }, + "required": [ + "cache" + ], + "type": "object" } ] }, @@ -6382,19 +6514,19 @@ expression: "&schema" }, "graphql.document": { "default": null, - "description": "The GraphQL document being executed. Examples: * query findBookById { bookById(id: ?) { name } } Requirement level: Recommended", + "description": "The GraphQL document being executed. Examples:\n\n* `query findBookById { bookById(id: ?) { name } }`\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "graphql.operation.name": { "default": null, - "description": "The name of the operation being executed. Examples: * findBookById Requirement level: Recommended", + "description": "The name of the operation being executed. Examples:\n\n* findBookById\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "graphql.operation.type": { "default": null, - "description": "The type of the operation being executed. Examples: * query * subscription * mutation Requirement level: Recommended", + "description": "The type of the operation being executed. Examples:\n\n* query * subscription * mutation\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" } @@ -7230,97 +7362,97 @@ expression: "&schema" }, "error.type": { "default": null, - "description": "Describes a class of error the operation ended with. Examples: * timeout * name_resolution_error * 500 Requirement level: Conditionally Required: If request has ended with an error.", + "description": "Describes a class of error the operation ended with. Examples:\n\n* timeout * name_resolution_error * 500\n\nRequirement level: Conditionally Required: If request has ended with an error.", "nullable": true, "type": "boolean" }, "http.request.body.size": { "default": null, - "description": "The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the Content-Length header. For requests using transport encoding, this should be the compressed size. Examples: * 3495 Requirement level: Recommended", + "description": "The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the Content-Length header. For requests using transport encoding, this should be the compressed size. 
Examples:\n\n* 3495\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "http.request.method": { "default": null, - "description": "HTTP request method. Examples: * GET * POST * HEAD Requirement level: Required", + "description": "HTTP request method. Examples:\n\n* GET * POST * HEAD\n\nRequirement level: Required", "nullable": true, "type": "boolean" }, "http.response.body.size": { "default": null, - "description": "The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the Content-Length header. For requests using transport encoding, this should be the compressed size. Examples: * 3495 Requirement level: Recommended", + "description": "The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the Content-Length header. For requests using transport encoding, this should be the compressed size. Examples:\n\n* 3495\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "http.response.status_code": { "default": null, - "description": "HTTP response status code. Examples: * 200 Requirement level: Conditionally Required: If and only if one was received/sent.", + "description": "HTTP response status code. Examples:\n\n* 200\n\nRequirement level: Conditionally Required: If and only if one was received/sent.", "nullable": true, "type": "boolean" }, "http.route": { "default": null, - "description": "The matched route (path template in the format used by the respective server framework). Examples: * /graphql Requirement level: Conditionally Required: If and only if it’s available", + "description": "The matched route (path template in the format used by the respective server framework). Examples:\n\n* /graphql\n\nRequirement level: Conditionally Required: If and only if it’s available", "nullable": true, "type": "boolean" }, "network.local.address": { "default": null, - "description": "Local socket address. Useful in case of a multi-IP host. Examples: * 10.1.2.80 * /tmp/my.sock Requirement level: Opt-In", + "description": "Local socket address. Useful in case of a multi-IP host. Examples:\n\n* 10.1.2.80 * /tmp/my.sock\n\nRequirement level: Opt-In", "nullable": true, "type": "boolean" }, "network.local.port": { "default": null, - "description": "Local socket port. Useful in case of a multi-port host. Examples: * 65123 Requirement level: Opt-In", + "description": "Local socket port. Useful in case of a multi-port host. Examples:\n\n* 65123\n\nRequirement level: Opt-In", "nullable": true, "type": "boolean" }, "network.peer.address": { "default": null, - "description": "Peer address of the network connection - IP address or Unix domain socket name. Examples: * 10.1.2.80 * /tmp/my.sock Requirement level: Recommended", + "description": "Peer address of the network connection - IP address or Unix domain socket name. Examples:\n\n* 10.1.2.80 * /tmp/my.sock\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "network.peer.port": { "default": null, - "description": "Peer port number of the network connection. Examples: * 65123 Requirement level: Recommended", + "description": "Peer port number of the network connection. Examples:\n\n* 65123\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "network.protocol.name": { "default": null, - "description": "OSI application layer or non-OSI equivalent. 
Examples: * http * spdy Requirement level: Recommended: if not default (http).", + "description": "OSI application layer or non-OSI equivalent. Examples:\n\n* http * spdy\n\nRequirement level: Recommended: if not default (http).", "nullable": true, "type": "boolean" }, "network.protocol.version": { "default": null, - "description": "Version of the protocol specified in network.protocol.name. Examples: * 1.0 * 1.1 * 2 * 3 Requirement level: Recommended", + "description": "Version of the protocol specified in network.protocol.name. Examples:\n\n* 1.0 * 1.1 * 2 * 3\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "network.transport": { "default": null, - "description": "OSI transport layer. Examples: * tcp * udp Requirement level: Conditionally Required", + "description": "OSI transport layer. Examples:\n\n* tcp * udp\n\nRequirement level: Conditionally Required", "nullable": true, "type": "boolean" }, "network.type": { "default": null, - "description": "OSI network layer or non-OSI equivalent. Examples: * ipv4 * ipv6 Requirement level: Recommended", + "description": "OSI network layer or non-OSI equivalent. Examples:\n\n* ipv4 * ipv6\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "server.address": { "default": null, - "description": "Name of the local HTTP server that received the request. Examples: * example.com * 10.1.2.80 * /tmp/my.sock Requirement level: Recommended", + "description": "Name of the local HTTP server that received the request. Examples:\n\n* example.com * 10.1.2.80 * /tmp/my.sock\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "server.port": { "default": null, - "description": "Port of the local HTTP server that received the request. Examples: * 80 * 8080 * 443 Requirement level: Recommended", + "description": "Port of the local HTTP server that received the request. Examples:\n\n* 80 * 8080 * 443\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, @@ -7332,25 +7464,25 @@ expression: "&schema" }, "url.path": { "default": null, - "description": "The URI path component Examples: * /search Requirement level: Required", + "description": "The URI path component Examples:\n\n* /search\n\nRequirement level: Required", "nullable": true, "type": "boolean" }, "url.query": { "default": null, - "description": "The URI query component Examples: * q=OpenTelemetry Requirement level: Conditionally Required: If and only if one was received/sent.", + "description": "The URI query component Examples:\n\n* q=OpenTelemetry\n\nRequirement level: Conditionally Required: If and only if one was received/sent.", "nullable": true, "type": "boolean" }, "url.scheme": { "default": null, - "description": "The URI scheme component identifying the used protocol. Examples: * http * https Requirement level: Required", + "description": "The URI scheme component identifying the used protocol. Examples:\n\n* http * https\n\nRequirement level: Required", "nullable": true, "type": "boolean" }, "user_agent.original": { "default": null, - "description": "Value of the HTTP User-Agent header sent by the client. Examples: * CERN-LineMode/2.15 * libwww/2.17b3 Requirement level: Recommended", + "description": "Value of the HTTP User-Agent header sent by the client. 
Examples:\n\n* CERN-LineMode/2.15 * libwww/2.17b3\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" } @@ -7378,97 +7510,97 @@ expression: "&schema" }, "error.type": { "default": null, - "description": "Describes a class of error the operation ended with. Examples: * timeout * name_resolution_error * 500 Requirement level: Conditionally Required: If request has ended with an error.", + "description": "Describes a class of error the operation ended with. Examples:\n\n* timeout * name_resolution_error * 500\n\nRequirement level: Conditionally Required: If request has ended with an error.", "nullable": true, "type": "boolean" }, "http.request.body.size": { "default": null, - "description": "The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the Content-Length header. For requests using transport encoding, this should be the compressed size. Examples: * 3495 Requirement level: Recommended", + "description": "The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the Content-Length header. For requests using transport encoding, this should be the compressed size. Examples:\n\n* 3495\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "http.request.method": { "default": null, - "description": "HTTP request method. Examples: * GET * POST * HEAD Requirement level: Required", + "description": "HTTP request method. Examples:\n\n* GET * POST * HEAD\n\nRequirement level: Required", "nullable": true, "type": "boolean" }, "http.response.body.size": { "default": null, - "description": "The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the Content-Length header. For requests using transport encoding, this should be the compressed size. Examples: * 3495 Requirement level: Recommended", + "description": "The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the Content-Length header. For requests using transport encoding, this should be the compressed size. Examples:\n\n* 3495\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "http.response.status_code": { "default": null, - "description": "HTTP response status code. Examples: * 200 Requirement level: Conditionally Required: If and only if one was received/sent.", + "description": "HTTP response status code. Examples:\n\n* 200\n\nRequirement level: Conditionally Required: If and only if one was received/sent.", "nullable": true, "type": "boolean" }, "http.route": { "default": null, - "description": "The matched route (path template in the format used by the respective server framework). Examples: * /graphql Requirement level: Conditionally Required: If and only if it’s available", + "description": "The matched route (path template in the format used by the respective server framework). Examples:\n\n* /graphql\n\nRequirement level: Conditionally Required: If and only if it’s available", "nullable": true, "type": "boolean" }, "network.local.address": { "default": null, - "description": "Local socket address. Useful in case of a multi-IP host. Examples: * 10.1.2.80 * /tmp/my.sock Requirement level: Opt-In", + "description": "Local socket address. Useful in case of a multi-IP host. 
Examples:\n\n* 10.1.2.80 * /tmp/my.sock\n\nRequirement level: Opt-In", "nullable": true, "type": "boolean" }, "network.local.port": { "default": null, - "description": "Local socket port. Useful in case of a multi-port host. Examples: * 65123 Requirement level: Opt-In", + "description": "Local socket port. Useful in case of a multi-port host. Examples:\n\n* 65123\n\nRequirement level: Opt-In", "nullable": true, "type": "boolean" }, "network.peer.address": { "default": null, - "description": "Peer address of the network connection - IP address or Unix domain socket name. Examples: * 10.1.2.80 * /tmp/my.sock Requirement level: Recommended", + "description": "Peer address of the network connection - IP address or Unix domain socket name. Examples:\n\n* 10.1.2.80 * /tmp/my.sock\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "network.peer.port": { "default": null, - "description": "Peer port number of the network connection. Examples: * 65123 Requirement level: Recommended", + "description": "Peer port number of the network connection. Examples:\n\n* 65123\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "network.protocol.name": { "default": null, - "description": "OSI application layer or non-OSI equivalent. Examples: * http * spdy Requirement level: Recommended: if not default (http).", + "description": "OSI application layer or non-OSI equivalent. Examples:\n\n* http * spdy\n\nRequirement level: Recommended: if not default (http).", "nullable": true, "type": "boolean" }, "network.protocol.version": { "default": null, - "description": "Version of the protocol specified in network.protocol.name. Examples: * 1.0 * 1.1 * 2 * 3 Requirement level: Recommended", + "description": "Version of the protocol specified in network.protocol.name. Examples:\n\n* 1.0 * 1.1 * 2 * 3\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "network.transport": { "default": null, - "description": "OSI transport layer. Examples: * tcp * udp Requirement level: Conditionally Required", + "description": "OSI transport layer. Examples:\n\n* tcp * udp\n\nRequirement level: Conditionally Required", "nullable": true, "type": "boolean" }, "network.type": { "default": null, - "description": "OSI network layer or non-OSI equivalent. Examples: * ipv4 * ipv6 Requirement level: Recommended", + "description": "OSI network layer or non-OSI equivalent. Examples:\n\n* ipv4 * ipv6\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "server.address": { "default": null, - "description": "Name of the local HTTP server that received the request. Examples: * example.com * 10.1.2.80 * /tmp/my.sock Requirement level: Recommended", + "description": "Name of the local HTTP server that received the request. Examples:\n\n* example.com * 10.1.2.80 * /tmp/my.sock\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "server.port": { "default": null, - "description": "Port of the local HTTP server that received the request. Examples: * 80 * 8080 * 443 Requirement level: Recommended", + "description": "Port of the local HTTP server that received the request. 
Examples:\n\n* 80 * 8080 * 443\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, @@ -7480,25 +7612,25 @@ expression: "&schema" }, "url.path": { "default": null, - "description": "The URI path component Examples: * /search Requirement level: Required", + "description": "The URI path component Examples:\n\n* /search\n\nRequirement level: Required", "nullable": true, "type": "boolean" }, "url.query": { "default": null, - "description": "The URI query component Examples: * q=OpenTelemetry Requirement level: Conditionally Required: If and only if one was received/sent.", + "description": "The URI query component Examples:\n\n* q=OpenTelemetry\n\nRequirement level: Conditionally Required: If and only if one was received/sent.", "nullable": true, "type": "boolean" }, "url.scheme": { "default": null, - "description": "The URI scheme component identifying the used protocol. Examples: * http * https Requirement level: Required", + "description": "The URI scheme component identifying the used protocol. Examples:\n\n* http * https\n\nRequirement level: Required", "nullable": true, "type": "boolean" }, "user_agent.original": { "default": null, - "description": "Value of the HTTP User-Agent header sent by the client. Examples: * CERN-LineMode/2.15 * libwww/2.17b3 Requirement level: Recommended", + "description": "Value of the HTTP User-Agent header sent by the client. Examples:\n\n* CERN-LineMode/2.15 * libwww/2.17b3\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" } @@ -7513,25 +7645,25 @@ expression: "&schema" "properties": { "subgraph.graphql.document": { "default": null, - "description": "The GraphQL document being executed. Examples: * query findBookById { bookById(id: ?) { name } } Requirement level: Recommended", + "description": "The GraphQL document being executed. Examples:\n\n* `query findBookById { bookById(id: ?) { name } }`\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "subgraph.graphql.operation.name": { "default": null, - "description": "The name of the operation being executed. Examples: * findBookById Requirement level: Recommended", + "description": "The name of the operation being executed. Examples:\n\n* findBookById\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "subgraph.graphql.operation.type": { "default": null, - "description": "The type of the operation being executed. Examples: * query * subscription * mutation Requirement level: Recommended", + "description": "The type of the operation being executed. Examples:\n\n* query * subscription * mutation\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "subgraph.name": { "default": null, - "description": "The name of the subgraph Examples: * products Requirement level: Required", + "description": "The name of the subgraph Examples:\n\n* products\n\nRequirement level: Required", "nullable": true, "type": "boolean" } @@ -7546,25 +7678,25 @@ expression: "&schema" "properties": { "subgraph.graphql.document": { "default": null, - "description": "The GraphQL document being executed. Examples: * query findBookById { bookById(id: ?) { name } } Requirement level: Recommended", + "description": "The GraphQL document being executed. Examples:\n\n* `query findBookById { bookById(id: ?) { name } }`\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "subgraph.graphql.operation.name": { "default": null, - "description": "The name of the operation being executed. 
Examples: * findBookById Requirement level: Recommended", + "description": "The name of the operation being executed. Examples:\n\n* findBookById\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "subgraph.graphql.operation.type": { "default": null, - "description": "The type of the operation being executed. Examples: * query * subscription * mutation Requirement level: Recommended", + "description": "The type of the operation being executed. Examples:\n\n* query * subscription * mutation\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "subgraph.name": { "default": null, - "description": "The name of the subgraph Examples: * products Requirement level: Required", + "description": "The name of the subgraph Examples:\n\n* products\n\nRequirement level: Required", "nullable": true, "type": "boolean" } @@ -7604,19 +7736,19 @@ expression: "&schema" }, "graphql.document": { "default": null, - "description": "The GraphQL document being executed. Examples: * query findBookById { bookById(id: ?) { name } } Requirement level: Recommended", + "description": "The GraphQL document being executed. Examples:\n\n* `query findBookById { bookById(id: ?) { name } }`\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "graphql.operation.name": { "default": null, - "description": "The name of the operation being executed. Examples: * findBookById Requirement level: Recommended", + "description": "The name of the operation being executed. Examples:\n\n* findBookById\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "graphql.operation.type": { "default": null, - "description": "The type of the operation being executed. Examples: * query * subscription * mutation Requirement level: Recommended", + "description": "The type of the operation being executed. Examples:\n\n* query * subscription * mutation\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" } @@ -7656,19 +7788,47 @@ expression: "&schema" }, "graphql.document": { "default": null, - "description": "The GraphQL document being executed. Examples: * query findBookById { bookById(id: ?) { name } } Requirement level: Recommended", + "description": "The GraphQL document being executed. Examples:\n\n* `query findBookById { bookById(id: ?) { name } }`\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "graphql.operation.name": { "default": null, - "description": "The name of the operation being executed. Examples: * findBookById Requirement level: Recommended", + "description": "The name of the operation being executed. Examples:\n\n* findBookById\n\nRequirement level: Recommended", "nullable": true, "type": "boolean" }, "graphql.operation.type": { "default": null, - "description": "The type of the operation being executed. Examples: * query * subscription * mutation Requirement level: Recommended", + "description": "The type of the operation being executed. 
Examples:\n\n* query * subscription * mutation\n\nRequirement level: Recommended", + "nullable": true, + "type": "boolean" + } + }, + "type": "object" + }, + "extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::CacheInstrumentsConfig_apollo_router::plugins::telemetry::config_new::instruments::Instrument": { + "additionalProperties": { + "$ref": "#/definitions/Instrument_for_CacheAttributes_and_SubgraphSelector_and_SubgraphValue", + "description": "#/definitions/Instrument_for_CacheAttributes_and_SubgraphSelector_and_SubgraphValue" + }, + "properties": { + "apollo.router.operations.entity.cache": { + "$ref": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::selectors::SubgraphSelector", + "description": "#/definitions/DefaultedStandardInstrument_for_extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::selectors::SubgraphSelector" + } + }, + "type": "object" + }, + "extendable_attribute_apollo_router::plugins::telemetry::config_new::cache::attributes::CacheAttributes_apollo_router::plugins::telemetry::config_new::selectors::SubgraphSelector": { + "additionalProperties": { + "$ref": "#/definitions/SubgraphSelector", + "description": "#/definitions/SubgraphSelector" + }, + "properties": { + "entity.type": { + "default": null, + "description": "Entity type", "nullable": true, "type": "boolean" } @@ -8077,10 +8237,6 @@ expression: "&schema" "$ref": "#/definitions/CSRFConfig", "description": "#/definitions/CSRFConfig" }, - "experimental_api_schema_generation_mode": { - "$ref": "#/definitions/ApiSchemaMode", - "description": "#/definitions/ApiSchemaMode" - }, "experimental_apollo_metrics_generation_mode": { "$ref": "#/definitions/ApolloMetricsGenerationMode", "description": "#/definitions/ApolloMetricsGenerationMode" diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@js_api_schema.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@js_api_schema.router.yaml.snap new file mode 100644 index 0000000000..faa8280018 --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@js_api_schema.router.yaml.snap @@ -0,0 +1,6 @@ +--- +source: apollo-router/src/configuration/tests.rs +expression: new_config +--- +--- +{} diff --git a/apollo-router/src/configuration/testdata/migrations/js_api_schema.router.yaml b/apollo-router/src/configuration/testdata/migrations/js_api_schema.router.yaml new file mode 100644 index 0000000000..7e9fe93811 --- /dev/null +++ b/apollo-router/src/configuration/testdata/migrations/js_api_schema.router.yaml @@ -0,0 +1 @@ +experimental_api_schema_generation_mode: both diff --git a/apollo-router/src/configuration/tests.rs b/apollo-router/src/configuration/tests.rs index 9d82fcb069..7367da2970 100644 --- a/apollo-router/src/configuration/tests.rs +++ b/apollo-router/src/configuration/tests.rs @@ -57,30 +57,30 @@ fn routing_url_in_schema() { REVIEWS @join__graph(name: "reviews" url: "http://localhost:4004/graphql") } "#; - let schema = crate::spec::Schema::parse_test(schema, &Default::default()).unwrap(); + let schema = crate::spec::Schema::parse(schema, &Default::default()).unwrap(); - let subgraphs: HashMap<&String, &Uri> = 
schema.subgraphs().collect(); + let subgraphs: HashMap<&str, &Uri> = schema.subgraphs().map(|(k, v)| (k.as_str(), v)).collect(); // if no configuration override, use the URL from the supergraph assert_eq!( - subgraphs.get(&"accounts".to_string()).unwrap().to_string(), + subgraphs.get("accounts").unwrap().to_string(), "http://localhost:4001/graphql" ); // if both configuration and schema specify a non empty URL, the configuration wins // this should show a warning in logs assert_eq!( - subgraphs.get(&"inventory".to_string()).unwrap().to_string(), + subgraphs.get("inventory").unwrap().to_string(), "http://localhost:4002/graphql" ); // if the configuration has a non empty routing URL, and the supergraph // has an empty one, the configuration wins assert_eq!( - subgraphs.get(&"products".to_string()).unwrap().to_string(), + subgraphs.get("products").unwrap().to_string(), "http://localhost:4003/graphql" ); assert_eq!( - subgraphs.get(&"reviews".to_string()).unwrap().to_string(), + subgraphs.get("reviews").unwrap().to_string(), "http://localhost:4004/graphql" ); } @@ -109,7 +109,7 @@ fn missing_subgraph_url() { PRODUCTS @join__graph(name: "products" url: "http://localhost:4003/graphql") REVIEWS @join__graph(name: "reviews" url: "") }"#; - let schema_error = crate::spec::Schema::parse_test(schema_error, &Default::default()) + let schema_error = crate::spec::Schema::parse(schema_error, &Default::default()) .expect_err("Must have an error because we have one missing subgraph routing url"); if let SchemaError::MissingSubgraphUrl(subgraph) = schema_error { @@ -431,7 +431,7 @@ fn validate_project_config_files() { { continue; } - #[cfg(not(telemetry_next))] + #[cfg(not(feature = "telemetry_next"))] if entry.path().to_string_lossy().contains("telemetry_next") { continue; } @@ -657,7 +657,7 @@ fn upgrade_old_configuration() { #[test] fn all_properties_are_documented() { - let schema = serde_json::to_value(&generate_config_schema()) + let schema = serde_json::to_value(generate_config_schema()) .expect("must be able to convert the schema to json"); let mut errors = Vec::new(); diff --git a/apollo-router/src/configuration/yaml.rs b/apollo-router/src/configuration/yaml.rs index 7567c4a22d..cf165daa03 100644 --- a/apollo-router/src/configuration/yaml.rs +++ b/apollo-router/src/configuration/yaml.rs @@ -198,7 +198,7 @@ impl MarkedEventReceiver for MarkedYaml { let current_label = self.current_label.take(); self.object_stack.push(( current_label.clone(), - Value::Mapping(current_label, IndexMap::new(), marker), + Value::Mapping(current_label, IndexMap::default(), marker), id, )); } diff --git a/apollo-router/src/context/mod.rs b/apollo-router/src/context/mod.rs index fee4e29e72..73794512a0 100644 --- a/apollo-router/src/context/mod.rs +++ b/apollo-router/src/context/mod.rs @@ -450,7 +450,7 @@ mod test { @join__graph(name: "products" url: "http://localhost:4003/graphql") REVIEWS @join__graph(name: "reviews" url: "http://localhost:4002/graphql") }"#; - let schema = Schema::parse_test(schema, &Default::default()).unwrap(); + let schema = Schema::parse(schema, &Default::default()).unwrap(); let document = Query::parse_document("{ me }", None, &schema, &Configuration::default()).unwrap(); assert!(c.unsupported_executable_document().is_none()); diff --git a/apollo-router/src/error.rs b/apollo-router/src/error.rs index e61d088116..2ebf66bd4d 100644 --- a/apollo-router/src/error.rs +++ b/apollo-router/src/error.rs @@ -599,11 +599,11 @@ impl IntoGraphQLErrors for ParseErrors { .message(diagnostic.error.to_string()) 
.locations( diagnostic - .get_line_column() + .line_column_range() .map(|location| { vec![ErrorLocation { - line: location.line as u32, - column: location.column as u32, + line: location.start.line as u32, + column: location.start.column as u32, }] }) .unwrap_or_default(), diff --git a/apollo-router/src/executable.rs b/apollo-router/src/executable.rs index 873409da62..4d826b6554 100644 --- a/apollo-router/src/executable.rs +++ b/apollo-router/src/executable.rs @@ -2,8 +2,6 @@ use std::cell::Cell; use std::env; -use std::ffi::OsStr; -use std::fmt; use std::fmt::Debug; use std::net::SocketAddr; use std::path::PathBuf; @@ -19,7 +17,6 @@ use clap::Args; use clap::CommandFactory; use clap::Parser; use clap::Subcommand; -use directories::ProjectDirs; #[cfg(any(feature = "dhat-heap", feature = "dhat-ad-hoc"))] use once_cell::sync::OnceCell; use regex::Captures; @@ -314,43 +311,6 @@ impl Opt { } } -/// Wrapper so that clap can display the default config path in the help message. -/// Uses ProjectDirs to get the default location. -#[derive(Debug)] -struct ProjectDir { - path: Option, -} - -impl Default for ProjectDir { - fn default() -> Self { - let dirs = ProjectDirs::from("com", "Apollo", "Federation"); - Self { - path: dirs.map(|dirs| dirs.config_dir().to_path_buf()), - } - } -} - -impl From<&OsStr> for ProjectDir { - fn from(s: &OsStr) -> Self { - Self { - path: Some(PathBuf::from(s)), - } - } -} - -impl fmt::Display for ProjectDir { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match &self.path { - None => { - write!(f, "Unknown, -p option must be used.") - } - Some(path) => { - write!(f, "{}", path.to_string_lossy()) - } - } - } -} - /// This is the main router entrypoint. /// /// Starts a Tokio runtime and runs a Router in it based on command-line options. 
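The `Executable` hunk that follows flattens a nested `match` over an `Option` into `.map(...).unwrap_or_default()`. A minimal sketch of that idiom, with hypothetical stand-in types rather than the router's real `ConfigurationSource`:

```rust
// Hypothetical stand-in for ConfigurationSource::File and its default.
#[derive(Debug, Default, PartialEq)]
enum Source {
    #[default]
    None,
    File(String),
}

fn pick(path: Option<&str>) -> Source {
    // Equivalent to `match path.map(...) { Some(s) => s, None => Default::default() }`,
    // but flatter: map the Some case, then fall back to the type's default.
    path.map(|p| Source::File(p.to_string())).unwrap_or_default()
}

fn main() {
    assert_eq!(pick(Some("router.yaml")), Source::File("router.yaml".into()));
    assert_eq!(pick(None), Source::None);
}
```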
@@ -521,22 +481,23 @@ impl Executable { } (Some(config), None) => config, #[allow(clippy::blocks_in_conditions)] - _ => match opt.config_path.as_ref().map(|path| { - let path = if path.is_relative() { - current_directory.join(path) - } else { - path.to_path_buf() - }; + _ => opt + .config_path + .as_ref() + .map(|path| { + let path = if path.is_relative() { + current_directory.join(path) + } else { + path.to_path_buf() + }; - ConfigurationSource::File { - path, - watch: opt.hot_reload, - delay: None, - } - }) { - Some(configuration) => configuration, - None => Default::default(), - }, + ConfigurationSource::File { + path, + watch: opt.hot_reload, + delay: None, + } + }) + .unwrap_or_default(), }; let apollo_telemetry_msg = if opt.anonymous_telemetry_disabled { diff --git a/apollo-router/src/graphql/visitor.rs b/apollo-router/src/graphql/visitor.rs index 31653d2957..ee92b8741f 100644 --- a/apollo-router/src/graphql/visitor.rs +++ b/apollo-router/src/graphql/visitor.rs @@ -53,10 +53,10 @@ pub(crate) trait ResponseVisitor { } if let Some(Value::Object(children)) = &response.data { - if let Some(operation) = &request.anonymous_operation { + if let Some(operation) = &request.operations.anonymous { self.visit_selections(request, &operation.selection_set, children); } - for operation in request.named_operations.values() { + for operation in request.operations.named.values() { self.visit_selections(request, &operation.selection_set, children); } } diff --git a/apollo-router/src/json_ext.rs b/apollo-router/src/json_ext.rs index e0b29f9ed4..7e26929f24 100644 --- a/apollo-router/src/json_ext.rs +++ b/apollo-router/src/json_ext.rs @@ -74,11 +74,13 @@ pub(crate) trait ValueExt { /// Returns `true` if the values are equal and the objects are ordered the same. /// /// **Note:** this is recursive. + #[cfg(test)] fn eq_and_ordered(&self, other: &Self) -> bool; /// Returns `true` if the set is a subset of another, i.e., `other` contains at least all the /// values in `self`. #[track_caller] + #[cfg(test)] fn is_subset(&self, superset: &Value) -> bool; /// Create a `Value` by inserting a value at a subpath. @@ -181,6 +183,7 @@ impl ValueExt for Value { } } + #[cfg(test)] fn eq_and_ordered(&self, other: &Self) -> bool { match (self, other) { (Value::Object(a), Value::Object(b)) => { @@ -221,6 +224,7 @@ impl ValueExt for Value { } } + #[cfg(test)] fn is_subset(&self, superset: &Value) -> bool { match (self, superset) { (Value::Object(subset), Value::Object(superset)) => { @@ -1157,7 +1161,7 @@ mod tests { /// the path, and so we use the following simple schema for tests. Note however that tests that /// don't use fragments in the path essentially ignore this schema. fn test_schema() -> Schema { - Schema::parse_test( + Schema::parse( r#" schema @core(feature: "https://specs.apollo.dev/core/v0.1"), diff --git a/apollo-router/src/lib.rs b/apollo-router/src/lib.rs index a10910a840..656f33c976 100644 --- a/apollo-router/src/lib.rs +++ b/apollo-router/src/lib.rs @@ -19,6 +19,8 @@ #![cfg_attr(feature = "failfast", allow(unreachable_code))] #![warn(unreachable_pub)] #![warn(missing_docs)] +// TODO: silence false positives (apollo_compiler::Name) and investigate the rest +#![allow(clippy::mutable_key_type)] macro_rules! 
failfast_debug { ($($tokens:tt)+) => {{ diff --git a/apollo-router/src/metrics/aggregation.rs b/apollo-router/src/metrics/aggregation.rs index f5eb052903..53f36aee88 100644 --- a/apollo-router/src/metrics/aggregation.rs +++ b/apollo-router/src/metrics/aggregation.rs @@ -1,7 +1,6 @@ use std::any::Any; use std::borrow::Cow; use std::collections::HashMap; -use std::mem; use std::ops::DerefMut; use std::sync::Arc; use std::sync::Mutex; @@ -25,7 +24,6 @@ use opentelemetry::metrics::UpDownCounter; use opentelemetry::KeyValue; use opentelemetry_api::metrics::AsyncInstrument; use opentelemetry_api::metrics::CallbackRegistration; -use opentelemetry_api::metrics::MetricsError; use opentelemetry_api::metrics::Observer; use crate::metrics::filter::FilterMeterProvider; @@ -76,16 +74,34 @@ pub(crate) struct Inner { registered_instruments: Vec, } +/// Fields are never used directly but strong references here +/// keep weak references elsewhere upgradable. #[derive(From)] pub(crate) enum InstrumentWrapper { - U64Counter(Arc>), - F64Counter(Arc>), - I64UpDownCounter(Arc>), - F64UpDownCounter(Arc>), - I64Histogram(Arc>), - U64Histogram(Arc>), - F64Histogram(Arc>), - U64Gauge(Arc>), + U64Counter { + _keep_alive: Arc>, + }, + F64Counter { + _keep_alive: Arc>, + }, + I64UpDownCounter { + _keep_alive: Arc>, + }, + F64UpDownCounter { + _keep_alive: Arc>, + }, + I64Histogram { + _keep_alive: Arc>, + }, + U64Histogram { + _keep_alive: Arc>, + }, + F64Histogram { + _keep_alive: Arc>, + }, + U64Gauge { + _keep_alive: Arc>, + }, } #[derive(Eq, PartialEq, Hash)] @@ -136,7 +152,6 @@ impl AggregateMeterProvider { } /// Create a registered instrument. This enables caching at callsites and invalidation at the meter provider via weak reference. - #[allow(dead_code)] pub(crate) fn create_registered_instrument( &self, create_fn: impl Fn(&mut Inner) -> T, @@ -469,24 +484,6 @@ impl InstrumentProvider for AggregateInstrumentProvider { } } -struct AggregatedCallbackRegistrations(Vec>); -impl CallbackRegistration for AggregatedCallbackRegistrations { - fn unregister(&mut self) -> opentelemetry_api::metrics::Result<()> { - let mut errors = vec![]; - for mut registration in mem::take(&mut self.0) { - if let Err(err) = registration.unregister() { - errors.push(err); - } - } - - if errors.is_empty() { - Ok(()) - } else { - Err(MetricsError::Other(format!("{errors:?}"))) - } - } -} - #[cfg(test)] mod test { use std::sync::atomic::AtomicI64; diff --git a/apollo-router/src/metrics/mod.rs b/apollo-router/src/metrics/mod.rs index e0deed0c8e..2e0fbf2ca5 100644 --- a/apollo-router/src/metrics/mod.rs +++ b/apollo-router/src/metrics/mod.rs @@ -60,7 +60,7 @@ pub(crate) mod test_utils { pub(crate) static AGGREGATE_METER_PROVIDER_ASYNC: OnceLock<(AggregateMeterProvider, ClonableManualReader)>; } thread_local! { - pub(crate) static AGGREGATE_METER_PROVIDER: OnceLock<(AggregateMeterProvider, ClonableManualReader)> = OnceLock::new(); + pub(crate) static AGGREGATE_METER_PROVIDER: OnceLock<(AggregateMeterProvider, ClonableManualReader)> = const { OnceLock::new() }; } #[derive(Debug, Clone, Default)] @@ -120,17 +120,11 @@ pub(crate) mod test_utils { } pub(crate) fn meter_provider_and_readers() -> (AggregateMeterProvider, ClonableManualReader) { if tokio::runtime::Handle::try_current().is_ok() { - if let Ok(task_local) = AGGREGATE_METER_PROVIDER_ASYNC + AGGREGATE_METER_PROVIDER_ASYNC .try_with(|cell| cell.get_or_init(create_test_meter_provider).clone()) - { - task_local - } else { - // We need to silently fail here. 
Otherwise we fail every multi-threaded test that touches metrics - ( - AggregateMeterProvider::default(), - ClonableManualReader::default(), - ) - } + // We need to silently fail here. + // Otherwise we fail every multi-threaded test that touches metrics + .unwrap_or_default() } else { AGGREGATE_METER_PROVIDER .with(|cell| cell.get_or_init(create_test_meter_provider).clone()) @@ -506,9 +500,11 @@ pub(crate) fn meter_provider() -> AggregateMeterProvider { /// /// This macro is a replacement for the telemetry crate's MetricsLayer. We will eventually convert all metrics to use these macros and deprecate the MetricsLayer. /// The reason for this is that the MetricsLayer has: +/// /// * No support for dynamic attributes /// * No support dynamic metrics. /// * Imperfect mapping to metrics API that can only be checked at runtime. +/// /// New metrics should be added using these macros. #[allow(unused_macros)] macro_rules! u64_counter { @@ -545,9 +541,11 @@ macro_rules! u64_counter { /// /// This macro is a replacement for the telemetry crate's MetricsLayer. We will eventually convert all metrics to use these macros and deprecate the MetricsLayer. /// The reason for this is that the MetricsLayer has: +/// /// * No support for dynamic attributes /// * No support dynamic metrics. /// * Imperfect mapping to metrics API that can only be checked at runtime. +/// /// New metrics should be added using these macros. #[allow(unused_macros)] macro_rules! f64_counter { @@ -583,9 +581,11 @@ macro_rules! f64_counter { /// /// This macro is a replacement for the telemetry crate's MetricsLayer. We will eventually convert all metrics to use these macros and deprecate the MetricsLayer. /// The reason for this is that the MetricsLayer has: +/// /// * No support for dynamic attributes /// * No support dynamic metrics. /// * Imperfect mapping to metrics API that can only be checked at runtime. +/// /// New metrics should be added using these macros. #[allow(unused_macros)] @@ -623,9 +623,11 @@ macro_rules! i64_up_down_counter { /// /// This macro is a replacement for the telemetry crate's MetricsLayer. We will eventually convert all metrics to use these macros and deprecate the MetricsLayer. /// The reason for this is that the MetricsLayer has: +/// /// * No support for dynamic attributes /// * No support dynamic metrics. /// * Imperfect mapping to metrics API that can only be checked at runtime. +/// /// New metrics should be added using these macros. #[allow(unused_macros)] macro_rules! f64_up_down_counter { @@ -662,9 +664,11 @@ macro_rules! f64_up_down_counter { /// /// This macro is a replacement for the telemetry crate's MetricsLayer. We will eventually convert all metrics to use these macros and deprecate the MetricsLayer. /// The reason for this is that the MetricsLayer has: +/// /// * No support for dynamic attributes /// * No support dynamic metrics. /// * Imperfect mapping to metrics API that can only be checked at runtime. +/// /// New metrics should be added using these macros. #[allow(unused_macros)] macro_rules! f64_histogram { @@ -701,9 +705,11 @@ macro_rules! f64_histogram { /// /// This macro is a replacement for the telemetry crate's MetricsLayer. We will eventually convert all metrics to use these macros and deprecate the MetricsLayer. /// The reason for this is that the MetricsLayer has: +/// /// * No support for dynamic attributes /// * No support dynamic metrics. /// * Imperfect mapping to metrics API that can only be checked at runtime. +/// /// New metrics should be added using these macros. 
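The `+///` lines added throughout these macro docs are not cosmetic: without a blank doc-comment line after the bullet list, the trailing sentence "New metrics should be added using these macros." is parsed as a lazy continuation of the last bullet instead of its own paragraph (recent clippy versions flag this pattern; `clippy::doc_lazy_continuation` is the likely lint here, though the diff does not name it). A minimal illustration, not taken from the router itself:

```rust
/// This layer has known limitations:
///
/// * no support for dynamic attributes
/// * no support for dynamic metrics
///
/// Without the blank `///` line above, rustdoc would render this
/// sentence as part of the last bullet rather than as a new paragraph.
fn documented_item() {}

fn main() {
    documented_item();
}
```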
#[allow(unused_macros)] macro_rules! u64_histogram { @@ -740,9 +746,11 @@ macro_rules! u64_histogram { /// /// This macro is a replacement for the telemetry crate's MetricsLayer. We will eventually convert all metrics to use these macros and deprecate the MetricsLayer. /// The reason for this is that the MetricsLayer has: +/// /// * No support for dynamic attributes /// * No support dynamic metrics. /// * Imperfect mapping to metrics API that can only be checked at runtime. +/// /// New metrics should be added using these macros. #[allow(unused_macros)] macro_rules! i64_histogram { diff --git a/apollo-router/src/notification.rs b/apollo-router/src/notification.rs index 3dc3c01d48..77aff5db43 100644 --- a/apollo-router/src/notification.rs +++ b/apollo-router/src/notification.rs @@ -34,44 +34,21 @@ static NOTIFY_CHANNEL_SIZE: usize = 1024; static DEFAULT_MSG_CHANNEL_SIZE: usize = 128; #[derive(Error, Debug)] -pub(crate) enum NotifyError { +pub(crate) enum NotifyError { + #[error("cannot receive data from pubsub")] + RecvError(#[from] RecvError), #[error("cannot send data to pubsub")] SendError(#[from] SendError), + #[error("cannot send data to pubsub")] + NotificationSendError(#[from] SendError>), + #[error("cannot send data to pubsub")] + NotificationTrySendError(#[from] TrySendError>), #[error("cannot send data to response stream")] BroadcastSendError(#[from] broadcast::error::SendError), #[error("this topic doesn't exist")] UnknownTopic, } -impl From>> for NotifyError -where - K: Send + Hash + Eq + Clone + 'static, - V: Send + Clone + 'static, -{ - fn from(error: SendError>) -> Self { - error.into() - } -} - -impl From for NotifyError -where - V: Send + Clone + 'static, -{ - fn from(error: RecvError) -> Self { - error.into() - } -} - -impl From>> for NotifyError -where - K: Send + Hash + Eq + Clone + 'static, - V: Send + Clone + 'static, -{ - fn from(error: TrySendError>) -> Self { - error.into() - } -} - type ResponseSender = oneshot::Sender>, broadcast::Receiver>)>>; @@ -81,7 +58,7 @@ type ResponseSenderWithCreated = oneshot::Sender<( bool, )>; -enum Notification { +pub(crate) enum Notification { CreateOrSubscribe { topic: K, // Sender connected to the original source stream @@ -220,7 +197,7 @@ where K: Send + Hash + Eq + Clone + 'static, V: Send + Clone + 'static, { - pub(crate) async fn set_ttl(&self, new_ttl: Option) -> Result<(), NotifyError> { + pub(crate) async fn set_ttl(&self, new_ttl: Option) -> Result<(), NotifyError> { self.sender .send(Notification::UpdateHeartbeat { new_ttl }) .await?; @@ -233,7 +210,7 @@ where &mut self, topic: K, heartbeat_enabled: bool, - ) -> Result<(Handle, bool), NotifyError> { + ) -> Result<(Handle, bool), NotifyError> { let (sender, _receiver) = broadcast::channel(self.queue_size.unwrap_or(DEFAULT_MSG_CHANNEL_SIZE)); @@ -258,7 +235,7 @@ where Ok((handle, created)) } - pub(crate) async fn subscribe(&mut self, topic: K) -> Result, NotifyError> { + pub(crate) async fn subscribe(&mut self, topic: K) -> Result, NotifyError> { let (sender, receiver) = oneshot::channel(); self.sender @@ -284,7 +261,7 @@ where pub(crate) async fn subscribe_if_exist( &mut self, topic: K, - ) -> Result>, NotifyError> { + ) -> Result>, NotifyError> { let (sender, receiver) = oneshot::channel(); self.sender @@ -307,7 +284,7 @@ where Ok(handle.into()) } - pub(crate) async fn exist(&mut self, topic: K) -> Result> { + pub(crate) async fn exist(&mut self, topic: K) -> Result> { // Channel to check if the topic still exists or not let (response_tx, response_rx) = oneshot::channel(); 
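The `NotifyError` rework above drops the hand-written `From` impls, whose bodies called `error.into()` and so recursed into themselves, in favor of conversions derived by `thiserror`'s `#[from]` attribute. A minimal sketch of that pattern (the error types here are stand-ins, not the router's):

```rust
use thiserror::Error;

#[derive(Debug, Error)]
#[error("upstream channel closed")]
struct RecvError;

#[derive(Debug, Error)]
enum NotifyError {
    /// `#[from]` makes thiserror emit `impl From<RecvError> for NotifyError`,
    /// so `?` converts the error automatically at call sites.
    #[error("cannot receive data from pubsub")]
    Recv(#[from] RecvError),
}

fn recv() -> Result<(), RecvError> {
    Err(RecvError)
}

fn subscribe() -> Result<(), NotifyError> {
    recv()?; // RecvError -> NotifyError via the derived From impl
    Ok(())
}

fn main() {
    assert!(matches!(subscribe(), Err(NotifyError::Recv(_))));
}
```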
@@ -326,7 +303,7 @@ where pub(crate) async fn invalid_ids( &mut self, topics: Vec, - ) -> Result<(Vec, Vec), NotifyError> { + ) -> Result<(Vec, Vec), NotifyError> { // Channel to check if the topic still exists or not let (response_tx, response_rx) = oneshot::channel(); @@ -343,7 +320,7 @@ where } /// Delete the topic even if several subscribers are still listening - pub(crate) async fn force_delete(&mut self, topic: K) -> Result<(), NotifyError> { + pub(crate) async fn force_delete(&mut self, topic: K) -> Result<(), NotifyError> { // if disconnected, we don't care (the task was stopped) self.sender .send(Notification::ForceDelete { topic }) @@ -354,7 +331,7 @@ where /// Delete the topic if and only if one or zero subscriber is still listening /// This function is not async to allow it to be used in a Drop impl #[cfg(test)] - pub(crate) fn try_delete(&mut self, topic: K) -> Result<(), NotifyError> { + pub(crate) fn try_delete(&mut self, topic: K) -> Result<(), NotifyError> { // if disconnected, we don't care (the task was stopped) self.sender .try_send(Notification::TryDelete { topic }) @@ -362,7 +339,7 @@ where } #[cfg(test)] - pub(crate) async fn broadcast(&mut self, data: V) -> Result<(), NotifyError> { + pub(crate) async fn broadcast(&mut self, data: V) -> Result<(), NotifyError> { self.sender .send(Notification::Broadcast { data }) .await @@ -370,7 +347,7 @@ where } #[cfg(test)] - pub(crate) async fn debug(&mut self) -> Result> { + pub(crate) async fn debug(&mut self) -> Result> { let (response_tx, response_rx) = oneshot::channel(); self.sender .send(Notification::Debug { @@ -561,7 +538,7 @@ where V: Clone + 'static + Send, { /// Send data to the subscribed topic - pub(crate) fn send_sync(&mut self, data: V) -> Result<(), NotifyError> { + pub(crate) fn send_sync(&mut self, data: V) -> Result<(), NotifyError> { self.msg_sender.send(data.into()).map_err(|err| { NotifyError::BroadcastSendError(broadcast::error::SendError(err.0.unwrap())) })?; diff --git a/apollo-router/src/orbiter/mod.rs b/apollo-router/src/orbiter/mod.rs index 8e47323157..ebd3383752 100644 --- a/apollo-router/src/orbiter/mod.rs +++ b/apollo-router/src/orbiter/mod.rs @@ -110,7 +110,7 @@ impl RouterSuperServiceFactory for OrbiterRouterSuperServiceFactory { extra_plugins, ) .await - .map(|factory| { + .inspect(|factory| { if !is_telemetry_disabled { let schema = factory.supergraph_creator.schema(); @@ -122,7 +122,6 @@ impl RouterSuperServiceFactory for OrbiterRouterSuperServiceFactory { } }); } - factory }) } } @@ -381,7 +380,7 @@ mod test { let config = Configuration::from_str(include_str!("testdata/redaction.router.yaml")) .expect("config must be valid"); let schema_string = include_str!("../testdata/minimal_supergraph.graphql"); - let schema = crate::spec::Schema::parse_test(schema_string, &Default::default()).unwrap(); + let schema = crate::spec::Schema::parse(schema_string, &Default::default()).unwrap(); let report = create_report(Arc::new(config), Arc::new(schema)); insta::with_settings!({sort_maps => true}, { assert_yaml_snapshot!(report, { @@ -399,7 +398,7 @@ mod test { .expect("config must be valid"); config.validated_yaml = Some(Value::Null); let schema_string = include_str!("../testdata/minimal_supergraph.graphql"); - let schema = crate::spec::Schema::parse_test(schema_string, &Default::default()).unwrap(); + let schema = crate::spec::Schema::parse(schema_string, &Default::default()).unwrap(); let report = create_report(Arc::new(config), Arc::new(schema)); insta::with_settings!({sort_maps => true}, { 
assert_yaml_snapshot!(report, { @@ -417,7 +416,7 @@ mod test { .expect("config must be valid"); config.validated_yaml = Some(json!({"garbage": "garbage"})); let schema_string = include_str!("../testdata/minimal_supergraph.graphql"); - let schema = crate::spec::Schema::parse_test(schema_string, &Default::default()).unwrap(); + let schema = crate::spec::Schema::parse(schema_string, &Default::default()).unwrap(); let report = create_report(Arc::new(config), Arc::new(schema)); insta::with_settings!({sort_maps => true}, { assert_yaml_snapshot!(report, { diff --git a/apollo-router/src/plugin/mod.rs b/apollo-router/src/plugin/mod.rs index eafaa19383..0a7a567d86 100644 --- a/apollo-router/src/plugin/mod.rs +++ b/apollo-router/src/plugin/mod.rs @@ -7,7 +7,8 @@ //! - router //! - execution //! - subgraph (multiple in parallel if multiple subgraphs are accessed) -//! stages. +//! +//! stages. //! //! A plugin can choose to interact with the flow of requests at any or all of these stages of //! processing. At each stage a [`Service`] is provided which provides an appropriate @@ -718,6 +719,7 @@ pub(crate) trait DynPlugin: Send + Sync + 'static { fn as_any(&self) -> &dyn std::any::Any; /// Support downcasting + #[cfg(test)] fn as_any_mut(&mut self) -> &mut dyn std::any::Any; } @@ -765,6 +767,7 @@ where self } + #[cfg(test)] fn as_any_mut(&mut self) -> &mut dyn std::any::Any { self } diff --git a/apollo-router/src/plugin/test/mock/subgraph.rs b/apollo-router/src/plugin/test/mock/subgraph.rs index 80149de1d2..8ee5e5465c 100644 --- a/apollo-router/src/plugin/test/mock/subgraph.rs +++ b/apollo-router/src/plugin/test/mock/subgraph.rs @@ -182,7 +182,7 @@ impl Service for MockSubgraph { let http_response = http_response_builder .body(response.clone()) .expect("Response is serializable; qed"); - SubgraphResponse::new_from_response(http_response, req.context) + SubgraphResponse::new_from_response(http_response, req.context, "test".to_string()) } else { let error = crate::error::Error::builder() .message(format!( diff --git a/apollo-router/src/plugin/test/service.rs b/apollo-router/src/plugin/test/service.rs index ec9ffa7c91..fa1388ff1b 100644 --- a/apollo-router/src/plugin/test/service.rs +++ b/apollo-router/src/plugin/test/service.rs @@ -95,7 +95,7 @@ macro_rules! mock_async_service { impl HasSchema for MockSupergraphService { fn schema(&self) -> Arc { Arc::new( - Schema::parse_test( + Schema::parse( include_str!("../../testdata/supergraph.graphql"), &Default::default(), ) diff --git a/apollo-router/src/plugins/authentication/jwks.rs b/apollo-router/src/plugins/authentication/jwks.rs index 6c9d582d4b..9da1a2ac74 100644 --- a/apollo-router/src/plugins/authentication/jwks.rs +++ b/apollo-router/src/plugins/authentication/jwks.rs @@ -145,24 +145,21 @@ pub(super) async fn get_jwks(url: Url, headers: Vec
) -> Option { let data = if url.scheme() == "file" { let path = url .to_file_path() - .map_err(|e| { + .inspect_err(|_| { tracing::error!("url cannot be converted to filesystem path"); - e }) .ok()?; read_to_string(path) .await - .map_err(|e| { + .inspect_err(|e| { tracing::error!(%e, "could not read JWKS path"); - e }) .ok()? } else { let my_client = CLIENT .as_ref() - .map_err(|e| { + .inspect_err(|e| { tracing::error!(%e, "could not activate authentication feature"); - e }) .ok()? .clone(); diff --git a/apollo-router/src/plugins/authentication/subgraph.rs b/apollo-router/src/plugins/authentication/subgraph.rs index bbe09666b9..9dce2b1e45 100644 --- a/apollo-router/src/plugins/authentication/subgraph.rs +++ b/apollo-router/src/plugins/authentication/subgraph.rs @@ -801,10 +801,11 @@ mod test { Ok(()) } - fn example_response(_: SubgraphRequest) -> Result { + fn example_response(req: SubgraphRequest) -> Result { Ok(SubgraphResponse::new_from_response( http::Response::default(), Context::new(), + req.subgraph_name.unwrap_or_else(|| String::from("test")), )) } diff --git a/apollo-router/src/plugins/authorization/authenticated.rs b/apollo-router/src/plugins/authorization/authenticated.rs index a4324b5548..ffe881877b 100644 --- a/apollo-router/src/plugins/authorization/authenticated.rs +++ b/apollo-router/src/plugins/authorization/authenticated.rs @@ -176,7 +176,7 @@ impl<'a> traverse::Visitor for AuthenticatedCheckVisitor<'a> { pub(crate) struct AuthenticatedVisitor<'a> { schema: &'a schema::Schema, fragments: HashMap<&'a Name, &'a ast::FragmentDefinition>, - implementers_map: &'a HashMap, + implementers_map: &'a apollo_compiler::collections::HashMap, pub(crate) query_requires_authentication: bool, pub(crate) unauthorized_paths: Vec, // store the error paths from fragments so we can add them at @@ -191,7 +191,7 @@ impl<'a> AuthenticatedVisitor<'a> { pub(crate) fn new( schema: &'a schema::Schema, executable: &'a ast::Document, - implementers_map: &'a HashMap, + implementers_map: &'a apollo_compiler::collections::HashMap, dry_run: bool, ) -> Option { Some(Self { diff --git a/apollo-router/src/plugins/authorization/mod.rs b/apollo-router/src/plugins/authorization/mod.rs index 14b43ec039..8ded941c2b 100644 --- a/apollo-router/src/plugins/authorization/mod.rs +++ b/apollo-router/src/plugins/authorization/mod.rs @@ -371,7 +371,7 @@ impl AuthorizationPlugin { Some((filtered_doc, paths)) => { unauthorized_paths.extend(paths); - // FIXME: consider only `filtered_doc.get_operation(key.operation_name)`? + // FIXME: consider only `filtered_doc.operations.get(key.operation_name)`? if filtered_doc.definitions.is_empty() { return Err(QueryPlannerError::Unauthorized(unauthorized_paths)); } @@ -389,7 +389,7 @@ impl AuthorizationPlugin { Some((filtered_doc, paths)) => { unauthorized_paths.extend(paths); - // FIXME: consider only `filtered_doc.get_operation(key.operation_name)`? + // FIXME: consider only `filtered_doc.operations.get(key.operation_name)`? if filtered_doc.definitions.is_empty() { return Err(QueryPlannerError::Unauthorized(unauthorized_paths)); } @@ -407,7 +407,7 @@ impl AuthorizationPlugin { Some((filtered_doc, paths)) => { unauthorized_paths.extend(paths); - // FIXME: consider only `filtered_doc.get_operation(key.operation_name)`? + // FIXME: consider only `filtered_doc.operations.get(key.operation_name)`? 
if filtered_doc.definitions.is_empty() { return Err(QueryPlannerError::Unauthorized(unauthorized_paths)); } diff --git a/apollo-router/src/plugins/authorization/policy.rs b/apollo-router/src/plugins/authorization/policy.rs index 002c98592c..df692cf388 100644 --- a/apollo-router/src/plugins/authorization/policy.rs +++ b/apollo-router/src/plugins/authorization/policy.rs @@ -188,7 +188,7 @@ impl<'a> traverse::Visitor for PolicyExtractionVisitor<'a> { pub(crate) struct PolicyFilteringVisitor<'a> { schema: &'a schema::Schema, fragments: HashMap<&'a Name, &'a ast::FragmentDefinition>, - implementers_map: &'a HashMap, + implementers_map: &'a apollo_compiler::collections::HashMap, dry_run: bool, request_policies: HashSet, pub(crate) query_requires_policies: bool, @@ -223,7 +223,7 @@ impl<'a> PolicyFilteringVisitor<'a> { pub(crate) fn new( schema: &'a schema::Schema, executable: &'a ast::Document, - implementers_map: &'a HashMap, + implementers_map: &'a apollo_compiler::collections::HashMap, successful_policies: HashSet, dry_run: bool, ) -> Option { diff --git a/apollo-router/src/plugins/authorization/scopes.rs b/apollo-router/src/plugins/authorization/scopes.rs index a566be56d8..361b50daad 100644 --- a/apollo-router/src/plugins/authorization/scopes.rs +++ b/apollo-router/src/plugins/authorization/scopes.rs @@ -205,7 +205,7 @@ fn scopes_sets_argument(directive: &ast::Directive) -> impl Iterator { schema: &'a schema::Schema, fragments: HashMap<&'a Name, &'a ast::FragmentDefinition>, - implementers_map: &'a HashMap, + implementers_map: &'a apollo_compiler::collections::HashMap, request_scopes: HashSet, pub(crate) query_requires_scopes: bool, pub(crate) unauthorized_paths: Vec, @@ -221,7 +221,7 @@ impl<'a> ScopeFilteringVisitor<'a> { pub(crate) fn new( schema: &'a schema::Schema, executable: &'a ast::Document, - implementers_map: &'a HashMap, + implementers_map: &'a apollo_compiler::collections::HashMap, scopes: HashSet, dry_run: bool, ) -> Option { diff --git a/apollo-router/src/plugins/cache/entity.rs b/apollo-router/src/plugins/cache/entity.rs index ebc6772008..2375d4fde4 100644 --- a/apollo-router/src/plugins/cache/entity.rs +++ b/apollo-router/src/plugins/cache/entity.rs @@ -25,6 +25,8 @@ use tracing::Level; use super::cache_control::CacheControl; use super::invalidation::Invalidation; +use super::invalidation::InvalidationOrigin; +use super::metrics::CacheMetricContextKey; use super::metrics::CacheMetricsService; use crate::batching::BatchQuery; use crate::cache::redis::RedisCacheStorage; @@ -48,6 +50,8 @@ use crate::services::supergraph; use crate::spec::TYPENAME; use crate::Context; +/// Change this key if you introduce a breaking change in entity caching algorithm to make sure it won't take the previous entries +pub(crate) const ENTITY_CACHE_VERSION: &str = "1.0"; pub(crate) const ENTITIES: &str = "_entities"; pub(crate) const REPRESENTATIONS: &str = "representations"; pub(crate) const CONTEXT_CACHE_KEY: &str = "apollo_entity_cache::key"; @@ -118,6 +122,17 @@ struct Metrics { pub(crate) separate_per_type: bool, } +#[derive(Default, Serialize, Deserialize, Debug)] +#[serde(default)] +pub(crate) struct CacheSubgraph(pub(crate) HashMap); + +#[derive(Default, Serialize, Deserialize, Debug)] +#[serde(default)] +pub(crate) struct CacheHitMiss { + pub(crate) hit: usize, + pub(crate) miss: usize, +} + #[async_trait::async_trait] impl Plugin for EntityCache { type Config = Config; @@ -385,6 +400,7 @@ impl InnerCacheService { .contains_key(REPRESENTATIONS) { if request.operation_kind == 
OperationKind::Query { + let mut cache_hit: HashMap = HashMap::new(); match cache_lookup_root( self.name.clone(), self.entity_type.as_deref(), @@ -393,11 +409,28 @@ impl InnerCacheService { private_id.as_deref(), request, ) - .instrument(tracing::info_span!("cache_lookup")) + .instrument(tracing::info_span!("cache.entity.lookup")) .await? { - ControlFlow::Break(response) => Ok(response), + ControlFlow::Break(response) => { + cache_hit.insert("Query".to_string(), CacheHitMiss { hit: 1, miss: 0 }); + let _ = response.context.insert( + CacheMetricContextKey::new( + response.subgraph_name.clone().unwrap_or_default(), + ), + CacheSubgraph(cache_hit), + ); + Ok(response) + } ControlFlow::Continue((request, mut root_cache_key)) => { + cache_hit.insert("Query".to_string(), CacheHitMiss { hit: 0, miss: 1 }); + let _ = request.context.insert( + CacheMetricContextKey::new( + request.subgraph_name.clone().unwrap_or_default(), + ), + CacheSubgraph(cache_hit), + ); + let mut response = self.service.call(request).await?; let cache_control = @@ -429,7 +462,11 @@ impl InnerCacheService { .extensions .remove("invalidation") { - self.handle_invalidation(invalidation_extensions).await; + self.handle_invalidation( + InvalidationOrigin::Extensions, + invalidation_extensions, + ) + .await; } if cache_control.should_store() { @@ -454,7 +491,11 @@ impl InnerCacheService { .extensions .remove("invalidation") { - self.handle_invalidation(invalidation_extensions).await; + self.handle_invalidation( + InvalidationOrigin::Extensions, + invalidation_extensions, + ) + .await; } Ok(response) @@ -467,7 +508,7 @@ impl InnerCacheService { private_id.as_deref(), request, ) - .instrument(tracing::info_span!("cache_lookup")) + .instrument(tracing::info_span!("cache.entity.lookup")) .await? 
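The `cache_hit` map threaded through the lookup above replaces the old inline tracing counters: per-entity-type hit/miss totals are stashed in the request context under a per-subgraph key (`CacheMetricContextKey`) and read back later by the telemetry instruments. A minimal sketch of that bookkeeping, with a plain `HashMap` standing in for the router's `Context` (the key format mirrors the `From<CacheMetricContextKey> for String` impl added in metrics.rs):

```rust
use std::collections::HashMap;

#[derive(Default, Debug)]
struct CacheHitMiss {
    hit: usize,
    miss: usize,
}

// Fixed prefix plus subgraph name, as in metrics.rs.
const CACHE_INFO_SUBGRAPH_CONTEXT_KEY: &str = "apollo::router::entity_cache_info_subgraph";

fn context_key(subgraph_name: &str) -> String {
    format!("{CACHE_INFO_SUBGRAPH_CONTEXT_KEY}_{subgraph_name}")
}

// Record one lookup result for an entity type resolved by a subgraph.
fn record(
    context: &mut HashMap<String, HashMap<String, CacheHitMiss>>,
    subgraph_name: &str,
    entity_type: &str,
    was_hit: bool,
) {
    let per_type = context.entry(context_key(subgraph_name)).or_default();
    let entry = per_type.entry(entity_type.to_string()).or_default();
    if was_hit {
        entry.hit += 1;
    } else {
        entry.miss += 1;
    }
}

fn main() {
    let mut ctx = HashMap::new();
    record(&mut ctx, "products", "Product", true);
    record(&mut ctx, "products", "Review", false);
    println!("{:?}", ctx[&context_key("products")]);
}
```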
{ ControlFlow::Break(response) => Ok(response), @@ -495,7 +536,11 @@ impl InnerCacheService { .extensions .remove("invalidation") { - self.handle_invalidation(invalidation_extensions).await; + self.handle_invalidation( + InvalidationOrigin::Extensions, + invalidation_extensions, + ) + .await; } cache_store_entities_from_response( @@ -529,9 +574,13 @@ impl InnerCacheService { }) } - async fn handle_invalidation(&mut self, invalidation_extensions: Value) { + async fn handle_invalidation( + &mut self, + origin: InvalidationOrigin, + invalidation_extensions: Value, + ) { if let Ok(requests) = from_value(invalidation_extensions) { - if let Err(e) = self.invalidation.invalidate(requests).await { + if let Err(e) = self.invalidation.invalidate(origin, requests).await { tracing::error!(error = %e, message = "could not invalidate entity cache entries", ); @@ -576,6 +625,7 @@ async fn cache_lookup_root( .data(value.0.data) .extensions(Object::new()) .context(request.context) + .and_subgraph_name(request.subgraph_name.clone()) .build(); value @@ -639,7 +689,7 @@ async fn cache_lookup_entities( .expect("we already checked that representations exist"); // remove from representations the entities we already obtained from the cache let (new_representations, cache_result, cache_control) = - filter_representations(&name, representations, keys, cache_result)?; + filter_representations(&name, representations, keys, cache_result, &request.context)?; if !new_representations.is_empty() { body.variables @@ -661,6 +711,7 @@ async fn cache_lookup_entities( let mut response = subgraph::Response::builder() .data(data) .extensions(Object::new()) + .and_subgraph_name(request.subgraph_name) .context(request.context) .build(); @@ -703,7 +754,7 @@ async fn cache_store_root_from_response( .or(subgraph_ttl); if response.response.body().errors.is_empty() && cache_control.should_store() { - let span = tracing::info_span!("cache_store"); + let span = tracing::info_span!("cache.entity.store"); let data = data.clone(); tokio::spawn(async move { cache @@ -818,23 +869,23 @@ pub(crate) fn hash_additional_data( let repr_key = ByteString::from(REPRESENTATIONS); // Removing the representations variable because it's already part of the cache key let representations = body.variables.remove(&repr_key); - digest.update(&serde_json::to_vec(&body.variables).unwrap()); + digest.update(serde_json::to_vec(&body.variables).unwrap()); if let Some(representations) = representations { body.variables.insert(repr_key, representations); } - digest.update(&serde_json::to_vec(cache_key).unwrap()); + digest.update(serde_json::to_vec(cache_key).unwrap()); if let Ok(Some(cache_data)) = context.get::<&str, Object>(CONTEXT_CACHE_KEY) { if let Some(v) = cache_data.get("all") { - digest.update(&serde_json::to_vec(v).unwrap()) + digest.update(serde_json::to_vec(v).unwrap()) } if let Some(v) = body .operation_name .as_ref() .and_then(|op| cache_data.get(op.as_str())) { - digest.update(&serde_json::to_vec(v).unwrap()) + digest.update(serde_json::to_vec(v).unwrap()) } } @@ -861,6 +912,7 @@ fn extract_cache_key_root( let entity_type = entity_type_opt.unwrap_or("Query"); // the cache key is written to easily find keys matching a prefix for deletion: + // - entity cache version: current version of the hash // - subgraph name: subgraph name // - entity type: entity type // - query hash: invalidate the entry for a specific query and operation name @@ -868,7 +920,7 @@ fn extract_cache_key_root( let mut key = String::new(); let _ = write!( &mut key, - 
"subgraph:{subgraph_name}:type:{entity_type}:hash:{query_hash}:data:{additional_data_hash}" + "version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph_name}:type:{entity_type}:hash:{query_hash}:data:{additional_data_hash}" ); if is_known_private { @@ -911,19 +963,17 @@ fn extract_cache_keys( let typename = opt_type.as_str().unwrap_or("-"); - // We have to hash the representation because it can contains PII - let mut digest = Sha256::new(); - digest.update(serde_json::to_string(&representation).unwrap().as_bytes()); - let hashed_entity_key = hex::encode(digest.finalize().as_slice()); + let hashed_entity_key = hash_entity_key(representation); // the cache key is written to easily find keys matching a prefix for deletion: + // - entity cache version: current version of the hash // - subgraph name: caching is done per subgraph // - type: can invalidate all instances of a type // - entity key: invalidate a specific entity // - query hash: invalidate the entry for a specific query and operation name // - additional data: separate cache entries depending on info like authorization status let mut key = String::new(); - let _ = write!(&mut key, "subgraph:{subgraph_name}:{typename}:{hashed_entity_key}:{query_hash}:{additional_data_hash}"); + let _ = write!(&mut key, "version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph_name}:type:{typename}:entity:{hashed_entity_key}:hash:{query_hash}:data:{additional_data_hash}"); if is_known_private { if let Some(id) = private_id { let _ = write!(&mut key, ":{id}"); @@ -938,6 +988,13 @@ fn extract_cache_keys( Ok(res) } +pub(crate) fn hash_entity_key(representation: &Value) -> String { + // We have to hash the representation because it can contains PII + let mut digest = Sha256::new(); + digest.update(serde_json::to_string(&representation).unwrap().as_bytes()); + hex::encode(digest.finalize().as_slice()) +} + /// represents the result of a cache lookup for an entity type and key struct IntermediateResult { key: String, @@ -952,10 +1009,11 @@ fn filter_representations( representations: &mut Vec, keys: Vec, mut cache_result: Vec>, + context: &Context, ) -> Result<(Vec, Vec, Option), BoxError> { let mut new_representations: Vec = Vec::new(); let mut result = Vec::new(); - let mut cache_hit: HashMap = HashMap::new(); + let mut cache_hit: HashMap = HashMap::new(); let mut cache_control = None; for ((mut representation, key), mut cache_entry) in representations @@ -976,10 +1034,9 @@ fn filter_representations( if let Some(false) = cache_entry.as_ref().map(|c| c.control.can_use()) { cache_entry = None; } - match cache_entry.as_ref() { None => { - cache_hit.entry(typename.clone()).or_default().1 += 1; + cache_hit.entry(typename.clone()).or_default().miss += 1; representation .as_object_mut() @@ -987,7 +1044,7 @@ fn filter_representations( new_representations.push(representation); } Some(entry) => { - cache_hit.entry(typename.clone()).or_default().0 += 1; + cache_hit.entry(typename.clone()).or_default().hit += 1; match cache_control.as_mut() { None => cache_control = Some(entry.control.clone()), Some(c) => *c = c.merge(&entry.control), @@ -1002,26 +1059,10 @@ fn filter_representations( }); } - for (ty, (hit, miss)) in cache_hit { - tracing::info!( - monotonic_counter.apollo.router.operations.entity.cache = hit as u64, - entity_type = ty.as_str(), - hit = %true, - %subgraph_name - ); - tracing::info!( - monotonic_counter.apollo.router.operations.entity.cache = miss as u64, - entity_type = ty.as_str(), - miss = %true, - %subgraph_name - ); - tracing::event!( - Level::TRACE, - 
entity_type = ty.as_str(), - cache_hit = hit, - cache_miss = miss - ); - } + let _ = context.insert( + CacheMetricContextKey::new(subgraph_name.to_string()), + CacheSubgraph(cache_hit), + ); Ok((new_representations, result, cache_control)) } @@ -1133,10 +1174,3 @@ async fn insert_entities_in_result( Ok((new_entities, new_errors)) } - -#[derive(Debug, Clone, Serialize, Deserialize)] -struct Key { - #[serde(rename = "type")] - opt_type: Option, - id: Value, -} diff --git a/apollo-router/src/plugins/cache/invalidation.rs b/apollo-router/src/plugins/cache/invalidation.rs index 6293df2ab9..96c863e437 100644 --- a/apollo-router/src/plugins/cache/invalidation.rs +++ b/apollo-router/src/plugins/cache/invalidation.rs @@ -13,17 +13,26 @@ use crate::cache::redis::RedisCacheStorage; use crate::cache::redis::RedisKey; use crate::notification::Handle; use crate::notification::HandleStream; +use crate::plugins::cache::entity::hash_entity_key; +use crate::plugins::cache::entity::ENTITY_CACHE_VERSION; use crate::Notify; #[derive(Clone)] pub(crate) struct Invalidation { enabled: bool, - handle: Handle>, + handle: Handle)>, } -#[derive(Copy, Clone, Hash, PartialEq, Eq)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub(crate) struct InvalidationTopic; +#[derive(Clone, Debug)] +#[allow(dead_code)] +pub(crate) enum InvalidationOrigin { + Endpoint, + Extensions, +} + impl Invalidation { pub(crate) async fn new(storage: Option) -> Result { let mut notify = Notify::new(None, None, None); @@ -39,11 +48,12 @@ impl Invalidation { pub(crate) async fn invalidate( &mut self, + origin: InvalidationOrigin, requests: Vec, ) -> Result<(), BoxError> { if self.enabled { let mut sink = self.handle.clone().into_sink(); - sink.send(requests).await.map_err(|e| e.message)?; + sink.send((origin, requests)).await.map_err(|e| e.message)?; } Ok(()) @@ -52,19 +62,36 @@ impl Invalidation { async fn start( storage: RedisCacheStorage, - mut handle: HandleStream>, + mut handle: HandleStream)>, ) { - while let Some(requests) = handle.next().await { - handle_request_batch(&storage, requests) - .instrument(tracing::info_span!("cache.invalidation.batch")) + while let Some((origin, requests)) = handle.next().await { + let origin = match origin { + InvalidationOrigin::Endpoint => "endpoint", + InvalidationOrigin::Extensions => "extensions", + }; + u64_counter!( + "apollo.router.operations.entity.invalidation.event", + "Entity cache received a batch of invalidation requests", + 1u64, + "origin" = origin + ); + handle_request_batch(&storage, origin, requests) + .instrument(tracing::info_span!( + "cache.invalidation.batch", + "origin" = origin + )) .await } } -async fn handle_request_batch(storage: &RedisCacheStorage, requests: Vec) { +async fn handle_request_batch( + storage: &RedisCacheStorage, + origin: &'static str, + requests: Vec, +) { for request in requests { let start = Instant::now(); - handle_request(storage, &request) + handle_request(storage, origin, &request) .instrument(tracing::info_span!("cache.invalidation.request")) .await; f64_histogram!( @@ -75,8 +102,13 @@ async fn handle_request_batch(storage: &RedisCacheStorage, requests: Vec String { match self { InvalidationRequest::Subgraph { subgraph } => { - format!("subgraph:{subgraph}*",) + format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}*",) } InvalidationRequest::Type { subgraph, r#type } => { - format!("subgraph:{subgraph}:type:{type}*",) + format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}:type:{type}*",) } + InvalidationRequest::Entity { + subgraph, + 
r#type, + key, + } => { + let entity_key = hash_entity_key(key); + format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}:type:{type}:entity:{entity_key}*") + } + } + } + + fn subgraph(&self) -> String { + match self { + InvalidationRequest::Subgraph { subgraph } => subgraph.clone(), _ => { todo!() } diff --git a/apollo-router/src/plugins/cache/metrics.rs b/apollo-router/src/plugins/cache/metrics.rs index f85ecc9833..86e802c093 100644 --- a/apollo-router/src/plugins/cache/metrics.rs +++ b/apollo-router/src/plugins/cache/metrics.rs @@ -17,6 +17,8 @@ use super::entity::REPRESENTATIONS; use crate::services::subgraph; use crate::spec::TYPENAME; +pub(crate) const CACHE_INFO_SUBGRAPH_CONTEXT_KEY: &str = + "apollo::router::entity_cache_info_subgraph"; pub(crate) struct CacheMetricsService(Option); impl CacheMetricsService { @@ -114,8 +116,7 @@ impl InnerCacheMetricsService { .into_iter() .filter_map(|val| { val.to_str().ok().map(|v| { - v.to_string() - .split(", ") + v.split(", ") .map(|s| s.to_string()) .collect::>() }) @@ -268,3 +269,17 @@ impl CacheCounter { self.created_at = Instant::now(); } } + +pub(crate) struct CacheMetricContextKey(String); + +impl CacheMetricContextKey { + pub(crate) fn new(subgraph_name: String) -> Self { + Self(subgraph_name) + } +} + +impl From for String { + fn from(val: CacheMetricContextKey) -> Self { + format!("{CACHE_INFO_SUBGRAPH_CONTEXT_KEY}_{}", val.0) + } +} diff --git a/apollo-router/src/plugins/coprocessor/mod.rs b/apollo-router/src/plugins/coprocessor/mod.rs index cba03ba5c6..ccaaf61223 100644 --- a/apollo-router/src/plugins/coprocessor/mod.rs +++ b/apollo-router/src/plugins/coprocessor/mod.rs @@ -1010,6 +1010,7 @@ where .transpose()?; let context_to_send = request_config.context.then(|| request.context.clone()); let uri = request_config.uri.then(|| parts.uri.to_string()); + let subgraph_name = service_name.clone(); let service_name = request_config.service_name.then_some(service_name); let payload = Externalizable::subgraph_builder() @@ -1079,6 +1080,7 @@ where let subgraph_response = subgraph::Response { response: http_response, context: request.context, + subgraph_name: Some(subgraph_name), }; if let Some(context) = co_processor_output.context { diff --git a/apollo-router/src/plugins/coprocessor/test.rs b/apollo-router/src/plugins/coprocessor/test.rs index c5d99e7abd..50786f336d 100644 --- a/apollo-router/src/plugins/coprocessor/test.rs +++ b/apollo-router/src/plugins/coprocessor/test.rs @@ -603,8 +603,9 @@ mod tests { let request = subgraph::Request::fake_builder().build(); - let crate::services::subgraph::Response { response, context } = - service.oneshot(request).await.unwrap(); + let crate::services::subgraph::Response { + response, context, .. + } = service.oneshot(request).await.unwrap(); assert!(context.get::<_, bool>("testKey").unwrap().unwrap()); diff --git a/apollo-router/src/plugins/csrf.rs b/apollo-router/src/plugins/csrf.rs index 0362f44a6e..6e0f08e118 100644 --- a/apollo-router/src/plugins/csrf.rs +++ b/apollo-router/src/plugins/csrf.rs @@ -34,7 +34,7 @@ pub(crate) struct CSRFConfig { /// and make sure you either: /// - did not set any `allow_headers` list (so it defaults to `mirror_request`) /// - added your required headers to the allow_headers list, as shown in the - /// `examples/cors-and-csrf/custom-headers.router.yaml` files. + /// `examples/cors-and-csrf/custom-headers.router.yaml` files. 
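Taken together, the versioned key layout in entity.rs and the `key_prefix_pattern` arms above make invalidation a prefix match: each request kind keeps a longer prefix of the same key shape. A condensed, self-contained sketch of the scheme (serde_json's `Value` stands in for the router's JSON type; the hashing matches `hash_entity_key`):

```rust
use serde_json::Value;
use sha2::{Digest, Sha256};

const ENTITY_CACHE_VERSION: &str = "1.0";

// Representations can contain PII, so only a hash of them goes into the key.
fn hash_entity_key(representation: &Value) -> String {
    let mut digest = Sha256::new();
    digest.update(serde_json::to_string(representation).unwrap().as_bytes());
    hex::encode(digest.finalize().as_slice())
}

enum InvalidationRequest {
    Subgraph { subgraph: String },
    Type { subgraph: String, r#type: String },
    Entity { subgraph: String, r#type: String, key: Value },
}

impl InvalidationRequest {
    // Each variant narrows the set of keys that will be wiped.
    fn key_prefix_pattern(&self) -> String {
        match self {
            Self::Subgraph { subgraph } => {
                format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}*")
            }
            Self::Type { subgraph, r#type } => {
                format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}:type:{type}*")
            }
            Self::Entity { subgraph, r#type, key } => {
                let entity_key = hash_entity_key(key);
                format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}:type:{type}:entity:{entity_key}*")
            }
        }
    }
}

fn main() {
    let req = InvalidationRequest::Type {
        subgraph: "products".into(),
        r#type: "Product".into(),
    };
    assert_eq!(
        req.key_prefix_pattern(),
        "version:1.0:subgraph:products:type:Product*"
    );
}
```

Bumping `ENTITY_CACHE_VERSION` therefore orphans every pre-existing entry at once, since nothing will ever look up the old prefix again.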
required_headers: Vec, } diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs b/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs index 23eae7d6f9..b3f3afe372 100644 --- a/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs +++ b/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs @@ -1,8 +1,8 @@ use apollo_compiler::ast::NamedType; use apollo_compiler::executable::Field; use apollo_compiler::executable::SelectionSet; +use apollo_compiler::parser::Parser; use apollo_compiler::validation::Valid; -use apollo_compiler::Parser; use apollo_compiler::Schema; use tower::BoxError; diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs index ab2ac33e88..f84a4fcd0a 100644 --- a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs +++ b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs @@ -351,10 +351,10 @@ impl StaticCostCalculator { should_estimate_requires: bool, ) -> Result { let mut cost = 0.0; - if let Some(op) = &query.anonymous_operation { + if let Some(op) = &query.operations.anonymous { cost += self.score_operation(op, schema, query, should_estimate_requires)?; } - for (_name, op) in query.named_operations.iter() { + for (_name, op) in query.operations.named.iter() { cost += self.score_operation(op, schema, query, should_estimate_requires)?; } Ok(cost) @@ -431,7 +431,7 @@ mod tests { query_str: &str, config: &Configuration, ) -> (spec::Schema, ParsedDocument) { - let schema = spec::Schema::parse_test(schema_str, config).unwrap(); + let schema = spec::Schema::parse(schema_str, config).unwrap(); let query = Query::parse_document(query_str, None, &schema, config).unwrap(); (schema, query) } diff --git a/apollo-router/src/plugins/demand_control/mod.rs b/apollo-router/src/plugins/demand_control/mod.rs index 381f64e8f7..476deeb737 100644 --- a/apollo-router/src/plugins/demand_control/mod.rs +++ b/apollo-router/src/plugins/demand_control/mod.rs @@ -316,12 +316,14 @@ impl Plugin for DemandControl { fn subgraph_service( &self, - _subgraph_name: &str, + subgraph_name: &str, service: subgraph::BoxService, ) -> subgraph::BoxService { if !self.config.enabled { service } else { + let subgraph_name = subgraph_name.to_owned(); + let subgraph_name_map_fut = subgraph_name.to_owned(); ServiceBuilder::new() .checkpoint(move |req: subgraph::Request| { let strategy = req.context.extensions().with_lock(|lock| { @@ -339,18 +341,22 @@ impl Plugin for DemandControl { ) .context(req.context.clone()) .extensions(crate::json_ext::Object::new()) + .subgraph_name(subgraph_name.clone()) .build(), ), }) }) .map_future_with_request_data( - |req: &subgraph::Request| { + move |req: &subgraph::Request| { //TODO convert this to expect - req.executable_document.clone().unwrap_or_else(|| { - Arc::new(Valid::assume_valid(ExecutableDocument::new())) - }) + ( + subgraph_name_map_fut.clone(), + req.executable_document.clone().unwrap_or_else(|| { + Arc::new(Valid::assume_valid(ExecutableDocument::new())) + }), + ) }, - |req: Arc>, fut| async move { + |(subgraph_name, req): (String, Arc>), fut| async move { let resp: subgraph::Response = fut.await?; let strategy = resp.context.extensions().with_lock(|lock| { lock.get::().expect("must have strategy").clone() @@ -362,6 +368,7 @@ impl Plugin for DemandControl { err.into_graphql_errors() .expect("must be able to convert to graphql error"), ) + 
.subgraph_name(subgraph_name) .context(resp.context.clone()) .extensions(Object::new()) .build(), diff --git a/apollo-router/src/plugins/file_uploads/map_field.rs b/apollo-router/src/plugins/file_uploads/map_field.rs index 7a044cb605..3e1d141c31 100644 --- a/apollo-router/src/plugins/file_uploads/map_field.rs +++ b/apollo-router/src/plugins/file_uploads/map_field.rs @@ -20,7 +20,7 @@ pub(super) struct MapField { impl MapField { pub(super) fn new(map_field: MapFieldRaw) -> UploadResult { - let mut files_order = IndexSet::new(); + let mut files_order = IndexSet::default(); let mut map_per_variable: MapPerVariable = HashMap::new(); for (filename, paths) in map_field.into_iter() { for path in paths.into_iter() { @@ -60,7 +60,7 @@ impl MapField { &self, variable_names: impl IntoIterator, ) -> MapFieldRaw { - let mut subgraph_map: MapFieldRaw = IndexMap::new(); + let mut subgraph_map: MapFieldRaw = IndexMap::default(); for variable_name in variable_names.into_iter() { let variable_name = variable_name.as_str(); if let Some(variable_map) = self.per_variable.get(variable_name) { diff --git a/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs b/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs index 075e3950e2..c7bfdc1ec4 100644 --- a/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs +++ b/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs @@ -38,7 +38,7 @@ pub(super) fn rearrange_query_plan( ); } - let root = rearrange_plan_node(root, &mut IndexMap::new(), &variable_ranges)?; + let root = rearrange_plan_node(root, &mut IndexMap::default(), &variable_ranges)?; Ok(QueryPlan { root: Arc::new(root), usage_reporting: query_plan.usage_reporting.clone(), @@ -97,7 +97,7 @@ fn rearrange_plan_node<'a>( // Error if 'rest' contains file variables if let Some(rest) = rest { - let mut rest_variables = IndexMap::new(); + let mut rest_variables = IndexMap::default(); // ignore result use it just to collect variables drop(rearrange_plan_node( rest, @@ -130,7 +130,7 @@ fn rearrange_plan_node<'a>( .transpose(); // Error if 'deferred' contains file variables - let mut deferred_variables = IndexMap::new(); + let mut deferred_variables = IndexMap::default(); for DeferredNode { node, .. 
} in deferred.iter() { if let Some(node) = node { // ignore result use it just to collect variables diff --git a/apollo-router/src/plugins/headers.rs b/apollo-router/src/plugins/headers.rs index 55fa15772f..2f19a965ff 100644 --- a/apollo-router/src/plugins/headers.rs +++ b/apollo-router/src/plugins/headers.rs @@ -928,10 +928,11 @@ mod test { Ok(()) } - fn example_response(_: SubgraphRequest) -> Result { + fn example_response(req: SubgraphRequest) -> Result { Ok(SubgraphResponse::new_from_response( http::Response::default(), Context::new(), + req.subgraph_name.unwrap_or_default(), )) } diff --git a/apollo-router/src/plugins/progressive_override/tests.rs b/apollo-router/src/plugins/progressive_override/tests.rs index 07b6bec7d8..d8b3cb31af 100644 --- a/apollo-router/src/plugins/progressive_override/tests.rs +++ b/apollo-router/src/plugins/progressive_override/tests.rs @@ -133,7 +133,7 @@ async fn assert_expected_and_absent_labels_for_supergraph_service( .unwrap() .supergraph_service(mock_service.boxed()); - let schema = crate::spec::Schema::parse_test( + let schema = crate::spec::Schema::parse( include_str!("./testdata/supergraph.graphql"), &Default::default(), ) @@ -205,7 +205,7 @@ async fn plugin_supergraph_service_trims_0pc_label() { } async fn get_json_query_plan(query: &str) -> serde_json::Value { - let schema = crate::spec::Schema::parse_test( + let schema = crate::spec::Schema::parse( include_str!("./testdata/supergraph.graphql"), &Default::default(), ) @@ -279,7 +279,7 @@ async fn query_with_labels(query: &str, labels_from_coprocessors: Vec<&str>) { .unwrap() .supergraph_service(mock_service.boxed()); - let schema = crate::spec::Schema::parse_test( + let schema = crate::spec::Schema::parse( include_str!("./testdata/supergraph.graphql"), &Default::default(), ) diff --git a/apollo-router/src/plugins/record_replay/record.rs b/apollo-router/src/plugins/record_replay/record.rs index 4e6563eafe..e821b016b2 100644 --- a/apollo-router/src/plugins/record_replay/record.rs +++ b/apollo-router/src/plugins/record_replay/record.rs @@ -63,16 +63,11 @@ impl Plugin for Record { .storage_path .unwrap_or_else(default_storage_path); - let schema_config = Default::default(); - let schema = Schema::parse(init.supergraph_sdl.clone().as_str(), &schema_config)?; - let api_schema = Schema::parse_compiler_schema(&schema.create_api_schema(&schema_config)?)?; - let schema = schema.with_api_schema(api_schema); - let plugin = Self { enabled: init.config.enabled, supergraph_sdl: init.supergraph_sdl.clone(), storage_path: storage_path.clone().into(), - schema: Arc::new(schema), + schema: Arc::new(Schema::parse(&init.supergraph_sdl, &Default::default())?), }; if init.config.enabled { diff --git a/apollo-router/src/plugins/record_replay/replay.rs b/apollo-router/src/plugins/record_replay/replay.rs index b978efd97f..dc79380f13 100644 --- a/apollo-router/src/plugins/record_replay/replay.rs +++ b/apollo-router/src/plugins/record_replay/replay.rs @@ -218,6 +218,7 @@ impl Plugin for Replay { let subgraph_response = subgraph::Response::new_from_response( http::Response::new(fetch.response.chunks[0].clone()), req.context.clone(), + subgraph_name.clone(), ); let runtime_variables = req.subgraph_request.body().variables.clone(); diff --git a/apollo-router/src/plugins/rhai/mod.rs b/apollo-router/src/plugins/rhai/mod.rs index 2f19f9cf32..8ef1e61e8f 100644 --- a/apollo-router/src/plugins/rhai/mod.rs +++ b/apollo-router/src/plugins/rhai/mod.rs @@ -792,20 +792,17 @@ fn process_error(error: Box) -> ErrorDetails { body: None, }; - // 
We only want to process errors raised in functions - if let EvalAltResult::ErrorInFunctionCall(..) = &*error { - let inner_error = error.unwrap_inner(); - // We only want to process runtime errors raised in functions - if let EvalAltResult::ErrorRuntime(obj, pos) = inner_error { - if let Ok(temp_error_details) = rhai::serde::from_dynamic::(obj) { - if temp_error_details.message.is_some() || temp_error_details.body.is_some() { - error_details = temp_error_details; - } else { - error_details.status = temp_error_details.status; - } + let inner_error = error.unwrap_inner(); + // We only want to process runtime errors + if let EvalAltResult::ErrorRuntime(obj, pos) = inner_error { + if let Ok(temp_error_details) = rhai::serde::from_dynamic::(obj) { + if temp_error_details.message.is_some() || temp_error_details.body.is_some() { + error_details = temp_error_details; + } else { + error_details.status = temp_error_details.status; } - error_details.position = Some(pos.into()); } + error_details.position = Some(pos.into()); } error_details } diff --git a/apollo-router/src/plugins/rhai/tests.rs b/apollo-router/src/plugins/rhai/tests.rs index f8f2d1caf3..b47c25774d 100644 --- a/apollo-router/src/plugins/rhai/tests.rs +++ b/apollo-router/src/plugins/rhai/tests.rs @@ -224,7 +224,7 @@ async fn rhai_plugin_execution_service_error() -> Result<(), BoxError> { assert_eq!( body.errors.first().unwrap().message.as_str(), - "rhai execution error: 'Runtime error: An error occured (line 30, position 5)\nin call to function 'execution_request''" + "rhai execution error: 'Runtime error: An error occured (line 30, position 5)'" ); Ok(()) } @@ -641,7 +641,7 @@ async fn it_can_process_string_subgraph_forbidden() { if let Err(error) = call_rhai_function("process_subgraph_response_string").await { let processed_error = process_error(error); assert_eq!(processed_error.status, StatusCode::INTERNAL_SERVER_ERROR); - assert_eq!(processed_error.message, Some("rhai execution error: 'Runtime error: I have raised an error (line 223, position 5)\nin call to function 'process_subgraph_response_string''".to_string())); + assert_eq!(processed_error.message, Some("rhai execution error: 'Runtime error: I have raised an error (line 223, position 5)'".to_string())); } else { // Test failed panic!("error processed incorrectly"); @@ -666,7 +666,13 @@ async fn it_cannot_process_om_subgraph_missing_message_and_body() { if let Err(error) = call_rhai_function("process_subgraph_response_om_missing_message").await { let processed_error = process_error(error); assert_eq!(processed_error.status, StatusCode::BAD_REQUEST); - assert_eq!(processed_error.message, Some("rhai execution error: 'Runtime error: #{\"status\": 400} (line 234, position 5)\nin call to function 'process_subgraph_response_om_missing_message''".to_string())); + assert_eq!( + processed_error.message, + Some( + "rhai execution error: 'Runtime error: #{\"status\": 400} (line 234, position 5)'" + .to_string() + ) + ); } else { // Test failed panic!("error processed incorrectly"); diff --git a/apollo-router/src/plugins/subscription.rs b/apollo-router/src/plugins/subscription.rs index a5c00f9cdd..4ca4d56201 100644 --- a/apollo-router/src/plugins/subscription.rs +++ b/apollo-router/src/plugins/subscription.rs @@ -210,14 +210,6 @@ pub(crate) enum Enabled { Enabled, } -/// Using websocket to directly connect to subgraph -#[derive(Debug, Clone, PartialEq, Eq, Default, Deserialize, Serialize, JsonSchema)] -#[serde(deny_unknown_fields, default)] -pub(crate) struct PassthroughMode { - /// 
WebSocket configuration for specific subgraphs - subgraph: SubgraphPassthroughMode, -} - #[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] #[serde(deny_unknown_fields)] /// WebSocket configuration for a specific subgraph diff --git a/apollo-router/src/plugins/telemetry/apollo_exporter.rs b/apollo-router/src/plugins/telemetry/apollo_exporter.rs index 4dfe3b390f..4a1a7b3e50 100644 --- a/apollo-router/src/plugins/telemetry/apollo_exporter.rs +++ b/apollo-router/src/plugins/telemetry/apollo_exporter.rs @@ -1,5 +1,4 @@ //! Configuration for apollo telemetry exporter. -use std::error::Error; use std::fmt::Debug; use std::io::Write; use std::str::FromStr; @@ -25,8 +24,6 @@ use serde::ser::SerializeStruct; use serde_json::Value; use sys_info::hostname; use tokio::sync::mpsc; -use tokio::task::JoinError; -use tonic::codegen::http::uri::InvalidUri; use tower::BoxError; use url::Url; @@ -375,70 +372,6 @@ pub(crate) mod proto { } } -/// Reporting Error type -#[derive(Debug)] -pub(crate) struct ReporterError { - source: Box, - msg: String, -} - -impl std::error::Error for ReporterError {} - -impl From for ReporterError { - fn from(error: InvalidUri) -> Self { - ReporterError { - msg: error.to_string(), - source: Box::new(error), - } - } -} - -impl From for ReporterError { - fn from(error: tonic::transport::Error) -> Self { - ReporterError { - msg: error.to_string(), - source: Box::new(error), - } - } -} - -impl From for ReporterError { - fn from(error: std::io::Error) -> Self { - ReporterError { - msg: error.to_string(), - source: Box::new(error), - } - } -} - -impl From for ReporterError { - fn from(error: sys_info::Error) -> Self { - ReporterError { - msg: error.to_string(), - source: Box::new(error), - } - } -} - -impl From for ReporterError { - fn from(error: JoinError) -> Self { - ReporterError { - msg: error.to_string(), - source: Box::new(error), - } - } -} - -impl std::fmt::Display for ReporterError { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!( - f, - "ReporterError: source: {}, message: {}", - self.source, self.msg - ) - } -} - pub(crate) fn serialize_timestamp( timestamp: &Option, serializer: S, diff --git a/apollo-router/src/plugins/telemetry/config_new/attributes.rs b/apollo-router/src/plugins/telemetry/config_new/attributes.rs index 8303e834ab..5ce016cc0e 100644 --- a/apollo-router/src/plugins/telemetry/config_new/attributes.rs +++ b/apollo-router/src/plugins/telemetry/config_new/attributes.rs @@ -119,21 +119,29 @@ impl DefaultForLevel for RouterAttributes { pub(crate) struct SupergraphAttributes { /// The GraphQL document being executed. /// Examples: - /// * query findBookById { bookById(id: ?) { name } } + /// + /// * `query findBookById { bookById(id: ?) { name } }` + /// /// Requirement level: Recommended #[serde(rename = "graphql.document")] pub(crate) graphql_document: Option, + /// The name of the operation being executed. /// Examples: + /// /// * findBookById + /// /// Requirement level: Recommended #[serde(rename = "graphql.operation.name")] pub(crate) graphql_operation_name: Option, + /// The type of the operation being executed. 
/// Examples: + /// /// * query /// * subscription /// * mutation + /// /// Requirement level: Recommended #[serde(rename = "graphql.operation.type")] pub(crate) graphql_operation_type: Option, @@ -172,27 +180,38 @@ impl DefaultForLevel for SupergraphAttributes { pub(crate) struct SubgraphAttributes { /// The name of the subgraph /// Examples: + /// /// * products + /// /// Requirement level: Required #[serde(rename = "subgraph.name")] subgraph_name: Option, + /// The GraphQL document being executed. /// Examples: - /// * query findBookById { bookById(id: ?) { name } } + /// + /// * `query findBookById { bookById(id: ?) { name } }` + /// /// Requirement level: Recommended #[serde(rename = "subgraph.graphql.document")] graphql_document: Option, + /// The name of the operation being executed. /// Examples: + /// /// * findBookById + /// /// Requirement level: Recommended #[serde(rename = "subgraph.graphql.operation.name")] graphql_operation_name: Option, + /// The type of the operation being executed. /// Examples: + /// /// * query /// * subscription /// * mutation + /// /// Requirement level: Recommended #[serde(rename = "subgraph.graphql.operation.type")] graphql_operation_type: Option, @@ -237,82 +256,102 @@ impl DefaultForLevel for SubgraphAttributes { pub(crate) struct HttpCommonAttributes { /// Describes a class of error the operation ended with. /// Examples: + /// /// * timeout /// * name_resolution_error /// * 500 + /// /// Requirement level: Conditionally Required: If request has ended with an error. #[serde(rename = "error.type")] pub(crate) error_type: Option, /// The size of the request payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the Content-Length header. For requests using transport encoding, this should be the compressed size. /// Examples: + /// /// * 3495 + /// /// Requirement level: Recommended #[serde(rename = "http.request.body.size")] pub(crate) http_request_body_size: Option, /// HTTP request method. /// Examples: + /// /// * GET /// * POST /// * HEAD + /// /// Requirement level: Required #[serde(rename = "http.request.method")] pub(crate) http_request_method: Option, /// Original HTTP method sent by the client in the request line. /// Examples: + /// /// * GeT /// * ACL /// * foo + /// /// Requirement level: Conditionally Required (If and only if it’s different than http.request.method) #[serde(rename = "http.request.method.original", skip)] pub(crate) http_request_method_original: Option, /// The size of the response payload body in bytes. This is the number of bytes transferred excluding headers and is often, but not always, present as the Content-Length header. For requests using transport encoding, this should be the compressed size. /// Examples: + /// /// * 3495 + /// /// Requirement level: Recommended #[serde(rename = "http.response.body.size")] pub(crate) http_response_body_size: Option, /// HTTP response status code. /// Examples: + /// /// * 200 + /// /// Requirement level: Conditionally Required: If and only if one was received/sent. #[serde(rename = "http.response.status_code")] pub(crate) http_response_status_code: Option, /// OSI application layer or non-OSI equivalent. /// Examples: + /// /// * http /// * spdy + /// /// Requirement level: Recommended: if not default (http). #[serde(rename = "network.protocol.name")] pub(crate) network_protocol_name: Option, /// Version of the protocol specified in network.protocol.name. 
/// Examples: + /// /// * 1.0 /// * 1.1 /// * 2 /// * 3 + /// /// Requirement level: Recommended #[serde(rename = "network.protocol.version")] pub(crate) network_protocol_version: Option, /// OSI transport layer. /// Examples: + /// /// * tcp /// * udp + /// /// Requirement level: Conditionally Required #[serde(rename = "network.transport")] pub(crate) network_transport: Option, /// OSI network layer or non-OSI equivalent. /// Examples: + /// /// * ipv4 /// * ipv6 + /// /// Requirement level: Recommended #[serde(rename = "network.type")] pub(crate) network_type: Option, @@ -373,89 +412,115 @@ impl DefaultForLevel for HttpCommonAttributes { pub(crate) struct HttpServerAttributes { /// Client address - domain name if available without reverse DNS lookup, otherwise IP address or Unix domain socket name. /// Examples: + /// /// * 83.164.160.102 + /// /// Requirement level: Recommended #[serde(rename = "client.address", skip)] pub(crate) client_address: Option, /// The port of the original client behind all proxies, if known (e.g. from Forwarded or a similar header). Otherwise, the immediate client peer port. /// Examples: + /// /// * 65123 + /// /// Requirement level: Recommended #[serde(rename = "client.port", skip)] pub(crate) client_port: Option, /// The matched route (path template in the format used by the respective server framework). /// Examples: + /// /// * /graphql + /// /// Requirement level: Conditionally Required: If and only if it’s available #[serde(rename = "http.route")] pub(crate) http_route: Option, /// Local socket address. Useful in case of a multi-IP host. /// Examples: + /// /// * 10.1.2.80 /// * /tmp/my.sock + /// /// Requirement level: Opt-In #[serde(rename = "network.local.address")] pub(crate) network_local_address: Option, /// Local socket port. Useful in case of a multi-port host. /// Examples: + /// /// * 65123 + /// /// Requirement level: Opt-In #[serde(rename = "network.local.port")] pub(crate) network_local_port: Option, /// Peer address of the network connection - IP address or Unix domain socket name. /// Examples: + /// /// * 10.1.2.80 /// * /tmp/my.sock + /// /// Requirement level: Recommended #[serde(rename = "network.peer.address")] pub(crate) network_peer_address: Option, /// Peer port number of the network connection. /// Examples: + /// /// * 65123 + /// /// Requirement level: Recommended #[serde(rename = "network.peer.port")] pub(crate) network_peer_port: Option, /// Name of the local HTTP server that received the request. /// Examples: + /// /// * example.com /// * 10.1.2.80 /// * /tmp/my.sock + /// /// Requirement level: Recommended #[serde(rename = "server.address")] pub(crate) server_address: Option, /// Port of the local HTTP server that received the request. /// Examples: + /// /// * 80 /// * 8080 /// * 443 + /// /// Requirement level: Recommended #[serde(rename = "server.port")] pub(crate) server_port: Option, /// The URI path component /// Examples: + /// /// * /search + /// /// Requirement level: Required #[serde(rename = "url.path")] pub(crate) url_path: Option, /// The URI query component /// Examples: + /// /// * q=OpenTelemetry + /// /// Requirement level: Conditionally Required: If and only if one was received/sent. #[serde(rename = "url.query")] pub(crate) url_query: Option, /// The URI scheme component identifying the used protocol. /// Examples: + /// /// * http /// * https + /// /// Requirement level: Required #[serde(rename = "url.scheme")] pub(crate) url_scheme: Option, /// Value of the HTTP User-Agent header sent by the client. 
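The repeated doc-comment hunks in attributes.rs all apply the same formatting fix: blank `///` lines isolate the `Examples` bullets from the surrounding text, so the generated docs (rustdoc and the configuration schema both render these comments as Markdown) show the examples as a list block clearly separated from the description. The resulting shape, on a hypothetical field:

```rust
pub(crate) struct Example {
    /// HTTP response status code.
    /// Examples:
    ///
    /// * 200
    ///
    /// Requirement level: Conditionally Required: If and only if one was received/sent.
    pub(crate) http_response_status_code: Option<bool>,
}
```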
/// Examples: + /// * CERN-LineMode/2.15 /// * libwww/2.17b3 + /// Requirement level: Recommended #[serde(rename = "user_agent.original")] pub(crate) user_agent_original: Option, diff --git a/apollo-router/src/plugins/telemetry/config_new/cache/attributes.rs b/apollo-router/src/plugins/telemetry/config_new/cache/attributes.rs new file mode 100644 index 0000000000..9d072cfa3c --- /dev/null +++ b/apollo-router/src/plugins/telemetry/config_new/cache/attributes.rs @@ -0,0 +1,53 @@ +use opentelemetry_api::KeyValue; +use schemars::JsonSchema; +use serde::Deserialize; +use tower::BoxError; + +use crate::plugins::telemetry::config_new::DefaultAttributeRequirementLevel; +use crate::plugins::telemetry::config_new::DefaultForLevel; +use crate::plugins::telemetry::config_new::Selectors; +use crate::plugins::telemetry::otlp::TelemetryDataKind; +use crate::services::subgraph; +use crate::Context; + +#[derive(Deserialize, JsonSchema, Clone, Default, Debug, PartialEq)] +#[serde(deny_unknown_fields, default)] +pub(crate) struct CacheAttributes { + /// Entity type + #[serde(rename = "entity.type")] + pub(crate) entity_type: Option, +} + +impl DefaultForLevel for CacheAttributes { + fn defaults_for_level( + &mut self, + requirement_level: DefaultAttributeRequirementLevel, + kind: TelemetryDataKind, + ) { + if let TelemetryDataKind::Metrics = kind { + if let DefaultAttributeRequirementLevel::Required = requirement_level { + self.entity_type.get_or_insert(false); + } + } + } +} + +// Nothing to do here: this is deliberately a no-op, because entity_type comes from the CacheControl data we put in the context, and a single request can involve several entity types +// (and therefore several metrics to generate), so the attributes can't be computed at this level +impl Selectors for CacheAttributes { + type Request = subgraph::Request; + type Response = subgraph::Response; + type EventResponse = (); + + fn on_request(&self, _request: &Self::Request) -> Vec { + Vec::default() + } + + fn on_response(&self, _response: &Self::Response) -> Vec { + Vec::default() + } + + fn on_error(&self, _error: &BoxError, _ctx: &Context) -> Vec { + Vec::default() + } +} diff --git a/apollo-router/src/plugins/telemetry/config_new/cache/mod.rs b/apollo-router/src/plugins/telemetry/config_new/cache/mod.rs new file mode 100644 index 0000000000..f1d01f2393 --- /dev/null +++ b/apollo-router/src/plugins/telemetry/config_new/cache/mod.rs @@ -0,0 +1,201 @@ +use std::sync::Arc; + +use attributes::CacheAttributes; +use opentelemetry::metrics::MeterProvider; +use opentelemetry::metrics::Unit; +use opentelemetry::Key; +use opentelemetry::KeyValue; +use parking_lot::Mutex; +use schemars::JsonSchema; +use serde::Deserialize; +use tower::BoxError; + +use super::instruments::CustomCounter; +use super::instruments::CustomCounterInner; +use super::instruments::Increment; +use super::instruments::InstrumentsConfig; +use super::instruments::METER_NAME; +use super::selectors::CacheKind; +use super::selectors::SubgraphSelector; +use crate::metrics; +use crate::plugins::cache::entity::CacheHitMiss; +use crate::plugins::cache::entity::CacheSubgraph; +use crate::plugins::cache::metrics::CacheMetricContextKey; +use crate::plugins::telemetry::config::AttributeValue; +use crate::plugins::telemetry::config_new::attributes::DefaultAttributeRequirementLevel; +use crate::plugins::telemetry::config_new::conditions::Condition; +use crate::plugins::telemetry::config_new::extendable::Extendable; +use crate::plugins::telemetry::config_new::instruments::DefaultedStandardInstrument; +use
crate::plugins::telemetry::config_new::instruments::Instrumented; +use crate::plugins::telemetry::config_new::DefaultForLevel; +use crate::plugins::telemetry::otlp::TelemetryDataKind; +use crate::services::subgraph; + +pub(crate) mod attributes; + +static CACHE_METRIC: &str = "apollo.router.operations.entity.cache"; +const ENTITY_TYPE: Key = Key::from_static_str("entity.type"); +const CACHE_HIT: Key = Key::from_static_str("cache.hit"); + +#[derive(Deserialize, JsonSchema, Clone, Default, Debug)] +#[serde(deny_unknown_fields, default)] +pub(crate) struct CacheInstrumentsConfig { + /// A counter of times we have a cache hit or cache miss + #[serde(rename = "apollo.router.operations.entity.cache")] + pub(crate) cache: DefaultedStandardInstrument>, +} + +impl DefaultForLevel for CacheInstrumentsConfig { + fn defaults_for_level( + &mut self, + requirement_level: DefaultAttributeRequirementLevel, + kind: TelemetryDataKind, + ) { + if self.cache.is_enabled() { + self.cache.defaults_for_level(requirement_level, kind); + } + } +} + +pub(crate) struct CacheInstruments { + pub(crate) cache_hit: Option< + CustomCounter, + >, +} + +impl From<&InstrumentsConfig> for CacheInstruments { + fn from(value: &InstrumentsConfig) -> Self { + let meter = metrics::meter_provider().meter(METER_NAME); + CacheInstruments { + cache_hit: value.cache.attributes.cache.is_enabled().then(|| { + let mut nb_attributes = 0; + let selectors = match &value.cache.attributes.cache { + DefaultedStandardInstrument::Bool(_) | DefaultedStandardInstrument::Unset => { + None + } + DefaultedStandardInstrument::Extendable { attributes } => { + nb_attributes = attributes.custom.len(); + Some(attributes.clone()) + } + }; + CustomCounter { + inner: Mutex::new(CustomCounterInner { + increment: Increment::Custom(None), + condition: Condition::True, + counter: Some( + meter + .f64_counter(CACHE_METRIC) + .with_unit(Unit::new("ops")) + .with_description( + "Entity cache hit/miss operations at the subgraph level", + ) + .init(), + ), + attributes: Vec::with_capacity(nb_attributes), + selector: Some(Arc::new(SubgraphSelector::Cache { + cache: CacheKind::Hit, + entity_type: None, + })), + selectors, + incremented: false, + }), + } + }), + } + } +} + +impl Instrumented for CacheInstruments { + type Request = subgraph::Request; + type Response = subgraph::Response; + type EventResponse = (); + + fn on_request(&self, request: &Self::Request) { + if let Some(cache_hit) = &self.cache_hit { + cache_hit.on_request(request); + } + } + + fn on_response(&self, response: &Self::Response) { + let subgraph_name = match &response.subgraph_name { + Some(subgraph_name) => subgraph_name, + None => { + return; + } + }; + let cache_info: CacheSubgraph = match response + .context + .get(CacheMetricContextKey::new(subgraph_name.clone())) + .ok() + .flatten() + { + Some(cache_info) => cache_info, + None => { + return; + } + }; + + if let Some(cache_hit) = &self.cache_hit { + for (entity_type, CacheHitMiss { hit, miss }) in &cache_info.0 { + // Cache hit + { + let cloned_cache_hit = cache_hit.clone(); + { + let mut inner_cache_hit = cloned_cache_hit.inner.lock(); + inner_cache_hit.selector = Some(Arc::new(SubgraphSelector::StaticField { + r#static: AttributeValue::I64(*hit as i64), + })); + if inner_cache_hit + .selectors + .as_ref() + .map(|s| s.attributes.entity_type == Some(true)) + .unwrap_or_default() + { + inner_cache_hit.attributes.push(KeyValue::new( + ENTITY_TYPE, + opentelemetry::Value::String(entity_type.to_string().into()), + )); + } + inner_cache_hit + 
.attributes + .push(KeyValue::new(CACHE_HIT, opentelemetry::Value::Bool(true))); + } + cloned_cache_hit.on_response(response); + } + // Cache miss + { + let cloned_cache_miss = cache_hit.clone(); + { + let mut inner_cache_miss = cloned_cache_miss.inner.lock(); + inner_cache_miss.selector = Some(Arc::new(SubgraphSelector::StaticField { + r#static: AttributeValue::I64(*miss as i64), + })); + if inner_cache_miss + .selectors + .as_ref() + .map(|s| s.attributes.entity_type == Some(true)) + .unwrap_or_default() + { + inner_cache_miss.attributes.push(KeyValue::new( + ENTITY_TYPE, + opentelemetry::Value::String(entity_type.to_string().into()), + )); + } + inner_cache_miss + .attributes + .push(KeyValue::new(CACHE_HIT, opentelemetry::Value::Bool(false))); + } + cloned_cache_miss.on_response(response); + } + } + // Make sure it won't be incremented when dropped + let _ = cache_hit.inner.lock().counter.take(); + } + } + + fn on_error(&self, error: &BoxError, ctx: &crate::Context) { + if let Some(field_length) = &self.cache_hit { + field_length.on_error(error, ctx); + } + } +} diff --git a/apollo-router/src/plugins/telemetry/config_new/fixtures/schema.json b/apollo-router/src/plugins/telemetry/config_new/fixtures/schema.json index 17fad339d5..efa2b5fa12 100644 --- a/apollo-router/src/plugins/telemetry/config_new/fixtures/schema.json +++ b/apollo-router/src/plugins/telemetry/config_new/fixtures/schema.json @@ -373,6 +373,12 @@ "type": "integer", "format": "uint16", "minimum": 0.0 + }, + "subgraph_name": { + "type": [ + "string", + "null" + ] } }, "additionalProperties": false diff --git a/apollo-router/src/plugins/telemetry/config_new/fixtures/subgraph/caching/metrics.snap b/apollo-router/src/plugins/telemetry/config_new/fixtures/subgraph/caching/metrics.snap new file mode 100644 index 0000000000..8da724b4c2 --- /dev/null +++ b/apollo-router/src/plugins/telemetry/config_new/fixtures/subgraph/caching/metrics.snap @@ -0,0 +1,104 @@ +--- +source: apollo-router/src/plugins/telemetry/config_new/instruments.rs +description: Custom counter using cache selector +expression: "&metrics.all()" +info: + telemetry: + instrumentation: + instruments: + default_requirement_level: none + cache: + apollo.router.operations.entity.cache: + attributes: + entity.type: true + subgraph.name: + subgraph_name: true + supergraph.operation.name: + supergraph_operation_name: string + subgraph: + only_cache_hit_on_subgraph_products: + type: counter + value: + cache: hit + unit: hit + description: counter of subgraph request cache hit on subgraph products + condition: + all: + - eq: + - subgraph_name: true + - products + - gt: + - cache: hit + entity_type: all + - 0 + attributes: + subgraph.name: true + supergraph.operation.name: + supergraph_operation_name: string + only_cache_hit_on_subgraph_products_on_product_entity: + type: counter + value: + cache: hit + entity_type: Product + unit: hit + description: counter of subgraph request cache hit on subgraph products on product entity + condition: + all: + - eq: + - subgraph_name: true + - products + - gt: + - cache: hit + - 0 + attributes: + subgraph.name: true + supergraph.operation.name: + supergraph_operation_name: string +--- +- name: apollo.router.operations.entity.cache + description: Entity cache hit/miss operations at the subgraph level + unit: ops + data: + datapoints: + - value: 0 + attributes: + cache.hit: false + entity.type: Product + subgraph.name: products + supergraph.operation.name: Test + - value: 0 + attributes: + cache.hit: false + entity.type: Review + 
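The `on_response` logic shown above (and exercised by this snapshot) relies on `CustomCounter` now being `Clone`: the configured counter acts as a template, one clone per entity type records a data point with a static increment, and the template's inner counter is `take()`n so its `Drop` fallback cannot record a duplicate. A hypothetical miniature of that pattern (none of these names are from the patch):

```rust
use std::cell::Cell;
use std::rc::Rc;

#[derive(Clone)]
struct TemplateCounter {
    sink: Option<Rc<Cell<i64>>>, // stand-in for the OTel counter
    increment: i64,
    fired: bool,
}

impl TemplateCounter {
    fn record(&mut self) {
        if let Some(sink) = &self.sink {
            sink.set(sink.get() + self.increment);
        }
        self.fired = true;
    }
}

impl Drop for TemplateCounter {
    fn drop(&mut self) {
        // Mirrors CustomCounter recording on drop if it never fired.
        if !self.fired {
            self.record();
        }
    }
}

fn main() {
    let sink = Rc::new(Cell::new(0));
    let mut template = TemplateCounter { sink: Some(sink.clone()), increment: 0, fired: false };
    for (_entity_type, hits) in [("Product", 3), ("Review", 5)] {
        let mut clone = template.clone();
        clone.increment = hits; // like overriding the selector with a static value
        clone.record();         // one data point per entity type
    }
    let _ = template.sink.take(); // like `counter.take()`: the template's Drop is now a no-op
    assert_eq!(sink.get(), 8);
}
```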
subgraph.name: products + supergraph.operation.name: Test + - value: 3 + attributes: + cache.hit: true + entity.type: Product + subgraph.name: products + supergraph.operation.name: Test + - value: 5 + attributes: + cache.hit: true + entity.type: Review + subgraph.name: products + supergraph.operation.name: Test +- name: only_cache_hit_on_subgraph_products + description: counter of subgraph request cache hit on subgraph products + unit: hit + data: + datapoints: + - value: 8 + attributes: + subgraph.name: products + supergraph.operation.name: Test +- name: only_cache_hit_on_subgraph_products_on_product_entity + description: counter of subgraph request cache hit on subgraph products on product entity + unit: hit + data: + datapoints: + - value: 3 + attributes: + subgraph.name: products + supergraph.operation.name: Test diff --git a/apollo-router/src/plugins/telemetry/config_new/fixtures/subgraph/caching/router.yaml b/apollo-router/src/plugins/telemetry/config_new/fixtures/subgraph/caching/router.yaml new file mode 100644 index 0000000000..b6eee5fb07 --- /dev/null +++ b/apollo-router/src/plugins/telemetry/config_new/fixtures/subgraph/caching/router.yaml @@ -0,0 +1,51 @@ +telemetry: + instrumentation: + instruments: + default_requirement_level: none + cache: + apollo.router.operations.entity.cache: + attributes: + entity.type: true + subgraph.name: + subgraph_name: true + supergraph.operation.name: + supergraph_operation_name: string + subgraph: + only_cache_hit_on_subgraph_products: + type: counter + value: + cache: hit + unit: hit + description: counter of subgraph request cache hit on subgraph products + condition: + all: + - eq: + - subgraph_name: true + - products + - gt: + - cache: hit + entity_type: all + - 0 + attributes: + subgraph.name: true + supergraph.operation.name: + supergraph_operation_name: string + only_cache_hit_on_subgraph_products_on_product_entity: + type: counter + value: + cache: hit + entity_type: Product + unit: hit + description: counter of subgraph request cache hit on subgraph products on product entity + condition: + all: + - eq: + - subgraph_name: true + - products + - gt: + - cache: hit + - 0 + attributes: + subgraph.name: true + supergraph.operation.name: + supergraph_operation_name: string diff --git a/apollo-router/src/plugins/telemetry/config_new/fixtures/subgraph/caching/test.yaml b/apollo-router/src/plugins/telemetry/config_new/fixtures/subgraph/caching/test.yaml new file mode 100644 index 0000000000..369491b02e --- /dev/null +++ b/apollo-router/src/plugins/telemetry/config_new/fixtures/subgraph/caching/test.yaml @@ -0,0 +1,47 @@ +description: Custom counter using cache selector +events: + - - router_request: + uri: "/hello" + method: GET + headers: + custom_header: "custom_value" + body: | + hello + - context: + map: + "operation_name": "Test" + - supergraph_request: + uri: "/hello" + method: GET + headers: + custom_header: custom_value + query: "query Test { hello }" + - subgraph_request: + query: "query { hello }" + operation_name: "Products" + operation_kind: query + subgraph_name: "products" + headers: + custom_header: custom_value + - context: + map: + "apollo::router::entity_cache_info_subgraph_products": + Product: + hit: 3 + miss: 0 + Review: + hit: 5 + miss: 0 + - subgraph_response: + status: 200 + subgraph_name: "products" + data: + hello: "world" + - supergraph_response: + status: 200 + data: + hello: "world" + - router_response: + body: | + hello + status: 200 \ No newline at end of file diff --git 
a/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs b/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs index 653e906384..1178f0a102 100644 --- a/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs +++ b/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs @@ -452,7 +452,7 @@ pub(crate) mod test { } fn context(schema_str: &str, query_str: &str) -> Context { - let schema = crate::spec::Schema::parse_test(schema_str, &Default::default()).unwrap(); + let schema = crate::spec::Schema::parse(schema_str, &Default::default()).unwrap(); let query = crate::spec::Query::parse_document(query_str, None, &schema, &Configuration::default()) .unwrap(); diff --git a/apollo-router/src/plugins/telemetry/config_new/instruments.rs b/apollo-router/src/plugins/telemetry/config_new/instruments.rs index 2d615d2950..d9b758f42d 100644 --- a/apollo-router/src/plugins/telemetry/config_new/instruments.rs +++ b/apollo-router/src/plugins/telemetry/config_new/instruments.rs @@ -21,6 +21,8 @@ use tokio::time::Instant; use tower::BoxError; use super::attributes::HttpServerAttributes; +use super::cache::attributes::CacheAttributes; +use super::cache::CacheInstrumentsConfig; use super::DefaultForLevel; use super::Selector; use crate::metrics; @@ -77,6 +79,11 @@ pub(crate) struct InstrumentsConfig { GraphQLInstrumentsConfig, Instrument, >, + /// Cache instruments + pub(crate) cache: Extendable< + CacheInstrumentsConfig, + Instrument, + >, } impl InstrumentsConfig { @@ -1194,7 +1201,7 @@ pub(crate) type SubgraphCustomInstruments = CustomInstruments< >; // ---------------- Counter ----------------------- -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) enum Increment { Unit, EventUnit, @@ -1224,6 +1231,18 @@ where pub(crate) inner: Mutex>, } +impl Clone for CustomCounter +where + A: Selectors + Default, + T: Selector + Debug + Clone, +{ + fn clone(&self) -> Self { + Self { + inner: Mutex::new(self.inner.lock().clone()), + } + } +} + pub(crate) struct CustomCounterInner where A: Selectors + Default, @@ -1239,6 +1258,24 @@ where pub(crate) incremented: bool, } +impl Clone for CustomCounterInner +where + A: Selectors + Default, + T: Selector + Debug + Clone, +{ + fn clone(&self) -> Self { + Self { + increment: self.increment.clone(), + selector: self.selector.clone(), + selectors: self.selectors.clone(), + counter: self.counter.clone(), + condition: self.condition.clone(), + attributes: self.attributes.clone(), + incremented: self.incremented, + } + } +} + impl Instrumented for CustomCounter where A: Selectors + Default, @@ -1959,6 +1996,7 @@ mod tests { use crate::http_ext::TryIntoHeaderValue; use crate::json_ext::Path; use crate::metrics::FutureMetricsExt; + use crate::plugins::telemetry::config_new::cache::CacheInstruments; use crate::plugins::telemetry::config_new::graphql::GraphQLInstruments; use crate::plugins::telemetry::config_new::instruments::Instrumented; use crate::plugins::telemetry::config_new::instruments::InstrumentsConfig; @@ -2046,6 +2084,7 @@ mod tests { }, SubgraphResponse { status: u16, + subgraph_name: Option, data: Option, #[serde(default)] #[schemars(with = "Option>")] @@ -2258,6 +2297,7 @@ mod tests { let mut router_instruments = None; let mut supergraph_instruments = None; let mut subgraph_instruments = None; + let mut cache_instruments: Option = None; let graphql_instruments: GraphQLInstruments = (&config).into(); let context = Context::new(); for event in request { @@ -2365,6 +2405,7 @@ mod tests { headers, } => { subgraph_instruments = 
Some(config.new_subgraph_instruments()); + cache_instruments = Some((&config).into()); let graphql_request = graphql::Request::fake_builder() .query(query) .and_operation_name(operation_name) @@ -2382,8 +2423,10 @@ .build(); subgraph_instruments.as_mut().unwrap().on_request(&request); + cache_instruments.as_mut().unwrap().on_request(&request); } Event::SubgraphResponse { + subgraph_name, status, data, extensions, @@ -2392,6 +2435,7 @@ } => { let response = subgraph::Response::fake2_builder() .context(context.clone()) + .and_subgraph_name(subgraph_name) .status_code(StatusCode::from_u16(status).expect("status")) .and_data(data) .errors(errors) @@ -2403,6 +2447,10 @@ .take() .expect("subgraph request must have been made first") .on_response(&response); + cache_instruments + .take() + .expect("subgraph request must have been made first") + .on_response(&response); } Event::SupergraphError { error } => { supergraph_instruments diff --git a/apollo-router/src/plugins/telemetry/config_new/mod.rs b/apollo-router/src/plugins/telemetry/config_new/mod.rs index b493e11eb4..2a3f46edcf 100644 --- a/apollo-router/src/plugins/telemetry/config_new/mod.rs +++ b/apollo-router/src/plugins/telemetry/config_new/mod.rs @@ -17,6 +17,7 @@ use crate::Context; pub(crate) mod attributes; pub(crate) mod conditions; +pub(crate) mod cache; mod conditional; pub(crate) mod cost; pub(crate) mod events; diff --git a/apollo-router/src/plugins/telemetry/config_new/selectors.rs b/apollo-router/src/plugins/telemetry/config_new/selectors.rs index b7e05124fe..3f8cde2faf 100644 --- a/apollo-router/src/plugins/telemetry/config_new/selectors.rs +++ b/apollo-router/src/plugins/telemetry/config_new/selectors.rs @@ -12,6 +12,8 @@ use crate::context::OPERATION_KIND; use crate::context::OPERATION_NAME; use crate::plugin::serde::deserialize_json_query; use crate::plugin::serde::deserialize_jsonpath; +use crate::plugins::cache::entity::CacheSubgraph; +use crate::plugins::cache::metrics::CacheMetricContextKey; use crate::plugins::demand_control::CostContext; use crate::plugins::telemetry::config::AttributeValue; use crate::plugins::telemetry::config_new::cost::CostValue; @@ -227,12 +229,6 @@ pub(crate) enum SupergraphValue { Custom(SupergraphSelector), } -#[derive(Deserialize, JsonSchema, Clone, Debug)] -#[serde(deny_unknown_fields, rename_all = "snake_case")] -pub(crate) enum EventHolder { - EventCustom(SupergraphSelector), -} - impl From<&SupergraphValue> for InstrumentValue { fn from(value: &SupergraphValue) -> Self { match value { @@ -437,6 +433,10 @@ pub(crate) enum SubgraphSelector { #[allow(dead_code)] subgraph_operation_kind: OperationKind, }, + SubgraphName { + /// The subgraph name + subgraph_name: bool, + }, SubgraphQuery { /// The graphql query to the subgraph. subgraph_query: SubgraphQuery, @@ -617,10 +617,42 @@ pub(crate) enum SubgraphSelector { r#static: AttributeValue, }, Error { - #[allow(dead_code)] /// Critical error if it happens error: ErrorRepr, }, + Cache { + /// Select whether to report cache hits or cache misses + cache: CacheKind, + /// Specify the entity type for which to report cache data (default: all) + entity_type: Option<EntityType>, + }, +} + +#[derive(Deserialize, JsonSchema, Clone, PartialEq, Debug)] +#[serde(rename_all = "snake_case", untagged)] +pub(crate) enum EntityType { + All(All), + Named(String), +} + +impl Default for EntityType { + fn default() -> Self { + Self::All(All::All) + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, JsonSchema, Default)] +#[serde(rename_all = "snake_case")] +pub(crate) enum All { + #[default] + All, +} + +#[derive(Deserialize, JsonSchema, Clone, PartialEq, Debug)] +#[serde(rename_all = "snake_case")] +pub(crate) enum CacheKind { + Hit, + Miss, } impl Selector for RouterSelector { @@ -1171,6 +1203,10 @@ impl Selector for SubgraphSelector { } .map(opentelemetry::Value::from) } + SubgraphSelector::SubgraphName { subgraph_name } if *subgraph_name => request + .subgraph_name + .clone() + .map(opentelemetry::Value::from), SubgraphSelector::SubgraphOperationKind { .. } => request .context .get::<_, String>(OPERATION_KIND) @@ -1372,6 +1408,43 @@ } if *on_graphql_error => Some((!response.response.body().errors.is_empty()).into()), SubgraphSelector::Static(val) => Some(val.clone().into()), SubgraphSelector::StaticField { r#static } => Some(r#static.clone().into()), + SubgraphSelector::Cache { cache, entity_type } => { + let cache_info: CacheSubgraph = response + .context + .get(CacheMetricContextKey::new(response.subgraph_name.clone()?)) + .ok() + .flatten()?; + + match entity_type { + Some(EntityType::All(All::All)) | None => Some( + (cache_info + .0 + .iter() + .fold(0usize, |acc, (_entity_type, cache_hit_miss)| match cache { + CacheKind::Hit => acc + cache_hit_miss.hit, + CacheKind::Miss => acc + cache_hit_miss.miss, + }) as i64) + .into(), + ), + Some(EntityType::Named(entity_type_name)) => { + let res = cache_info.0.iter().fold( + 0usize, + |acc, (entity_type, cache_hit_miss)| { + if entity_type == entity_type_name { + match cache { + CacheKind::Hit => acc + cache_hit_miss.hit, + CacheKind::Miss => acc + cache_hit_miss.miss, + } + } else { + acc + } + }, + ); + + (res != 0).then_some((res as i64).into()) + } + } + } // For request _ => None, } } @@ -1430,7 +1503,13 @@ mod test { use crate::context::OPERATION_KIND; use crate::context::OPERATION_NAME; use crate::graphql; + use crate::plugins::cache::entity::CacheHitMiss; + use crate::plugins::cache::entity::CacheSubgraph; + use crate::plugins::cache::metrics::CacheMetricContextKey; use crate::plugins::telemetry::config::AttributeValue; + use crate::plugins::telemetry::config_new::selectors::All; + use crate::plugins::telemetry::config_new::selectors::CacheKind; + use crate::plugins::telemetry::config_new::selectors::EntityType; use crate::plugins::telemetry::config_new::selectors::OperationKind; use crate::plugins::telemetry::config_new::selectors::OperationName; use crate::plugins::telemetry::config_new::selectors::Query; @@ -2460,6 +2539,23 @@ ); } + #[test] + fn subgraph_name() { + let selector = SubgraphSelector::SubgraphName { + subgraph_name: true, + }; + let context = crate::context::Context::new(); + assert_eq!( + selector.on_request( + &crate::services::SubgraphRequest::fake_builder() + .context(context) + .subgraph_name("test".to_string()) + .build(), + ), + Some("test".into()) + ); + } + #[test] fn supergraph_operation_name_string() { let selector = SupergraphSelector::OperationName { @@ -2490,6 +2586,82 @@ ); } + #[test] + fn subgraph_cache_hit_all_entities() { + let selector = SubgraphSelector::Cache { + cache:
CacheKind::Hit, + entity_type: Some(EntityType::All(All::All)), + }; + let context = crate::context::Context::new(); + assert_eq!( + selector.on_response( + &crate::services::SubgraphResponse::fake_builder() + .subgraph_name("test".to_string()) + .context(context.clone()) + .build(), + ), + None + ); + let cache_info = CacheSubgraph( + [ + ("Products".to_string(), CacheHitMiss { hit: 3, miss: 0 }), + ("Reviews".to_string(), CacheHitMiss { hit: 2, miss: 0 }), + ] + .into_iter() + .collect(), + ); + let _ = context + .insert(CacheMetricContextKey::new("test".to_string()), cache_info) + .unwrap(); + assert_eq!( + selector.on_response( + &crate::services::SubgraphResponse::fake_builder() + .subgraph_name("test".to_string()) + .context(context.clone()) + .build(), + ), + Some(opentelemetry::Value::I64(5)) + ); + } + + #[test] + fn subgraph_cache_hit_one_entity() { + let selector = SubgraphSelector::Cache { + cache: CacheKind::Hit, + entity_type: Some(EntityType::Named("Reviews".to_string())), + }; + let context = crate::context::Context::new(); + assert_eq!( + selector.on_response( + &crate::services::SubgraphResponse::fake_builder() + .subgraph_name("test".to_string()) + .context(context.clone()) + .build(), + ), + None + ); + let cache_info = CacheSubgraph( + [ + ("Products".to_string(), CacheHitMiss { hit: 3, miss: 0 }), + ("Reviews".to_string(), CacheHitMiss { hit: 2, miss: 0 }), + ] + .into_iter() + .collect(), + ); + let _ = context + .insert(CacheMetricContextKey::new("test".to_string()), cache_info) + .unwrap(); + assert_eq!( + selector.on_response( + &crate::services::SubgraphResponse::fake_builder() + .subgraph_name("test".to_string()) + .context(context.clone()) + .build(), + ), + Some(opentelemetry::Value::I64(2)) + ); + } + #[test] fn subgraph_supergraph_operation_name_string() { let selector = SubgraphSelector::SupergraphOperationName { diff --git a/apollo-router/src/plugins/telemetry/consts.rs b/apollo-router/src/plugins/telemetry/consts.rs index e1d84c937b..c82d7b202b 100644 --- a/apollo-router/src/plugins/telemetry/consts.rs +++ b/apollo-router/src/plugins/telemetry/consts.rs @@ -18,8 +18,9 @@ pub(crate) const REQUEST_SPAN_NAME: &str = "request"; pub(crate) const QUERY_PLANNING_SPAN_NAME: &str = "query_planning"; pub(crate) const HTTP_REQUEST_SPAN_NAME: &str = "http_request"; pub(crate) const SUBGRAPH_REQUEST_SPAN_NAME: &str = "subgraph_request"; +pub(crate) const QUERY_PARSING_SPAN_NAME: &str = "parse_query"; -pub(crate) const BUILT_IN_SPAN_NAMES: [&str; 8] = [ +pub(crate) const BUILT_IN_SPAN_NAMES: [&str; 9] = [ REQUEST_SPAN_NAME, ROUTER_SPAN_NAME, SUPERGRAPH_SPAN_NAME, @@ -28,4 +29,5 @@ pub(crate) const BUILT_IN_SPAN_NAMES: [&str; 8] = [ HTTP_REQUEST_SPAN_NAME, QUERY_PLANNING_SPAN_NAME, EXECUTION_SPAN_NAME, + QUERY_PARSING_SPAN_NAME, ]; diff --git a/apollo-router/src/plugins/telemetry/dynamic_attribute.rs b/apollo-router/src/plugins/telemetry/dynamic_attribute.rs index 9d04a022ba..0b4af0964d 100644 --- a/apollo-router/src/plugins/telemetry/dynamic_attribute.rs +++ b/apollo-router/src/plugins/telemetry/dynamic_attribute.rs @@ -216,10 +216,6 @@ pub(crate) struct EventAttributes { } impl EventAttributes { - pub(crate) fn insert(&mut self, kv: KeyValue) { - self.attributes.push(kv); - } - pub(crate) fn extend(&mut self, other: impl IntoIterator) { self.attributes.extend(other); } @@ -231,63 +227,11 @@ impl EventAttributes { /// To add dynamic attributes for spans pub(crate) trait EventDynAttribute { - /// Always use before sending the event - fn set_event_dyn_attribute(&self, 
key: Key, value: opentelemetry::Value); /// Always use before sending the event fn set_event_dyn_attributes(&self, attributes: impl IntoIterator); } impl EventDynAttribute for ::tracing::Span { - fn set_event_dyn_attribute(&self, key: Key, value: opentelemetry::Value) { - self.with_subscriber(move |(id, dispatch)| { - if let Some(reg) = dispatch.downcast_ref::() { - match reg.span(id) { - None => eprintln!("no spanref, this is a bug"), - Some(s) => { - if key.as_str().starts_with(APOLLO_PRIVATE_PREFIX) { - return; - } - if s.is_sampled() { - let mut extensions = s.extensions_mut(); - match extensions.get_mut::() { - Some(otel_data) => match &mut otel_data.event_attributes { - Some(attributes) => { - attributes.insert(key, value); - } - None => { - let mut order_map = OrderMap::new(); - order_map.insert(key, value); - otel_data.event_attributes = Some(order_map); - } - }, - None => { - // Can't use ::tracing::error! because it could create deadlock on extensions - eprintln!("no OtelData, this is a bug"); - } - } - } else { - if key.as_str().starts_with(APOLLO_PRIVATE_PREFIX) { - return; - } - let mut extensions = s.extensions_mut(); - match extensions.get_mut::() { - Some(attributes) => { - attributes.insert(KeyValue::new(key, value)); - } - None => { - // Can't use ::tracing::error! because it could create deadlock on extensions - eprintln!("no EventAttributes, this is a bug"); - } - } - } - } - }; - } else { - ::tracing::error!("no Registry, this is a bug"); - } - }); - } - fn set_event_dyn_attributes(&self, attributes: impl IntoIterator) { let mut attributes = attributes.into_iter().peekable(); if attributes.peek().is_none() { diff --git a/apollo-router/src/plugins/telemetry/fmt_layer.rs b/apollo-router/src/plugins/telemetry/fmt_layer.rs index 4c1c3b36a4..cf5eb49c8f 100644 --- a/apollo-router/src/plugins/telemetry/fmt_layer.rs +++ b/apollo-router/src/plugins/telemetry/fmt_layer.rs @@ -162,7 +162,7 @@ where fn on_event(&self, event: &Event<'_>, ctx: Context<'_, S>) { thread_local! 
{ - static BUF: RefCell = RefCell::new(String::new()); + static BUF: RefCell = const { RefCell::new(String::new()) }; } BUF.with(|buf| { diff --git a/apollo-router/src/plugins/telemetry/metrics/apollo/studio.rs b/apollo-router/src/plugins/telemetry/metrics/apollo/studio.rs index 9e13633932..6e57f94501 100644 --- a/apollo-router/src/plugins/telemetry/metrics/apollo/studio.rs +++ b/apollo-router/src/plugins/telemetry/metrics/apollo/studio.rs @@ -31,12 +31,6 @@ pub(crate) struct SingleStats { pub(crate) referenced_fields_by_type: HashMap, } -#[derive(Default, Debug, Serialize)] -pub(crate) struct Stats { - pub(crate) stats_with_context: ContextualizedStats, - pub(crate) referenced_fields_by_type: HashMap, -} - #[derive(Default, Debug, Serialize)] pub(crate) struct SingleContextualizedStats { pub(crate) context: StatsContext, diff --git a/apollo-router/src/plugins/telemetry/metrics/mod.rs b/apollo-router/src/plugins/telemetry/metrics/mod.rs index 290a409e67..d6e6797e2e 100644 --- a/apollo-router/src/plugins/telemetry/metrics/mod.rs +++ b/apollo-router/src/plugins/telemetry/metrics/mod.rs @@ -1,5 +1,4 @@ use std::collections::HashMap; -use std::time::Duration; use ::serde::Deserialize; use access_json::JSONQuery; @@ -10,9 +9,7 @@ use multimap::MultiMap; use opentelemetry::sdk::metrics::reader::AggregationSelector; use opentelemetry::sdk::metrics::Aggregation; use opentelemetry::sdk::metrics::InstrumentKind; -use opentelemetry::sdk::resource::ResourceDetector; use opentelemetry::sdk::Resource; -use opentelemetry::KeyValue; use regex::Regex; use schemars::JsonSchema; use serde::Serialize; @@ -439,40 +436,6 @@ pub(crate) struct MetricsBuilder { pub(crate) resource: Resource, } -struct ConfigResourceDetector(MetricsCommon); - -impl ResourceDetector for ConfigResourceDetector { - fn detect(&self, _timeout: Duration) -> Resource { - let mut resource = Resource::new( - vec![ - self.0.service_name.clone().map(|service_name| { - KeyValue::new( - opentelemetry_semantic_conventions::resource::SERVICE_NAME, - service_name, - ) - }), - self.0.service_namespace.clone().map(|service_namespace| { - KeyValue::new( - opentelemetry_semantic_conventions::resource::SERVICE_NAMESPACE, - service_namespace, - ) - }), - ] - .into_iter() - .flatten() - .collect::>(), - ); - resource = resource.merge(&mut Resource::new( - self.0 - .resource - .clone() - .into_iter() - .map(|(k, v)| KeyValue::new(k, v)), - )); - resource - } -} - impl MetricsBuilder { pub(crate) fn new(config: &Conf) -> Self { let resource = config.exporters.metrics.common.to_resource(); diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index 14dd43407c..af5f78a0de 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -9,6 +9,7 @@ use std::time::Instant; use ::tracing::info_span; use ::tracing::Span; use axum::headers::HeaderName; +use config_new::cache::CacheInstruments; use config_new::Selectors; use dashmap::DashMap; use futures::future::ready; @@ -210,17 +211,6 @@ struct TelemetryActivation { is_active: bool, } -#[derive(Debug)] -struct ReportingError; - -impl fmt::Display for ReportingError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "ReportingError") - } -} - -impl std::error::Error for ReportingError {} - fn setup_tracing( mut builder: Builder, configurator: &T, @@ -725,18 +715,30 @@ impl Plugin for Telemetry { let custom_events = config.instrumentation.events.new_subgraph_events(); 
custom_events.on_request(sub_request); + let custom_cache_instruments: CacheInstruments = + (&config.instrumentation.instruments).into(); + custom_cache_instruments.on_request(sub_request); + ( sub_request.context.clone(), custom_instruments, custom_attributes, custom_events, + custom_cache_instruments, ) }, - move |(context, custom_instruments, custom_attributes, custom_events): ( + move |( + context, + custom_instruments, + custom_attributes, + custom_events, + custom_cache_instruments, + ): ( Context, SubgraphInstruments, Vec<KeyValue>, SubgraphEvents, + CacheInstruments, ), f: BoxFuture<'static, Result<SubgraphResponse, BoxError>>| { let subgraph_attribute = subgraph_attribute.clone(); @@ -763,6 +765,7 @@ impl Plugin for Telemetry { .attributes .on_response(resp), ); + custom_cache_instruments.on_response(resp); custom_instruments.on_response(resp); custom_events.on_response(resp); } @@ -776,6 +779,7 @@ impl Plugin for Telemetry { .attributes .on_error(err, &context), ); + custom_cache_instruments.on_error(err, &context); custom_instruments.on_error(err, &context); custom_events.on_error(err, &context); } @@ -866,7 +870,7 @@ impl Telemetry { propagators.push(Box::<opentelemetry_jaeger::Propagator>::default()); } if propagation.datadog || tracing.datadog.enabled() { - propagators.push(Box::<opentelemetry_datadog::DatadogPropagator>::default()); + propagators.push(Box::<datadog_exporter::DatadogPropagator>::default()); } if propagation.aws_xray { propagators.push(Box::<opentelemetry_aws::trace::XrayPropagator>::default()); } @@ -2147,7 +2151,6 @@ mod tests { assert_eq!(resp.status(), StatusCode::OK); let body = get_body_bytes(resp.body_mut()).await.unwrap(); String::from_utf8_lossy(&body) - .to_string() .split('\n') .filter(|l| l.contains("bucket") && !l.contains("apollo_router_span_count")) .sorted() diff --git a/apollo-router/src/plugins/telemetry/otel/layer.rs b/apollo-router/src/plugins/telemetry/otel/layer.rs index 51922f4833..495d22f8ec 100644 --- a/apollo-router/src/plugins/telemetry/otel/layer.rs +++ b/apollo-router/src/plugins/telemetry/otel/layer.rs @@ -42,6 +42,7 @@ use crate::plugins::telemetry::consts::OTEL_STATUS_CODE; use crate::plugins::telemetry::consts::OTEL_STATUS_MESSAGE; use crate::plugins::telemetry::consts::REQUEST_SPAN_NAME; use crate::plugins::telemetry::consts::ROUTER_SPAN_NAME; +use crate::plugins::telemetry::formatters::filter_metric_events; use crate::plugins::telemetry::reload::IsSampled; use crate::plugins::telemetry::reload::SampledSpan; use crate::plugins::telemetry::reload::SPAN_SAMPLING_RATE; @@ -960,6 +961,10 @@ where /// [`ERROR`]: tracing::Level::ERROR /// [`Error`]: opentelemetry::trace::StatusCode::Error fn on_event(&self, event: &Event<'_>, ctx: Context<'_, S>) { + // Don't include deprecated metric events + if !filter_metric_events(event) { + return; + } // Ignore events that are not in the context of a span if let Some(span) = ctx.lookup_current() { let mut extensions = span.extensions_mut(); diff --git a/apollo-router/src/plugins/telemetry/otel/span_ext.rs b/apollo-router/src/plugins/telemetry/otel/span_ext.rs index 4474a8232c..5d1c723b66 100644 --- a/apollo-router/src/plugins/telemetry/otel/span_ext.rs +++ b/apollo-router/src/plugins/telemetry/otel/span_ext.rs @@ -1,8 +1,6 @@ use opentelemetry::trace::SpanContext; use opentelemetry::Context; -use opentelemetry::Key; use opentelemetry::KeyValue; -use opentelemetry::Value; use super::layer::WithContext; /// Utility functions to allow tracing [`Span`]s to accept and return /// /// [OpenTelemetry]: https://opentelemetry.io /// [`Context`]: opentelemetry::Context pub(crate) trait OpenTelemetrySpanExt { - /// Associates `self` with a given
OpenTelemetry trace, using the provided - /// parent [`Context`]. - /// - /// [`Context`]: opentelemetry::Context - /// - /// # Examples - /// - /// ```rust - /// use opentelemetry::{propagation::TextMapPropagator, trace::TraceContextExt}; - /// use opentelemetry_sdk::propagation::TraceContextPropagator; - /// use tracing_opentelemetry::OpenTelemetrySpanExt; - /// use std::collections::HashMap; - /// use tracing::Span; - /// - /// // Example carrier, could be a framework header map that impls otel's `Extractor`. - /// let mut carrier = HashMap::new(); - /// - /// // Propagator can be swapped with b3 propagator, jaeger propagator, etc. - /// let propagator = TraceContextPropagator::new(); - /// - /// // Extract otel parent context via the chosen propagator - /// let parent_context = propagator.extract(&carrier); - /// - /// // Generate a tracing span as usual - /// let app_root = tracing::span!(tracing::Level::INFO, "app_start"); - /// - /// // Assign parent trace from external context - /// app_root.set_parent(parent_context.clone()); - /// - /// // Or if the current span has been created elsewhere: - /// Span::current().set_parent(parent_context); - /// ``` - fn set_parent(&self, cx: Context); - /// Associates `self` with a given OpenTelemetry trace, using the provided /// followed span [`SpanContext`]. /// @@ -118,41 +82,9 @@ pub(crate) trait OpenTelemetrySpanExt { /// make_request(Span::current().context()) /// ``` fn context(&self) -> Context; - - /// Sets an OpenTelemetry attribute directly for this span, bypassing `tracing`. - /// If fields set here conflict with `tracing` fields, the `tracing` fields will supersede fields set with `set_attribute`. - /// This allows for more than 32 fields. - /// - /// # Examples - /// - /// ```rust - /// use opentelemetry::Context; - /// use tracing_opentelemetry::OpenTelemetrySpanExt; - /// use tracing::Span; - /// - /// // Generate a tracing span as usual - /// let app_root = tracing::span!(tracing::Level::INFO, "app_start"); - /// - /// // Set the `http.request.header.x_forwarded_for` attribute to `example`. 
- /// app_root.set_attribute("http.request.header.x_forwarded_for", "example"); - /// ``` - fn set_attribute(&self, key: impl Into, value: impl Into); } impl OpenTelemetrySpanExt for tracing::Span { - fn set_parent(&self, cx: Context) { - let mut cx = Some(cx); - self.with_subscriber(move |(id, subscriber)| { - if let Some(get_context) = subscriber.downcast_ref::() { - get_context.with_context(subscriber, id, move |data, _tracer| { - if let Some(cx) = cx.take() { - data.parent_cx = cx; - } - }); - } - }); - } - fn add_link(&self, cx: SpanContext) { self.add_link_with_attributes(cx, Vec::new()) } @@ -190,24 +122,4 @@ impl OpenTelemetrySpanExt for tracing::Span { cx.unwrap_or_default() } - - fn set_attribute(&self, key: impl Into, value: impl Into) { - self.with_subscriber(move |(id, subscriber)| { - if let Some(get_context) = subscriber.downcast_ref::() { - let mut key = Some(key.into()); - let mut value = Some(value.into()); - get_context.with_context(subscriber, id, move |builder, _| { - if builder.builder.attributes.is_none() { - builder.builder.attributes = Some(Default::default()); - } - builder - .builder - .attributes - .as_mut() - .unwrap() - .insert(key.take().unwrap(), value.take().unwrap()); - }) - } - }); - } } diff --git a/apollo-router/src/plugins/telemetry/otel/tracer.rs b/apollo-router/src/plugins/telemetry/otel/tracer.rs index 2ea6ecda4a..8a6c7402bc 100644 --- a/apollo-router/src/plugins/telemetry/otel/tracer.rs +++ b/apollo-router/src/plugins/telemetry/otel/tracer.rs @@ -34,13 +34,11 @@ use super::OtelData; /// authors of alternate OpenTelemetry SDK implementations if they wish to have /// `tracing` compatibility. /// -/// See the [`OpenTelemetrySpanExt::set_parent`] and -/// [`OpenTelemetrySpanExt::context`] methods for example usage. +/// See the [`OpenTelemetrySpanExt::context`] method for example usage. 
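///
/// A hedged sketch of that usage (not a doctest from this module; `context` is the
/// method on the crate-internal `OpenTelemetrySpanExt` trait above):
///
/// ```no_run
/// use tracing::Span;
/// // Fetch the OpenTelemetry Context associated with the current tracing span,
/// // e.g. to propagate it along with an outgoing request.
/// let otel_cx = Span::current().context();
/// ```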
/// /// [`Tracer`]: opentelemetry::trace::Tracer /// [`SpanBuilder`]: opentelemetry::trace::SpanBuilder /// [`PreSampledTracer::sampled_span_context`]: crate::PreSampledTracer::sampled_span_context -/// [`OpenTelemetrySpanExt::set_parent`]: crate::OpenTelemetrySpanExt::set_parent /// [`OpenTelemetrySpanExt::context`]: crate::OpenTelemetrySpanExt::context /// [`Context`]: opentelemetry::Context pub(crate) trait PreSampledTracer { diff --git a/apollo-router/src/plugins/telemetry/reload.rs b/apollo-router/src/plugins/telemetry/reload.rs index 80e1499e4c..2ca69191c9 100644 --- a/apollo-router/src/plugins/telemetry/reload.rs +++ b/apollo-router/src/plugins/telemetry/reload.rs @@ -13,7 +13,6 @@ use opentelemetry_api::trace::TraceFlags; use opentelemetry_api::trace::TraceState; use opentelemetry_api::Context; use tower::BoxError; -use tracing_subscriber::fmt::FormatFields; use tracing_subscriber::layer::Layer; use tracing_subscriber::layer::Layered; use tracing_subscriber::layer::SubscriberExt; @@ -192,15 +191,3 @@ where }) } } -/// prevents span fields from being formatted to a string when writing logs -pub(crate) struct NullFieldFormatter; - -impl<'writer> FormatFields<'writer> for NullFieldFormatter { - fn format_fields( - &self, - _writer: tracing_subscriber::fmt::format::Writer<'writer>, - _fields: R, - ) -> std::fmt::Result { - Ok(()) - } -} diff --git a/apollo-router/src/plugins/telemetry/resource.rs b/apollo-router/src/plugins/telemetry/resource.rs index 90e5b1608c..580bd25e38 100644 --- a/apollo-router/src/plugins/telemetry/resource.rs +++ b/apollo-router/src/plugins/telemetry/resource.rs @@ -11,6 +11,28 @@ use crate::plugins::telemetry::config::AttributeValue; const UNKNOWN_SERVICE: &str = "unknown_service"; const OTEL_SERVICE_NAME: &str = "OTEL_SERVICE_NAME"; +/// This resource detector fills out things like the default service version and executable name. +/// Users can always override them via config. 
+struct StaticResourceDetector; +impl ResourceDetector for StaticResourceDetector { + fn detect(&self, _timeout: Duration) -> Resource { + let mut config_resources = vec![]; + config_resources.push(KeyValue::new( + opentelemetry_semantic_conventions::resource::SERVICE_VERSION, + std::env!("CARGO_PKG_VERSION"), + )); + + // Some other basic resources + if let Some(executable_name) = executable_name() { + config_resources.push(KeyValue::new( + opentelemetry_semantic_conventions::resource::PROCESS_EXECUTABLE_NAME, + executable_name, + )); + } + Resource::new(config_resources) + } +} + struct EnvServiceNameDetector; // Used instead of SdkProvidedResourceDetector impl ResourceDetector for EnvServiceNameDetector { @@ -42,6 +64,7 @@ pub(crate) trait ConfigResource { let resource = Resource::from_detectors( Duration::from_secs(0), vec![ + Box::new(StaticResourceDetector), Box::new(config_resource_detector), Box::new(EnvResourceDetector::new()), Box::new(EnvServiceNameDetector), @@ -84,24 +107,10 @@ impl ResourceDetector for ConfigResourceDetector { let mut config_resources = vec![]; // For config resources last entry wins - - // Add any other resources from config for (key, value) in self.resources.iter() { config_resources.push(KeyValue::new(key.clone(), value.clone())); } - // Some other basic resources - config_resources.push(KeyValue::new( - opentelemetry_semantic_conventions::resource::SERVICE_VERSION, - std::env!("CARGO_PKG_VERSION"), - )); - if let Some(executable_name) = executable_name() { - config_resources.push(KeyValue::new( - opentelemetry_semantic_conventions::resource::PROCESS_EXECUTABLE_NAME, - executable_name, - )); - } - // Service namespace if let Some(service_namespace) = self.service_namespace.clone() { config_resources.push(KeyValue::new( diff --git a/apollo-router/src/plugins/telemetry/tracing/datadog.rs b/apollo-router/src/plugins/telemetry/tracing/datadog.rs index 345c54dae9..e360ee5cb3 100644 --- a/apollo-router/src/plugins/telemetry/tracing/datadog.rs +++ b/apollo-router/src/plugins/telemetry/tracing/datadog.rs @@ -1,13 +1,24 @@ //! Configuration for datadog tracing. 
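//!
//! The `span_metrics` option added below maps a span name to whether span stats are
//! collected for it. A sketch of the merge semantics (the override entry is a
//! hypothetical user setting; defaults come from `default_span_metrics` further down):
//!
//! ```no_run
//! let mut span_metrics = default_span_metrics(); // every built-in span -> true
//! span_metrics.extend([("subgraph_request".to_string(), false)]); // config entries win over defaults
//! ```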
-use std::collections::HashMap; +use std::fmt::Debug; +use std::fmt::Formatter; +use std::time::Duration; +use ahash::HashMap; +use ahash::HashMapExt; +use futures::future::BoxFuture; use http::Uri; use opentelemetry::sdk; use opentelemetry::sdk::trace::BatchSpanProcessor; use opentelemetry::sdk::trace::Builder; use opentelemetry::Value; +use opentelemetry_api::trace::SpanContext; +use opentelemetry_api::trace::SpanKind; use opentelemetry_api::Key; +use opentelemetry_api::KeyValue; +use opentelemetry_sdk::export::trace::ExportResult; +use opentelemetry_sdk::export::trace::SpanData; +use opentelemetry_sdk::export::trace::SpanExporter; use opentelemetry_semantic_conventions::resource::SERVICE_NAME; use opentelemetry_semantic_conventions::resource::SERVICE_VERSION; use schemars::JsonSchema; @@ -27,6 +38,9 @@ use crate::plugins::telemetry::consts::SUBGRAPH_REQUEST_SPAN_NAME; use crate::plugins::telemetry::consts::SUBGRAPH_SPAN_NAME; use crate::plugins::telemetry::consts::SUPERGRAPH_SPAN_NAME; use crate::plugins::telemetry::endpoint::UriEndpoint; +use crate::plugins::telemetry::tracing::datadog_exporter; +use crate::plugins::telemetry::tracing::datadog_exporter::propagator::TRACE_STATE_MEASURE; +use crate::plugins::telemetry::tracing::datadog_exporter::propagator::TRACE_STATE_TRUE_VALUE; use crate::plugins::telemetry::tracing::BatchProcessorConfig; use crate::plugins::telemetry::tracing::SpanProcessorExt; use crate::plugins::telemetry::tracing::TracingConfigurator; @@ -79,6 +93,19 @@ pub(crate) struct Config { /// http_request -> http.route #[serde(default)] resource_mapping: HashMap<String, String>, + + /// Which spans are eligible for span stats collection in the APM view. + /// Defaults to true for `request`, `router`, `query_parsing`, `supergraph`, `execution`, `query_planning`, `subgraph`, `subgraph_request` and `http_request`. + #[serde(default = "default_span_metrics")] + span_metrics: HashMap<String, bool>, +} + +fn default_span_metrics() -> HashMap<String, bool> { + let mut map = HashMap::with_capacity(BUILT_IN_SPAN_NAMES.len()); + for name in BUILT_IN_SPAN_NAMES { + map.insert(name.to_string(), true); + } + map } fn default_true() -> bool { @@ -111,7 +138,7 @@ impl TracingConfigurator for Config { let fixed_span_names = self.fixed_span_names; - let exporter = opentelemetry_datadog::new_pipeline() + let exporter = datadog_exporter::new_pipeline() .with( &self.endpoint.to_uri(&Uri::from_static(DEFAULT_ENDPOINT)), |builder, e| builder.with_agent_endpoint(e.to_string().trim_end_matches('/')), @@ -170,13 +197,98 @@ impl TracingConfigurator for Config { .expect("cargo version is set as a resource default;qed") .to_string(), ) + .with_http_client( + reqwest::Client::builder() + // https://github.com/open-telemetry/opentelemetry-rust-contrib/issues/7 + // Set the idle timeout to something low to prevent termination of connections. + .pool_idle_timeout(Duration::from_millis(1)) + .build()?, + ) .with_trace_config(common) .build_exporter()?; + + // Use the default span metrics and override with the ones from the config + let mut span_metrics = default_span_metrics(); + span_metrics.extend(self.span_metrics.clone()); + Ok(builder.with_span_processor( - BatchSpanProcessor::builder(exporter, opentelemetry::runtime::Tokio) - .with_batch_config(self.batch_processor.clone().into()) - .build() - .filtered(), + BatchSpanProcessor::builder( + ExporterWrapper { + delegate: exporter, + span_metrics, + }, + opentelemetry::runtime::Tokio, + ) + .with_batch_config(self.batch_processor.clone().into()) + .build() + .filtered(), )) } } + +struct ExporterWrapper { + delegate: datadog_exporter::DatadogExporter, + span_metrics: HashMap<String, bool>, +} + +impl Debug for ExporterWrapper { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + self.delegate.fmt(f) + } +} + +impl SpanExporter for ExporterWrapper { + fn export(&mut self, mut batch: Vec<SpanData>) -> BoxFuture<'static, ExportResult> { + // Here we do some special processing of the spans before passing them to the delegate. + // In particular we set the `span.kind` attribute from the otel span kind, and override the trace measure status if we need to. + for span in &mut batch { + // If the span metrics are enabled for this span, set the trace state to measuring. + // We do all this dancing to avoid allocating. + let original_span_name = span + .attributes + .get(&Key::from_static_str(OTEL_ORIGINAL_NAME)) + .map(|v| v.as_str()); + let final_span_name = if let Some(span_name) = &original_span_name { + span_name.as_ref() + } else { + span.name.as_ref() + }; + + // Unfortunately trace state is immutable, so we have to create a new one + if let Some(true) = self.span_metrics.get(final_span_name) { + let new_trace_state = span + .span_context + .trace_state() + .insert(TRACE_STATE_MEASURE, TRACE_STATE_TRUE_VALUE) + .expect("valid trace state"); + span.span_context = SpanContext::new( + span.span_context.trace_id(), + span.span_context.span_id(), + span.span_context.trace_flags(), + span.span_context.is_remote(), + new_trace_state, + ) + } + + // Set the span kind https://github.com/DataDog/dd-trace-go/blob/main/ddtrace/ext/span_kind.go + let span_kind = match &span.span_kind { + SpanKind::Client => "client", + SpanKind::Server => "server", + SpanKind::Producer => "producer", + SpanKind::Consumer => "consumer", + SpanKind::Internal => "internal", + }; + span.attributes + .insert(KeyValue::new("span.kind", span_kind)); + + // Note we do NOT set span.type as it isn't a good fit for otel. + } + self.delegate.export(batch) + } + fn shutdown(&mut self) { + self.delegate.shutdown() + } + fn force_flush(&mut self) -> BoxFuture<'static, ExportResult> { + self.delegate.force_flush() + } +} diff --git a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/README.md b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/README.md new file mode 100644 index 0000000000..eeb009b68e --- /dev/null +++ b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/README.md @@ -0,0 +1,5 @@ +This is a temporary interning of the datadog exporter until we update otel. +The newest version of the exporter supports setting span metrics, but we +can't use it until we upgrade otel. + +Once otel is upgraded, we can remove this code and use the exporter directly.
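
For reference, the wrapper in `datadog.rs` marks spans as "measured" by rewriting the (immutable) trace state and rebuilding the `SpanContext` around it. A minimal sketch of that step using the same `opentelemetry_api` calls as the wrapper; the `"m"`/`"1"` literals stand in for the `TRACE_STATE_MEASURE`/`TRACE_STATE_TRUE_VALUE` constants from this module's propagator:

```rust
use opentelemetry_api::trace::SpanContext;

fn mark_measured(ctx: &SpanContext) -> SpanContext {
    // TraceState is immutable: insert returns a new state...
    let new_state = ctx
        .trace_state()
        .insert("m", "1")
        .expect("valid trace state entry");
    // ...so a new SpanContext is reconstructed around it.
    SpanContext::new(
        ctx.trace_id(),
        ctx.span_id(),
        ctx.trace_flags(),
        ctx.is_remote(),
        new_state,
    )
}
```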
\ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/intern.rs b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/intern.rs new file mode 100644 index 0000000000..fd1f69375f --- /dev/null +++ b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/intern.rs @@ -0,0 +1,517 @@ +use std::cell::RefCell; +use std::hash::BuildHasherDefault; +use std::hash::Hash; + +use indexmap::set::IndexSet; +use opentelemetry::StringValue; +use opentelemetry::Value; +use rmp::encode::RmpWrite; +use rmp::encode::ValueWriteError; + +type InternHasher = ahash::AHasher; + +#[derive(PartialEq)] +pub(crate) enum InternValue<'a> { + RegularString(&'a str), + OpenTelemetryValue(&'a Value), +} + +impl<'a> Hash for InternValue<'a> { + fn hash<H: std::hash::Hasher>(&self, state: &mut H) { + match &self { + InternValue::RegularString(s) => s.hash(state), + InternValue::OpenTelemetryValue(v) => match v { + Value::Bool(x) => x.hash(state), + Value::I64(x) => x.hash(state), + Value::String(x) => x.hash(state), + Value::F64(x) => x.to_bits().hash(state), + Value::Array(a) => match a { + opentelemetry::Array::Bool(x) => x.hash(state), + opentelemetry::Array::I64(x) => x.hash(state), + opentelemetry::Array::F64(floats) => { + for f in floats { + f.to_bits().hash(state); + } + } + opentelemetry::Array::String(x) => x.hash(state), + }, + }, + } + } +} + +impl<'a> Eq for InternValue<'a> {} + +const BOOLEAN_TRUE: &str = "true"; +const BOOLEAN_FALSE: &str = "false"; +const LEFT_SQUARE_BRACKET: u8 = b'['; +const RIGHT_SQUARE_BRACKET: u8 = b']'; +const COMMA: u8 = b','; +const DOUBLE_QUOTE: u8 = b'"'; +const EMPTY_ARRAY: &str = "[]"; + +trait WriteAsLiteral { + fn write_to(&self, buffer: &mut Vec<u8>); +} + +impl WriteAsLiteral for bool { + fn write_to(&self, buffer: &mut Vec<u8>) { + buffer.extend_from_slice(if *self { BOOLEAN_TRUE } else { BOOLEAN_FALSE }.as_bytes()); + } +} + +impl WriteAsLiteral for i64 { + fn write_to(&self, buffer: &mut Vec<u8>) { + buffer.extend_from_slice(itoa::Buffer::new().format(*self).as_bytes()); + } +} + +impl WriteAsLiteral for f64 { + fn write_to(&self, buffer: &mut Vec<u8>) { + buffer.extend_from_slice(ryu::Buffer::new().format(*self).as_bytes()); + } +} + +impl WriteAsLiteral for StringValue { + fn write_to(&self, buffer: &mut Vec<u8>) { + buffer.push(DOUBLE_QUOTE); + buffer.extend_from_slice(self.as_str().as_bytes()); + buffer.push(DOUBLE_QUOTE); + } +} + +impl<'a> InternValue<'a> { + pub(crate) fn write_as_str<W: RmpWrite>( + &self, + payload: &mut W, + reusable_buffer: &mut Vec<u8>, + ) -> Result<(), ValueWriteError<W::Error>> { + match self { + InternValue::RegularString(x) => rmp::encode::write_str(payload, x), + InternValue::OpenTelemetryValue(v) => match v { + Value::Bool(x) => { + rmp::encode::write_str(payload, if *x { BOOLEAN_TRUE } else { BOOLEAN_FALSE }) + } + Value::I64(x) => rmp::encode::write_str(payload, itoa::Buffer::new().format(*x)), + Value::F64(x) => rmp::encode::write_str(payload, ryu::Buffer::new().format(*x)), + Value::String(x) => rmp::encode::write_str(payload, x.as_ref()), + Value::Array(array) => match array { + opentelemetry::Array::Bool(x) => { + Self::write_generic_array(payload, reusable_buffer, x) + } + opentelemetry::Array::I64(x) => { + Self::write_generic_array(payload, reusable_buffer, x) + } + opentelemetry::Array::F64(x) => { + Self::write_generic_array(payload, reusable_buffer, x) + } + opentelemetry::Array::String(x) => { + Self::write_generic_array(payload, reusable_buffer, x) + } + }, + }, + } + } + + fn write_empty_array<W: RmpWrite>(payload: &mut W) -> Result<(), ValueWriteError<W::Error>> { + rmp::encode::write_str(payload, EMPTY_ARRAY) + } + + fn write_buffer_as_string<W: RmpWrite>( + payload: &mut W, + reusable_buffer: &[u8], + ) -> Result<(), ValueWriteError<W::Error>> { + rmp::encode::write_str_len(payload, reusable_buffer.len() as u32)?; + payload + .write_bytes(reusable_buffer) + .map_err(ValueWriteError::InvalidDataWrite) + } + + fn write_generic_array<W: RmpWrite, T: WriteAsLiteral>( + payload: &mut W, + reusable_buffer: &mut Vec<u8>, + array: &[T], + ) -> Result<(), ValueWriteError<W::Error>> { + if array.is_empty() { + return Self::write_empty_array(payload); + } + + reusable_buffer.clear(); + reusable_buffer.push(LEFT_SQUARE_BRACKET); + + array[0].write_to(reusable_buffer); + + for value in array[1..].iter() { + reusable_buffer.push(COMMA); + value.write_to(reusable_buffer); + } + + reusable_buffer.push(RIGHT_SQUARE_BRACKET); + + Self::write_buffer_as_string(payload, reusable_buffer) + } +} + +pub(crate) struct StringInterner<'a> { + data: IndexSet<InternValue<'a>, BuildHasherDefault<InternHasher>>, +} + +impl<'a> StringInterner<'a> { + pub(crate) fn new() -> StringInterner<'a> { + StringInterner { + data: IndexSet::with_capacity_and_hasher(128, BuildHasherDefault::default()), + } + } + + pub(crate) fn intern(&mut self, data: &'a str) -> u32 { + if let Some(idx) = self.data.get_index_of(&InternValue::RegularString(data)) { + return idx as u32; + } + self.data.insert_full(InternValue::RegularString(data)).0 as u32 + } + + pub(crate) fn intern_value(&mut self, data: &'a Value) -> u32 { + if let Some(idx) = self + .data + .get_index_of(&InternValue::OpenTelemetryValue(data)) + { + return idx as u32; + } + self.data + .insert_full(InternValue::OpenTelemetryValue(data)) + .0 as u32 + } + + pub(crate) fn write_dictionary<W: RmpWrite>( + &self, + payload: &mut W, + ) -> Result<(), ValueWriteError<W::Error>> { + thread_local!
{ + static BUFFER: RefCell> = RefCell::new(Vec::with_capacity(4096)); + } + + BUFFER.with(|cell| { + let reusable_buffer = &mut cell.borrow_mut(); + rmp::encode::write_array_len(payload, self.data.len() as u32)?; + for data in self.data.iter() { + data.write_as_str(payload, reusable_buffer)?; + } + + Ok(()) + }) + } +} + +#[cfg(test)] +mod tests { + use opentelemetry::Array; + + use super::*; + + #[test] + fn test_intern() { + let a = "a".to_string(); + let b = "b"; + let c = "c"; + + let mut intern = StringInterner::new(); + let a_idx = intern.intern(a.as_str()); + let b_idx = intern.intern(b); + let c_idx = intern.intern(c); + let d_idx = intern.intern(a.as_str()); + let e_idx = intern.intern(c); + + assert_eq!(a_idx, 0); + assert_eq!(b_idx, 1); + assert_eq!(c_idx, 2); + assert_eq!(d_idx, a_idx); + assert_eq!(e_idx, c_idx); + } + + #[test] + fn test_intern_bool() { + let a = Value::Bool(true); + let b = Value::Bool(false); + let c = "c"; + + let mut intern = StringInterner::new(); + let a_idx = intern.intern_value(&a); + let b_idx = intern.intern_value(&b); + let c_idx = intern.intern(c); + let d_idx = intern.intern_value(&a); + let e_idx = intern.intern(c); + + assert_eq!(a_idx, 0); + assert_eq!(b_idx, 1); + assert_eq!(c_idx, 2); + assert_eq!(d_idx, a_idx); + assert_eq!(e_idx, c_idx); + } + + #[test] + fn test_intern_i64() { + let a = Value::I64(1234567890); + let b = Value::I64(-1234567890); + let c = "c"; + let d = Value::I64(1234567890); + + let mut intern = StringInterner::new(); + let a_idx = intern.intern_value(&a); + let b_idx = intern.intern_value(&b); + let c_idx = intern.intern(c); + let d_idx = intern.intern_value(&a); + let e_idx = intern.intern(c); + let f_idx = intern.intern_value(&d); + + assert_eq!(a_idx, 0); + assert_eq!(b_idx, 1); + assert_eq!(c_idx, 2); + assert_eq!(d_idx, a_idx); + assert_eq!(e_idx, c_idx); + assert_eq!(f_idx, a_idx); + } + + #[test] + fn test_intern_f64() { + let a = Value::F64(123456.7890); + let b = Value::F64(-1234567.890); + let c = "c"; + let d = Value::F64(-1234567.890); + + let mut intern = StringInterner::new(); + let a_idx = intern.intern_value(&a); + let b_idx = intern.intern_value(&b); + let c_idx = intern.intern(c); + let d_idx = intern.intern_value(&a); + let e_idx = intern.intern(c); + let f_idx = intern.intern_value(&d); + + assert_eq!(a_idx, 0); + assert_eq!(b_idx, 1); + assert_eq!(c_idx, 2); + assert_eq!(d_idx, a_idx); + assert_eq!(e_idx, c_idx); + assert_eq!(b_idx, f_idx); + } + + #[test] + fn test_intern_array_of_booleans() { + let a = Value::Array(Array::Bool(vec![true, false])); + let b = Value::Array(Array::Bool(vec![false, true])); + let c = "c"; + let d = Value::Array(Array::Bool(vec![])); + let f = Value::Array(Array::Bool(vec![false, true])); + + let mut intern = StringInterner::new(); + let a_idx = intern.intern_value(&a); + let b_idx = intern.intern_value(&b); + let c_idx = intern.intern(c); + let d_idx = intern.intern_value(&a); + let e_idx = intern.intern(c); + let f_idx = intern.intern_value(&d); + let g_idx = intern.intern_value(&f); + + assert_eq!(a_idx, 0); + assert_eq!(b_idx, 1); + assert_eq!(c_idx, 2); + assert_eq!(d_idx, a_idx); + assert_eq!(e_idx, c_idx); + assert_eq!(f_idx, 3); + assert_eq!(g_idx, b_idx); + } + + #[test] + fn test_intern_array_of_i64() { + let a = Value::Array(Array::I64(vec![123, -123])); + let b = Value::Array(Array::I64(vec![-123, 123])); + let c = "c"; + let d = Value::Array(Array::I64(vec![])); + let f = Value::Array(Array::I64(vec![-123, 123])); + + let mut intern = StringInterner::new(); 
+ let a_idx = intern.intern_value(&a); + let b_idx = intern.intern_value(&b); + let c_idx = intern.intern(c); + let d_idx = intern.intern_value(&a); + let e_idx = intern.intern(c); + let f_idx = intern.intern_value(&d); + let g_idx = intern.intern_value(&f); + + assert_eq!(a_idx, 0); + assert_eq!(b_idx, 1); + assert_eq!(c_idx, 2); + assert_eq!(d_idx, a_idx); + assert_eq!(e_idx, c_idx); + assert_eq!(f_idx, 3); + assert_eq!(g_idx, b_idx); + } + + #[test] + fn test_intern_array_of_f64() { + let f1 = 123.0f64; + let f2 = 0f64; + + let a = Value::Array(Array::F64(vec![f1, f2])); + let b = Value::Array(Array::F64(vec![f2, f1])); + let c = "c"; + let d = Value::Array(Array::F64(vec![])); + let f = Value::Array(Array::F64(vec![f2, f1])); + + let mut intern = StringInterner::new(); + let a_idx = intern.intern_value(&a); + let b_idx = intern.intern_value(&b); + let c_idx = intern.intern(c); + let d_idx = intern.intern_value(&a); + let e_idx = intern.intern(c); + let f_idx = intern.intern_value(&d); + let g_idx = intern.intern_value(&f); + + assert_eq!(a_idx, 0); + assert_eq!(b_idx, 1); + assert_eq!(c_idx, 2); + assert_eq!(d_idx, a_idx); + assert_eq!(e_idx, c_idx); + assert_eq!(f_idx, 3); + assert_eq!(g_idx, b_idx); + } + + #[test] + fn test_intern_array_of_string() { + let s1 = "a"; + let s2 = "b"; + + let a = Value::Array(Array::String(vec![ + StringValue::from(s1), + StringValue::from(s2), + ])); + let b = Value::Array(Array::String(vec![ + StringValue::from(s2), + StringValue::from(s1), + ])); + let c = "c"; + let d = Value::Array(Array::String(vec![])); + let f = Value::Array(Array::String(vec![ + StringValue::from(s2), + StringValue::from(s1), + ])); + + let mut intern = StringInterner::new(); + let a_idx = intern.intern_value(&a); + let b_idx = intern.intern_value(&b); + let c_idx = intern.intern(c); + let d_idx = intern.intern_value(&a); + let e_idx = intern.intern(c); + let f_idx = intern.intern_value(&d); + let g_idx = intern.intern_value(&f); + + assert_eq!(a_idx, 0); + assert_eq!(b_idx, 1); + assert_eq!(c_idx, 2); + assert_eq!(d_idx, a_idx); + assert_eq!(e_idx, c_idx); + assert_eq!(f_idx, 3); + assert_eq!(g_idx, b_idx); + } + + #[test] + fn test_write_boolean_literal() { + let mut buffer: Vec = vec![]; + + true.write_to(&mut buffer); + + assert_eq!(&buffer[..], b"true"); + + buffer.clear(); + + false.write_to(&mut buffer); + + assert_eq!(&buffer[..], b"false"); + } + + #[test] + fn test_write_i64_literal() { + let mut buffer: Vec = vec![]; + + 1234567890i64.write_to(&mut buffer); + + assert_eq!(&buffer[..], b"1234567890"); + + buffer.clear(); + + (-1234567890i64).write_to(&mut buffer); + + assert_eq!(&buffer[..], b"-1234567890"); + } + + #[test] + fn test_write_f64_literal() { + let mut buffer: Vec = vec![]; + + let f1 = 12345.678f64; + let f2 = -12345.678f64; + + f1.write_to(&mut buffer); + + assert_eq!(&buffer[..], format!("{}", f1).as_bytes()); + + buffer.clear(); + + f2.write_to(&mut buffer); + + assert_eq!(&buffer[..], format!("{}", f2).as_bytes()); + } + + #[test] + fn test_write_string_literal() { + let mut buffer: Vec = vec![]; + + let s1 = StringValue::from("abc"); + let s2 = StringValue::from(""); + + s1.write_to(&mut buffer); + + assert_eq!(&buffer[..], format!("\"{}\"", s1).as_bytes()); + + buffer.clear(); + + s2.write_to(&mut buffer); + + assert_eq!(&buffer[..], format!("\"{}\"", s2).as_bytes()); + } + + fn test_encoding_intern_value(value: InternValue<'_>) { + let mut expected: Vec = vec![]; + let mut actual: Vec = vec![]; + + let mut buffer = vec![]; + + 
value.write_as_str(&mut actual, &mut buffer).unwrap(); + + let InternValue::OpenTelemetryValue(value) = value else { + return; + }; + + rmp::encode::write_str(&mut expected, value.as_str().as_ref()).unwrap(); + + assert_eq!(expected, actual); + } + + #[test] + fn test_encode_boolean() { + test_encoding_intern_value(InternValue::OpenTelemetryValue(&Value::Bool(true))); + test_encoding_intern_value(InternValue::OpenTelemetryValue(&Value::Bool(false))); + } + + #[test] + fn test_encode_i64() { + test_encoding_intern_value(InternValue::OpenTelemetryValue(&Value::I64(123))); + test_encoding_intern_value(InternValue::OpenTelemetryValue(&Value::I64(0))); + test_encoding_intern_value(InternValue::OpenTelemetryValue(&Value::I64(-123))); + } + + #[test] + fn test_encode_f64() { + test_encoding_intern_value(InternValue::OpenTelemetryValue(&Value::F64(123.456f64))); + test_encoding_intern_value(InternValue::OpenTelemetryValue(&Value::F64(-123.456f64))); + } +} diff --git a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/mod.rs b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/mod.rs new file mode 100644 index 0000000000..ae4a37ba07 --- /dev/null +++ b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/mod.rs @@ -0,0 +1,562 @@ +mod intern; +mod model; + +use std::borrow::Cow; +use std::fmt::Debug; +use std::fmt::Formatter; +use std::sync::Arc; +use std::time::Duration; + +use futures::future::BoxFuture; +use http::Method; +use http::Request; +use http::Uri; +pub use model::ApiVersion; +pub use model::Error; +pub use model::FieldMappingFn; +use opentelemetry::global; +use opentelemetry::sdk; +use opentelemetry::trace::TraceError; +use opentelemetry::KeyValue; +use opentelemetry_api::trace::TracerProvider; +use opentelemetry_http::HttpClient; +use opentelemetry_http::ResponseExt; +use opentelemetry_sdk::export::trace::ExportResult; +use opentelemetry_sdk::export::trace::SpanData; +use opentelemetry_sdk::export::trace::SpanExporter; +use opentelemetry_sdk::resource::ResourceDetector; +use opentelemetry_sdk::resource::SdkProvidedResourceDetector; +use opentelemetry_sdk::runtime::RuntimeChannel; +use opentelemetry_sdk::trace::BatchMessage; +use opentelemetry_sdk::trace::Config; +use opentelemetry_sdk::trace::Tracer; +use opentelemetry_sdk::Resource; +use opentelemetry_semantic_conventions as semcov; +use url::Url; + +use self::model::unified_tags::UnifiedTags; +use crate::plugins::telemetry::tracing::datadog_exporter::exporter::model::FieldMapping; + +/// Default Datadog collector endpoint +const DEFAULT_AGENT_ENDPOINT: &str = "http://127.0.0.1:8126"; + +/// Header name used to inform the Datadog agent of the number of traces in the payload +const DATADOG_TRACE_COUNT_HEADER: &str = "X-Datadog-Trace-Count"; + +/// Header name use to inform datadog as to what version +const DATADOG_META_LANG_HEADER: &str = "Datadog-Meta-Lang"; +const DATADOG_META_TRACER_VERSION_HEADER: &str = "Datadog-Meta-Tracer-Version"; + +// Struct to hold the mapping between Opentelemetry spans and datadog spans. 
+pub struct Mapping { + resource: Option, + name: Option, + service_name: Option, +} + +impl Mapping { + pub fn new( + resource: Option, + name: Option, + service_name: Option, + ) -> Self { + Mapping { + resource, + name, + service_name, + } + } + pub fn empty() -> Self { + Self::new(None, None, None) + } +} + +/// Datadog span exporter +pub struct DatadogExporter { + client: Arc, + request_url: Uri, + model_config: ModelConfig, + api_version: ApiVersion, + mapping: Mapping, + unified_tags: UnifiedTags, +} + +impl DatadogExporter { + fn new( + model_config: ModelConfig, + request_url: Uri, + api_version: ApiVersion, + client: Arc, + mapping: Mapping, + unified_tags: UnifiedTags, + ) -> Self { + DatadogExporter { + client, + request_url, + model_config, + api_version, + mapping, + unified_tags, + } + } + + fn build_request( + &self, + mut batch: Vec, + ) -> Result>, TraceError> { + let traces: Vec<&[SpanData]> = group_into_traces(&mut batch); + let trace_count = traces.len(); + let data = self.api_version.encode( + &self.model_config, + traces, + &self.mapping, + &self.unified_tags, + )?; + let req = Request::builder() + .method(Method::POST) + .uri(self.request_url.clone()) + .header(http::header::CONTENT_TYPE, self.api_version.content_type()) + .header(DATADOG_TRACE_COUNT_HEADER, trace_count) + .header(DATADOG_META_LANG_HEADER, "rust") + .header( + DATADOG_META_TRACER_VERSION_HEADER, + env!("CARGO_PKG_VERSION"), + ) + .body(data) + .map_err::(Into::into)?; + + Ok(req) + } +} + +impl Debug for DatadogExporter { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("DatadogExporter") + .field("model_config", &self.model_config) + .field("request_url", &self.request_url) + .field("api_version", &self.api_version) + .field("client", &self.client) + .field("resource_mapping", &mapping_debug(&self.mapping.resource)) + .field("name_mapping", &mapping_debug(&self.mapping.name)) + .field( + "service_name_mapping", + &mapping_debug(&self.mapping.service_name), + ) + .finish() + } +} + +/// Create a new Datadog exporter pipeline builder. +pub fn new_pipeline() -> DatadogPipelineBuilder { + DatadogPipelineBuilder::default() +} + +/// Builder for `ExporterConfig` struct. 
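///
/// A minimal construction sketch (mirroring the tests at the bottom of this file;
/// the endpoint value is illustrative and `new_pipeline` is this module's entry point):
///
/// ```no_run
/// let exporter = new_pipeline()
///     .with_service_name("router")
///     .with_agent_endpoint("http://localhost:8126")
///     .build_exporter()
///     .expect("failed to build datadog exporter");
/// ```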
+pub struct DatadogPipelineBuilder { + agent_endpoint: String, + trace_config: Option, + api_version: ApiVersion, + client: Option>, + mapping: Mapping, + unified_tags: UnifiedTags, +} + +impl Default for DatadogPipelineBuilder { + fn default() -> Self { + DatadogPipelineBuilder { + agent_endpoint: DEFAULT_AGENT_ENDPOINT.to_string(), + trace_config: None, + mapping: Mapping::empty(), + api_version: ApiVersion::Version05, + unified_tags: UnifiedTags::new(), + #[cfg(all( + not(feature = "reqwest-client"), + not(feature = "reqwest-blocking-client"), + not(feature = "surf-client"), + ))] + client: None, + #[cfg(all( + not(feature = "reqwest-client"), + not(feature = "reqwest-blocking-client"), + feature = "surf-client" + ))] + client: Some(Arc::new(surf::Client::new())), + #[cfg(all( + not(feature = "surf-client"), + not(feature = "reqwest-blocking-client"), + feature = "reqwest-client" + ))] + client: Some(Arc::new(reqwest::Client::new())), + #[cfg(feature = "reqwest-blocking-client")] + client: Some(Arc::new(reqwest::blocking::Client::new())), + } + } +} + +impl Debug for DatadogPipelineBuilder { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("DatadogExporter") + .field("agent_endpoint", &self.agent_endpoint) + .field("trace_config", &self.trace_config) + .field("client", &self.client) + .field("resource_mapping", &mapping_debug(&self.mapping.resource)) + .field("name_mapping", &mapping_debug(&self.mapping.name)) + .field( + "service_name_mapping", + &mapping_debug(&self.mapping.service_name), + ) + .finish() + } +} + +impl DatadogPipelineBuilder { + /// Building a new exporter. + /// + /// This is useful if you are manually constructing a pipeline. + pub fn build_exporter(mut self) -> Result { + let (_, service_name) = self.build_config_and_service_name(); + self.build_exporter_with_service_name(service_name) + } + + fn build_config_and_service_name(&mut self) -> (Config, String) { + let service_name = self.unified_tags.service(); + if let Some(service_name) = service_name { + let config = if let Some(mut cfg) = self.trace_config.take() { + cfg.resource = Cow::Owned(Resource::new( + cfg.resource + .iter() + .filter(|(k, _v)| *k != &semcov::resource::SERVICE_NAME) + .map(|(k, v)| KeyValue::new(k.clone(), v.clone())), + )); + cfg + } else { + Config { + resource: Cow::Owned(Resource::empty()), + ..Default::default() + } + }; + (config, service_name) + } else { + let service_name = SdkProvidedResourceDetector + .detect(Duration::from_secs(0)) + .get(semcov::resource::SERVICE_NAME) + .unwrap() + .to_string(); + ( + Config { + // use a empty resource to prevent TracerProvider to assign a service name. + resource: Cow::Owned(Resource::empty()), + ..Default::default() + }, + service_name, + ) + } + } + + // parse the endpoint and append the path based on versions. + // keep the query and host the same. + fn build_endpoint(agent_endpoint: &str, version: &str) -> Result { + // build agent endpoint based on version + let mut endpoint = agent_endpoint + .parse::() + .map_err::(Into::into)?; + let mut paths = endpoint + .path_segments() + .map(|c| c.filter(|s| !s.is_empty()).collect::>()) + .unwrap_or_default(); + paths.push(version); + + let path_str = paths.join("/"); + endpoint.set_path(path_str.as_str()); + + Ok(endpoint.as_str().parse().map_err::(Into::into)?) 
+ } + + fn build_exporter_with_service_name( + self, + service_name: String, + ) -> Result { + if let Some(client) = self.client { + let model_config = ModelConfig { service_name }; + + let exporter = DatadogExporter::new( + model_config, + Self::build_endpoint(&self.agent_endpoint, self.api_version.path())?, + self.api_version, + client, + self.mapping, + self.unified_tags, + ); + Ok(exporter) + } else { + Err(Error::NoHttpClient.into()) + } + } + + /// Install the Datadog trace exporter pipeline using a simple span processor. + pub fn install_simple(mut self) -> Result { + let (config, service_name) = self.build_config_and_service_name(); + let exporter = self.build_exporter_with_service_name(service_name)?; + let mut provider_builder = + sdk::trace::TracerProvider::builder().with_simple_exporter(exporter); + provider_builder = provider_builder.with_config(config); + let provider = provider_builder.build(); + let tracer = provider.versioned_tracer( + "opentelemetry-datadog", + Some(env!("CARGO_PKG_VERSION")), + Some(semcov::SCHEMA_URL), + None, + ); + let _ = global::set_tracer_provider(provider); + Ok(tracer) + } + + /// Install the Datadog trace exporter pipeline using a batch span processor with the specified + /// runtime. + pub fn install_batch>( + mut self, + runtime: R, + ) -> Result { + let (config, service_name) = self.build_config_and_service_name(); + let exporter = self.build_exporter_with_service_name(service_name)?; + let mut provider_builder = + sdk::trace::TracerProvider::builder().with_batch_exporter(exporter, runtime); + provider_builder = provider_builder.with_config(config); + let provider = provider_builder.build(); + let tracer = provider.versioned_tracer( + "opentelemetry-datadog", + Some(env!("CARGO_PKG_VERSION")), + Some(semcov::SCHEMA_URL), + None, + ); + let _ = global::set_tracer_provider(provider); + Ok(tracer) + } + + /// Assign the service name under which to group traces + pub fn with_service_name>(mut self, service_name: T) -> Self { + self.unified_tags.set_service(Some(service_name.into())); + self + } + + /// Assign the version under which to group traces + pub fn with_version>(mut self, version: T) -> Self { + self.unified_tags.set_version(Some(version.into())); + self + } + + /// Assign the env under which to group traces + pub fn with_env>(mut self, env: T) -> Self { + self.unified_tags.set_env(Some(env.into())); + self + } + + /// Assign the Datadog collector endpoint. + /// + /// The endpoint of the datadog agent, by default it is `http://127.0.0.1:8126`. + pub fn with_agent_endpoint>(mut self, endpoint: T) -> Self { + self.agent_endpoint = endpoint.into(); + self + } + + /// Choose the http client used by uploader + pub fn with_http_client(mut self, client: T) -> Self { + self.client = Some(Arc::new(client)); + self + } + + /// Assign the SDK trace configuration + pub fn with_trace_config(mut self, config: Config) -> Self { + self.trace_config = Some(config); + self + } + + /// Set version of Datadog trace ingestion API + pub fn with_api_version(mut self, api_version: ApiVersion) -> Self { + self.api_version = api_version; + self + } + + /// Custom the value used for `resource` field in datadog spans. + /// See [`FieldMappingFn`] for details. + pub fn with_resource_mapping(mut self, f: F) -> Self + where + F: for<'a> Fn(&'a SpanData, &'a ModelConfig) -> &'a str + Send + Sync + 'static, + { + self.mapping.resource = Some(Arc::new(f)); + self + } + + /// Custom the value used for `name` field in datadog spans. 
+ /// See [`FieldMappingFn`] for details. + pub fn with_name_mapping(mut self, f: F) -> Self + where + F: for<'a> Fn(&'a SpanData, &'a ModelConfig) -> &'a str + Send + Sync + 'static, + { + self.mapping.name = Some(Arc::new(f)); + self + } + + /// Custom the value used for `service_name` field in datadog spans. + /// See [`FieldMappingFn`] for details. + pub fn with_service_name_mapping(mut self, f: F) -> Self + where + F: for<'a> Fn(&'a SpanData, &'a ModelConfig) -> &'a str + Send + Sync + 'static, + { + self.mapping.service_name = Some(Arc::new(f)); + self + } +} + +fn group_into_traces(spans: &mut [SpanData]) -> Vec<&[SpanData]> { + if spans.is_empty() { + return vec![]; + } + + spans.sort_by_key(|x| x.span_context.trace_id().to_bytes()); + + let mut traces: Vec<&[SpanData]> = Vec::with_capacity(spans.len()); + + let mut start = 0; + let mut start_trace_id = spans[start].span_context.trace_id(); + for (idx, span) in spans.iter().enumerate() { + let current_trace_id = span.span_context.trace_id(); + if start_trace_id != current_trace_id { + traces.push(&spans[start..idx]); + start = idx; + start_trace_id = current_trace_id; + } + } + traces.push(&spans[start..]); + traces +} + +async fn send_request( + client: Arc, + request: http::Request>, +) -> ExportResult { + let _ = client.send(request).await?.error_for_status()?; + Ok(()) +} + +impl SpanExporter for DatadogExporter { + /// Export spans to datadog-agent + fn export(&mut self, batch: Vec) -> BoxFuture<'static, ExportResult> { + let request = match self.build_request(batch) { + Ok(req) => req, + Err(err) => return Box::pin(std::future::ready(Err(err))), + }; + + let client = self.client.clone(); + Box::pin(send_request(client, request)) + } +} + +/// Helper struct to custom the mapping between Opentelemetry spans and datadog spans. 
+/// Helper struct to customize the mapping between OpenTelemetry spans and Datadog spans.
+///
+/// This struct will be passed to [`FieldMappingFn`]
+#[derive(Default, Debug)]
+#[non_exhaustive]
+pub struct ModelConfig {
+    pub service_name: String,
+}
+
+fn mapping_debug(f: &Option<FieldMapping>) -> String {
+    if f.is_some() {
+        "custom mapping"
+    } else {
+        "default mapping"
+    }
+    .to_string()
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::plugins::telemetry::tracing::datadog_exporter::exporter::model::tests::get_span;
+    use crate::plugins::telemetry::tracing::datadog_exporter::ApiVersion::Version05;
+
+    #[test]
+    fn test_out_of_order_group() {
+        let mut batch = vec![get_span(1, 1, 1), get_span(2, 2, 2), get_span(1, 1, 3)];
+        let expected = vec![
+            vec![get_span(1, 1, 1), get_span(1, 1, 3)],
+            vec![get_span(2, 2, 2)],
+        ];
+
+        let mut traces = group_into_traces(&mut batch);
+        // We need to sort the output in order to compare, but this is not required by the Datadog agent
+        traces.sort_by_key(|t| u128::from_be_bytes(t[0].span_context.trace_id().to_bytes()));
+
+        assert_eq!(traces, expected);
+    }
+
+    #[test]
+    fn test_agent_endpoint_with_api_version() {
+        let with_tail_slash =
+            DatadogPipelineBuilder::build_endpoint("http://localhost:8126/", Version05.path());
+        let without_tail_slash =
+            DatadogPipelineBuilder::build_endpoint("http://localhost:8126", Version05.path());
+        let with_query = DatadogPipelineBuilder::build_endpoint(
+            "http://localhost:8126?api_key=123",
+            Version05.path(),
+        );
+        let invalid = DatadogPipelineBuilder::build_endpoint(
+            "http://localhost:klsajfjksfh",
+            Version05.path(),
+        );
+
+        assert_eq!(
+            with_tail_slash.unwrap().to_string(),
+            "http://localhost:8126/v0.5/traces"
+        );
+        assert_eq!(
+            without_tail_slash.unwrap().to_string(),
+            "http://localhost:8126/v0.5/traces"
+        );
+        assert_eq!(
+            with_query.unwrap().to_string(),
+            "http://localhost:8126/v0.5/traces?api_key=123"
+        );
+        assert!(invalid.is_err())
+    }
+
+    #[derive(Debug)]
+    struct DummyClient;
+
+    #[async_trait::async_trait]
+    impl HttpClient for DummyClient {
+        async fn send(
+            &self,
+            _request: Request<Vec<u8>>,
+        ) -> Result<Response<Bytes>, opentelemetry_http::HttpError> {
+            Ok(http::Response::new("dummy response".into()))
+        }
+    }
+
+    #[test]
+    fn test_custom_http_client() {
+        new_pipeline()
+            .with_http_client(DummyClient)
+            .build_exporter()
+            .unwrap();
+    }
+
+    #[test]
+    fn test_install_simple() {
+        new_pipeline()
+            .with_service_name("test_service")
+            .with_http_client(DummyClient)
+            .install_simple()
+            .unwrap();
+    }
+
+    #[test]
+    fn test_install_batch() {
+        new_pipeline()
+            .with_service_name("test_service")
+            .with_http_client(DummyClient)
+            .install_batch(opentelemetry_sdk::runtime::AsyncStd {})
+            .unwrap();
+    }
+}
diff --git a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/mod.rs b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/mod.rs
new file mode 100644
index 0000000000..d6db4b72b4
--- /dev/null
+++ b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/mod.rs
@@ -0,0 +1,310 @@
+use std::fmt::Debug;
+
+use http::uri;
+use opentelemetry_sdk::export::trace::SpanData;
+use opentelemetry_sdk::export::trace::{self};
+use opentelemetry_sdk::export::ExportError;
+use url::ParseError;
+
+use self::unified_tags::UnifiedTags;
+use super::Mapping;
+use crate::plugins::telemetry::tracing::datadog_exporter::ModelConfig;
+
+pub mod unified_tags;
+mod v03;
+mod v05;
+
+// todo: we should follow the same mapping defined in https://github.com/DataDog/datadog-agent/blob/main/pkg/trace/api/otlp.go
+
+// https://github.com/DataDog/dd-trace-js/blob/c89a35f7d27beb4a60165409376e170eacb194c5/packages/dd-trace/src/constants.js#L4
+static SAMPLING_PRIORITY_KEY: &str = "_sampling_priority_v1";
+
+// https://github.com/DataDog/datadog-agent/blob/ec96f3c24173ec66ba235bda7710504400d9a000/pkg/trace/traceutil/span.go#L20
+static DD_MEASURED_KEY: &str = "_dd.measured";
+
+/// Custom mapping between OpenTelemetry spans and Datadog spans.
+///
+/// Users can provide custom functions to change the mapping. Customization is currently supported
+/// for the following fields in the Datadog span protocol:
+///
+/// |field name|default value|
+/// |---------------|-------------|
+/// |service name| service name configuration from [`ModelConfig`]|
+/// |name | opentelemetry instrumentation library name |
+/// |resource| opentelemetry name|
+///
+/// The function takes a reference to [`SpanData`]() and a reference to [`ModelConfig`]() as parameters.
+/// It should return a `&str` which will be used as the value for the field.
+///
+/// If no custom mapping is provided, the default mapping detailed above will be used.
+///
+/// For example,
+/// ```no_run
+/// use opentelemetry_datadog::{ApiVersion, new_pipeline};
+/// fn main() -> Result<(), opentelemetry::trace::TraceError> {
+///     let tracer = new_pipeline()
+///         .with_service_name("my_app")
+///         .with_api_version(ApiVersion::Version05)
+///         // the custom mapping below will change all spans' names to "datadog spans"
+///         .with_name_mapping(|span, model_config| {
+///             "datadog spans"
+///         })
+///         .with_agent_endpoint("http://localhost:8126")
+///         .install_batch(opentelemetry_sdk::runtime::Tokio)?;
+///
+///     Ok(())
+/// }
+/// ```
+pub type FieldMappingFn = dyn for<'a> Fn(&'a SpanData, &'a ModelConfig) -> &'a str + Send + Sync;
+
+pub(crate) type FieldMapping = std::sync::Arc<FieldMappingFn>;
+
+// Datadog uses some magic tags in their models. There is no recommended mapping defined in
+// the opentelemetry spec. Below is the default mapping we use. Users can override it by providing
+// their own implementations.
+fn default_service_name_mapping<'a>(_span: &'a SpanData, config: &'a ModelConfig) -> &'a str {
+    config.service_name.as_str()
+}
+
+fn default_name_mapping<'a>(span: &'a SpanData, _config: &'a ModelConfig) -> &'a str {
+    span.instrumentation_lib.name.as_ref()
+}
+
+fn default_resource_mapping<'a>(span: &'a SpanData, _config: &'a ModelConfig) -> &'a str {
+    span.name.as_ref()
+}
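The override-or-default dispatch these defaults feed into can be sketched standalone. The types below are simplified stand-ins (plain `fn` pointers instead of the exporter's `Arc<FieldMappingFn>`), but the shape is the same: use the custom mapping when one is set, otherwise fall back to the default.

```rust
// Simplified stand-ins for SpanData / ModelConfig, for illustration only.
struct Span {
    name: String,
}
struct Config {
    service_name: String,
}

// A field mapping borrows from the span or the config and returns a &str.
type MapFn = for<'a> fn(&'a Span, &'a Config) -> &'a str;

fn custom_service<'a>(_span: &'a Span, config: &'a Config) -> &'a str {
    config.service_name.as_str()
}

fn resolve<'a>(custom: Option<MapFn>, span: &'a Span, config: &'a Config) -> &'a str {
    match custom {
        Some(f) => f(span, config),
        // Default mapping: the OpenTelemetry span name.
        None => span.name.as_str(),
    }
}

fn main() {
    let span = Span { name: "GET /users".into() };
    let config = Config { service_name: "router".into() };
    assert_eq!(resolve(None, &span, &config), "GET /users");
    assert_eq!(resolve(Some(custom_service), &span, &config), "router");
}
```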
+/// Wrapped error type for errors from the opentelemetry datadog exporter
+#[allow(clippy::enum_variant_names)]
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+    /// Message pack error
+    #[error("message pack error")]
+    MessagePackError,
+    /// No http client found. Users should provide one or enable a client feature.
+    #[error("http client must be set, users can enable reqwest or surf feature to use http client implementation within crate")]
+    NoHttpClient,
+    /// Http requests failed with the following errors
+    #[error(transparent)]
+    RequestError(#[from] http::Error),
+    /// The Uri was invalid
+    #[error("invalid url {0}")]
+    InvalidUri(String),
+    /// Other errors
+    #[error("{0}")]
+    Other(String),
+}
+
+impl ExportError for Error {
+    fn exporter_name(&self) -> &'static str {
+        "datadog"
+    }
+}
+
+impl From<rmp::encode::ValueWriteError> for Error {
+    fn from(_: rmp::encode::ValueWriteError) -> Self {
+        Self::MessagePackError
+    }
+}
+
+impl From<ParseError> for Error {
+    fn from(err: ParseError) -> Self {
+        Self::InvalidUri(err.to_string())
+    }
+}
+
+impl From<uri::InvalidUri> for Error {
+    fn from(err: uri::InvalidUri) -> Self {
+        Self::InvalidUri(err.to_string())
+    }
+}
+
+/// Version of the Datadog trace ingestion API
+#[derive(Debug, Copy, Clone)]
+#[non_exhaustive]
+pub enum ApiVersion {
+    /// Version 0.3
+    Version03,
+    /// Version 0.5 - requires datadog-agent v7.22.0 or above
+    Version05,
+}
+
+impl ApiVersion {
+    pub(crate) fn path(self) -> &'static str {
+        match self {
+            ApiVersion::Version03 => "/v0.3/traces",
+            ApiVersion::Version05 => "/v0.5/traces",
+        }
+    }
+
+    pub(crate) fn content_type(self) -> &'static str {
+        match self {
+            ApiVersion::Version03 => "application/msgpack",
+            ApiVersion::Version05 => "application/msgpack",
+        }
+    }
+
+    pub(crate) fn encode(
+        self,
+        model_config: &ModelConfig,
+        traces: Vec<&[trace::SpanData]>,
+        mapping: &Mapping,
+        unified_tags: &UnifiedTags,
+    ) -> Result<Vec<u8>, Error> {
+        match self {
+            Self::Version03 => v03::encode(
+                model_config,
+                traces,
+                |span, config| match &mapping.service_name {
+                    Some(f) => f(span, config),
+                    None => default_service_name_mapping(span, config),
+                },
+                |span, config| match &mapping.name {
+                    Some(f) => f(span, config),
+                    None => default_name_mapping(span, config),
+                },
+                |span, config| match &mapping.resource {
+                    Some(f) => f(span, config),
+                    None => default_resource_mapping(span, config),
+                },
+            ),
+            Self::Version05 => v05::encode(
+                model_config,
+                traces,
+                |span, config| match &mapping.service_name {
+                    Some(f) => f(span, config),
+                    None => default_service_name_mapping(span, config),
+                },
+                |span, config| match &mapping.name {
+                    Some(f) => f(span, config),
+                    None => default_name_mapping(span, config),
+                },
+                |span, config| match &mapping.resource {
+                    Some(f) => f(span, config),
+                    None => default_resource_mapping(span, config),
+                },
+                unified_tags,
+            ),
+        }
+    }
+}
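A quick illustration of how the API version selects the agent path (mirroring `path()` above; the actual join with the configured endpoint happens in `build_endpoint`, which is not shown here):

```rust
// Both ingestion API versions ship msgpack, but post to different paths.
#[allow(dead_code)]
#[derive(Debug, Copy, Clone)]
enum ApiVersion {
    Version03,
    Version05,
}

impl ApiVersion {
    fn path(self) -> &'static str {
        match self {
            ApiVersion::Version03 => "/v0.3/traces",
            ApiVersion::Version05 => "/v0.5/traces",
        }
    }
}

fn main() {
    let agent = "http://127.0.0.1:8126";
    // The exporter appends the version path to the agent endpoint.
    let url = format!("{}{}", agent, ApiVersion::Version05.path());
    assert_eq!(url, "http://127.0.0.1:8126/v0.5/traces");
}
```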
+#[cfg(test)]
+pub(crate) mod tests {
+    use std::borrow::Cow;
+    use std::time::Duration;
+    use std::time::SystemTime;
+
+    use base64::Engine;
+    use opentelemetry::trace::SpanContext;
+    use opentelemetry::trace::SpanId;
+    use opentelemetry::trace::SpanKind;
+    use opentelemetry::trace::Status;
+    use opentelemetry::trace::TraceFlags;
+    use opentelemetry::trace::TraceId;
+    use opentelemetry::trace::TraceState;
+    use opentelemetry::KeyValue;
+    use opentelemetry_sdk::trace::EvictedHashMap;
+    use opentelemetry_sdk::trace::EvictedQueue;
+    use opentelemetry_sdk::InstrumentationLibrary;
+    use opentelemetry_sdk::Resource;
+    use opentelemetry_sdk::{self};
+
+    use super::*;
+
+    fn get_traces() -> Vec<Vec<trace::SpanData>> {
+        vec![vec![get_span(7, 1, 99)]]
+    }
+
+    pub(crate) fn get_span(trace_id: u128, parent_span_id: u64, span_id: u64) -> trace::SpanData {
+        let span_context = SpanContext::new(
+            TraceId::from_u128(trace_id),
+            SpanId::from_u64(span_id),
+            TraceFlags::default(),
+            false,
+            TraceState::default(),
+        );
+
+        let start_time = SystemTime::UNIX_EPOCH;
+        let end_time = start_time.checked_add(Duration::from_secs(1)).unwrap();
+
+        let mut attributes: EvictedHashMap = EvictedHashMap::new(1, 1);
+        attributes.insert(KeyValue::new("span.type", "web"));
+        let resource = Resource::new(vec![KeyValue::new("host.name", "test")]);
+        let instrumentation_lib = InstrumentationLibrary::new(
+            "component",
+            None::<&'static str>,
+            None::<&'static str>,
+            None,
+        );
+
+        trace::SpanData {
+            span_context,
+            parent_span_id: SpanId::from_u64(parent_span_id),
+            span_kind: SpanKind::Client,
+            name: "resource".into(),
+            start_time,
+            end_time,
+            attributes,
+            events: EvictedQueue::new(0),
+            links: EvictedQueue::new(0),
+            status: Status::Ok,
+            resource: Cow::Owned(resource),
+            instrumentation_lib,
+        }
+    }
+
+    #[test]
+    fn test_encode_v03() -> Result<(), Box<dyn std::error::Error>> {
+        let traces = get_traces();
+        let model_config = ModelConfig {
+            service_name: "service_name".to_string(),
+            ..Default::default()
+        };
+        let encoded =
+            base64::engine::general_purpose::STANDARD.encode(ApiVersion::Version03.encode(
+                &model_config,
+                traces.iter().map(|x| &x[..]).collect(),
+                &Mapping::empty(),
+                &UnifiedTags::new(),
+            )?);
+
+        assert_eq!(encoded.as_str(), "kZGMpHR5cGWjd2Vip3NlcnZpY2Wsc2VydmljZV9uYW1lpG5hbWWpY29tcG9uZW\
+    50qHJlc291cmNlqHJlc291cmNlqHRyYWNlX2lkzwAAAAAAAAAHp3NwYW5faWTPAAAAAAAAAGOpcGFyZW50X2lkzwAAAA\
+    AAAAABpXN0YXJ00wAAAAAAAAAAqGR1cmF0aW9u0wAAAAA7msoApWVycm9y0gAAAACkbWV0YYKpaG9zdC5uYW1lpHRlc3\
+    Spc3Bhbi50eXBlo3dlYqdtZXRyaWNzgbVfc2FtcGxpbmdfcHJpb3JpdHlfdjHLAAAAAAAAAAA=");
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_encode_v05() -> Result<(), Box<dyn std::error::Error>> {
+        let traces = get_traces();
+        let model_config = ModelConfig {
+            service_name: "service_name".to_string(),
+            ..Default::default()
+        };
+
+        let mut unified_tags = UnifiedTags::new();
+        unified_tags.set_env(Some(String::from("test-env")));
+        unified_tags.set_version(Some(String::from("test-version")));
+        unified_tags.set_service(Some(String::from("test-service")));
+
+        let _encoded =
+            base64::engine::general_purpose::STANDARD.encode(ApiVersion::Version05.encode(
+                &model_config,
+                traces.iter().map(|x| &x[..]).collect(),
+                &Mapping::empty(),
+                &unified_tags,
+            )?);
+
+        // TODO: Need someone to generate the expected result or instructions to do so.
+        // assert_eq!(encoded.as_str(), "kp6jd2VirHNlcnZpY2VfbmFtZaljb21wb25lbnSocmVzb3VyY2WpaG9zdC5uYW\
+        // 1lpHRlc3Snc2VydmljZax0ZXN0LXNlcnZpY2WjZW52qHRlc3QtZW52p3ZlcnNpb26sdGVzdC12ZXJzaW9uqXNwYW4udH\
+        // lwZbVfc2FtcGxpbmdfcHJpb3JpdHlfdjGRkZzOAAAAAc4AAAACzgAAAAPPAAAAAAAAAAfPAAAAAAAAAGPPAAAAAAAAAA\
+        // HTAAAAAAAAAADTAAAAADuaygDSAAAAAIXOAAAABM4AAAAFzgAAAAbOAAAAB84AAAAIzgAAAAnOAAAACs4AAAALzgAAAA\
+        // zOAAAAAIHOAAAADcsAAAAAAAAAAM4AAAAA");
+
+        Ok(())
+    }
+}
diff --git a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/unified_tags.rs b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/unified_tags.rs
new file mode 100644
index 0000000000..e4e835c550
--- /dev/null
+++ b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/unified_tags.rs
@@ -0,0 +1,123 @@
+/// Unified tags - See: https://docs.datadoghq.com/getting_started/tagging/unified_service_tagging
+pub struct UnifiedTags {
+    pub service: UnifiedTagField,
+    pub env: UnifiedTagField,
+    pub version: UnifiedTagField,
+}
+
+impl UnifiedTags {
+    pub fn new() -> Self {
+        UnifiedTags {
+            service: UnifiedTagField::new(UnifiedTagEnum::Service),
+            env: UnifiedTagField::new(UnifiedTagEnum::Env),
+            version: UnifiedTagField::new(UnifiedTagEnum::Version),
+        }
+    }
+    pub fn set_service(&mut self, service: Option<String>) {
+        self.service.value = service;
+    }
+    pub fn set_version(&mut self, version: Option<String>) {
+        self.version.value = version;
+    }
+    pub fn set_env(&mut self, env: Option<String>) {
+        self.env.value = env;
+    }
+    pub fn service(&self) -> Option<String> {
+        self.service.value.clone()
+    }
+    pub fn compute_attribute_size(&self) -> u32 {
+        self.service.len() + self.env.len() + self.version.len()
+    }
+}
+
+pub struct UnifiedTagField {
+    pub value: Option<String>,
+    pub kind: UnifiedTagEnum,
+}
+
+impl UnifiedTagField {
+    pub fn new(kind: UnifiedTagEnum) -> Self {
+        UnifiedTagField {
+            value: kind.find_unified_tag_value(),
+            kind,
+        }
+    }
+    pub fn len(&self) -> u32 {
+        if self.value.is_some() {
+            return 1;
+        }
+        0
+    }
+    pub fn get_tag_name(&self) -> &'static str {
+        self.kind.get_tag_name()
+    }
+}
+
+pub enum UnifiedTagEnum {
+    Service,
+    Version,
+    Env,
+}
+
+impl UnifiedTagEnum {
+    fn get_env_variable_name(&self) -> &'static str {
+        match self {
+            UnifiedTagEnum::Service => "DD_SERVICE",
+            UnifiedTagEnum::Version => "DD_VERSION",
+            UnifiedTagEnum::Env => "DD_ENV",
+        }
+    }
+    fn get_tag_name(&self) -> &'static str {
+        match self {
+            UnifiedTagEnum::Service => "service",
+            UnifiedTagEnum::Version => "version",
+            UnifiedTagEnum::Env => "env",
+        }
+    }
+    fn find_unified_tag_value(&self) -> Option<String> {
+        let env_name_to_check = self.get_env_variable_name();
+        match std::env::var(env_name_to_check) {
+            Ok(tag_value) => Some(tag_value.to_lowercase()),
+            _ => None,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_service() {
+        std::env::set_var("DD_SERVICE", "test-SERVICE");
+        let mut unified_tags = UnifiedTags::new();
+        assert_eq!("test-service", unified_tags.service.value.clone().unwrap());
+        unified_tags.set_service(Some(String::from("new_service")));
+        assert_eq!("new_service", unified_tags.service().unwrap());
+        std::env::remove_var("DD_SERVICE");
+    }
+
+    #[test]
+    fn test_env() {
+        std::env::set_var("DD_ENV", "test-env");
+        let mut unified_tags = UnifiedTags::new();
+        assert_eq!("test-env", unified_tags.env.value.clone().unwrap());
+        unified_tags.set_env(Some(String::from("new_env")));
+        assert_eq!("new_env", unified_tags.env.value.unwrap());
+        std::env::remove_var("DD_ENV");
+    }
+
+    #[test]
+    fn test_version() {
+        std::env::set_var("DD_VERSION", "test-version-1.2.3");
+        let mut unified_tags = UnifiedTags::new();
+        assert_eq!(
+            "test-version-1.2.3",
+            unified_tags.version.value.clone().unwrap()
+        );
+        unified_tags.set_version(Some(String::from("new_version")));
+        assert_eq!("new_version", unified_tags.version.value.unwrap());
+        std::env::remove_var("DD_VERSION");
+    }
+}
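The environment-variable fallback used by `find_unified_tag_value` above can be shown in isolation: explicit builder values win, otherwise `DD_SERVICE` / `DD_ENV` / `DD_VERSION` are read and lowercased at construction time. A minimal sketch:

```rust
// Read a unified-service-tagging value from the environment, lowercased.
fn tag_from_env(var: &str) -> Option<String> {
    std::env::var(var).ok().map(|v| v.to_lowercase())
}

fn main() {
    std::env::set_var("DD_ENV", "Staging");
    // No explicit value was set, so the environment wins (and is lowercased).
    let explicit: Option<String> = None;
    let env_tag = explicit.or_else(|| tag_from_env("DD_ENV"));
    assert_eq!(env_tag.as_deref(), Some("staging"));
    std::env::remove_var("DD_ENV");
}
```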
diff --git a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/v03.rs b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/v03.rs
new file mode 100644
index 0000000000..8f7242ea36
--- /dev/null
+++ b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/v03.rs
@@ -0,0 +1,129 @@
+use std::time::SystemTime;
+
+use opentelemetry::trace::Status;
+use opentelemetry_sdk::export::trace::SpanData;
+
+use crate::plugins::telemetry::tracing::datadog_exporter::exporter::model::SAMPLING_PRIORITY_KEY;
+use crate::plugins::telemetry::tracing::datadog_exporter::Error;
+use crate::plugins::telemetry::tracing::datadog_exporter::ModelConfig;
+
+pub(crate) fn encode<S, N, R>(
+    model_config: &ModelConfig,
+    traces: Vec<&[SpanData]>,
+    get_service_name: S,
+    get_name: N,
+    get_resource: R,
+) -> Result<Vec<u8>, Error>
+where
+    for<'a> S: Fn(&'a SpanData, &'a ModelConfig) -> &'a str,
+    for<'a> N: Fn(&'a SpanData, &'a ModelConfig) -> &'a str,
+    for<'a> R: Fn(&'a SpanData, &'a ModelConfig) -> &'a str,
+{
+    let mut encoded = Vec::new();
+    rmp::encode::write_array_len(&mut encoded, traces.len() as u32)?;
+
+    for trace in traces.into_iter() {
+        rmp::encode::write_array_len(&mut encoded, trace.len() as u32)?;
+
+        for span in trace {
+            // Safe until the year 2262 when Datadog will need to change their API
+            let start = span
+                .start_time
+                .duration_since(SystemTime::UNIX_EPOCH)
+                .unwrap()
+                .as_nanos() as i64;
+
+            let duration = span
+                .end_time
+                .duration_since(span.start_time)
+                .map(|x| x.as_nanos() as i64)
+                .unwrap_or(0);
+
+            let mut span_type_found = false;
+            for (key, value) in &span.attributes {
+                if key.as_str() == "span.type" {
+                    span_type_found = true;
+                    rmp::encode::write_map_len(&mut encoded, 12)?;
+                    rmp::encode::write_str(&mut encoded, "type")?;
+                    rmp::encode::write_str(&mut encoded, value.as_str().as_ref())?;
+                    break;
+                }
+            }
+
+            if !span_type_found {
+                rmp::encode::write_map_len(&mut encoded, 11)?;
+            }
+
+            // Datadog span name is OpenTelemetry component name - see module docs for more information
+            rmp::encode::write_str(&mut encoded, "service")?;
+            rmp::encode::write_str(&mut encoded, get_service_name(span, model_config))?;
+
+            rmp::encode::write_str(&mut encoded, "name")?;
+            rmp::encode::write_str(&mut encoded, get_name(span, model_config))?;
+
+            rmp::encode::write_str(&mut encoded, "resource")?;
+            rmp::encode::write_str(&mut encoded, get_resource(span, model_config))?;
+
+            rmp::encode::write_str(&mut encoded, "trace_id")?;
+            rmp::encode::write_u64(
+                &mut encoded,
+                u128::from_be_bytes(span.span_context.trace_id().to_bytes()) as u64,
+            )?;
+
+            rmp::encode::write_str(&mut encoded, "span_id")?;
+            rmp::encode::write_u64(
+                &mut encoded,
+                u64::from_be_bytes(span.span_context.span_id().to_bytes()),
+            )?;
+
+            rmp::encode::write_str(&mut encoded, "parent_id")?;
+            rmp::encode::write_u64(
+                &mut encoded,
+                u64::from_be_bytes(span.parent_span_id.to_bytes()),
+            )?;
+
+            rmp::encode::write_str(&mut encoded, "start")?;
+            rmp::encode::write_i64(&mut encoded, start)?;
+
+            rmp::encode::write_str(&mut encoded, "duration")?;
+            rmp::encode::write_i64(&mut encoded, duration)?;
+
+            rmp::encode::write_str(&mut encoded, "error")?;
+            rmp::encode::write_i32(
+                &mut encoded,
+                match span.status {
+                    Status::Error { .. } => 1,
+                    _ => 0,
+                },
+            )?;
+
+            rmp::encode::write_str(&mut encoded, "meta")?;
+            rmp::encode::write_map_len(
+                &mut encoded,
+                (span.attributes.len() + span.resource.len()) as u32,
+            )?;
+            for (key, value) in span.resource.iter() {
+                rmp::encode::write_str(&mut encoded, key.as_str())?;
+                rmp::encode::write_str(&mut encoded, value.as_str().as_ref())?;
+            }
+            for (key, value) in span.attributes.iter() {
+                rmp::encode::write_str(&mut encoded, key.as_str())?;
+                rmp::encode::write_str(&mut encoded, value.as_str().as_ref())?;
+            }
+
+            rmp::encode::write_str(&mut encoded, "metrics")?;
+            rmp::encode::write_map_len(&mut encoded, 1)?;
+            rmp::encode::write_str(&mut encoded, SAMPLING_PRIORITY_KEY)?;
+            rmp::encode::write_f64(
+                &mut encoded,
+                if span.span_context.is_sampled() {
+                    1.0
+                } else {
+                    0.0
+                },
+            )?;
+        }
+    }
+
+    Ok(encoded)
+}
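The v0.3 wire shape (an array of traces, each an array of spans, each span a msgpack map of named fields) can be demonstrated with a few `rmp` calls, the same crate the encoder above uses. This is a toy payload with only two fields, just to show the nesting and marker bytes:

```rust
// Encode: [ [ { "service": "router", "trace_id": 7 } ] ]
fn main() -> Result<(), rmp::encode::ValueWriteError> {
    let mut buf = Vec::new();
    rmp::encode::write_array_len(&mut buf, 1)?; // one trace
    rmp::encode::write_array_len(&mut buf, 1)?; // one span in that trace
    rmp::encode::write_map_len(&mut buf, 2)?; // two fields, for brevity
    rmp::encode::write_str(&mut buf, "service")?;
    rmp::encode::write_str(&mut buf, "router")?;
    rmp::encode::write_str(&mut buf, "trace_id")?;
    rmp::encode::write_u64(&mut buf, 7)?;
    // 0x91 = fixarray(1) twice, then 0x82 = fixmap(2).
    assert_eq!(&buf[..3], &[0x91, 0x91, 0x82]);
    Ok(())
}
```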
diff --git a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/v05.rs b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/v05.rs
new file mode 100644
index 0000000000..8cd3f8e66f
--- /dev/null
+++ b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/v05.rs
@@ -0,0 +1,259 @@
+use std::time::SystemTime;
+
+use opentelemetry::trace::Status;
+use opentelemetry_sdk::export::trace::SpanData;
+
+use super::unified_tags::UnifiedTagField;
+use super::unified_tags::UnifiedTags;
+use crate::plugins::telemetry::tracing::datadog_exporter::exporter::intern::StringInterner;
+use crate::plugins::telemetry::tracing::datadog_exporter::exporter::model::DD_MEASURED_KEY;
+use crate::plugins::telemetry::tracing::datadog_exporter::exporter::model::SAMPLING_PRIORITY_KEY;
+use crate::plugins::telemetry::tracing::datadog_exporter::DatadogTraceState;
+use crate::plugins::telemetry::tracing::datadog_exporter::Error;
+use crate::plugins::telemetry::tracing::datadog_exporter::ModelConfig;
+
+const SPAN_NUM_ELEMENTS: u32 = 12;
+const METRICS_LEN: u32 = 2;
+const GIT_META_TAGS_COUNT: u32 = if matches!(
+    (
+        option_env!("DD_GIT_REPOSITORY_URL"),
+        option_env!("DD_GIT_COMMIT_SHA")
+    ),
+    (Some(_), Some(_))
+) {
+    2
+} else {
+    0
+};
+
+// Protocol documentation sourced from https://github.com/DataDog/datadog-agent/blob/c076ea9a1ffbde4c76d35343dbc32aecbbf99cb9/pkg/trace/api/version.go
+//
+// The payload is an array containing exactly 2 elements:
+//
+// 1. An array of all unique strings present in the payload (a dictionary referred to by index).
+// 2. An array of traces, where each trace is an array of spans. A span is encoded as an array having
+//    exactly 12 elements, representing all span properties, in this exact order:
+//
+//     0: Service   (uint32)
+//     1: Name      (uint32)
+//     2: Resource  (uint32)
+//     3: TraceID   (uint64)
+//     4: SpanID    (uint64)
+//     5: ParentID  (uint64)
+//     6: Start     (int64)
+//     7: Duration  (int64)
+//     8: Error     (int32)
+//     9: Meta      (map[uint32]uint32)
+//    10: Metrics   (map[uint32]float64)
+//    11: Type      (uint32)
+//
+// Considerations:
+//
+// - The "uint32" typed values in "Service", "Name", "Resource", "Type", "Meta" and "Metrics" represent
+//   the index at which the corresponding string is found in the dictionary. If any of the values are the
+//   empty string, then the empty string must be added into the dictionary.
+//
+// - None of the elements can be nil. If any of them are unset, they should be given their "zero-value".
+//   Here is an example of a span with all unset values:
+//
+//     0: 0                       // Service is "" (index 0 in dictionary)
+//     1: 0                       // Name is ""
+//     2: 0                       // Resource is ""
+//     3: 0                       // TraceID
+//     4: 0                       // SpanID
+//     5: 0                       // ParentID
+//     6: 0                       // Start
+//     7: 0                       // Duration
+//     8: 0                       // Error
+//     9: map[uint32]uint32{}     // Meta (empty map)
+//    10: map[uint32]float64{}    // Metrics (empty map)
+//    11: 0                       // Type is ""
+//
+// The dictionary in this case would be []string{""}, having only the empty string at index 0.
+//
+pub(crate) fn encode<S, N, R>(
+    model_config: &ModelConfig,
+    traces: Vec<&[SpanData]>,
+    get_service_name: S,
+    get_name: N,
+    get_resource: R,
+    unified_tags: &UnifiedTags,
+) -> Result<Vec<u8>, Error>
+where
+    for<'a> S: Fn(&'a SpanData, &'a ModelConfig) -> &'a str,
+    for<'a> N: Fn(&'a SpanData, &'a ModelConfig) -> &'a str,
+    for<'a> R: Fn(&'a SpanData, &'a ModelConfig) -> &'a str,
+{
+    let mut interner = StringInterner::new();
+    let mut encoded_traces = encode_traces(
+        &mut interner,
+        model_config,
+        get_service_name,
+        get_name,
+        get_resource,
+        &traces,
+        unified_tags,
+    )?;
+
+    let mut payload = Vec::with_capacity(traces.len() * 512);
+    rmp::encode::write_array_len(&mut payload, 2)?;
+
+    interner.write_dictionary(&mut payload)?;
+
+    payload.append(&mut encoded_traces);
+
+    Ok(payload)
+}
+
+fn write_unified_tags<'a>(
+    encoded: &mut Vec<u8>,
+    interner: &mut StringInterner<'a>,
+    unified_tags: &'a UnifiedTags,
+) -> Result<(), Error> {
+    write_unified_tag(encoded, interner, &unified_tags.service)?;
+    write_unified_tag(encoded, interner, &unified_tags.env)?;
+    write_unified_tag(encoded, interner, &unified_tags.version)?;
+    Ok(())
+}
+
+fn write_unified_tag<'a>(
+    encoded: &mut Vec<u8>,
+    interner: &mut StringInterner<'a>,
+    tag: &'a UnifiedTagField,
+) -> Result<(), Error> {
+    if let Some(tag_value) = &tag.value {
+        rmp::encode::write_u32(encoded, interner.intern(tag.get_tag_name()))?;
+        rmp::encode::write_u32(encoded, interner.intern(tag_value.as_str().as_ref()))?;
+    }
+    Ok(())
+}
+
+fn get_sampling_priority(_span: &SpanData) -> f64 {
+    1.0
+}
+
+fn get_measuring(span: &SpanData) -> f64 {
+    if span.span_context.trace_state().measuring_enabled() {
+        1.0
+    } else {
+        0.0
+    }
+}
+fn encode_traces<'interner, S, N, R>(
+    interner: &mut StringInterner<'interner>,
+    model_config: &'interner ModelConfig,
+    get_service_name: S,
+    get_name: N,
+    get_resource: R,
+    traces: &'interner [&[SpanData]],
+    unified_tags: &'interner UnifiedTags,
+) -> Result<Vec<u8>, Error>
+where
+    for<'a> S: Fn(&'a SpanData, &'a ModelConfig) -> &'a str,
+    for<'a> N: Fn(&'a SpanData, &'a ModelConfig) -> &'a str,
+    for<'a> R: Fn(&'a SpanData, &'a ModelConfig) -> &'a str,
+{
+    let mut encoded = Vec::new();
+    rmp::encode::write_array_len(&mut encoded, traces.len() as u32)?;
+
+    for trace in traces.iter() {
+        rmp::encode::write_array_len(&mut encoded, trace.len() as u32)?;
+
+        for span in trace.iter() {
+            // Safe until the year 2262 when Datadog will need to change their API
+            let start = span
+                .start_time
+                .duration_since(SystemTime::UNIX_EPOCH)
+                .unwrap()
+                .as_nanos() as i64;
+
+            let duration = span
+                .end_time
+                .duration_since(span.start_time)
+                .map(|x| x.as_nanos() as i64)
+                .unwrap_or(0);
+
+            let mut span_type = interner.intern("");
+            for (key, value) in &span.attributes {
+                if key.as_str() == "span.type" {
+                    span_type = interner.intern_value(value);
+                    break;
+                }
+            }
+
+            // Datadog span name is OpenTelemetry component name - see module docs for more information
+            rmp::encode::write_array_len(&mut encoded, SPAN_NUM_ELEMENTS)?;
+            rmp::encode::write_u32(
+                &mut encoded,
+                interner.intern(get_service_name(span, model_config)),
+            )?;
+            rmp::encode::write_u32(&mut encoded, interner.intern(get_name(span, model_config)))?;
+            rmp::encode::write_u32(
+                &mut encoded,
+                interner.intern(get_resource(span, model_config)),
+            )?;
+            rmp::encode::write_u64(
+                &mut encoded,
+                u128::from_be_bytes(span.span_context.trace_id().to_bytes()) as u64,
+            )?;
+            rmp::encode::write_u64(
+                &mut encoded,
+                u64::from_be_bytes(span.span_context.span_id().to_bytes()),
+            )?;
+            rmp::encode::write_u64(
+                &mut encoded,
+                u64::from_be_bytes(span.parent_span_id.to_bytes()),
+            )?;
+            rmp::encode::write_i64(&mut encoded, start)?;
+            rmp::encode::write_i64(&mut encoded, duration)?;
+            rmp::encode::write_i32(
+                &mut encoded,
+                match span.status {
+                    Status::Error { .. } => 1,
+                    _ => 0,
+                },
+            )?;
+
+            rmp::encode::write_map_len(
+                &mut encoded,
+                (span.attributes.len() + span.resource.len()) as u32
+                    + unified_tags.compute_attribute_size()
+                    + GIT_META_TAGS_COUNT,
+            )?;
+            for (key, value) in span.resource.iter() {
+                rmp::encode::write_u32(&mut encoded, interner.intern(key.as_str()))?;
+                rmp::encode::write_u32(&mut encoded, interner.intern_value(value))?;
+            }
+
+            write_unified_tags(&mut encoded, interner, unified_tags)?;
+
+            for (key, value) in span.attributes.iter() {
+                rmp::encode::write_u32(&mut encoded, interner.intern(key.as_str()))?;
+                rmp::encode::write_u32(&mut encoded, interner.intern_value(value))?;
+            }
+
+            if let (Some(repository_url), Some(commit_sha)) = (
+                option_env!("DD_GIT_REPOSITORY_URL"),
+                option_env!("DD_GIT_COMMIT_SHA"),
+            ) {
+                rmp::encode::write_u32(&mut encoded, interner.intern("git.repository_url"))?;
+                rmp::encode::write_u32(&mut encoded, interner.intern(repository_url))?;
+                rmp::encode::write_u32(&mut encoded, interner.intern("git.commit.sha"))?;
+                rmp::encode::write_u32(&mut encoded, interner.intern(commit_sha))?;
+            }
+
+            rmp::encode::write_map_len(&mut encoded, METRICS_LEN)?;
+            rmp::encode::write_u32(&mut encoded, interner.intern(SAMPLING_PRIORITY_KEY))?;
+            let sampling_priority = get_sampling_priority(span);
+            rmp::encode::write_f64(&mut encoded, sampling_priority)?;
+
+            rmp::encode::write_u32(&mut encoded, interner.intern(DD_MEASURED_KEY))?;
+            let measuring = get_measuring(span);
+            rmp::encode::write_f64(&mut encoded, measuring)?;
+            rmp::encode::write_u32(&mut encoded, span_type)?;
+        }
+    }
+
+    Ok(encoded)
+}
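The key idea behind the v0.5 format is the string dictionary: every string in the payload is interned once, and spans refer to strings by `u32` index. A minimal sketch of that interning behaviour, with `Interner` as a simplified stand-in for the exporter's `StringInterner`:

```rust
use std::collections::HashMap;

// Assign each distinct string a stable index; repeats reuse the index.
#[derive(Default)]
struct Interner {
    by_value: HashMap<String, u32>,
    dictionary: Vec<String>,
}

impl Interner {
    fn intern(&mut self, s: &str) -> u32 {
        if let Some(&idx) = self.by_value.get(s) {
            return idx;
        }
        let idx = self.dictionary.len() as u32;
        self.by_value.insert(s.to_string(), idx);
        self.dictionary.push(s.to_string());
        idx
    }
}

fn main() {
    let mut interner = Interner::default();
    let service = interner.intern("router");
    let name = interner.intern("request");
    // Re-interning an existing string returns the same index.
    assert_eq!(service, interner.intern("router"));
    assert_eq!((service, name), (0, 1));
    // The dictionary is written once, ahead of the traces, in the payload.
    assert_eq!(interner.dictionary, vec!["router", "request"]);
}
```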
diff --git a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/mod.rs b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/mod.rs
new file mode 100644
index 0000000000..d632eb5872
--- /dev/null
+++ b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/mod.rs
@@ -0,0 +1,505 @@
+//! # OpenTelemetry Datadog Exporter
+//!
+//! An OpenTelemetry datadog exporter implementation.
+//!
+//! See the [Datadog Docs](https://docs.datadoghq.com/agent/) for information on how to run the datadog-agent.
+//!
+//! ## Quirks
+//!
+//! There are currently some incompatibilities between Datadog and OpenTelemetry, and they manifest
+//! as minor quirks in this exporter.
+//!
+//! Firstly, Datadog uses `operation_name` to describe what OpenTracing would call a component.
+//! To put it another way: in OpenTracing, operation / span names are relatively
+//! granular and might be used to identify a specific endpoint. In Datadog, however, they
+//! are less granular - it is expected in Datadog that a service will have a single
+//! primary span name that is the root of all traces within that service, with an additional piece of
+//! metadata called `resource_name` providing granularity. See [here](https://docs.datadoghq.com/tracing/guide/configuring-primary-operation/).
+//!
+//! The Datadog Golang API takes the approach of using a `resource.name` OpenTelemetry attribute to set the
+//! `resource_name`. See [here](https://github.com/DataDog/dd-trace-go/blob/ecb0b805ef25b00888a2fb62d465a5aa95e7301e/ddtrace/opentracer/tracer.go#L10).
+//!
+//! Unfortunately, this breaks compatibility with other OpenTelemetry exporters, which expect
+//! a more granular operation name - as per the OpenTracing specification.
+//!
+//! This exporter therefore takes a different approach of naming the span with the name of the
+//! tracing provider, and using the span name to set the `resource_name`. This should in most cases
+//! lead to the behaviour that users expect.
+//!
+//! Datadog additionally has a `span_type` string that alters the rendering of the spans in the web UI.
+//! This can be set as the `span.type` OpenTelemetry span attribute.
+//!
+//! For standard values see [here](https://github.com/DataDog/dd-trace-go/blob/ecb0b805ef25b00888a2fb62d465a5aa95e7301e/ddtrace/ext/app_types.go#L31).
+//!
+//! If the default mapping does not fit your use case, you may change some of the fields by providing [`FieldMappingFn`]s in the pipeline.
+//!
+//! ## Performance
+//!
+//! For optimal performance, a batch exporter is recommended as the simple exporter will export
+//! each span synchronously on drop. You can enable the [`rt-tokio`], [`rt-tokio-current-thread`]
+//! or [`rt-async-std`] features and specify a runtime on the pipeline to have a batch exporter
+//! configured for you automatically.
+//!
+//! ```toml
+//! [dependencies]
+//! opentelemetry = { version = "*", features = ["rt-tokio"] }
+//! opentelemetry-datadog = "*"
+//! ```
+//!
+//! ```no_run
+//! # fn main() -> Result<(), opentelemetry::trace::TraceError> {
+//! let tracer = opentelemetry_datadog::new_pipeline()
+//!     .install_batch(opentelemetry_sdk::runtime::Tokio)?;
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! [`rt-tokio`]: https://tokio.rs
+//! [`rt-tokio-current-thread`]: https://tokio.rs
+//! [`rt-async-std`]: https://async.rs
+//!
+//! ## Bring your own http client
+//!
+//! Users can choose an appropriate http client to align with their runtime.
+//!
+//! The default http client depends on the enabled features. If no client feature is specified, or the
+//! `reqwest-blocking-client` feature is enabled, the blocking reqwest http client is used as the
+//! default. If the `reqwest-client` feature is enabled, the async reqwest http client is used. If the
+//! `surf-client` feature is enabled, the surf http client is used.
+//!
+//! Note that async http clients may need a specific runtime, otherwise they will panic. Users should
+//! make sure their http client is running in the appropriate runtime.
+//!
+//! Users can always use their own http clients by implementing the `HttpClient` trait.
+//!
+//! ## Kitchen Sink Full Configuration
+//!
+//! Example showing how to override all configuration options. See the
+//! [`DatadogPipelineBuilder`] docs for details of each option.
+//!
+//! [`DatadogPipelineBuilder`]: struct.DatadogPipelineBuilder.html
+//!
+//! ```no_run
+//! use opentelemetry::{KeyValue, trace::Tracer};
+//! use opentelemetry_sdk::{trace::{self, RandomIdGenerator, Sampler}, Resource};
+//! use opentelemetry_sdk::export::trace::ExportResult;
+//! use opentelemetry::global::shutdown_tracer_provider;
+//! use opentelemetry_datadog::{new_pipeline, ApiVersion, Error};
+//! use opentelemetry_http::{HttpClient, HttpError};
+//! use async_trait::async_trait;
+//! use bytes::Bytes;
+//! use futures_util::io::AsyncReadExt as _;
+//! use http::{Request, Response};
+//! use std::convert::TryInto as _;
+//!
+//! // `reqwest` and `surf` are supported through features, if you prefer an
+//! // alternate http client you can add support by implementing `HttpClient` as
+//! // shown here.
+//! #[derive(Debug)]
+//! struct IsahcClient(isahc::HttpClient);
+//!
+//! #[async_trait]
+//! impl HttpClient for IsahcClient {
+//!     async fn send(&self, request: Request<Vec<u8>>) -> Result<Response<Bytes>, HttpError> {
+//!         let mut response = self.0.send_async(request).await?;
+//!         let status = response.status();
+//!         let mut bytes = Vec::with_capacity(response.body().len().unwrap_or(0).try_into()?);
+//!         isahc::AsyncReadResponseExt::copy_to(&mut response, &mut bytes).await?;
+//!
+//!         Ok(Response::builder()
+//!             .status(response.status())
+//!             .body(bytes.into())?)
+//!     }
+//! }
+//!
+//! fn main() -> Result<(), opentelemetry::trace::TraceError> {
+//!     let tracer = new_pipeline()
+//!         .with_service_name("my_app")
+//!         .with_api_version(ApiVersion::Version05)
+//!         .with_agent_endpoint("http://localhost:8126")
+//!         .with_trace_config(
+//!             trace::config()
+//!                 .with_sampler(Sampler::AlwaysOn)
+//!                 .with_id_generator(RandomIdGenerator::default())
+//!         )
+//!         .install_batch(opentelemetry_sdk::runtime::Tokio)?;
+//!
+//!     tracer.in_span("doing_work", |cx| {
+//!         // Traced app logic here...
+//!     });
+//!
+//!     shutdown_tracer_provider(); // sending remaining spans before exit
+//!
+//!     Ok(())
+//! }
+//! ```
+
+mod exporter;
+
+#[allow(unused_imports)]
+pub use exporter::new_pipeline;
+#[allow(unused_imports)]
+pub use exporter::ApiVersion;
+#[allow(unused_imports)]
+pub use exporter::DatadogExporter;
+#[allow(unused_imports)]
+pub use exporter::DatadogPipelineBuilder;
+#[allow(unused_imports)]
+pub use exporter::Error;
+#[allow(unused_imports)]
+pub use exporter::FieldMappingFn;
+#[allow(unused_imports)]
+pub use exporter::ModelConfig;
+#[allow(unused_imports)]
+pub use propagator::DatadogPropagator;
+#[allow(unused_imports)]
+pub use propagator::DatadogTraceState;
+#[allow(unused_imports)]
+pub use propagator::DatadogTraceStateBuilder;
+
+pub(crate) mod propagator {
+    use once_cell::sync::Lazy;
+    use opentelemetry::propagation::text_map_propagator::FieldIter;
+    use opentelemetry::propagation::Extractor;
+    use opentelemetry::propagation::Injector;
+    use opentelemetry::propagation::TextMapPropagator;
+    use opentelemetry::trace::SpanContext;
+    use opentelemetry::trace::SpanId;
+    use opentelemetry::trace::TraceContextExt;
+    use opentelemetry::trace::TraceFlags;
+    use opentelemetry::trace::TraceId;
+    use opentelemetry::trace::TraceState;
+    use opentelemetry::Context;
+
+    const DATADOG_TRACE_ID_HEADER: &str = "x-datadog-trace-id";
+    const DATADOG_PARENT_ID_HEADER: &str = "x-datadog-parent-id";
+    const DATADOG_SAMPLING_PRIORITY_HEADER: &str = "x-datadog-sampling-priority";
+
+    const TRACE_FLAG_DEFERRED: TraceFlags = TraceFlags::new(0x02);
+    pub(crate) const TRACE_STATE_PRIORITY_SAMPLING: &str = "psr";
+    pub(crate) const TRACE_STATE_MEASURE: &str = "m";
+    pub(crate) const TRACE_STATE_TRUE_VALUE: &str = "1";
+    pub(crate) const TRACE_STATE_FALSE_VALUE: &str = "0";
+
+    static DATADOG_HEADER_FIELDS: Lazy<[String; 3]> = Lazy::new(|| {
+        [
+            DATADOG_TRACE_ID_HEADER.to_string(),
+            DATADOG_PARENT_ID_HEADER.to_string(),
+            DATADOG_SAMPLING_PRIORITY_HEADER.to_string(),
+        ]
+    });
+
+    #[derive(Default)]
+    pub struct DatadogTraceStateBuilder {
+        priority_sampling: bool,
+        measuring: bool,
+    }
+
+    fn boolean_to_trace_state_flag(value: bool) -> &'static str {
+        if value {
+            TRACE_STATE_TRUE_VALUE
+        } else {
+            TRACE_STATE_FALSE_VALUE
+        }
+    }
+
+    fn trace_flag_to_boolean(value: &str) -> bool {
+        value == TRACE_STATE_TRUE_VALUE
+    }
+
+    #[allow(clippy::needless_update)]
+    impl DatadogTraceStateBuilder {
+        pub fn with_priority_sampling(self, enabled: bool) -> Self {
+            Self {
+                priority_sampling: enabled,
+                ..self
+            }
+        }
+
+        pub fn with_measuring(self, enabled: bool) -> Self {
+            Self {
+                measuring: enabled,
+                ..self
+            }
+        }
+
+        pub fn build(self) -> TraceState {
+            let values = [
+                (
+                    TRACE_STATE_MEASURE,
+                    boolean_to_trace_state_flag(self.measuring),
+                ),
+                (
+                    TRACE_STATE_PRIORITY_SAMPLING,
+                    boolean_to_trace_state_flag(self.priority_sampling),
+                ),
+            ];
+
+            TraceState::from_key_value(values).unwrap_or_default()
+        }
+    }
+
+    pub trait DatadogTraceState {
+        fn with_measuring(&self, enabled: bool) -> TraceState;
+
+        fn measuring_enabled(&self) -> bool;
+    }
+
+    impl DatadogTraceState for TraceState {
+        fn with_measuring(&self, enabled: bool) -> TraceState {
+            self.insert(TRACE_STATE_MEASURE, boolean_to_trace_state_flag(enabled))
+                .unwrap_or_else(|_err| self.clone())
+        }
+
+        fn measuring_enabled(&self) -> bool {
+            self.get(TRACE_STATE_MEASURE)
+                .map(trace_flag_to_boolean)
+                .unwrap_or_default()
+        }
+    }
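The two trace-state keys above ("m" for measured, "psr" for the priority-sampling decision) are plain "0"/"1" string entries. A standalone sketch of what the builder produces, using a bare string instead of the SDK's `TraceState` to keep it self-contained:

```rust
// Render the Datadog trace-state flags as key=value entries.
fn build_trace_state(priority_sampling: bool, measuring: bool) -> String {
    let flag = |b: bool| if b { "1" } else { "0" };
    format!("m={},psr={}", flag(measuring), flag(priority_sampling))
}

fn main() {
    // Roughly mirrors DatadogTraceStateBuilder::default()
    //     .with_measuring(true)
    //     .build()
    assert_eq!(build_trace_state(false, true), "m=1,psr=0");
}
```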
+    enum SamplingPriority {
+        UserReject = -1,
+        AutoReject = 0,
+        AutoKeep = 1,
+        UserKeep = 2,
+    }
+
+    #[derive(Debug)]
+    enum ExtractError {
+        TraceId,
+        SpanId,
+        SamplingPriority,
+    }
+
+    /// Extracts and injects `SpanContext`s into `Extractor`s or `Injector`s using Datadog's header format.
+    ///
+    /// The Datadog header format does not have an explicit spec, but can be divined from the client libraries,
+    /// such as [dd-trace-go]
+    ///
+    /// ## Example
+    ///
+    /// ```
+    /// use opentelemetry::global;
+    /// use opentelemetry_datadog::DatadogPropagator;
+    ///
+    /// global::set_text_map_propagator(DatadogPropagator::default());
+    /// ```
+    ///
+    /// [dd-trace-go]: https://github.com/DataDog/dd-trace-go/blob/v1.28.0/ddtrace/tracer/textmap.go#L293
+    #[derive(Clone, Debug, Default)]
+    pub struct DatadogPropagator {
+        _private: (),
+    }
+
+    fn create_trace_state_and_flags(trace_flags: TraceFlags) -> (TraceState, TraceFlags) {
+        (TraceState::default(), trace_flags)
+    }
+
+    impl DatadogPropagator {
+        /// Creates a new `DatadogPropagator`.
+        pub fn new() -> Self {
+            DatadogPropagator::default()
+        }
+
+        fn extract_trace_id(&self, trace_id: &str) -> Result<TraceId, ExtractError> {
+            trace_id
+                .parse::<u64>()
+                .map(|id| TraceId::from(id as u128))
+                .map_err(|_| ExtractError::TraceId)
+        }
+
+        fn extract_span_id(&self, span_id: &str) -> Result<SpanId, ExtractError> {
+            span_id
+                .parse::<u64>()
+                .map(SpanId::from)
+                .map_err(|_| ExtractError::SpanId)
+        }
+
+        fn extract_sampling_priority(
+            &self,
+            sampling_priority: &str,
+        ) -> Result<SamplingPriority, ExtractError> {
+            let i = sampling_priority
+                .parse::<i32>()
+                .map_err(|_| ExtractError::SamplingPriority)?;
+
+            match i {
+                -1 => Ok(SamplingPriority::UserReject),
+                0 => Ok(SamplingPriority::AutoReject),
+                1 => Ok(SamplingPriority::AutoKeep),
+                2 => Ok(SamplingPriority::UserKeep),
+                _ => Err(ExtractError::SamplingPriority),
+            }
+        }
+
+        fn extract_span_context(
+            &self,
+            extractor: &dyn Extractor,
+        ) -> Result<SpanContext, ExtractError> {
+            let trace_id =
+                self.extract_trace_id(extractor.get(DATADOG_TRACE_ID_HEADER).unwrap_or(""))?;
+            // If we have a trace_id but can't get the parent span, we default it to invalid instead of completely erroring
+            // out so that the rest of the spans aren't completely lost
+            let span_id = self
+                .extract_span_id(extractor.get(DATADOG_PARENT_ID_HEADER).unwrap_or(""))
+                .unwrap_or(SpanId::INVALID);
+            let sampling_priority = self.extract_sampling_priority(
+                extractor
+                    .get(DATADOG_SAMPLING_PRIORITY_HEADER)
+                    .unwrap_or(""),
+            );
+            let sampled = match sampling_priority {
+                Ok(SamplingPriority::UserReject) | Ok(SamplingPriority::AutoReject) => {
+                    TraceFlags::default()
+                }
+                Ok(SamplingPriority::UserKeep) | Ok(SamplingPriority::AutoKeep) => {
+                    TraceFlags::SAMPLED
+                }
+                // Treat the sampling as DEFERRED instead of erroring on extracting the span context
+                Err(_) => TRACE_FLAG_DEFERRED,
+            };
+
+            let (trace_state, trace_flags) = create_trace_state_and_flags(sampled);
+
+            Ok(SpanContext::new(
+                trace_id,
+                span_id,
+                trace_flags,
+                true,
+                trace_state,
+            ))
+        }
+    }
+
+    fn get_sampling_priority(span_context: &SpanContext) -> SamplingPriority {
+        if span_context.is_sampled() {
+            SamplingPriority::AutoKeep
+        } else {
+            SamplingPriority::AutoReject
+        }
+    }
+
+    impl TextMapPropagator for DatadogPropagator {
+        fn inject_context(&self, cx: &Context, injector: &mut dyn Injector) {
+            let span = cx.span();
+            let span_context = span.span_context();
+            if span_context.is_valid() {
+                injector.set(
+                    DATADOG_TRACE_ID_HEADER,
+                    (u128::from_be_bytes(span_context.trace_id().to_bytes()) as u64).to_string(),
+                );
+                injector.set(
+                    DATADOG_PARENT_ID_HEADER,
+                    u64::from_be_bytes(span_context.span_id().to_bytes()).to_string(),
+                );
+
+                if span_context.trace_flags() & TRACE_FLAG_DEFERRED != TRACE_FLAG_DEFERRED {
+                    let sampling_priority = get_sampling_priority(span_context);
+
+                    injector.set(
+                        DATADOG_SAMPLING_PRIORITY_HEADER,
+                        (sampling_priority as i32).to_string(),
+                    );
+                }
+            }
+        }
+
+        fn extract_with_context(&self, cx: &Context, extractor: &dyn Extractor) -> Context {
+            self.extract_span_context(extractor)
+                .map(|sc| cx.with_remote_span_context(sc))
+                .unwrap_or_else(|_| cx.clone())
+        }
+
+        fn fields(&self) -> FieldIter<'_> {
+            FieldIter::new(DATADOG_HEADER_FIELDS.as_ref())
+        }
+    }
+
+    #[cfg(test)]
+    mod tests {
+        use std::collections::HashMap;
+
+        use opentelemetry::trace::TraceState;
+        use opentelemetry_sdk::testing::trace::TestSpan;
+
+        use super::*;
+
+        #[rustfmt::skip]
+        fn extract_test_data() -> Vec<(Vec<(&'static str, &'static str)>, SpanContext)> {
+            vec![
+                (vec![], SpanContext::empty_context()),
+                (vec![(DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::empty_context()),
+                (vec![(DATADOG_TRACE_ID_HEADER, "garbage")], SpanContext::empty_context()),
+                (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "garbage")], SpanContext::new(TraceId::from_u128(1234), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())),
+                (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TRACE_FLAG_DEFERRED, true, TraceState::default())),
+                (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::default(), true, TraceState::default())),
+                (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, TraceState::default())),
+            ]
+        }
+
+        #[rustfmt::skip]
+        fn inject_test_data() -> Vec<(Vec<(&'static str, &'static str)>, SpanContext)> {
+            vec![
+                (vec![], SpanContext::empty_context()),
+                (vec![], SpanContext::new(TraceId::INVALID, SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())),
+                (vec![], SpanContext::new(TraceId::from_hex("1234").unwrap(), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())),
+                (vec![], SpanContext::new(TraceId::from_hex("1234").unwrap(), SpanId::INVALID, TraceFlags::SAMPLED, true, TraceState::default())),
+                (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TRACE_FLAG_DEFERRED, true, TraceState::default())),
+                (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::default(), true, TraceState::default())),
+                (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, TraceState::default())),
+            ]
+        }
+
+        #[test]
+        fn test_extract() {
+            for (header_list, expected) in extract_test_data() {
+                let map: HashMap<String, String> = header_list
+                    .into_iter()
+                    .map(|(k, v)| (k.to_string(), v.to_string()))
+                    .collect();
+
+                let propagator = DatadogPropagator::default();
+                let context = propagator.extract(&map);
+                assert_eq!(context.span().span_context(), &expected);
+            }
+        }
+
+        #[test]
+        fn test_extract_empty() {
+            let map: HashMap<String, String> = HashMap::new();
+            let propagator = DatadogPropagator::default();
+            let context = propagator.extract(&map);
+            assert_eq!(context.span().span_context(), &SpanContext::empty_context())
+        }
+
+        #[test]
+        fn test_extract_with_empty_remote_context() {
+            let map: HashMap<String, String> = HashMap::new();
+            let propagator = DatadogPropagator::default();
+            let context = propagator.extract_with_context(&Context::new(), &map);
+            assert!(!context.has_active_span())
+        }
+
+        #[test]
+        fn test_inject() {
+            let propagator = DatadogPropagator::default();
+            for (header_values, span_context) in inject_test_data() {
+                let mut injector: HashMap<String, String> = HashMap::new();
+                propagator.inject_context(
+                    &Context::current_with_span(TestSpan(span_context)),
+                    &mut injector,
+                );
+
+                if !header_values.is_empty() {
+                    for (k, v) in header_values.into_iter() {
+                        let injected_value: Option<&String> = injector.get(k);
+                        assert_eq!(injected_value, Some(&v.to_string()));
injector.remove(k); + } + } + assert!(injector.is_empty()); + } + } + } +} diff --git a/apollo-router/src/plugins/telemetry/tracing/mod.rs b/apollo-router/src/plugins/telemetry/tracing/mod.rs index 99ea905472..0172f3e094 100644 --- a/apollo-router/src/plugins/telemetry/tracing/mod.rs +++ b/apollo-router/src/plugins/telemetry/tracing/mod.rs @@ -22,6 +22,8 @@ use crate::plugins::telemetry::config::TracingCommon; pub(crate) mod apollo; pub(crate) mod apollo_telemetry; pub(crate) mod datadog; +#[allow(unreachable_pub, dead_code)] +pub(crate) mod datadog_exporter; pub(crate) mod jaeger; pub(crate) mod otlp; pub(crate) mod reload; diff --git a/apollo-router/src/plugins/traffic_shaping/deduplication.rs b/apollo-router/src/plugins/traffic_shaping/deduplication.rs index 30c6293eb5..639d0d12b9 100644 --- a/apollo-router/src/plugins/traffic_shaping/deduplication.rs +++ b/apollo-router/src/plugins/traffic_shaping/deduplication.rs @@ -48,6 +48,7 @@ impl Clone for CloneSubgraphResponse { Self(SubgraphResponse { response: http_ext::Response::from(&self.0.response).inner, context: self.0.context.clone(), + subgraph_name: self.0.subgraph_name.clone(), }) } } @@ -103,6 +104,7 @@ where SubgraphResponse::new_from_response( response.0.response, request.context, + request.subgraph_name.unwrap_or_default(), ) }) .map_err(|e| e.into()) @@ -140,6 +142,9 @@ where }; // Let our waiters know + + // Clippy is wrong, the suggestion adds a useless clone of the error + #[allow(clippy::useless_asref)] let broadcast_value = res .as_ref() .map(|response| response.clone()) @@ -153,7 +158,11 @@ where .expect("can only fail if the task is aborted or if the internal code panics, neither is possible here; qed"); return res.map(|response| { - SubgraphResponse::new_from_response(response.0.response, context) + SubgraphResponse::new_from_response( + response.0.response, + context, + response.0.subgraph_name.unwrap_or_default(), + ) }); } } diff --git a/apollo-router/src/protocols/websocket.rs b/apollo-router/src/protocols/websocket.rs index c4ed74039b..bd556232ac 100644 --- a/apollo-router/src/protocols/websocket.rs +++ b/apollo-router/src/protocols/websocket.rs @@ -686,11 +686,6 @@ where } } -#[derive(Deserialize, Serialize)] -struct WithId { - id: String, -} - #[cfg(test)] mod tests { use std::convert::Infallible; diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index 85ee2d15b7..cdd1d710ef 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -330,79 +330,6 @@ impl BridgeQueryPlanner { let schema = Schema::parse(&schema, &configuration)?; let planner = PlannerMode::new(&schema, &configuration, old_planner).await?; - let api_schema_string = match configuration.experimental_api_schema_generation_mode { - crate::configuration::ApiSchemaMode::Legacy => { - let api_schema = planner - .js_for_api_schema_and_introspection_and_operation_signature() - .api_schema() - .await?; - api_schema.schema - } - crate::configuration::ApiSchemaMode::New => schema.create_api_schema(&configuration)?, - - crate::configuration::ApiSchemaMode::Both => { - let js_result = planner - .js_for_api_schema_and_introspection_and_operation_signature() - .api_schema() - .await - .map(|api_schema| api_schema.schema); - let rust_result = schema.create_api_schema(&configuration); - - let is_matched; - match (&js_result, &rust_result) { - (Err(js_error), Ok(_)) => { - tracing::warn!("JS API schema error: {}", 
js_error); - is_matched = false; - } - (Ok(_), Err(rs_error)) => { - tracing::warn!("Rust API schema error: {}", rs_error); - is_matched = false; - } - (Ok(left), Ok(right)) => { - // To compare results, we re-parse, standardize, and print with apollo-rs, - // so the formatting is identical. - let (left, right) = if let (Ok(parsed_left), Ok(parsed_right)) = ( - apollo_compiler::Schema::parse(left, "js.graphql"), - apollo_compiler::Schema::parse(right, "rust.graphql"), - ) { - ( - standardize_schema(parsed_left).to_string(), - standardize_schema(parsed_right).to_string(), - ) - } else { - (left.clone(), right.clone()) - }; - is_matched = left == right; - if !is_matched { - let differences = diff::lines(&left, &right); - tracing::debug!( - "different API schema between apollo-federation and router-bridge:\n{}", - render_diff(&differences), - ); - } - } - (Err(_), Err(_)) => { - is_matched = true; - } - } - - u64_counter!( - "apollo.router.lifecycle.api_schema", - "Comparing JS v.s. Rust API schema generation", - 1, - "generation.is_matched" = is_matched, - "generation.js_error" = js_result.is_err(), - "generation.rust_error" = rust_result.is_err() - ); - - js_result? - } - }; - - let api_schema = Schema::parse_compiler_schema(&api_schema_string)?; - - let schema = Arc::new(schema.with_api_schema(api_schema)); - let subgraph_schemas = Arc::new(planner.subgraphs().await?); let introspection = if configuration.supergraph.introspection { @@ -426,7 +353,7 @@ impl BridgeQueryPlanner { Ok(Self { planner, - schema, + schema: Arc::new(schema), subgraph_schemas, introspection, enable_authorization_directives, @@ -967,136 +894,6 @@ pub(super) struct QueryPlan { pub(super) node: Option>, } -fn standardize_schema(mut schema: apollo_compiler::Schema) -> apollo_compiler::Schema { - use apollo_compiler::schema::ExtendedType; - - fn standardize_value_for_comparison(value: &mut apollo_compiler::ast::Value) { - use apollo_compiler::ast::Value; - match value { - Value::Object(object) => { - for (_name, value) in object.iter_mut() { - standardize_value_for_comparison(value.make_mut()); - } - object.sort_by_key(|(name, _value)| name.clone()); - } - Value::List(list) => { - for value in list { - standardize_value_for_comparison(value.make_mut()); - } - } - _ => {} - } - } - - fn standardize_directive_for_comparison(directive: &mut apollo_compiler::ast::Directive) { - for arg in &mut directive.arguments { - standardize_value_for_comparison(arg.make_mut().value.make_mut()); - } - directive - .arguments - .sort_by_cached_key(|arg| arg.name.to_ascii_lowercase()); - } - - for ty in schema.types.values_mut() { - match ty { - ExtendedType::Object(object) => { - let object = object.make_mut(); - object.fields.sort_keys(); - for field in object.fields.values_mut() { - let field = field.make_mut(); - for arg in &mut field.arguments { - let arg = arg.make_mut(); - if let Some(value) = &mut arg.default_value { - standardize_value_for_comparison(value.make_mut()); - } - for directive in &mut arg.directives { - standardize_directive_for_comparison(directive.make_mut()); - } - } - field - .arguments - .sort_by_cached_key(|arg| arg.name.to_ascii_lowercase()); - for directive in &mut field.directives { - standardize_directive_for_comparison(directive.make_mut()); - } - } - for directive in &mut object.directives.0 { - standardize_directive_for_comparison(directive.make_mut()); - } - } - ExtendedType::Interface(interface) => { - let interface = interface.make_mut(); - interface.fields.sort_keys(); - for field in 
interface.fields.values_mut() { - let field = field.make_mut(); - for arg in &mut field.arguments { - let arg = arg.make_mut(); - if let Some(value) = &mut arg.default_value { - standardize_value_for_comparison(value.make_mut()); - } - for directive in &mut arg.directives { - standardize_directive_for_comparison(directive.make_mut()); - } - } - field - .arguments - .sort_by_cached_key(|arg| arg.name.to_ascii_lowercase()); - for directive in &mut field.directives { - standardize_directive_for_comparison(directive.make_mut()); - } - } - for directive in &mut interface.directives.0 { - standardize_directive_for_comparison(directive.make_mut()); - } - } - ExtendedType::InputObject(input_object) => { - let input_object = input_object.make_mut(); - input_object.fields.sort_keys(); - for field in input_object.fields.values_mut() { - let field = field.make_mut(); - if let Some(value) = &mut field.default_value { - standardize_value_for_comparison(value.make_mut()); - } - for directive in &mut field.directives { - standardize_directive_for_comparison(directive.make_mut()); - } - } - for directive in &mut input_object.directives { - standardize_directive_for_comparison(directive.make_mut()); - } - } - ExtendedType::Enum(enum_) => { - let enum_ = enum_.make_mut(); - enum_.values.sort_keys(); - for directive in &mut enum_.directives { - standardize_directive_for_comparison(directive.make_mut()); - } - } - ExtendedType::Union(union_) => { - let union_ = union_.make_mut(); - for directive in &mut union_.directives { - standardize_directive_for_comparison(directive.make_mut()); - } - } - ExtendedType::Scalar(scalar) => { - let scalar = scalar.make_mut(); - for directive in &mut scalar.directives { - standardize_directive_for_comparison(directive.make_mut()); - } - } - } - } - - schema - .directive_definitions - .sort_by_cached_key(|key, _value| key.to_ascii_lowercase()); - schema - .types - .sort_by_cached_key(|key, _value| key.to_ascii_lowercase()); - - schema -} - pub(crate) fn render_diff(differences: &[diff::Result<&str>]) -> String { let mut output = String::new(); for diff_line in differences { @@ -1222,7 +1019,7 @@ mod tests { #[test(tokio::test)] async fn empty_query_plan_should_be_a_planner_error() { - let schema = Schema::parse_test(EXAMPLE_SCHEMA, &Default::default()).unwrap(); + let schema = Schema::parse(EXAMPLE_SCHEMA, &Default::default()).unwrap(); let query = include_str!("testdata/unknown_introspection_query.graphql"); let planner = BridgeQueryPlanner::new(EXAMPLE_SCHEMA.to_string(), Default::default(), None) diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs index ff28cbc4c5..db4923f17c 100644 --- a/apollo-router/src/query_planner/caching_query_planner.rs +++ b/apollo-router/src/query_planner/caching_query_planner.rs @@ -269,7 +269,10 @@ where } in all_cache_keys { let context = Context::new(); - let doc = match query_analysis.parse_document(&query, operation.as_deref()) { + let doc = match query_analysis + .parse_document(&query, operation.as_deref()) + .await + { Ok(doc) => doc, Err(_) => continue, }; @@ -310,7 +313,10 @@ where }) .await; if entry.is_first() { - let doc = match query_analysis.parse_document(&query, operation.as_deref()) { + let doc = match query_analysis + .parse_document(&query, operation.as_deref()) + .await + { Ok(doc) => doc, Err(error) => { let e = Arc::new(QueryPlannerError::SpecError(error)); @@ -400,7 +406,7 @@ where let qp = self.clone(); Box::pin(async move { let context = 
request.context.clone(); - qp.plan(request).await.map(|response| { + qp.plan(request).await.inspect(|response| { if let Some(usage_reporting) = context .extensions() .with_lock(|lock| lock.get::>().cloned()) @@ -414,7 +420,6 @@ where usage_reporting.stats_report_key.clone(), ); } - response }) }) } @@ -639,12 +644,11 @@ impl std::fmt::Display for CachingQueryKey { let operation = hex::encode(hasher.finalize()); let mut hasher = Sha256::new(); - hasher.update(&serde_json::to_vec(&self.metadata).expect("serialization should not fail")); - hasher.update( - &serde_json::to_vec(&self.plan_options).expect("serialization should not fail"), - ); + hasher.update(serde_json::to_vec(&self.metadata).expect("serialization should not fail")); + hasher + .update(serde_json::to_vec(&self.plan_options).expect("serialization should not fail")); hasher - .update(&serde_json::to_vec(&self.config_mode).expect("serialization should not fail")); + .update(serde_json::to_vec(&self.config_mode).expect("serialization should not fail")); hasher.update(&*self.schema_id); hasher.update([self.introspection as u8]); let metadata = hex::encode(hasher.finalize()); @@ -749,14 +753,14 @@ mod tests { let configuration = Arc::new(crate::Configuration::default()); let schema = include_str!("testdata/schema.graphql"); - let schema = Arc::new(Schema::parse_test(schema, &configuration).unwrap()); + let schema = Arc::new(Schema::parse(schema, &configuration).unwrap()); let mut planner = CachingQueryPlanner::new( delegate, schema.clone(), Default::default(), &configuration, - IndexMap::new(), + IndexMap::default(), ) .await .unwrap(); @@ -847,7 +851,7 @@ mod tests { let configuration = Configuration::default(); let schema = - Schema::parse_test(include_str!("testdata/schema.graphql"), &configuration).unwrap(); + Schema::parse(include_str!("testdata/schema.graphql"), &configuration).unwrap(); let doc = Query::parse_document( "query Me { me { username } }", @@ -862,7 +866,7 @@ mod tests { Arc::new(schema), Default::default(), &configuration, - IndexMap::new(), + IndexMap::default(), ) .await .unwrap(); @@ -933,14 +937,14 @@ mod tests { ..Default::default() }); let schema = include_str!("testdata/schema.graphql"); - let schema = Arc::new(Schema::parse_test(schema, &configuration).unwrap()); + let schema = Arc::new(Schema::parse(schema, &configuration).unwrap()); let mut planner = CachingQueryPlanner::new( delegate, schema.clone(), Default::default(), &configuration, - IndexMap::new(), + IndexMap::default(), ) .await .unwrap(); diff --git a/apollo-router/src/query_planner/dual_query_planner.rs b/apollo-router/src/query_planner/dual_query_planner.rs index db860cd43b..2360b17703 100644 --- a/apollo-router/src/query_planner/dual_query_planner.rs +++ b/apollo-router/src/query_planner/dual_query_planner.rs @@ -14,7 +14,6 @@ use apollo_compiler::ExecutableDocument; use apollo_compiler::Name; use apollo_federation::query_plan::query_planner::QueryPlanner; use apollo_federation::query_plan::QueryPlan; -use apollo_federation::subgraph::spec::ENTITIES_QUERY; use super::fetch::FetchNode; use super::fetch::SubgraphOperation; @@ -109,7 +108,7 @@ impl BothModeComparisonJob { }); let name = self.operation_name.as_deref(); - let operation_desc = if let Ok(operation) = self.document.get_operation(name) { + let operation_desc = if let Ok(operation) = self.document.operations.get(name) { if let Some(parsed_name) = &operation.name { format!(" in {} `{parsed_name}`", operation.operation_type) } else { @@ -279,18 +278,6 @@ fn vec_matches_sorted_by( 
vec_matches(&this_sorted, &other_sorted, T::eq)
 }
 
-fn vec_matches_sorted_by_key<T: Eq + Clone>(
-    this: &[T],
-    other: &[T],
-    key_fn: impl Fn(&T) -> u64,
-) -> bool {
-    let mut this_sorted = this.to_owned();
-    let mut other_sorted = other.to_owned();
-    this_sorted.sort_by_key(&key_fn);
-    other_sorted.sort_by_key(&key_fn);
-    vec_matches(&this_sorted, &other_sorted, T::eq)
-}
-
 // performs a set comparison, ignoring order
 fn vec_matches_as_set<T>(this: &[T], other: &[T], item_matches: impl Fn(&T, &T) -> bool) -> bool {
     // Set-inclusion test in both directions
@@ -385,26 +372,47 @@ fn flatten_node_matches(this: &FlattenNode, other: &FlattenNode) -> bool {
 
 // AST comparison functions
 fn same_ast_document(x: &ast::Document, y: &ast::Document) -> bool {
-    x.definitions
-        .iter()
-        .zip(y.definitions.iter())
-        .all(|(x_def, y_def)| same_ast_definition(x_def, y_def))
-}
-
-fn same_ast_definition(x: &ast::Definition, y: &ast::Definition) -> bool {
-    match (x, y) {
-        (ast::Definition::OperationDefinition(x), ast::Definition::OperationDefinition(y)) => {
-            same_ast_operation_definition(x, y)
+    fn split_definitions(
+        doc: &ast::Document,
+    ) -> (
+        Vec<&ast::OperationDefinition>,
+        Vec<&ast::FragmentDefinition>,
+        Vec<&ast::Definition>,
+    ) {
+        let mut operations: Vec<&ast::OperationDefinition> = Vec::new();
+        let mut fragments: Vec<&ast::FragmentDefinition> = Vec::new();
+        let mut others: Vec<&ast::Definition> = Vec::new();
+        for def in doc.definitions.iter() {
+            match def {
+                ast::Definition::OperationDefinition(op) => operations.push(op),
+                ast::Definition::FragmentDefinition(frag) => fragments.push(frag),
+                _ => others.push(def),
+            }
         }
-        (ast::Definition::FragmentDefinition(x), ast::Definition::FragmentDefinition(y)) => x == y,
-        _ => false,
+        fragments.sort_by_key(|frag| frag.name.clone());
+        (operations, fragments, others)
     }
-}
 
-fn hash_value<T: Hash>(x: &T) -> u64 {
-    let mut hasher = DefaultHasher::new();
-    x.hash(&mut hasher);
-    hasher.finish()
+    let (x_ops, x_frags, x_others) = split_definitions(x);
+    let (y_ops, y_frags, y_others) = split_definitions(y);
+
+    debug_assert!(x_others.is_empty(), "Unexpected definition types");
+    debug_assert!(y_others.is_empty(), "Unexpected definition types");
+    debug_assert!(
+        x_ops.len() == y_ops.len(),
+        "Different number of operation definitions"
+    );
+
+    x_ops.len() == y_ops.len()
+        && x_ops
+            .iter()
+            .zip(y_ops.iter())
+            .all(|(x_op, y_op)| same_ast_operation_definition(x_op, y_op))
+        && x_frags.len() == y_frags.len()
+        && x_frags
+            .iter()
+            .zip(y_frags.iter())
+            .all(|(x_frag, y_frag)| same_ast_fragment_definition(x_frag, y_frag))
 }
 
 fn same_ast_operation_definition(
@@ -415,26 +423,107 @@
     x.operation_type == y.operation_type
         && vec_matches_sorted_by(&x.variables, &y.variables, |x, y| x.name.cmp(&y.name))
         && x.directives == y.directives
-        && same_ast_top_level_selection_set(&x.selection_set, &y.selection_set)
+        && same_ast_selection_set_sorted(&x.selection_set, &y.selection_set)
 }
 
-fn same_ast_top_level_selection_set(x: &[ast::Selection], y: &[ast::Selection]) -> bool {
-    match (x.split_first(), y.split_first()) {
-        (Some((ast::Selection::Field(x0), [])), Some((ast::Selection::Field(y0), [])))
-            if x0.name == ENTITIES_QUERY && y0.name == ENTITIES_QUERY =>
-        {
-            // Note: Entity-fetch query selection sets may be reordered.
-            same_ast_selection_set_sorted(&x0.selection_set, &y0.selection_set)
-        }
-        _ => x == y,
+fn same_ast_fragment_definition(x: &ast::FragmentDefinition, y: &ast::FragmentDefinition) -> bool {
+    x.name == y.name
+        && x.type_condition == y.type_condition
+        && x.directives == y.directives
+        && same_ast_selection_set_sorted(&x.selection_set, &y.selection_set)
+}
+
+// Copied and modified from `apollo_federation::operation::SelectionKey`
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub(crate) enum SelectionKey {
+    Field {
+        /// The field alias (if specified) or field name in the resulting selection set.
+        response_name: Name,
+        directives: ast::DirectiveList,
+    },
+    FragmentSpread {
+        /// The name of the fragment.
+        fragment_name: Name,
+        directives: ast::DirectiveList,
+    },
+    InlineFragment {
+        /// The optional type condition of the fragment.
+        type_condition: Option<Name>,
+        directives: ast::DirectiveList,
+    },
+}
+
+fn get_selection_key(selection: &ast::Selection) -> SelectionKey {
+    match selection {
+        ast::Selection::Field(field) => SelectionKey::Field {
+            response_name: field.response_name().clone(),
+            directives: field.directives.clone(),
+        },
+        ast::Selection::FragmentSpread(fragment) => SelectionKey::FragmentSpread {
+            fragment_name: fragment.fragment_name.clone(),
+            directives: fragment.directives.clone(),
+        },
+        ast::Selection::InlineFragment(fragment) => SelectionKey::InlineFragment {
+            type_condition: fragment.type_condition.clone(),
+            directives: fragment.directives.clone(),
+        },
+    }
 }
 
-// This comparison does not sort selection sets recursively. This is good enough to handle
-// reordered `_entities` selection sets.
-// TODO: Make this recursive.
+use std::ops::Not;
+
+/// Get the sub-selections of a selection.
+fn get_selection_set(selection: &ast::Selection) -> Option<&Vec<ast::Selection>> {
+    match selection {
+        ast::Selection::Field(field) => field
+            .selection_set
+            .is_empty()
+            .not()
+            .then(|| &field.selection_set),
+        ast::Selection::FragmentSpread(_) => None,
+        ast::Selection::InlineFragment(fragment) => Some(&fragment.selection_set),
+    }
+}
+
+fn same_ast_selection(x: &ast::Selection, y: &ast::Selection) -> bool {
+    let x_key = get_selection_key(x);
+    let y_key = get_selection_key(y);
+    if x_key != y_key {
+        return false;
+    }
+    let x_selections = get_selection_set(x);
+    let y_selections = get_selection_set(y);
+    match (x_selections, y_selections) {
+        (Some(x), Some(y)) => same_ast_selection_set_sorted(x, y),
+        (None, None) => true,
+        _ => false,
+    }
+}
+
+fn hash_value<T: Hash>(x: &T) -> u64 {
+    let mut hasher = DefaultHasher::new();
+    x.hash(&mut hasher);
+    hasher.finish()
+}
+
+fn hash_selection_key(selection: &ast::Selection) -> u64 {
+    hash_value(&get_selection_key(selection))
+}
+
 fn same_ast_selection_set_sorted(x: &[ast::Selection], y: &[ast::Selection]) -> bool {
-    vec_matches_sorted_by_key(x, y, hash_value)
+    fn sorted_by_selection_key(s: &[ast::Selection]) -> Vec<&ast::Selection> {
+        let mut sorted: Vec<&ast::Selection> = s.iter().collect();
+        sorted.sort_by_key(|x| hash_selection_key(x));
+        sorted
+    }
+
+    if x.len() != y.len() {
+        return false;
+    }
+    sorted_by_selection_key(x)
+        .into_iter()
+        .zip(sorted_by_selection_key(y))
+        .all(|(x, y)| same_ast_selection(x, y))
 }
 
 #[cfg(test)]
@@ -468,11 +557,18 @@ mod ast_comparison_tests {
     }
 
     #[test]
-    #[should_panic(expected = "assertion failed")]
-    // Reordered selection sets are not supported yet.
fn test_top_level_selection_order() { - let op_x = r#"{ x { w } y }"#; - let op_y = r#"{ y x { w } }"#; + let op_x = r#"{ x { w z } y }"#; + let op_y = r#"{ y x { z w } }"#; + let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); + let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); + assert!(super::same_ast_document(&ast_x, &ast_y)); + } + + #[test] + fn test_fragment_definition_order() { + let op_x = r#"{ q { ...f1 ...f2 } } fragment f1 on T { x y } fragment f2 on T { w z }"#; + let op_y = r#"{ q { ...f1 ...f2 } } fragment f2 on T { w z } fragment f1 on T { x y }"#; let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); assert!(super::same_ast_document(&ast_x, &ast_y)); diff --git a/apollo-router/src/query_planner/fetch.rs b/apollo-router/src/query_planner/fetch.rs index 1d40ea8be2..05b871e60e 100644 --- a/apollo-router/src/query_planner/fetch.rs +++ b/apollo-router/src/query_planner/fetch.rs @@ -294,7 +294,7 @@ impl Variables { })); let mut inverted_paths: Vec> = Vec::new(); - let mut values: IndexSet = IndexSet::new(); + let mut values: IndexSet = IndexSet::default(); data.select_values_and_paths(schema, current_dir, |path, value| { // first get contextual values that are required if let Some(context) = subgraph_context.as_mut() { diff --git a/apollo-router/src/query_planner/rewrites.rs b/apollo-router/src/query_planner/rewrites.rs index 6c941fc88d..49733e9ad1 100644 --- a/apollo-router/src/query_planner/rewrites.rs +++ b/apollo-router/src/query_planner/rewrites.rs @@ -165,7 +165,7 @@ mod tests { }); dr.maybe_apply( - &Schema::parse_test(SCHEMA, &Default::default()).unwrap(), + &Schema::parse(SCHEMA, &Default::default()).unwrap(), &mut data, ); @@ -203,7 +203,7 @@ mod tests { }); dr.maybe_apply( - &Schema::parse_test(SCHEMA, &Default::default()).unwrap(), + &Schema::parse(SCHEMA, &Default::default()).unwrap(), &mut data, ); diff --git a/apollo-router/src/query_planner/selection.rs b/apollo-router/src/query_planner/selection.rs index 6e40dc4a0e..b6dd46ffa4 100644 --- a/apollo-router/src/query_planner/selection.rs +++ b/apollo-router/src/query_planner/selection.rs @@ -285,7 +285,7 @@ mod tests { macro_rules! select { ($schema:expr, $content:expr $(,)?) => {{ - let schema = Schema::parse_test(&$schema, &Default::default()).unwrap(); + let schema = Schema::parse(&$schema, &Default::default()).unwrap(); let response = Response::builder() .data($content) .build(); @@ -386,7 +386,7 @@ mod tests { type MainObject { mainObjectList: [SubObject] } type SubObject { key: String name: String }", ); - let schema = Schema::parse_test(&schema, &Default::default()).unwrap(); + let schema = Schema::parse(&schema, &Default::default()).unwrap(); let response = bjson!({ "__typename": "MainObject", @@ -483,7 +483,7 @@ mod tests { id: Int! 
}", ); - let schema = Schema::parse_test(&schema, &Default::default()).unwrap(); + let schema = Schema::parse(&schema, &Default::default()).unwrap(); let response = bjson!({ "__typename": "Entity", diff --git a/apollo-router/src/query_planner/subgraph_context.rs b/apollo-router/src/query_planner/subgraph_context.rs index e0bd6e06a3..e841cf27c3 100644 --- a/apollo-router/src/query_planner/subgraph_context.rs +++ b/apollo-router/src/query_planner/subgraph_context.rs @@ -215,16 +215,16 @@ pub(crate) fn build_operation_with_aliasing( // for every operation in the document, go ahead and transform even though it's likely that only one exists if let Ok(document) = parsed_document { - if let Some(anonymous_op) = &document.anonymous_operation { + if let Some(anonymous_op) = &document.operations.anonymous { let mut cloned = anonymous_op.clone(); transform_operation(&mut cloned, arguments, count)?; - ed.insert_operation(cloned); + ed.operations.insert(cloned); } - for (_, op) in &document.named_operations { + for (_, op) in &document.operations.named { let mut cloned = op.clone(); transform_operation(&mut cloned, arguments, count)?; - ed.insert_operation(cloned); + ed.operations.insert(cloned); } return ed @@ -347,7 +347,8 @@ fn transform_field_arguments( #[derive(Debug)] pub(crate) enum ContextBatchingError { NoSelectionSet, - InvalidDocumentGenerated(WithErrors), + // The only use of the field is in `Debug`, on purpose. + InvalidDocumentGenerated(#[allow(unused)] WithErrors), InvalidRelativePath, UnexpectedSelection, } diff --git a/apollo-router/src/query_planner/tests.rs b/apollo-router/src/query_planner/tests.rs index ba91c1a3e3..fd7fb6d8b6 100644 --- a/apollo-router/src/query_planner/tests.rs +++ b/apollo-router/src/query_planner/tests.rs @@ -26,7 +26,6 @@ use crate::plugin::test::MockSubgraph; use crate::query_planner; use crate::query_planner::fetch::FetchNode; use crate::query_planner::fetch::SubgraphOperation; -use crate::query_planner::BridgeQueryPlanner; use crate::services::subgraph_service::MakeSubgraphService; use crate::services::supergraph; use crate::services::SubgraphResponse; @@ -116,7 +115,7 @@ async fn mock_subgraph_service_withf_panics_should_be_reported_as_service_closed &Context::new(), &sf, &Default::default(), - &Arc::new(Schema::parse_test(test_schema!(), &Default::default()).unwrap()), + &Arc::new(Schema::parse(test_schema!(), &Default::default()).unwrap()), &Default::default(), sender, None, @@ -179,7 +178,7 @@ async fn fetch_includes_operation_name() { &Context::new(), &sf, &Default::default(), - &Arc::new(Schema::parse_test(test_schema!(), &Default::default()).unwrap()), + &Arc::new(Schema::parse(test_schema!(), &Default::default()).unwrap()), &Default::default(), sender, None, @@ -239,7 +238,7 @@ async fn fetch_makes_post_requests() { &Context::new(), &sf, &Default::default(), - &Arc::new(Schema::parse_test(test_schema!(), &Default::default()).unwrap()), + &Arc::new(Schema::parse(test_schema!(), &Default::default()).unwrap()), &Default::default(), sender, None, @@ -373,7 +372,7 @@ async fn defer() { let (sender, receiver) = tokio::sync::mpsc::channel(10); let schema = include_str!("testdata/defer_schema.graphql"); - let schema = Arc::new(Schema::parse_test(schema, &Default::default()).unwrap()); + let schema = Arc::new(Schema::parse(schema, &Default::default()).unwrap()); let sf = Arc::new(SubgraphServiceFactory { services: Arc::new(HashMap::from([ ( @@ -432,14 +431,13 @@ async fn defer_if_condition() { } }"#; - let schema = 
include_str!("testdata/defer_clause.graphql"); - // we need to use the planner here instead of Schema::parse_test because that one uses the router bridge's api_schema function - // does not keep the defer directive definition - let planner = - BridgeQueryPlanner::new(schema.to_string(), Arc::new(Configuration::default()), None) - .await - .unwrap(); - let schema = planner.schema(); + let schema = Arc::new( + Schema::parse( + include_str!("testdata/defer_clause.graphql"), + &Configuration::default(), + ) + .unwrap(), + ); let root: Arc = serde_json::from_str(include_str!("testdata/defer_clause_plan.json")).unwrap(); @@ -681,7 +679,7 @@ async fn dependent_mutations() { &Context::new(), &sf, &Default::default(), - &Arc::new(Schema::parse_test(schema, &Default::default()).unwrap()), + &Arc::new(Schema::parse(schema, &Default::default()).unwrap()), &Default::default(), sender, None, diff --git a/apollo-router/src/router/event/license.rs b/apollo-router/src/router/event/license.rs index 5a3ca41525..343efd6c7c 100644 --- a/apollo-router/src/router/event/license.rs +++ b/apollo-router/src/router/event/license.rs @@ -6,7 +6,6 @@ use derivative::Derivative; use derive_more::Display; use derive_more::From; use futures::prelude::*; -use thiserror::Error; use crate::router::Event; use crate::router::Event::NoMoreLicense; @@ -21,12 +20,6 @@ const APOLLO_ROUTER_LICENSE_INVALID: &str = "APOLLO_ROUTER_LICENSE_INVALID"; type LicenseStream = Pin + Send>>; -#[derive(Debug, Display, From, Error)] -enum Error { - /// The license is invalid. - InvalidLicense, -} - /// License controls availability of certain features of the Router. /// This API experimental and is subject to change outside of semver. #[derive(From, Display, Derivative)] diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index 1ebcc71f36..ca1dd4cb7b 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -439,7 +439,7 @@ pub(crate) async fn create_subgraph_services( .and_then(|plugin| (*plugin.1).as_any().downcast_ref::()) .expect("traffic shaping should always be part of the plugin list"); - let mut subgraph_services = IndexMap::new(); + let mut subgraph_services = IndexMap::default(); for (name, _) in schema.subgraphs() { let http_service = crate::services::http::HttpClientService::from_config( name, @@ -570,7 +570,7 @@ pub(crate) async fn create_plugins( .map(|factory| (factory.name.as_str(), &**factory)) .collect(); let mut errors = Vec::new(); - let mut plugin_instances = Plugins::new(); + let mut plugin_instances = Plugins::default(); // Use function-like macros to avoid borrow conflicts of captures macro_rules! 
add_plugin { @@ -858,8 +858,6 @@ fn can_use_with_experimental_query_planner( } #[cfg(test)] mod test { - use std::error::Error; - use std::fmt; use std::sync::Arc; use schemars::JsonSchema; @@ -878,17 +876,6 @@ mod test { use crate::router_factory::YamlRouterFactory; use crate::spec::Schema; - #[derive(Debug)] - struct PluginError; - - impl fmt::Display for PluginError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "PluginError") - } - } - - impl Error for PluginError {} - // Always starts and stops plugin #[derive(Debug)] @@ -1023,7 +1010,7 @@ mod test { ..Default::default() }; let schema = include_str!("testdata/supergraph_with_context.graphql"); - let schema = Arc::new(Schema::parse_test(schema, &config).unwrap()); + let schema = Arc::new(Schema::parse(schema, &config).unwrap()); assert!( can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_err(), "experimental_query_planner_mode: both cannot be used with @context" @@ -1054,7 +1041,7 @@ mod test { ..Default::default() }; let schema = include_str!("testdata/supergraph_with_override_label.graphql"); - let schema = Arc::new(Schema::parse_test(schema, &config).unwrap()); + let schema = Arc::new(Schema::parse(schema, &config).unwrap()); assert!( can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_err(), "experimental_query_planner_mode: both cannot be used with progressive overrides" @@ -1084,7 +1071,7 @@ mod test { ..Default::default() }; let schema = include_str!("testdata/supergraph.graphql"); - let schema = Arc::new(Schema::parse_test(schema, &config).unwrap()); + let schema = Arc::new(Schema::parse(schema, &config).unwrap()); assert!( can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_err(), "experimental_query_planner_mode: both cannot be used with fed1 supergraph" @@ -1114,7 +1101,7 @@ mod test { ..Default::default() }; let schema = include_str!("testdata/minimal_fed2_supergraph.graphql"); - let schema = Arc::new(Schema::parse_test(schema, &config).unwrap()); + let schema = Arc::new(Schema::parse(schema, &config).unwrap()); assert!( can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_ok(), "experimental_query_planner_mode: both can be used" diff --git a/apollo-router/src/services/http.rs b/apollo-router/src/services/http.rs index 629044ace5..980c20ce70 100644 --- a/apollo-router/src/services/http.rs +++ b/apollo-router/src/services/http.rs @@ -60,7 +60,7 @@ impl HttpClientServiceFactory { HttpClientServiceFactory { service: Arc::new(service), - plugins: Arc::new(IndexMap::new()), + plugins: Arc::new(IndexMap::default()), } } diff --git a/apollo-router/src/services/layers/allow_only_http_post_mutations.rs b/apollo-router/src/services/layers/allow_only_http_post_mutations.rs index 2b56660856..16179f07bb 100644 --- a/apollo-router/src/services/layers/allow_only_http_post_mutations.rs +++ b/apollo-router/src/services/layers/allow_only_http_post_mutations.rs @@ -72,7 +72,8 @@ where let op = doc .executable - .get_operation(req.supergraph_request.body().operation_name.as_deref()); + .operations + .get(req.supergraph_request.body().operation_name.as_deref()); match op { Err(_) => { diff --git a/apollo-router/src/services/layers/persisted_queries/mod.rs b/apollo-router/src/services/layers/persisted_queries/mod.rs index e4f8e1e68b..0444590958 100644 --- a/apollo-router/src/services/layers/persisted_queries/mod.rs +++ b/apollo-router/src/services/layers/persisted_queries/mod.rs @@ -200,7 +200,8 @@ impl 
PersistedQueryLayer { if self.introspection_enabled && doc .executable - .all_operations() + .operations + .iter() .all(|op| op.is_introspection(&doc.executable)) { return Ok(request); @@ -699,7 +700,7 @@ mod tests { let pq_layer = PersistedQueryLayer::new(&config).await.unwrap(); let schema = Arc::new( - Schema::parse_test( + Schema::parse( include_str!("../../../testdata/supergraph.graphql"), &Default::default(), ) diff --git a/apollo-router/src/services/layers/query_analysis.rs b/apollo-router/src/services/layers/query_analysis.rs index 4beb5a1e21..9bcdfcceae 100644 --- a/apollo-router/src/services/layers/query_analysis.rs +++ b/apollo-router/src/services/layers/query_analysis.rs @@ -11,6 +11,7 @@ use http::StatusCode; use lru::LruCache; use router_bridge::planner::UsageReporting; use tokio::sync::Mutex; +use tokio::task; use crate::apollo_studio_interop::generate_extended_references; use crate::apollo_studio_interop::ExtendedReferenceStats; @@ -22,6 +23,7 @@ use crate::graphql::IntoGraphQLErrors; use crate::plugins::authorization::AuthorizationPlugin; use crate::plugins::telemetry::config::ApolloMetricsReferenceMode; use crate::plugins::telemetry::config::Conf as TelemetryConfig; +use crate::plugins::telemetry::consts::QUERY_PARSING_SPAN_NAME; use crate::query_planner::fetch::QueryHash; use crate::query_planner::OperationKind; use crate::services::SupergraphRequest; @@ -32,8 +34,6 @@ use crate::spec::SpecError; use crate::Configuration; use crate::Context; -pub(crate) const QUERY_PARSING_SPAN_NAME: &str = "parse_query"; - /// [`Layer`] for QueryAnalysis implementation. #[derive(Clone)] #[allow(clippy::type_complexity)] @@ -73,12 +73,32 @@ impl QueryAnalysisLayer { } } - pub(crate) fn parse_document( + pub(crate) async fn parse_document( &self, query: &str, operation_name: Option<&str>, ) -> Result { - Query::parse_document(query, operation_name, &self.schema, &self.configuration) + let query = query.to_string(); + let operation_name = operation_name.map(|o| o.to_string()); + let schema = self.schema.clone(); + let conf = self.configuration.clone(); + + // Must be created *outside* of the spawn_blocking or the span is not connected to the + // parent + let span = tracing::info_span!(QUERY_PARSING_SPAN_NAME, "otel.kind" = "INTERNAL"); + + task::spawn_blocking(move || { + span.in_scope(|| { + Query::parse_document( + &query, + operation_name.as_deref(), + schema.as_ref(), + conf.as_ref(), + ) + }) + }) + .await + .expect("parse_document task panicked") } pub(crate) async fn supergraph_request( @@ -127,8 +147,7 @@ impl QueryAnalysisLayer { let res = match entry { None => { - let span = tracing::info_span!(QUERY_PARSING_SPAN_NAME, "otel.kind" = "INTERNAL"); - match span.in_scope(|| self.parse_document(&query, op_name.as_deref())) { + match self.parse_document(&query, op_name.as_deref()).await { Err(errors) => { (*self.cache.lock().await).put( QueryAnalysisKey { @@ -155,7 +174,7 @@ impl QueryAnalysisLayer { Ok(doc) => { let context = Context::new(); - let operation = doc.executable.get_operation(op_name.as_deref()).ok(); + let operation = doc.executable.operations.get(op_name.as_deref()).ok(); let operation_name = operation.as_ref().and_then(|operation| { operation.name.as_ref().map(|s| s.as_str().to_owned()) }); diff --git a/apollo-router/src/services/subgraph.rs b/apollo-router/src/services/subgraph.rs index 59aafb8389..23ebc608e3 100644 --- a/apollo-router/src/services/subgraph.rs +++ b/apollo-router/src/services/subgraph.rs @@ -48,6 +48,7 @@ pub struct Request { pub context: Context, + 
// FIXME for router 2.x
     /// Name of the subgraph, it's an Option to not introduce breaking change
     pub(crate) subgraph_name: Option<String>,
     /// Channel to send the subscription stream to listen on events coming from subgraph in a task
     pub(crate) subscription_stream: Option<mpsc::Sender<BoxStream<'static, graphql::Response>>>,
@@ -162,7 +163,9 @@ assert_impl_all!(Response: Send);
 #[non_exhaustive]
 pub struct Response {
     pub response: http::Response<graphql::Response>,
-
+    // FIXME for router 2.x
+    /// Name of the subgraph, it's an Option to not introduce breaking change
+    pub(crate) subgraph_name: Option<String>,
     pub context: Context,
 }
 
@@ -175,8 +178,13 @@ impl Response {
     pub(crate) fn new_from_response(
         response: http::Response<graphql::Response>,
         context: Context,
+        subgraph_name: String,
     ) -> Response {
-        Self { response, context }
+        Self {
+            response,
+            context,
+            subgraph_name: Some(subgraph_name),
+        }
     }
 
     /// This is the constructor (or builder) to use when constructing a real Response.
@@ -193,6 +201,7 @@ impl Response {
         status_code: Option<StatusCode>,
         context: Context,
         headers: Option<http::HeaderMap<HeaderValue>>,
+        subgraph_name: Option<String>,
     ) -> Response {
         // Build a response
         let res = graphql::Response::builder()
@@ -211,7 +220,11 @@ impl Response {
 
         *response.headers_mut() = headers.unwrap_or_default();
 
-        Self { response, context }
+        Self {
+            response,
+            context,
+            subgraph_name,
+        }
     }
 
     /// This is the constructor (or builder) to use when constructing a "fake" Response.
@@ -230,6 +243,7 @@ impl Response {
         status_code: Option<StatusCode>,
         context: Option<Context>,
         headers: Option<http::HeaderMap<HeaderValue>>,
+        subgraph_name: Option<String>,
     ) -> Response {
         Response::new(
             label,
@@ -240,6 +254,7 @@ impl Response {
             status_code,
             context.unwrap_or_default(),
             headers,
+            subgraph_name,
         )
     }
 
@@ -260,6 +275,7 @@ impl Response {
         status_code: Option<StatusCode>,
         context: Option<Context>,
         headers: MultiMap<TryIntoHeaderName, TryIntoHeaderValue>,
+        subgraph_name: Option<String>,
     ) -> Result<Response, BoxError> {
         Ok(Response::new(
             label,
@@ -270,6 +286,7 @@ impl Response {
             status_code,
             context.unwrap_or_default(),
             Some(header_map(headers)?),
+            subgraph_name,
         ))
     }
 
@@ -281,6 +298,7 @@ impl Response {
         errors: Vec<Error>,
         status_code: Option<StatusCode>,
         context: Context,
+        subgraph_name: Option<String>,
     ) -> Result<Response, BoxError> {
         Ok(Response::new(
             Default::default(),
@@ -291,6 +309,7 @@ impl Response {
             status_code,
             context,
             Default::default(),
+            subgraph_name,
         ))
     }
 }
diff --git a/apollo-router/src/services/subgraph_service.rs b/apollo-router/src/services/subgraph_service.rs
index 48627109af..c2274f6bc8 100644
--- a/apollo-router/src/services/subgraph_service.rs
+++ b/apollo-router/src/services/subgraph_service.rs
@@ -311,6 +311,7 @@ impl tower::Service<SubgraphRequest> for SubgraphService {
                     );
                     // Dedup happens here
                     return Ok(SubgraphResponse::builder()
+                        .subgraph_name(service_name.clone())
                         .context(context)
                         .extensions(Object::default())
                         .build());
@@ -521,6 +522,7 @@ async fn call_websocket(
             // Dedup happens here
             return Ok(SubgraphResponse::builder()
                 .context(context)
+                .subgraph_name(service_name.clone())
                 .extensions(Object::default())
                 .build());
         }
@@ -669,7 +671,7 @@ async fn call_websocket(
         .into_subscription(body, subgraph_cfg.heartbeat_interval.into_option())
         .await
         .map_err(|err| FetchError::SubrequestWsError {
-            service: service_name,
+            service: service_name.clone(),
             reason: format!("cannot send the subgraph request to websocket stream: {err:?}"),
         })?;
 
@@ -703,6 +705,7 @@ async fn call_websocket(
     Ok(SubgraphResponse::new_from_response(
         resp.map(|_| graphql::Response::default()),
         context,
+        service_name,
     ))
 }
 
@@ -972,10 +975,12 @@ pub(crate) async fn process_batch(
     // We are going to pop contexts from the back, so let's reverse our contexts
     contexts.reverse();
 
+    let subgraph_name = service.clone();
     // Build an http Response for each graphql response
     let subgraph_responses:
Result, _> = graphql_responses .into_iter() .map(|res| { + let subgraph_name = subgraph_name.clone(); http::Response::builder() .status(parts.status) .version(parts.version) @@ -984,7 +989,8 @@ pub(crate) async fn process_batch( *http_res.headers_mut() = parts.headers.clone(); // Use the original context for the request to create the response let context = contexts.pop().expect("we have a context for each response"); - let resp = SubgraphResponse::new_from_response(http_res, context); + let resp = + SubgraphResponse::new_from_response(http_res, context, subgraph_name); tracing::debug!("we have a resp: {resp:?}"); resp @@ -1308,6 +1314,7 @@ pub(crate) async fn call_single_http( .body(graphql::Response::default()) .expect("it won't fail everything is coming from an existing response"), context.clone(), + service_name.to_owned(), ); should_log = condition.lock().evaluate_response(&subgraph_response); } @@ -1348,7 +1355,11 @@ pub(crate) async fn call_single_http( http_response_to_graphql_response(service_name, content_type, body, &parts); let resp = http::Response::from_parts(parts, graphql_response); - Ok(SubgraphResponse::new_from_response(resp, context)) + Ok(SubgraphResponse::new_from_response( + resp, + context, + service_name.to_owned(), + )) } #[derive(Clone, Debug)] diff --git a/apollo-router/src/services/supergraph/service.rs b/apollo-router/src/services/supergraph/service.rs index 2bb7daf14e..a5ea8403d5 100644 --- a/apollo-router/src/services/supergraph/service.rs +++ b/apollo-router/src/services/supergraph/service.rs @@ -801,7 +801,7 @@ impl PluggableSupergraphServiceBuilder { schema.clone(), subgraph_schemas, &configuration, - IndexMap::new(), + IndexMap::default(), ) .await?; diff --git a/apollo-router/src/services/supergraph/tests.rs b/apollo-router/src/services/supergraph/tests.rs index c200068299..e624dd07a9 100644 --- a/apollo-router/src/services/supergraph/tests.rs +++ b/apollo-router/src/services/supergraph/tests.rs @@ -1057,7 +1057,7 @@ async fn subscription_callback_schema_reload() { let new_schema = format!("{SCHEMA} "); // reload schema - let schema = Schema::parse_test(&new_schema, &configuration).unwrap(); + let schema = Schema::parse(&new_schema, &configuration).unwrap(); notify.broadcast_schema(Arc::new(schema)); insta::assert_json_snapshot!(tokio::time::timeout( Duration::from_secs(1), diff --git a/apollo-router/src/spec/operation_limits.rs b/apollo-router/src/spec/operation_limits.rs index 4b325ad840..c280ba4c62 100644 --- a/apollo-router/src/spec/operation_limits.rs +++ b/apollo-router/src/spec/operation_limits.rs @@ -70,7 +70,7 @@ pub(crate) fn check( root_fields: config_limits.max_root_fields, aliases: config_limits.max_aliases, }; - let Ok(operation) = document.get_operation(operation_name) else { + let Ok(operation) = document.operations.get(operation_name) else { // Undefined or ambiguous operation name. // The request is invalid and will be rejected by some other part of the router, // if it wasn’t already before we got to this code path. 
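
A pattern that recurs throughout this diff is the migration from apollo-compiler's document-level helpers (`get_operation`, `all_operations`, `anonymous_operation`, `insert_operation`) to the `operations` collection (`operations.get(...)`, `operations.iter()`, `operations.anonymous`, `operations.named`, `operations.insert(...)`). A minimal sketch of the lookup semantics, based only on the call sites visible in this diff; the helper below is illustrative and not part of the PR:

    use apollo_compiler::ExecutableDocument;

    // Illustrative helper: resolve the operation a request names.
    // `operations.get(None)` succeeds only when the document contains exactly
    // one operation; `operations.get(Some(name))` looks up a named operation;
    // an Err means the name is undefined or ambiguous, which the router
    // treats as an invalid request (see `operation_limits::check` above).
    fn describe_operation(doc: &ExecutableDocument, name: Option<&str>) -> String {
        match doc.operations.get(name) {
            Ok(operation) => match &operation.name {
                Some(n) => format!("{} `{n}`", operation.operation_type),
                None => format!("anonymous {}", operation.operation_type),
            },
            Err(_) => "undefined or ambiguous operation name".to_string(),
        }
    }
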
diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs index 147f1e6805..d40b247b4d 100644 --- a/apollo-router/src/spec/query.rs +++ b/apollo-router/src/spec/query.rs @@ -107,7 +107,7 @@ impl Query { defer_stats: DeferStats { has_defer: false, has_unconditional_defer: false, - conditional_defer_variable_names: IndexSet::new(), + conditional_defer_variable_names: IndexSet::default(), }, is_original: true, schema_aware_hash: vec![], @@ -275,7 +275,7 @@ impl Query { schema: &Schema, configuration: &Configuration, ) -> Result { - let parser = &mut apollo_compiler::Parser::new() + let parser = &mut apollo_compiler::parser::Parser::new() .recursion_limit(configuration.limits.parser_max_recursion) .token_limit(configuration.limits.parser_max_tokens); let ast = match parser.parse_ast(query, "query.graphql") { @@ -346,11 +346,12 @@ impl Query { let mut defer_stats = DeferStats { has_defer: false, has_unconditional_defer: false, - conditional_defer_variable_names: IndexSet::new(), + conditional_defer_variable_names: IndexSet::default(), }; let fragments = Fragments::from_hir(document, schema, &mut defer_stats)?; let operations = document - .all_operations() + .operations + .iter() .map(|operation| Operation::from_hir(operation, schema, &mut defer_stats, &fragments)) .collect::, SpecError>>()?; diff --git a/apollo-router/src/spec/query/change.rs b/apollo-router/src/spec/query/change.rs index bbc39b2232..9db9554a04 100644 --- a/apollo-router/src/spec/query/change.rs +++ b/apollo-router/src/spec/query/change.rs @@ -7,13 +7,13 @@ use apollo_compiler::ast; use apollo_compiler::ast::Argument; use apollo_compiler::ast::FieldDefinition; use apollo_compiler::executable; +use apollo_compiler::parser::Parser; use apollo_compiler::schema; use apollo_compiler::schema::DirectiveList; use apollo_compiler::schema::ExtendedType; use apollo_compiler::validation::Valid; use apollo_compiler::Name; use apollo_compiler::Node; -use apollo_compiler::Parser; use sha2::Digest; use sha2::Sha256; use tower::BoxError; diff --git a/apollo-router/src/spec/query/tests.rs b/apollo-router/src/spec/query/tests.rs index a73bc025dd..d05aa767b6 100644 --- a/apollo-router/src/spec/query/tests.rs +++ b/apollo-router/src/spec/query/tests.rs @@ -1,4 +1,4 @@ -use apollo_compiler::Parser; +use apollo_compiler::parser::Parser; use insta::assert_json_snapshot; use serde_json_bytes::json; use test_log::test; @@ -122,8 +122,7 @@ impl FormatTest { FederationVersion::Fed2 => with_supergraph_boilerplate_fed2(schema, query_type_name), }; - let schema = - Schema::parse_test(&schema, &Default::default()).expect("could not parse schema"); + let schema = Schema::parse(&schema, &Default::default()).expect("could not parse schema"); let api_schema = schema.api_schema(); let query = @@ -1394,8 +1393,7 @@ macro_rules! 
run_validation { Value::Object(object) => object, _ => unreachable!("variables must be an object"), }; - let schema = - Schema::parse_test(&$schema, &Default::default()).expect("could not parse schema"); + let schema = Schema::parse(&$schema, &Default::default()).expect("could not parse schema"); let request = Request::builder() .variables(variables) .query($query.to_string()) @@ -3458,7 +3456,7 @@ fn it_parses_default_floats() { "Query", ); - let schema = Schema::parse_test(&schema, &Default::default()).unwrap(); + let schema = Schema::parse(&schema, &Default::default()).unwrap(); let value = schema .supergraph_schema() .get_input_object("WithAllKindsOfFloats") @@ -3491,7 +3489,7 @@ fn it_statically_includes() { }", "Query", ); - let schema = Schema::parse_test(&schema, &Default::default()).expect("could not parse schema"); + let schema = Schema::parse(&schema, &Default::default()).expect("could not parse schema"); let query = Query::parse( "query { @@ -3640,7 +3638,7 @@ fn it_statically_skips() { }", "Query", ); - let schema = Schema::parse_test(&schema, &Default::default()).expect("could not parse schema"); + let schema = Schema::parse(&schema, &Default::default()).expect("could not parse schema"); let query = Query::parse( "query { @@ -3781,7 +3779,7 @@ fn it_should_fail_with_empty_selection_set() { }", "Query", ); - let schema = Schema::parse_test(&schema, &Default::default()).expect("could not parse schema"); + let schema = Schema::parse(&schema, &Default::default()).expect("could not parse schema"); let _query_error = Query::parse( "query { @@ -5129,7 +5127,7 @@ fn fragment_on_interface_on_query() { } }"; - let schema = Schema::parse_test(schema, &Default::default()).expect("could not parse schema"); + let schema = Schema::parse(schema, &Default::default()).expect("could not parse schema"); let api_schema = schema.api_schema(); let query = Query::parse(query, None, &schema, &Default::default()).expect("could not parse query"); @@ -5322,7 +5320,7 @@ fn parse_introspection_query() { }"; let schema = with_supergraph_boilerplate(schema, "Query"); - let schema = Schema::parse_test(&schema, &Default::default()).expect("could not parse schema"); + let schema = Schema::parse(&schema, &Default::default()).expect("could not parse schema"); let query = "{ __type(name: \"Bar\") { @@ -5633,7 +5631,7 @@ fn query_operation_nullification() { #[test] fn test_error_path_works_across_inline_fragments() { - let schema = Schema::parse_test( + let schema = Schema::parse( r#" schema @link(url: "https://specs.apollo.dev/link/v1.0") @@ -5750,7 +5748,7 @@ fn test_error_path_works_across_inline_fragments() { #[test] fn test_query_not_named_query() { let config = Default::default(); - let schema = Schema::parse_test( + let schema = Schema::parse( r#" schema @core(feature: "https://specs.apollo.dev/core/v0.1") @@ -5789,7 +5787,7 @@ fn test_query_not_named_query() { #[test] fn filtered_defer_fragment() { let config = Configuration::default(); - let schema = Schema::parse_test( + let schema = Schema::parse( r#" schema @core(feature: "https://specs.apollo.dev/core/v0.1") diff --git a/apollo-router/src/spec/query/traverse.rs b/apollo-router/src/spec/query/traverse.rs index f8a90d8fc7..295ba4a393 100644 --- a/apollo-router/src/spec/query/traverse.rs +++ b/apollo-router/src/spec/query/traverse.rs @@ -10,7 +10,7 @@ pub(crate) fn document( document: &ExecutableDocument, operation_name: Option<&str>, ) -> Result<(), BoxError> { - if let Ok(operation) = document.get_operation(operation_name) { + if let Ok(operation) = 
document.operations.get(operation_name) {
         visitor.operation(operation.object_type().as_str(), operation)?;
     }
diff --git a/apollo-router/src/spec/schema.rs b/apollo-router/src/spec/schema.rs
index f07ed436eb..07f13f746a 100644
--- a/apollo-router/src/spec/schema.rs
+++ b/apollo-router/src/spec/schema.rs
@@ -9,14 +9,15 @@ use apollo_compiler::ast;
 use apollo_compiler::schema::Implementers;
 use apollo_compiler::validation::Valid;
 use apollo_compiler::Name;
+use apollo_federation::schema::ValidFederationSchema;
+use apollo_federation::ApiSchemaOptions;
+use apollo_federation::Supergraph;
 use http::Uri;
 use semver::Version;
 use semver::VersionReq;
 use sha2::Digest;
 use sha2::Sha256;
 
-use crate::configuration::ApiSchemaMode;
-use crate::configuration::QueryPlannerMode;
 use crate::error::ParseErrors;
 use crate::error::SchemaError;
 use crate::query_planner::OperationKind;
@@ -27,32 +28,18 @@ pub(crate) struct Schema {
     pub(crate) raw_sdl: Arc<String>,
     supergraph: Supergraph,
     subgraphs: HashMap<String, Uri>,
-    pub(crate) implementers_map: HashMap<Name, Implementers>,
-    api_schema: Option<ApiSchema>,
+    pub(crate) implementers_map: apollo_compiler::collections::HashMap<Name, Implementers>,
+    api_schema: ApiSchema,
     pub(crate) schema_id: Arc<String>,
 }
 
-/// TODO: remove and use apollo_federation::Supergraph unconditionally
-/// when we’re more confident in its constructor
-enum Supergraph {
-    ApolloFederation(apollo_federation::Supergraph),
-    ApolloCompiler(Valid<apollo_compiler::Schema>),
-}
-
 /// Wrapper type to distinguish from `Schema::definitions` for the supergraph schema
 #[derive(Debug)]
-pub(crate) struct ApiSchema(pub(crate) Valid<apollo_compiler::Schema>);
+pub(crate) struct ApiSchema(pub(crate) ValidFederationSchema);
 
 impl Schema {
-    #[cfg(test)]
-    pub(crate) fn parse_test(s: &str, configuration: &Configuration) -> Result<Self, SchemaError> {
-        let schema = Self::parse(s, configuration)?;
-        let api_schema = Self::parse_compiler_schema(&schema.create_api_schema(configuration)?)?;
-        Ok(schema.with_api_schema(api_schema))
-    }
-
     pub(crate) fn parse_ast(sdl: &str) -> Result<ast::Document, SchemaError> {
-        let mut parser = apollo_compiler::Parser::new();
+        let mut parser = apollo_compiler::parser::Parser::new();
         let result = parser.parse_ast(sdl, "schema.graphql");
 
         // Trace log recursion limit data
@@ -122,40 +109,37 @@
         );
 
         let implementers_map = definitions.implementers_map();
-        let legacy_only = config.experimental_query_planner_mode == QueryPlannerMode::Legacy
-            && config.experimental_api_schema_generation_mode == ApiSchemaMode::Legacy;
-        let supergraph = if cfg!(test) || !legacy_only {
-            Supergraph::ApolloFederation(apollo_federation::Supergraph::from_schema(definitions)?)
-        } else {
-            Supergraph::ApolloCompiler(definitions)
-        };
+        let supergraph = Supergraph::from_schema(definitions)?;
         let schema_id = Arc::new(Schema::schema_id(sdl));
 
+        let api_schema = supergraph
+            .to_api_schema(ApiSchemaOptions {
+                include_defer: config.supergraph.defer_support,
+                ..Default::default()
+            })
+            .map_err(|e| {
+                SchemaError::Api(format!(
+                    "The supergraph schema failed to produce a valid API schema: {e}"
+                ))
+            })?;
+
         Ok(Schema {
             raw_sdl: Arc::new(sdl.to_owned()),
             supergraph,
             subgraphs,
             implementers_map,
-            api_schema: None,
+            api_schema: ApiSchema(api_schema),
             schema_id,
         })
     }
 
-    pub(crate) fn federation_supergraph(&self) -> &apollo_federation::Supergraph {
-        // This is only called in cases wher we create ApolloFederation above
-        #[allow(clippy::panic)]
-        match &self.supergraph {
-            Supergraph::ApolloFederation(s) => s,
-            Supergraph::ApolloCompiler(_) => panic!("expected an apollo-federation supergraph"),
-        }
+    pub(crate) fn federation_supergraph(&self) -> &Supergraph {
+        &self.supergraph
     }
 
     pub(crate) fn supergraph_schema(&self) -> &Valid<apollo_compiler::Schema> {
-        match &self.supergraph {
-            Supergraph::ApolloFederation(s) => s.schema.schema(),
-            Supergraph::ApolloCompiler(s) => s,
-        }
+        self.supergraph.schema.schema()
     }
 
     pub(crate) fn schema_id(sdl: &str) -> String {
@@ -164,31 +148,6 @@
         format!("{:x}", hasher.finalize())
     }
 
-    pub(crate) fn create_api_schema(
-        &self,
-        configuration: &Configuration,
-    ) -> Result<String, SchemaError> {
-        use apollo_federation::ApiSchemaOptions;
-
-        let api_schema = self
-            .federation_supergraph()
-            .to_api_schema(ApiSchemaOptions {
-                include_defer: configuration.supergraph.defer_support,
-                ..Default::default()
-            })
-            .map_err(|e| {
-                SchemaError::Api(format!(
-                    "The supergraph schema failed to produce a valid API schema: {e}"
-                ))
-            })?;
-        Ok(api_schema.schema().to_string())
-    }
-
-    pub(crate) fn with_api_schema(mut self, api_schema: Valid<apollo_compiler::Schema>) -> Self {
-        self.api_schema = Some(ApiSchema(api_schema));
-        self
-    }
-
     /// Extracts a string containing the entire [`Schema`].
     pub(crate) fn as_string(&self) -> &Arc<String> {
         &self.raw_sdl
     }
@@ -245,13 +204,9 @@
         self.subgraphs.get(service_name)
     }
 
-    // TODO: make `self.api_schema` non-optional after we move to Rust-only API schema generation
-    #[allow(clippy::panic)]
+    /// Return the API schema for this supergraph.
pub(crate) fn api_schema(&self) -> &ApiSchema { - match &self.api_schema { - Some(schema) => schema, - None => panic!("missing API schema"), - } + &self.api_schema } pub(crate) fn root_operation_name(&self, kind: OperationKind) -> &str { @@ -392,14 +347,11 @@ impl std::fmt::Debug for Schema { } } -#[derive(Debug)] -pub(crate) struct InvalidObject; - impl std::ops::Deref for ApiSchema { type Target = Valid; fn deref(&self) -> &Self::Target { - &self.0 + self.0.schema() } } @@ -449,7 +401,7 @@ mod tests { "#, ); let schema = format!("{base_schema}\n{schema}"); - Schema::parse_test(&schema, &Default::default()).unwrap() + Schema::parse(&schema, &Default::default()).unwrap() } fn gen_schema_interfaces(schema: &str) -> Schema { @@ -473,7 +425,7 @@ mod tests { "#, ); let schema = format!("{base_schema}\n{schema}"); - Schema::parse_test(&schema, &Default::default()).unwrap() + Schema::parse(&schema, &Default::default()).unwrap() } let schema = gen_schema_types("union UnionType = Foo | Bar | Baz"); assert!(schema.is_subtype("UnionType", "Foo")); @@ -529,7 +481,7 @@ mod tests { @join__graph(name: "products" url: "http://localhost:4003/graphql") REVIEWS @join__graph(name: "reviews" url: "http://localhost:4002/graphql") }"#; - let schema = Schema::parse_test(schema, &Default::default()).unwrap(); + let schema = Schema::parse(schema, &Default::default()).unwrap(); assert_eq!(schema.subgraphs.len(), 4); assert_eq!( @@ -578,7 +530,7 @@ mod tests { #[test] fn api_schema() { let schema = include_str!("../testdata/contract_schema.graphql"); - let schema = Schema::parse_test(schema, &Default::default()).unwrap(); + let schema = Schema::parse(schema, &Default::default()).unwrap(); let has_in_stock_field = |schema: &apollo_compiler::Schema| { schema .get_object("Product") @@ -593,7 +545,7 @@ mod tests { #[test] fn federation_version() { // @core directive - let schema = Schema::parse_test( + let schema = Schema::parse( include_str!("../testdata/minimal_supergraph.graphql"), &Default::default(), ) @@ -601,7 +553,7 @@ mod tests { assert_eq!(schema.federation_version(), Some(1)); // @link directive - let schema = Schema::parse_test( + let schema = Schema::parse( include_str!("../testdata/minimal_fed2_supergraph.graphql"), &Default::default(), ) @@ -614,7 +566,7 @@ mod tests { #[cfg(not(windows))] { let schema = include_str!("../testdata/starstuff@current.graphql"); - let schema = Schema::parse_test(schema, &Default::default()).unwrap(); + let schema = Schema::parse(schema, &Default::default()).unwrap(); assert_eq!( Schema::schema_id(&schema.raw_sdl), @@ -627,7 +579,7 @@ mod tests { #[test] fn inaccessible_on_non_core() { let schema = include_str!("../testdata/inaccessible_on_non_core.graphql"); - match Schema::parse_test(schema, &Default::default()) { + match Schema::parse(schema, &Default::default()) { Err(SchemaError::Api(s)) => { assert_eq!( s, @@ -643,7 +595,7 @@ mod tests { #[test] fn unclosed_brace_error_does_not_panic() { let schema = "schema {"; - let result = Schema::parse_test(schema, &Default::default()); + let result = Schema::parse(schema, &Default::default()); assert!(result.is_err()); } } diff --git a/apollo-router/src/uplink/mod.rs b/apollo-router/src/uplink/mod.rs index 2a7aa22046..d6eb3262c5 100644 --- a/apollo-router/src/uplink/mod.rs +++ b/apollo-router/src/uplink/mod.rs @@ -118,9 +118,8 @@ impl Endpoints { urls.iter() .cycle() .skip(*current) - .map(|url| { + .inspect(|_| { *current += 1; - url }) .take(urls.len()), ) @@ -467,7 +466,7 @@ where .json(request_body) .send() .await - 
.map_err(|e| { + .inspect_err(|e| { if let Some(hyper_err) = e.source() { if let Some(os_err) = hyper_err.source() { if os_err.to_string().contains("tcp connect error: Cannot assign requested address (os error 99)") { @@ -475,7 +474,6 @@ where } } } - e })?; tracing::debug!("uplink response {:?}", res); let response_body: graphql_client::Response = res.json().await?; diff --git a/apollo-router/tests/common.rs b/apollo-router/tests/common.rs index 32f4888304..6eddc3dc70 100644 --- a/apollo-router/tests/common.rs +++ b/apollo-router/tests/common.rs @@ -19,7 +19,6 @@ use futures::StreamExt; use http::header::ACCEPT; use http::header::CONTENT_TYPE; use http::HeaderValue; -use jsonpath_lib::Selector; use mediatype::names::BOUNDARY; use mediatype::names::FORM_DATA; use mediatype::names::MULTIPART; @@ -54,7 +53,6 @@ use tokio::process::Child; use tokio::process::Command; use tokio::task; use tokio::time::Instant; -use tower::BoxError; use tracing::info_span; use tracing_core::Dispatch; use tracing_core::LevelFilter; @@ -1034,20 +1032,6 @@ impl Drop for IntegrationTest { } } -pub trait ValueExt { - fn select_path<'a>(&'a self, path: &str) -> Result, BoxError>; - fn as_string(&self) -> Option; -} - -impl ValueExt for Value { - fn select_path<'a>(&'a self, path: &str) -> Result, BoxError> { - Ok(Selector::new().str_path(path)?.value(self).select()?) - } - fn as_string(&self) -> Option { - self.as_str().map(|s| s.to_string()) - } -} - /// Merge in overrides to a yaml config. /// /// The test harness needs some options to be present for it to work, so this diff --git a/apollo-router/tests/integration/batching.rs b/apollo-router/tests/integration/batching.rs index 09281ea677..c50c85054b 100644 --- a/apollo-router/tests/integration/batching.rs +++ b/apollo-router/tests/integration/batching.rs @@ -4,7 +4,7 @@ use itertools::Itertools; use tower::BoxError; use wiremock::ResponseTemplate; -use crate::integration::common::ValueExt as _; +use crate::integration::ValueExt as _; const CONFIG: &str = include_str!("../fixtures/batching/all_enabled.router.yaml"); const SHORT_TIMEOUTS_CONFIG: &str = include_str!("../fixtures/batching/short_timeouts.router.yaml"); @@ -377,12 +377,12 @@ async fn it_handles_cancelled_by_rhai() -> Result<(), BoxError> { entryA: index: 0 - errors: - - message: "rhai execution error: 'Runtime error: cancelled expected failure (line 5, position 13)\nin closure call'" + - message: "rhai execution error: 'Runtime error: cancelled expected failure (line 5, position 13)'" - data: entryA: index: 1 - errors: - - message: "rhai execution error: 'Runtime error: cancelled expected failure (line 5, position 13)\nin closure call'" + - message: "rhai execution error: 'Runtime error: cancelled expected failure (line 5, position 13)'" "###); } @@ -471,7 +471,7 @@ async fn it_handles_single_request_cancelled_by_rhai() -> Result<(), BoxError> { entryA: index: 1 - errors: - - message: "rhai execution error: 'Runtime error: cancelled expected failure (line 5, position 13)\nin closure call'" + - message: "rhai execution error: 'Runtime error: cancelled expected failure (line 5, position 13)'" "###); } diff --git a/apollo-router/tests/integration/mod.rs b/apollo-router/tests/integration/mod.rs index 55c95c0d06..7ab2f50d95 100644 --- a/apollo-router/tests/integration/mod.rs +++ b/apollo-router/tests/integration/mod.rs @@ -17,3 +17,21 @@ mod rhai; mod subscription; mod telemetry; mod validation; + +use jsonpath_lib::Selector; +use serde_json::Value; +use tower::BoxError; + +pub trait ValueExt { + fn 
select_path<'a>(&'a self, path: &str) -> Result<Vec<&'a Value>, BoxError>;
+    fn as_string(&self) -> Option<String>;
+}
+
+impl ValueExt for Value {
+    fn select_path<'a>(&'a self, path: &str) -> Result<Vec<&'a Value>, BoxError> {
+        Ok(Selector::new().str_path(path)?.value(self).select()?)
+    }
+    fn as_string(&self) -> Option<String> {
+        self.as_str().map(|s| s.to_string())
+    }
+}
diff --git a/apollo-router/tests/integration/redis.rs b/apollo-router/tests/integration/redis.rs
index 9df887825f..b7bfe2ea52 100644
--- a/apollo-router/tests/integration/redis.rs
+++ b/apollo-router/tests/integration/redis.rs
@@ -1,5 +1,4 @@
 use apollo_router::plugin::test::MockSubgraph;
-use apollo_router::services::execution::QueryPlan;
 use apollo_router::services::router;
 use apollo_router::services::supergraph;
 use apollo_router::Context;
@@ -12,8 +11,6 @@
 use futures::StreamExt;
 use http::header::CACHE_CONTROL;
 use http::HeaderValue;
 use http::Method;
-use serde::Deserialize;
-use serde::Serialize;
 use serde_json::json;
 use serde_json::Value;
 use tower::BoxError;
@@ -158,12 +155,6 @@ async fn query_planner_cache() -> Result<(), BoxError> {
     Ok(())
 }
 
-#[derive(Deserialize, Serialize)]
-
-struct QueryPlannerContent {
-    plan: QueryPlan,
-}
-
 #[tokio::test(flavor = "multi_thread")]
 async fn apq() -> Result<(), BoxError> {
     let config = RedisConfig::from_url("redis://127.0.0.1:6379").unwrap();
@@ -233,7 +224,7 @@ async fn apq() -> Result<(), BoxError> {
         res.errors.first().unwrap().message,
         "PersistedQueryNotFound"
     );
-    let r: Option<String> = client.get(&format!("apq:{query_hash}")).await.unwrap();
+    let r: Option<String> = client.get(format!("apq:{query_hash}")).await.unwrap();
     assert!(r.is_none());
 
     // Now we register the query
@@ -261,7 +252,7 @@ async fn apq() -> Result<(), BoxError> {
     assert!(res.data.is_some());
     assert!(res.errors.is_empty());
 
-    let s: Option<String> = client.get(&format!("apq:{query_hash}")).await.unwrap();
+    let s: Option<String> = client.get(format!("apq:{query_hash}")).await.unwrap();
     insta::assert_snapshot!(s.unwrap());
 
     // we start a new router with the same config
@@ -416,13 +407,13 @@ async fn entity_cache() -> Result<(), BoxError> {
     insta::assert_json_snapshot!(response);
 
     let s:String = client
-        .get("subgraph:products:type:Query:hash:0df945dc1bc08f7fc02e8905b4c72aa9112f29bb7a214e4a38d199f0aa635b48:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
+        .get("version:1.0:subgraph:products:type:Query:hash:0df945dc1bc08f7fc02e8905b4c72aa9112f29bb7a214e4a38d199f0aa635b48:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
         .await
         .unwrap();
     let v: Value = serde_json::from_str(&s).unwrap();
     insta::assert_json_snapshot!(v.as_object().unwrap().get("data").unwrap());
 
-    let s: String = client.get("subgraph:reviews:Product:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:1de543dab57fde0f00247922ccc4f76d4c916ae26a89dd83cd1a62300d0cda20:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c").await.unwrap();
+    let s: String = client.get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:1de543dab57fde0f00247922ccc4f76d4c916ae26a89dd83cd1a62300d0cda20:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c").await.unwrap();
     let v: Value = serde_json::from_str(&s).unwrap();
     insta::assert_json_snapshot!(v.as_object().unwrap().get("data").unwrap());
 
@@ -526,7 +517,7 @@ async fn entity_cache() -> Result<(), BoxError> {
     insta::assert_json_snapshot!(response);
 
     let s:String = client
-        .get("subgraph:reviews:Product:d9a4cd73308dd13ca136390c10340823f94c335b9da198d2339c886c738abf0d:1de543dab57fde0f00247922ccc4f76d4c916ae26a89dd83cd1a62300d0cda20:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
+        .get("version:1.0:subgraph:reviews:type:Product:entity:d9a4cd73308dd13ca136390c10340823f94c335b9da198d2339c886c738abf0d:hash:1de543dab57fde0f00247922ccc4f76d4c916ae26a89dd83cd1a62300d0cda20:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
         .await
         .unwrap();
     let v: Value = serde_json::from_str(&s).unwrap();
@@ -743,7 +734,7 @@ async fn entity_cache_authorization() -> Result<(), BoxError> {
     insta::assert_json_snapshot!(response);
 
     let s:String = client
-        .get("subgraph:products:type:Query:hash:0df945dc1bc08f7fc02e8905b4c72aa9112f29bb7a214e4a38d199f0aa635b48:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
+        .get("version:1.0:subgraph:products:type:Query:hash:0df945dc1bc08f7fc02e8905b4c72aa9112f29bb7a214e4a38d199f0aa635b48:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
         .await
         .unwrap();
     let v: Value = serde_json::from_str(&s).unwrap();
@@ -764,7 +755,7 @@
     );
 
     let s: String = client
-        .get("subgraph:reviews:Product:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:1de543dab57fde0f00247922ccc4f76d4c916ae26a89dd83cd1a62300d0cda20:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
+        .get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:1de543dab57fde0f00247922ccc4f76d4c916ae26a89dd83cd1a62300d0cda20:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
         .await
         .unwrap();
     let v: Value = serde_json::from_str(&s).unwrap();
@@ -808,7 +799,7 @@ async fn entity_cache_authorization() -> Result<(), BoxError> {
     insta::assert_json_snapshot!(response);
 
     let s:String = client
-        .get("subgraph:reviews:Product:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:3b6ef3c8fd34c469d59f513942c5f4c8f91135e828712de2024e2cd4613c50ae:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
+        .get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:3b6ef3c8fd34c469d59f513942c5f4c8f91135e828712de2024e2cd4613c50ae:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c")
         .await
         .unwrap();
     let v: Value = serde_json::from_str(&s).unwrap();
diff --git a/apollo-router/tests/integration/telemetry/datadog.rs b/apollo-router/tests/integration/telemetry/datadog.rs
index 7ae6bdc5f5..613242ca39 100644
--- a/apollo-router/tests/integration/telemetry/datadog.rs
+++ b/apollo-router/tests/integration/telemetry/datadog.rs
@@ -11,8 +11,18 @@ use tower::BoxError;
 
 use crate::integration::common::graph_os_enabled;
 use crate::integration::common::Telemetry;
-use crate::integration::common::ValueExt;
 use crate::integration::IntegrationTest;
+use crate::integration::ValueExt;
+
+#[derive(buildstructor::Builder)]
+struct TraceSpec {
+    operation_name: Option<String>,
+    version: Option<String>,
+    services: HashSet<&'static str>,
+    span_names: HashSet<&'static str>,
+    measured_spans: HashSet<&'static str>,
+    unmeasured_spans: HashSet<&'static str>,
+}
 
 #[tokio::test(flavor = "multi_thread")]
 async fn test_default_span_names() -> Result<(), BoxError> {
@@ -41,27 +51,27 @@
         .unwrap(),
         id.to_datadog()
     );
-    validate_trace(
-        id,
&query, - Some("ExampleQuery"), - &["client", "router", "subgraph"], - false, - &[ - "query_planning", - "client_request", - "subgraph_request", - "subgraph", - "fetch", - "supergraph", - "execution", - "query ExampleQuery", - "subgraph server", - "http_request", - "parse_query", - ], - ) - .await?; + TraceSpec::builder() + .services(["client", "router", "subgraph"].into()) + .span_names( + [ + "query_planning", + "client_request", + "subgraph_request", + "subgraph", + "fetch", + "supergraph", + "execution", + "query ExampleQuery", + "subgraph server", + "http_request", + "parse_query", + ] + .into(), + ) + .build() + .validate_trace(id) + .await?; router.graceful_shutdown().await; Ok(()) } @@ -93,27 +103,27 @@ async fn test_override_span_names() -> Result<(), BoxError> { .unwrap(), id.to_datadog() ); - validate_trace( - id, - &query, - Some("ExampleQuery"), - &["client", "router", "subgraph"], - false, - &[ - "query_planning", - "client_request", - "subgraph_request", - "subgraph", - "fetch", - "supergraph", - "execution", - "overridden", - "subgraph server", - "http_request", - "parse_query", - ], - ) - .await?; + TraceSpec::builder() + .services(["client", "router", "subgraph"].into()) + .span_names( + [ + "query_planning", + "client_request", + "subgraph_request", + "subgraph", + "fetch", + "supergraph", + "execution", + "overridden", + "subgraph server", + "http_request", + "parse_query", + ] + .into(), + ) + .build() + .validate_trace(id) + .await?; router.graceful_shutdown().await; Ok(()) } @@ -145,27 +155,27 @@ async fn test_override_span_names_late() -> Result<(), BoxError> { .unwrap(), id.to_datadog() ); - validate_trace( - id, - &query, - Some("ExampleQuery"), - &["client", "router", "subgraph"], - false, - &[ - "query_planning", - "client_request", - "subgraph_request", - "subgraph", - "fetch", - "supergraph", - "execution", - "ExampleQuery", - "subgraph server", - "http_request", - "parse_query", - ], - ) - .await?; + TraceSpec::builder() + .services(["client", "router", "subgraph"].into()) + .span_names( + [ + "query_planning", + "client_request", + "subgraph_request", + "subgraph", + "fetch", + "supergraph", + "execution", + "ExampleQuery", + "subgraph server", + "http_request", + "parse_query", + ] + .into(), + ) + .build() + .validate_trace(id) + .await?; router.graceful_shutdown().await; Ok(()) } @@ -195,26 +205,40 @@ async fn test_basic() -> Result<(), BoxError> { .unwrap(), id.to_datadog() ); - validate_trace( - id, - &query, - Some("ExampleQuery"), - &["client", "router", "subgraph"], - false, - &[ - "query_planning", - "client_request", - "ExampleQuery__products__0", - "products", - "fetch", - "/", - "execution", - "ExampleQuery", - "subgraph server", - "parse_query", - ], - ) - .await?; + TraceSpec::builder() + .operation_name("ExampleQuery") + .services(["client", "router", "subgraph"].into()) + .span_names( + [ + "query_planning", + "client_request", + "ExampleQuery__products__0", + "products", + "fetch", + "/", + "execution", + "ExampleQuery", + "subgraph server", + "parse_query", + ] + .into(), + ) + .measured_spans( + [ + "query_planning", + "subgraph", + "http_request", + "subgraph_request", + "router", + "execution", + "supergraph", + "parse_query", + ] + .into(), + ) + .build() + .validate_trace(id) + .await?; router.graceful_shutdown().await; Ok(()) } @@ -242,25 +266,27 @@ async fn test_resource_mapping_default() -> Result<(), BoxError> { .get("apollo-custom-trace-id") .unwrap() .is_empty()); - validate_trace( - id, - &query, - Some("ExampleQuery"), - 
&["client", "router", "subgraph"], - false, - &[ - "parse_query", - "ExampleQuery", - "client_request", - "execution", - "query_planning", - "products", - "fetch", - "subgraph server", - "ExampleQuery__products__0", - ], - ) - .await?; + TraceSpec::builder() + .operation_name("ExampleQuery") + .services(["client", "router", "subgraph"].into()) + .span_names( + [ + "parse_query", + "/", + "ExampleQuery", + "client_request", + "execution", + "query_planning", + "products", + "fetch", + "subgraph server", + "ExampleQuery__products__0", + ] + .into(), + ) + .build() + .validate_trace(id) + .await?; router.graceful_shutdown().await; Ok(()) } @@ -288,140 +314,74 @@ async fn test_resource_mapping_override() -> Result<(), BoxError> { .get("apollo-custom-trace-id") .unwrap() .is_empty()); - validate_trace( - id, - &query, - Some("ExampleQuery"), - &["client", "router", "subgraph"], - false, - &[ - "parse_query", - "ExampleQuery", - "client_request", - "execution", - "query_planning", - "products", - "fetch", - "subgraph server", - "overridden", - "ExampleQuery__products__0", - ], - ) - .await?; - router.graceful_shutdown().await; - Ok(()) -} - -async fn validate_trace( - id: TraceId, - query: &Value, - operation_name: Option<&str>, - services: &[&'static str], - custom_span_instrumentation: bool, - expected_span_names: &[&'static str], -) -> Result<(), BoxError> { - let datadog_id = id.to_datadog(); - let url = format!("http://localhost:8126/test/traces?trace_ids={datadog_id}"); - for _ in 0..10 { - if find_valid_trace( - &url, - query, - operation_name, - services, - custom_span_instrumentation, - expected_span_names, + TraceSpec::builder() + .services(["client", "router", "subgraph"].into()) + .span_names( + [ + "parse_query", + "ExampleQuery", + "client_request", + "execution", + "query_planning", + "products", + "fetch", + "subgraph server", + "overridden", + "ExampleQuery__products__0", + ] + .into(), ) - .await - .is_ok() - { - return Ok(()); - } - tokio::time::sleep(Duration::from_millis(100)).await; - } - find_valid_trace( - &url, - query, - operation_name, - services, - custom_span_instrumentation, - expected_span_names, - ) - .await?; - Ok(()) -} - -async fn find_valid_trace( - url: &str, - _query: &Value, - operation_name: Option<&str>, - services: &[&'static str], - _custom_span_instrumentation: bool, - expected_span_names: &[&'static str], -) -> Result<(), BoxError> { - // A valid trace has: - // * All three services - // * The correct spans - // * All spans are parented - // * Required attributes of 'router' span has been set - - // For now just validate service name. - let trace: Value = reqwest::get(url) - .await - .map_err(|e| anyhow!("failed to contact datadog; {}", e))? - .json() + .build() + .validate_trace(id) .await?; - tracing::debug!("{}", serde_json::to_string_pretty(&trace)?); - verify_trace_participants(&trace, services)?; - verify_spans_present(&trace, operation_name, services, expected_span_names)?; + router.graceful_shutdown().await; Ok(()) } -fn verify_trace_participants(trace: &Value, services: &[&'static str]) -> Result<(), BoxError> { - let actual_services: HashSet = trace - .select_path("$..service")? 
- .into_iter() - .filter_map(|service| service.as_string()) - .collect(); - tracing::debug!("found services {:?}", actual_services); - - let expected_services = services - .iter() - .map(|s| s.to_string()) - .collect::>(); - if actual_services != expected_services { - return Err(BoxError::from(format!( - "incomplete traces, got {actual_services:?} expected {expected_services:?}" - ))); +#[tokio::test(flavor = "multi_thread")] +async fn test_span_metrics() -> Result<(), BoxError> { + if !graph_os_enabled() { + return Ok(()); } - Ok(()) -} + let mut router = IntegrationTest::builder() + .telemetry(Telemetry::Datadog) + .config(include_str!("fixtures/disable_span_metrics.router.yaml")) + .build() + .await; -fn verify_spans_present( - trace: &Value, - _operation_name: Option<&str>, - services: &[&'static str], - expected_span_names: &[&'static str], -) -> Result<(), BoxError> { - let operation_names: HashSet = trace - .select_path("$..resource")? - .into_iter() - .filter_map(|span_name| span_name.as_string()) - .collect(); - let mut expected_span_names: HashSet = - expected_span_names.iter().map(|s| s.to_string()).collect(); - if services.contains(&"client") { - expected_span_names.insert("client_request".into()); - } - tracing::debug!("found spans {:?}", operation_names); - let missing_operation_names: Vec<_> = expected_span_names - .iter() - .filter(|o| !operation_names.contains(*o)) - .collect(); - if !missing_operation_names.is_empty() { - return Err(BoxError::from(format!( - "spans did not match, got {operation_names:?}, missing {missing_operation_names:?}" - ))); - } + router.start().await; + router.assert_started().await; + + let query = json!({"query":"query ExampleQuery {topProducts{name}}","variables":{}}); + let (id, result) = router.execute_query(&query).await; + assert!(!result + .headers() + .get("apollo-custom-trace-id") + .unwrap() + .is_empty()); + TraceSpec::builder() + .operation_name("ExampleQuery") + .services(["client", "router", "subgraph"].into()) + .span_names( + [ + "parse_query", + "ExampleQuery", + "client_request", + "execution", + "query_planning", + "products", + "fetch", + "subgraph server", + "ExampleQuery__products__0", + ] + .into(), + ) + .measured_span("subgraph") + .unmeasured_span("supergraph") + .build() + .validate_trace(id) + .await?; + router.graceful_shutdown().await; Ok(()) } @@ -434,3 +394,200 @@ impl DatadogId for TraceId { u64::from_be_bytes(bytes.try_into().unwrap()).to_string() } } + +impl TraceSpec { + #[allow(clippy::too_many_arguments)] + async fn validate_trace(&self, id: TraceId) -> Result<(), BoxError> { + let datadog_id = id.to_datadog(); + let url = format!("http://localhost:8126/test/traces?trace_ids={datadog_id}"); + for _ in 0..10 { + if self.find_valid_trace(&url).await.is_ok() { + return Ok(()); + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + self.find_valid_trace(&url).await?; + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + async fn find_valid_trace(&self, url: &str) -> Result<(), BoxError> { + // A valid trace has: + // * All three services + // * The correct spans + // * All spans are parented + // * Required attributes of 'router' span has been set + + // For now just validate service name. + let trace: Value = reqwest::get(url) + .await + .map_err(|e| anyhow!("failed to contact datadog; {}", e))? 
+ .json() + .await?; + tracing::debug!("{}", serde_json::to_string_pretty(&trace)?); + self.verify_trace_participants(&trace)?; + self.verify_operation_name(&trace)?; + self.verify_priority_sampled(&trace)?; + self.verify_version(&trace)?; + self.verify_spans_present(&trace)?; + self.validate_span_kinds(&trace)?; + self.validate_measured_spans(&trace)?; + Ok(()) + } + + fn verify_version(&self, trace: &Value) -> Result<(), BoxError> { + if let Some(expected_version) = &self.version { + let binding = trace.select_path("$..version")?; + let version = binding.first(); + assert_eq!( + version + .expect("version expected") + .as_str() + .expect("version must be a string"), + expected_version + ); + } + Ok(()) + } + + fn validate_measured_spans(&self, trace: &Value) -> Result<(), BoxError> { + for expected in &self.measured_spans { + assert!( + self.measured_span(trace, expected)?, + "missing measured span {}", + expected + ); + } + for unexpected in &self.unmeasured_spans { + assert!( + !self.measured_span(trace, unexpected)?, + "unexpected measured span {}", + unexpected + ); + } + Ok(()) + } + + fn measured_span(&self, trace: &Value, name: &str) -> Result { + let binding1 = trace.select_path(&format!( + "$..[?(@.meta.['otel.original_name'] == '{}')].metrics.['_dd.measured']", + name + ))?; + let binding2 = trace.select_path(&format!( + "$..[?(@.name == '{}')].metrics.['_dd.measured']", + name + ))?; + Ok(binding1 + .first() + .or(binding2.first()) + .and_then(|v| v.as_f64()) + .map(|v| v == 1.0) + .unwrap_or_default()) + } + + fn validate_span_kinds(&self, trace: &Value) -> Result<(), BoxError> { + // Validate that the span.kind has been propagated. We can just do this for a selection of spans. + self.validate_span_kind(trace, "router", "server")?; + self.validate_span_kind(trace, "supergraph", "internal")?; + self.validate_span_kind(trace, "http_request", "client")?; + Ok(()) + } + + fn verify_trace_participants(&self, trace: &Value) -> Result<(), BoxError> { + let actual_services: HashSet = trace + .select_path("$..service")? + .into_iter() + .filter_map(|service| service.as_string()) + .collect(); + tracing::debug!("found services {:?}", actual_services); + + let expected_services = self + .services + .iter() + .map(|s| s.to_string()) + .collect::>(); + if actual_services != expected_services { + return Err(BoxError::from(format!( + "incomplete traces, got {actual_services:?} expected {expected_services:?}" + ))); + } + Ok(()) + } + + fn verify_spans_present(&self, trace: &Value) -> Result<(), BoxError> { + let operation_names: HashSet = trace + .select_path("$..resource")? 
+ .into_iter() + .filter_map(|span_name| span_name.as_string()) + .collect(); + let mut span_names: HashSet<&str> = self.span_names.clone(); + if self.services.contains("client") { + span_names.insert("client_request"); + } + tracing::debug!("found spans {:?}", operation_names); + let missing_operation_names: Vec<_> = span_names + .iter() + .filter(|o| !operation_names.contains(**o)) + .collect(); + if !missing_operation_names.is_empty() { + return Err(BoxError::from(format!( + "spans did not match, got {operation_names:?}, missing {missing_operation_names:?}" + ))); + } + Ok(()) + } + + fn validate_span_kind(&self, trace: &Value, name: &str, kind: &str) -> Result<(), BoxError> { + let binding1 = trace.select_path(&format!( + "$..[?(@.meta.['otel.original_name'] == '{}')].meta.['span.kind']", + name + ))?; + let binding2 = + trace.select_path(&format!("$..[?(@.name == '{}')].meta.['span.kind']", name))?; + let binding = binding1.first().or(binding2.first()); + + assert!( + binding.is_some(), + "span.kind missing or incorrect {}, {}", + name, + trace + ); + assert_eq!( + binding + .expect("expected binding") + .as_str() + .expect("expected string"), + kind + ); + Ok(()) + } + + fn verify_operation_name(&self, trace: &Value) -> Result<(), BoxError> { + if let Some(expected_operation_name) = &self.operation_name { + let binding = + trace.select_path("$..[?(@.name == 'supergraph')]..['graphql.operation.name']")?; + let operation_name = binding.first(); + assert_eq!( + operation_name + .expect("graphql.operation.name expected") + .as_str() + .expect("graphql.operation.name must be a string"), + expected_operation_name + ); + } + Ok(()) + } + + fn verify_priority_sampled(&self, trace: &Value) -> Result<(), BoxError> { + let binding = trace.select_path("$.._sampling_priority_v1")?; + let sampling_priority = binding.first(); + assert_eq!( + sampling_priority + .expect("sampling priority expected") + .as_f64() + .expect("sampling priority must be a number"), + 1.0 + ); + Ok(()) + } +} diff --git a/apollo-router/tests/integration/telemetry/fixtures/datadog.router.yaml b/apollo-router/tests/integration/telemetry/fixtures/datadog.router.yaml index 18294a77ff..f95964ae6d 100644 --- a/apollo-router/tests/integration/telemetry/fixtures/datadog.router.yaml +++ b/apollo-router/tests/integration/telemetry/fixtures/datadog.router.yaml @@ -7,6 +7,9 @@ telemetry: format: datadog common: service_name: router + resource: + env: local1 + service.version: router_version_override datadog: enabled: true batch_processor: diff --git a/apollo-router/tests/integration/telemetry/fixtures/datadog_resource_mapping_default.router.yaml b/apollo-router/tests/integration/telemetry/fixtures/datadog_resource_mapping_default.router.yaml index 396a60fa5d..96160b1831 100644 --- a/apollo-router/tests/integration/telemetry/fixtures/datadog_resource_mapping_default.router.yaml +++ b/apollo-router/tests/integration/telemetry/fixtures/datadog_resource_mapping_default.router.yaml @@ -12,6 +12,12 @@ telemetry: enable_span_mapping: true batch_processor: scheduled_delay: 100ms + instrumentation: + spans: + mode: spec_compliant + supergraph: + attributes: + graphql.operation.name: true diff --git a/apollo-router/tests/integration/telemetry/fixtures/disable_span_metrics.router.yaml b/apollo-router/tests/integration/telemetry/fixtures/disable_span_metrics.router.yaml new file mode 100644 index 0000000000..0d47070c4a --- /dev/null +++ b/apollo-router/tests/integration/telemetry/fixtures/disable_span_metrics.router.yaml @@ -0,0 +1,26 @@ 
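For readers following the JSONPath selectors in `measured_span`, `validate_span_kind`, and `verify_priority_sampled` above, here is a sketch of the assumed shape of the Datadog test-agent payload they query. The shape is inferred from the selectors themselves (string attributes under `meta`, numeric ones under `metrics`, traces nested as an array of span arrays); all field values are illustrative only:

```rust
use serde_json::json;

fn main() {
    // Assumed agent response shape: an array of traces, each an array of spans.
    // `_dd.measured == 1.0` is what marks a span as emitting span metrics.
    let trace = json!([[{
        "name": "subgraph",
        "resource": "products",
        "meta": {
            "span.kind": "internal",
            "otel.original_name": "subgraph"
        },
        "metrics": {
            "_dd.measured": 1.0,
            "_sampling_priority_v1": 1.0
        }
    }]]);
    assert!(trace.is_array());
}
```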
+telemetry: + exporters: + tracing: + experimental_response_trace_id: + enabled: true + header_name: apollo-custom-trace-id + format: datadog + common: + service_name: router + datadog: + enabled: true + batch_processor: + scheduled_delay: 100ms + span_metrics: + supergraph: false + + instrumentation: + spans: + mode: spec_compliant + supergraph: + attributes: + graphql.operation.name: true + + + + diff --git a/apollo-router/tests/integration/telemetry/jaeger.rs b/apollo-router/tests/integration/telemetry/jaeger.rs index 6003efea04..fcf59e4ef5 100644 --- a/apollo-router/tests/integration/telemetry/jaeger.rs +++ b/apollo-router/tests/integration/telemetry/jaeger.rs @@ -10,8 +10,8 @@ use serde_json::Value; use tower::BoxError; use crate::integration::common::Telemetry; -use crate::integration::common::ValueExt; use crate::integration::IntegrationTest; +use crate::integration::ValueExt; #[tokio::test(flavor = "multi_thread")] async fn test_reload() -> Result<(), BoxError> { @@ -342,7 +342,6 @@ async fn validate_trace( .finish(); let id = id.to_string(); - println!("trace id: {}", id); let url = format!("http://localhost:16686/api/traces/{id}?{params}"); for _ in 0..10 { if find_valid_trace( @@ -428,6 +427,12 @@ fn verify_router_span_fields( .first(), Some(&&Value::String("1.0".to_string())) ); + assert!(router_span + .select_path("$.logs[*].fields[?(@.key == 'histogram.apollo_router_span')].value")? + .is_empty(),); + assert!(router_span + .select_path("$.logs[*].fields[?(@.key == 'histogram.apollo_router_span')].value")? + .is_empty(),); if custom_span_instrumentation { assert_eq!( router_span diff --git a/apollo-router/tests/integration/telemetry/otlp.rs b/apollo-router/tests/integration/telemetry/otlp.rs index 48d5f08e37..7eae04f567 100644 --- a/apollo-router/tests/integration/telemetry/otlp.rs +++ b/apollo-router/tests/integration/telemetry/otlp.rs @@ -19,8 +19,8 @@ use wiremock::MockServer; use wiremock::ResponseTemplate; use crate::integration::common::Telemetry; -use crate::integration::common::ValueExt; use crate::integration::IntegrationTest; +use crate::integration::ValueExt; #[tokio::test(flavor = "multi_thread")] async fn test_basic() -> Result<(), BoxError> { diff --git a/apollo-router/tests/integration/telemetry/zipkin.rs b/apollo-router/tests/integration/telemetry/zipkin.rs index 792a637a88..c0d5e0a8d5 100644 --- a/apollo-router/tests/integration/telemetry/zipkin.rs +++ b/apollo-router/tests/integration/telemetry/zipkin.rs @@ -10,8 +10,8 @@ use serde_json::Value; use tower::BoxError; use crate::integration::common::Telemetry; -use crate::integration::common::ValueExt; use crate::integration::IntegrationTest; +use crate::integration::ValueExt; #[tokio::test(flavor = "multi_thread")] async fn test_basic() -> Result<(), BoxError> { diff --git a/apollo-router/tests/integration_tests.rs b/apollo-router/tests/integration_tests.rs index c35d17ae29..4f99c0602d 100644 --- a/apollo-router/tests/integration_tests.rs +++ b/apollo-router/tests/integration_tests.rs @@ -28,7 +28,6 @@ use http::Uri; use maplit::hashmap; use mime::APPLICATION_JSON; use serde_json_bytes::json; -use serde_json_bytes::Value; use tower::BoxError; use tower::ServiceExt; use walkdir::DirEntry; @@ -468,7 +467,7 @@ async fn persisted_queries() { assert_eq!( actual.errors, vec![apollo_router::graphql::Error::builder() - .message(&format!( + .message(format!( "Persisted query '{UNKNOWN_QUERY_ID}' not found in the persisted query list" )) .extension_code("PERSISTED_QUERY_NOT_IN_LIST") @@ -1291,52 +1290,6 @@ impl Plugin 
for CountingServiceRegistry { } } -trait ValueExt { - fn eq_and_ordered(&self, other: &Self) -> bool; -} - -impl ValueExt for Value { - fn eq_and_ordered(&self, other: &Self) -> bool { - match (self, other) { - (Value::Object(a), Value::Object(b)) => { - let mut it_a = a.iter(); - let mut it_b = b.iter(); - - loop { - match (it_a.next(), it_b.next()) { - (Some(_), None) | (None, Some(_)) => break false, - (None, None) => break true, - (Some((field_a, value_a)), Some((field_b, value_b))) - if field_a == field_b && ValueExt::eq_and_ordered(value_a, value_b) => - { - continue - } - (Some(_), Some(_)) => break false, - } - } - } - (Value::Array(a), Value::Array(b)) => { - let mut it_a = a.iter(); - let mut it_b = b.iter(); - - loop { - match (it_a.next(), it_b.next()) { - (Some(_), None) | (None, Some(_)) => break false, - (None, None) => break true, - (Some(value_a), Some(value_b)) - if ValueExt::eq_and_ordered(value_a, value_b) => - { - continue - } - (Some(_), Some(_)) => break false, - } - } - } - (a, b) => a == b, - } - } -} - #[tokio::test(flavor = "multi_thread")] async fn all_stock_router_example_yamls_are_valid() { let example_dir = concat!(env!("CARGO_MANIFEST_DIR"), "/../examples"); diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/README.md b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/README.md new file mode 100644 index 0000000000..15f004693c --- /dev/null +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/README.md @@ -0,0 +1,6 @@ +# Entity cache invalidation + +This tests entity cache invalidation based on entity keys. This is the expected process: +- a query is sent to the router, for which multiple entities will be requested +- we reload the subgraph with a mock mutation where the response has an extension to invalidate one of the entities +- we do the same query, we should see an `_entities` query that only requests that specific entity \ No newline at end of file diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/configuration.yaml b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/configuration.yaml new file mode 100644 index 0000000000..b297fee443 --- /dev/null +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/configuration.yaml @@ -0,0 +1,17 @@ +override_subgraph_url: + products: http://localhost:4005 +include_subgraph_errors: + all: true + +preview_entity_cache: + enabled: true + redis: + urls: + ["redis://localhost:6379",] + subgraph: + all: + enabled: true + subgraphs: + reviews: + ttl: 120s + enabled: true \ No newline at end of file diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/skipped.json b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/skipped.json new file mode 100644 index 0000000000..b505259570 --- /dev/null +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/skipped.json @@ -0,0 +1,239 @@ +{ + "enterprise": true, + "redis": true, + "actions": [ + { + "type": "Start", + "schema_path": "./supergraph.graphql", + "configuration_path": "./configuration.yaml", + "subgraphs": { + "products": { + "requests": [ + { + "request": { + "body": {"query":"{topProducts{__typename upc}}"} + }, + "response": { + "headers": { + "Cache-Control": "public, max-age=10", + "Content-Type": "application/json" + }, + "body": {"data": { "topProducts": [{ "__typename": "Product", "upc": "0" 
}, { "__typename": "Product", "upc": "1"} ] } } + } + } + ] + }, + "reviews": { + "requests": [ + { + "request": { + "body": { + "query":"query($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{body}}}}", + "variables":{"representations":[{"upc":"0","__typename":"Product"},{"upc":"1","__typename":"Product"}]} + } + }, + "response": { + "headers": { + "Cache-Control": "public, max-age=10", + "Content-Type": "application/json" + }, + "body": {"data": { "_entities": [ + { + "reviews": [ + { "body": "A"}, + { "body": "B"} + ] + }, + { + "reviews": [ + { "body": "C"} + ] + }] + }} + } + } + ] + } + } + }, + { + "type": "Request", + "request": { + "query": "{ topProducts { reviews { body } } }" + }, + "expected_response": { + "data":{ + "topProducts": [{ + "reviews": [{ + "body": "A" + },{ + "body": "B" + }] + }, + { + "reviews": [{ + "body": "C" + }] + }] + } + } + }, + { + "type": "ReloadSubgraphs", + "subgraphs": { + "reviews": { + "requests": [ + { + "request": { + "body": {"query":"mutation{invalidateProductReview}"} + }, + "response": { + "headers": { + "Content-Type": "application/json" + }, + "body": { + "data": { "invalidateProductReview": 1 }, + "extensions": { + "invalidation": [{ + "kind": "entity", + "subgraph": "reviews", + "type": "Product", + "key": { + "upc": "1" + } + }] + } + } + } + }, + { + "request": { + "body": { + "query":"query($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{body}}}}", + "variables":{"representations":[{"upc":"1","__typename":"Product"}]} + } + }, + "response": { + "status": 500, + "headers": { + "Cache-Control": "public, max-age=10", + "Content-Type": "application/json" + }, + "body": {} + } + } + ] + } + } + }, + { + "type": "Request", + "request": { + "query": "{ topProducts { reviews { body } } }" + }, + "expected_response": { + "data":{ + "topProducts": [{ + "reviews": [{ + "body": "A" + },{ + "body": "B" + }] + }, + { + "reviews": [{ + "body": "C" + }] + }] + } + } + }, + { + "type": "Request", + "request": { + "query": "mutation { invalidateProductReview }" + }, + "expected_response": { + "data":{ + "invalidateProductReview": 1 + } + } + }, + { + "type": "Request", + "request": { + "query": "{ topProducts { reviews { body } } }" + }, + "expected_response":{ + "data":{ + "topProducts":[{"reviews":null},{"reviews":null}] + }, + "errors":[ + { + "message":"HTTP fetch failed from 'reviews': 500: Internal Server Error", + "extensions":{"code":"SUBREQUEST_HTTP_ERROR","service":"reviews","reason":"500: Internal Server Error","http":{"status":500}} + }, + { + "message":"service 'reviews' response was malformed: {}", + "extensions":{"service":"reviews","reason":"{}","code":"SUBREQUEST_MALFORMED_RESPONSE"} + } + ] + } + }, + { + "type": "ReloadSubgraphs", + "subgraphs": { + "reviews": { + "requests": [ + { + "request": { + "body": { + "query":"query($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{body}}}}", + "variables":{"representations":[{"upc":"1","__typename":"Product"}]} + } + }, + "response": { + "headers": { + "Cache-Control": "public, max-age=10", + "Content-Type": "application/json" + }, + "body": {"data": { "_entities": [ + { + "reviews": [ + { "body": "C"} + ] + }] + }} + } + } + ] + } + } + }, + { + "type": "Request", + "request": { + "query": "{ topProducts { reviews { body } } }" + }, + "expected_response": { + "data":{ + "topProducts": [{ + "reviews": [{ + "body": "A" + },{ + "body": "B" + }] + }, + { + 
"reviews": [{ + "body": "C" + }] + }] + } + } + }, + { + "type": "Stop" + } + ] +} \ No newline at end of file diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/supergraph.graphql b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/supergraph.graphql new file mode 100644 index 0000000000..8f4b1aa05b --- /dev/null +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/supergraph.graphql @@ -0,0 +1,90 @@ + +schema + @core(feature: "https://specs.apollo.dev/core/v0.2"), + @core(feature: "https://specs.apollo.dev/join/v0.1", for: EXECUTION) + @core(feature: "https://specs.apollo.dev/inaccessible/v0.1", for: SECURITY) +{ + query: Query + mutation: Mutation +} + +directive @core(as: String, feature: String!, for: core__Purpose) repeatable on SCHEMA + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet) on FIELD_DEFINITION + +directive @join__type(graph: join__Graph!, key: join__FieldSet) repeatable on OBJECT | INTERFACE + +directive @join__owner(graph: join__Graph!) on OBJECT | INTERFACE + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @tag(name: String!) repeatable on FIELD_DEFINITION | INTERFACE | OBJECT | UNION + +directive @inaccessible on OBJECT | FIELD_DEFINITION | INTERFACE | UNION + +enum core__Purpose { + """ + `EXECUTION` features provide metadata necessary to for operation execution. + """ + EXECUTION + + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY +} + +scalar join__FieldSet + +enum join__Graph { + ACCOUNTS @join__graph(name: "accounts", url: "https://accounts.demo.starstuff.dev") + INVENTORY @join__graph(name: "inventory", url: "https://inventory.demo.starstuff.dev") + PRODUCTS @join__graph(name: "products", url: "https://products.demo.starstuff.dev") + REVIEWS @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev") +} +type Mutation { + updateMyAccount: User @join__field(graph: ACCOUNTS) + invalidateProductReview: Int @join__field(graph: REVIEWS) +} + +type Product + @join__owner(graph: PRODUCTS) + @join__type(graph: PRODUCTS, key: "upc") + @join__type(graph: INVENTORY, key: "upc") + @join__type(graph: REVIEWS, key: "upc") +{ + inStock: Boolean @join__field(graph: INVENTORY) @tag(name: "private") @inaccessible + name: String @join__field(graph: PRODUCTS) + price: Int @join__field(graph: PRODUCTS) + reviews: [Review] @join__field(graph: REVIEWS) + reviewsForAuthor(authorID: ID!): [Review] @join__field(graph: REVIEWS) + shippingEstimate: Int @join__field(graph: INVENTORY, requires: "price weight") + upc: String! @join__field(graph: PRODUCTS) + weight: Int @join__field(graph: PRODUCTS) +} + +type Query { + me: User @join__field(graph: ACCOUNTS) + topProducts(first: Int = 5): [Product] @join__field(graph: PRODUCTS) +} + +type Review + @join__owner(graph: REVIEWS) + @join__type(graph: REVIEWS, key: "id") +{ + author: User @join__field(graph: REVIEWS, provides: "username") + body: String @join__field(graph: REVIEWS) + id: ID! @join__field(graph: REVIEWS) + product: Product @join__field(graph: REVIEWS) +} + +type User + @join__owner(graph: ACCOUNTS) + @join__type(graph: ACCOUNTS, key: "id") + @join__type(graph: REVIEWS, key: "id") +{ + id: ID! 
@join__field(graph: ACCOUNTS) + name: String @join__field(graph: ACCOUNTS) + reviews: [Review] @join__field(graph: REVIEWS) + username: String @join__field(graph: ACCOUNTS) +} diff --git a/apollo-router/tests/samples_tests.rs b/apollo-router/tests/samples_tests.rs index 605f632c88..4507089a66 100644 --- a/apollo-router/tests/samples_tests.rs +++ b/apollo-router/tests/samples_tests.rs @@ -54,7 +54,7 @@ fn lookup_dir( ); if path.join("plan.json").exists() { - let mut file = File::open(&path.join("plan.json")).map_err(|e| { + let mut file = File::open(path.join("plan.json")).map_err(|e| { format!( "could not open file at path '{:?}': {e}", &path.join("plan.json") diff --git a/dockerfiles/tracing/datadog-subgraph/package-lock.json b/dockerfiles/tracing/datadog-subgraph/package-lock.json index fbdc25d16a..849552a1fd 100644 --- a/dockerfiles/tracing/datadog-subgraph/package-lock.json +++ b/dockerfiles/tracing/datadog-subgraph/package-lock.json @@ -383,9 +383,9 @@ } }, "node_modules/@datadog/native-iast-taint-tracking": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@datadog/native-iast-taint-tracking/-/native-iast-taint-tracking-2.1.0.tgz", - "integrity": "sha512-DjZ6itJcjLrTdKk2vP96hak2xS0ABd0NIB8poZG3OBQU5efkzu8JOQoxbIKMklG/0P2zh7EquvGP88PdVXT9aA==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@datadog/native-iast-taint-tracking/-/native-iast-taint-tracking-3.0.0.tgz", + "integrity": "sha512-V+25+edlNCQSNRUvL45IajN+CFEjii9NbjfSMG6HRHbH/zeLL9FCNE+GU88dwB1bqXKNpBdrIxsfgTN65Yq9tA==", "hasInstallScript": true, "license": "Apache-2.0", "dependencies": { @@ -886,15 +886,15 @@ } }, "node_modules/dd-trace": { - "version": "5.17.0", - "resolved": "https://registry.npmjs.org/dd-trace/-/dd-trace-5.17.0.tgz", - "integrity": "sha512-XirOYj5pJFYnm9NHvN5RFcvDyN/XMDS72wqTTnJgTPMbE4Dc28oQIdM2XWNxDtAcxqLnZq0/4DtFebGYzBAIYw==", + "version": "5.18.0", + "resolved": "https://registry.npmjs.org/dd-trace/-/dd-trace-5.18.0.tgz", + "integrity": "sha512-2akfBl6cA2ROWDog3Xfykid/Ep3jf5dzF5YT8XLzyqRB/jmfsGp4pPWxJEVo/SMXYdj9/G307weCJ7X47Mg8sQ==", "hasInstallScript": true, "license": "(Apache-2.0 OR BSD-3-Clause)", "dependencies": { "@datadog/native-appsec": "8.0.1", "@datadog/native-iast-rewriter": "2.3.1", - "@datadog/native-iast-taint-tracking": "2.1.0", + "@datadog/native-iast-taint-tracking": "3.0.0", "@datadog/native-metrics": "^2.0.0", "@datadog/pprof": "5.3.0", "@datadog/sketches-js": "^2.1.0", @@ -903,7 +903,7 @@ "crypto-randomuuid": "^1.0.0", "dc-polyfill": "^0.1.4", "ignore": "^5.2.4", - "import-in-the-middle": "^1.7.4", + "import-in-the-middle": "^1.8.1", "int64-buffer": "^0.1.9", "istanbul-lib-coverage": "3.2.0", "jest-docblock": "^29.7.0", diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index d353c22089..68cb7cdbbd 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.51.0 + image: ghcr.io/apollographql/router:v1.52.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 56d42a866f..254b9e98f8 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: 
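As a reading aid for the `extensions.invalidation` entries in the entity-cache sample above, a sketch of that shape as a serde struct. The struct and field identifiers are hypothetical; only the JSON keys come from the fixture:

```rust
use serde::Deserialize;
use serde_json::Value;

// Hypothetical model of one invalidation request, e.g.
// {"kind": "entity", "subgraph": "reviews", "type": "Product", "key": {"upc": "1"}}
#[derive(Deserialize)]
struct InvalidationRequest {
    kind: String,
    subgraph: String,
    #[serde(rename = "type")]
    ty: String,
    key: Value,
}

fn main() -> Result<(), serde_json::Error> {
    let req: InvalidationRequest = serde_json::from_value(serde_json::json!({
        "kind": "entity", "subgraph": "reviews", "type": "Product", "key": { "upc": "1" }
    }))?;
    assert_eq!(req.ty, "Product");
    Ok(())
}
```

Presumably a matching entry hashes the given entity key and evicts only that record, which would explain why the follow-up `_entities` query in the sample requests the `upc: "1"` entity alone.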
apollo-router
     #build: ./router
-    image: ghcr.io/apollographql/router:v1.51.0
+    image: ghcr.io/apollographql/router:v1.52.0
     volumes:
       - ./supergraph.graphql:/etc/config/supergraph.graphql
       - ./router/jaeger.router.yaml:/etc/config/configuration.yaml
diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml
index 5b26c558d0..b94e872653 100644
--- a/dockerfiles/tracing/docker-compose.zipkin.yml
+++ b/dockerfiles/tracing/docker-compose.zipkin.yml
@@ -4,7 +4,7 @@ services:
   apollo-router:
     container_name: apollo-router
     build: ./router
-    image: ghcr.io/apollographql/router:v1.51.0
+    image: ghcr.io/apollographql/router:v1.52.0
     volumes:
       - ./supergraph.graphql:/etc/config/supergraph.graphql
       - ./router/zipkin.router.yaml:/etc/config/configuration.yaml
diff --git a/docs/source/configuration/telemetry/exporters/tracing/datadog.mdx b/docs/source/configuration/telemetry/exporters/tracing/datadog.mdx
index 4eaa322b21..0eea7691d9 100644
--- a/docs/source/configuration/telemetry/exporters/tracing/datadog.mdx
+++ b/docs/source/configuration/telemetry/exporters/tracing/datadog.mdx
@@ -185,6 +185,39 @@ telemetry:
           my.span.attribute:
             request_header: x-custom-header
 ```
+If you have introduced a new span in a custom build of the Router, you can enable resource mapping for it by adding it to the `resource_mapping` configuration.
+
+### `span_metrics`
+When set, `span_metrics` allows you to specify which spans will show span metrics in the Datadog APM and Trace view.
+By default, span metrics are enabled for:
+
+* `request`
+* `router`
+* `supergraph`
+* `subgraph`
+* `subgraph_request`
+* `http_request`
+* `query_planning`
+* `execution`
+* `query_parsing`
+
+You may override these defaults in your `span_metrics` configuration. For example, the following disables span metrics for the `supergraph` span and enables them for a custom span:
+
+```yaml title="router.yaml"
+telemetry:
+  exporters:
+    tracing:
+      datadog:
+        enabled: true
+        span_metrics:
+          # Disable span metrics for supergraph
+          supergraph: false
+          # Enable span metrics for my_custom_span
+          my_custom_span: true
+```
+
+If you have introduced a new span in a custom build of the Router, you can enable span metrics for it by adding it to the `span_metrics` configuration.
 
 ### `batch_processor`
@@ -209,10 +242,12 @@ telemetry:
 
 ## Datadog native configuration reference
 
-| Attribute             | Default                               | Description                     |
-|-----------------------|---------------------------------------|---------------------------------|
-| `enabled`             | `false`                               | Enable the OTLP exporter.       |
-| `enable_span_mapping` | `false`                               | If span mapping should be used. |
-| `endpoint`            | `http://localhost:8126/v0.4/traces`   | The endpoint to send spans to.  |
-| `batch_processor`     |                                       | The batch processor settings.   |
+| Attribute             | Default                             | Description                              |
+|-----------------------|-------------------------------------|------------------------------------------|
+| `enabled`             | `false`                             | Enable the Datadog exporter.             |
+| `enable_span_mapping` | `false`                             | If span mapping should be used.          |
+| `endpoint`            | `http://localhost:8126/v0.4/traces` | The endpoint to send spans to.           |
+| `batch_processor`     |                                     | The batch processor settings.            |
+| `resource_mapping`    | See [config](#resource_mapping)     | A map of span names to attribute names.  |
+| `span_metrics`        | See [config](#span_metrics)         | A map of span names to booleans.         |
diff --git a/docs/source/configuration/telemetry/instrumentation/instruments.mdx b/docs/source/configuration/telemetry/instrumentation/instruments.mdx
index f7325120f0..0e612a30a3 100644
--- a/docs/source/configuration/telemetry/instrumentation/instruments.mdx
+++ b/docs/source/configuration/telemetry/instrumentation/instruments.mdx
@@ -7,8 +7,6 @@ description: Create and customize instruments to collect data and report measure
 import RouterServices from '../../../../shared/router-lifecycle-services.mdx';
 import TelemetryPerformanceNote from '../../../../shared/telemetry-performance.mdx';
 
-
-
 An _instrument_ in the router collects data and reports measurements to a metric backend. Supported instruments include standard instruments from OpenTelemetry, standard instruments for the router's request lifecycle, and custom instruments. Supported instrument kinds are counters and histograms.
 
 You can configure instruments in `router.yaml` with `telemetry.instrumentation.instruments`.
diff --git a/docs/source/configuration/telemetry/instrumentation/selectors.mdx b/docs/source/configuration/telemetry/instrumentation/selectors.mdx
index bf9b074ea2..59eaca160a 100644
--- a/docs/source/configuration/telemetry/instrumentation/selectors.mdx
+++ b/docs/source/configuration/telemetry/instrumentation/selectors.mdx
@@ -76,6 +76,7 @@ The subgraph service executes multiple times during query execution, with each e
 | `subgraph_operation_name` | Yes | `string`\|`hash` | The operation name from the subgraph query |
 | `subgraph_operation_kind` | No | `string` | The operation kind from the subgraph query |
 | `subgraph_query` | Yes | `string` | The graphql query to the subgraph |
+| `subgraph_name` | No | `true`\|`false` | The subgraph name |
 | `subgraph_query_variable` | Yes | | The name of a subgraph query variable |
 | `subgraph_response_data` | Yes | | Json Path into the subgraph response body data (it might impact performance) |
 | `subgraph_response_errors` | Yes | | Json Path into the subgraph response body errors (it might impact performance) |
@@ -92,7 +93,8 @@ The subgraph service executes multiple times during query execution, with each e
 | `baggage` | Yes | | The name of a baggage item |
 | `env` | Yes | | The name of an environment variable |
 | `static` | No | | A static string value |
-| `error` | No | `reason` | a string value containing error reason when it's a critical error |
+| `error` | No | `reason` | A string value containing the error reason when it's a critical error |
+| `cache` | No | `hit`\|`miss` | Returns the number of cache hits or misses for this subgraph request |
 
 ### GraphQL
diff --git a/docs/source/executing-operations/subscription-callback-protocol.mdx b/docs/source/executing-operations/subscription-callback-protocol.mdx
index 4624e6ba20..0f4c3e743c 100644
--- a/docs/source/executing-operations/subscription-callback-protocol.mdx
+++ b/docs/source/executing-operations/subscription-callback-protocol.mdx
@@ -233,13 +233,31 @@ The `next` message includes a `payload` field, which contains the subscription d
   //highlight-start
   "payload": {
     "data": {
-      "numberIncremented": 5
+      "getLivePriceUpdates": {
+        "__typename": "Stock",
+        "price": 5
+      }
     }
   }
   //highlight-end
 }
 ```
+
+> As this is a recent feature, if you are using a library that does not yet
+  support subscription callbacks (or you are rolling your own implementation),
+  you will need to make the payload look exactly like the response to a regular
+  query. A complete `next` message is sketched just after this note.
+
+  * The root-level key inside `data` needs to be the name of the subscription's
+    root field.
+  * The initial subscription query will contain the exact fields that the router
+    expects to be included. In particular, if it requests `__typename`, those
+    requests need to be respected; otherwise the subscription may not behave as
+    intended.
+
+  Both bullets above are typically handled automatically by server libraries,
+  so they may not be obvious when implementing the protocol manually.
+
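To make the note above concrete, a sketch of a full `next` message built the way the router expects. The `kind`, `action`, `id`, and `verifier` envelope fields are assumed from the callback protocol's message envelope (their values here are placeholders); the payload mirrors the example above:

```rust
use serde_json::json;

fn main() {
    // Sketch of a spec-shaped `next` message: the payload looks exactly like a
    // regular GraphQL response, with the subscription's root field as the key
    // under `data` and requested meta fields such as `__typename` echoed back.
    let next = json!({
        "kind": "subscription",
        "action": "next",
        "id": "some-subscription-id",   // placeholder
        "verifier": "some-verifier",    // placeholder
        "payload": {
            "data": {
                "getLivePriceUpdates": {
                    "__typename": "Stock",
                    "price": 5
                }
            }
        }
    });
    assert!(next["payload"]["data"]["getLivePriceUpdates"].is_object());
}
```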
 ### `complete`
 
 **Emitter** sends a `complete` message to **Router** to terminate an active subscription. **Emitter** might terminate a subscription for the following reasons:
diff --git a/examples/supergraph-sdl/rust/Cargo.toml b/examples/supergraph-sdl/rust/Cargo.toml
index 5103ff756d..2326e049e7 100644
--- a/examples/supergraph-sdl/rust/Cargo.toml
+++ b/examples/supergraph-sdl/rust/Cargo.toml
@@ -5,7 +5,7 @@ edition = "2021"
 
 [dependencies]
 anyhow = "1"
-apollo-compiler = "=1.0.0-beta.18"
+apollo-compiler = "=1.0.0-beta.19"
 apollo-router = { path = "../../../apollo-router" }
 async-trait = "0.1"
 tower = { version = "0.4", features = ["full"] }
diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml
index a4e22b5d20..928b68ed73 100644
--- a/fuzz/Cargo.toml
+++ b/fuzz/Cargo.toml
@@ -13,7 +13,7 @@ cargo-fuzz = true
 libfuzzer-sys = "0.4"
 apollo-compiler.workspace = true
 apollo-parser = "0.7.6"
-apollo-smith = "0.8.0"
+apollo-smith = "0.9.0"
 env_logger = "0.10.2"
 log = "0.4"
 reqwest = { workspace = true, features = ["json", "blocking"] }
diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml
index 3e98c25b35..02d1f131bd 100644
--- a/helm/chart/router/Chart.yaml
+++ b/helm/chart/router/Chart.yaml
@@ -20,10 +20,10 @@ type: application
 # so it matches the shape of our release process and release automation.
 # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix
 # of "v" is not included.
-version: 1.51.0
+version: 1.52.0
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "v1.51.0" +appVersion: "v1.52.0" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index 9282d0e4f4..a1940b4b28 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.51.0](https://img.shields.io/badge/Version-1.51.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.51.0](https://img.shields.io/badge/AppVersion-v1.51.0-informational?style=flat-square) +![Version: 1.52.0](https://img.shields.io/badge/Version-1.52.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.52.0](https://img.shields.io/badge/AppVersion-v1.52.0-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.51.0 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.52.0 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.51.0 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.51.0 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.52.0 --values my-values.yaml ``` _See [configuration](#configuration) below._ @@ -67,6 +67,7 @@ helm show values oci://ghcr.io/apollographql/helm-charts/router | lifecycle | object | `{}` | | | managedFederation.apiKey | string | `nil` | If using managed federation, the graph API key to identify router to Studio | | managedFederation.existingSecret | string | `nil` | If using managed federation, use existing Secret which stores the graph API key instead of creating a new one. If set along `managedFederation.apiKey`, a secret with the graph API key will be created using this parameter as name | +| managedFederation.existingSecretKeyRefKey | string | `nil` | If using managed federation, the name of the key within the existing Secret which stores the graph API key. If set along `managedFederation.apiKey`, a secret with the graph API key will be created using this parameter as key, defaults to using a key of `managedFederationApiKey` | | managedFederation.graphRef | string | `""` | If using managed federation, the variant of which graph to use | | nameOverride | string | `""` | | | nodeSelector | object | `{}` | | @@ -94,5 +95,3 @@ helm show values oci://ghcr.io/apollographql/helm-charts/router | topologySpreadConstraints | list | `[]` | Sets the [topology spread constraints](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/) for Deployment pods | | virtualservice.enabled | bool | `false` | | ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/helm/chart/router/templates/deployment.yaml b/helm/chart/router/templates/deployment.yaml index 6a09cf521b..d6af1a19b4 100644 --- a/helm/chart/router/templates/deployment.yaml +++ b/helm/chart/router/templates/deployment.yaml @@ -76,7 +76,7 @@ spec: valueFrom: secretKeyRef: name: {{ template "router.managedFederation.apiSecretName" . 
}} - key: managedFederationApiKey + key: {{ (.Values.managedFederation.existingSecretKeyRefKey | default "managedFederationApiKey") }} optional: true {{- end }} {{- if not (empty .Values.managedFederation.graphRef) }} @@ -114,12 +114,12 @@ spec: {{- toYaml .Values.lifecycle | nindent 12 }} livenessProbe: httpGet: - path: "/health?live" + path: {{ (.Values.router.configuration.health_check.path | default "/health") }}{{"?live"}} port: {{ splitList ":" ((index .Values.router.configuration "health_check").listen | default ":8088") | last }} initialDelaySeconds: {{ ((.Values.probes).liveness).initialDelaySeconds | default 0 }} readinessProbe: httpGet: - path: "/health?ready" + path: {{ (.Values.router.configuration.health_check.path | default "/health") }}{{"?ready"}} port: {{ (splitList ":" ((index .Values.router.configuration "health_check").listen | default ":8088")) | last }} initialDelaySeconds: {{ ((.Values.probes).readiness).initialDelaySeconds | default 0 }} resources: diff --git a/helm/chart/router/templates/secret.yaml b/helm/chart/router/templates/secret.yaml index a5a921f716..70e1b44a1b 100644 --- a/helm/chart/router/templates/secret.yaml +++ b/helm/chart/router/templates/secret.yaml @@ -9,5 +9,5 @@ metadata: {{- include "apollographql.templatizeExtraLabels" . | trim | nindent 4 }} {{- end }} data: - managedFederationApiKey: {{ default "MISSING" .Values.managedFederation.apiKey | b64enc | quote }} + {{ (.Values.managedFederation.existingSecretKeyRefKey | default "managedFederationApiKey") }}: {{ default "MISSING" .Values.managedFederation.apiKey | b64enc | quote }} {{- end }} diff --git a/helm/chart/router/templates/service.yaml b/helm/chart/router/templates/service.yaml index 2062ab8f5f..9cc426588b 100644 --- a/helm/chart/router/templates/service.yaml +++ b/helm/chart/router/templates/service.yaml @@ -22,6 +22,13 @@ spec: targetPort: health protocol: TCP name: health + {{- /* Should we expose an entity cache invalidation port? */}} + {{- if and (.Values.router.configuration.preview_entity_cache).enabled (.Values.router.configuration.preview_entity_cache).invalidation }} + - port: {{ (splitList ":" .Values.router.configuration.preview_entity_cache.invalidation.listen | last) }} + targetPort: invalidation + protocol: TCP + name: invalidation + {{- end }} {{- if .Values.serviceMonitor.enabled }} {{/* NOTE: metrics configuration moved under telemetry.exporters in Router 1.35.0 */}} {{- if .Values.router.configuration.telemetry.exporters.metrics.prometheus.listen }} diff --git a/helm/chart/router/values.yaml b/helm/chart/router/values.yaml index 3e67a224f1..b90f46dc66 100644 --- a/helm/chart/router/values.yaml +++ b/helm/chart/router/values.yaml @@ -21,6 +21,9 @@ managedFederation: # -- If using managed federation, use existing Secret which stores the graph API key instead of creating a new one. # If set along `managedFederation.apiKey`, a secret with the graph API key will be created using this parameter as name existingSecret: + # -- If using managed federation, the name of the key within the existing Secret which stores the graph API key. + # If set along `managedFederation.apiKey`, a secret with the graph API key will be created using this parameter as key, defaults to using a key of `managedFederationApiKey` + existingSecretKeyRefKey: # -- If using managed federation, the variant of which graph to use graphRef: "" diff --git a/licenses.html b/licenses.html index 1c32ce0094..f4c1083d5b 100644 --- a/licenses.html +++ b/licenses.html @@ -39,20 +39,20 @@
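Editor's note on the probe templating above: with no overrides, the rendered probe paths resolve to `/health?live` and `/health?ready` on the health-check listen port. A sketch of what the readiness probe effectively requests, assuming the chart defaults (`:8088` listen address, `/health` path, both overridable via `router.configuration.health_check`); the URL is illustrative:

```rust
fn main() -> Result<(), reqwest::Error> {
    // Sketch: the HTTP request a kubelet readiness probe would make against a
    // router rendered with the chart defaults shown above. Requires reqwest's
    // `blocking` feature.
    let status = reqwest::blocking::get("http://127.0.0.1:8088/health?ready")?.status();
    assert!(status.is_success());
    Ok(())
}
```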

Third Party Licenses

-

This page lists the licenses of the dependencies used in the Apollo Router Core.

+

This page lists the licenses of the dependencies used in the Apollo router.

Overview of licenses:

@@ -1506,6 +1506,7 @@

Used by:

  • encoding_rs
  • fragile
  • static_assertions
  • +
  • tinyvec
  • zeroize
  • @@ -1716,12 +1717,12 @@ 

    Used by:

    Apache License 2.0

    Used by:

                                      Apache License
                                Version 2.0, January 2004
    -                        http://www.apache.org/licenses/
    +                        https://www.apache.org/licenses/
     
        TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
     
    @@ -1895,31 +1896,6 @@ 

    Used by:

    of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License.
  • @@ -2127,6 +2103,7 @@

    Used by:

  • windows_i686_gnu
  • windows_i686_gnu
  • windows_i686_gnu
  • +
  • windows_i686_gnullvm
  • windows_i686_msvc
  • windows_i686_msvc
  • windows_i686_msvc
  • @@ -2978,7 +2955,6 @@

    Used by:

    • opentelemetry
    • opentelemetry-aws
    • -
    • opentelemetry-datadog
    • opentelemetry-http
    • opentelemetry-jaeger
    • opentelemetry-otlp
    • @@ -3198,7 +3174,7 @@

      Used by:

                                       Apache License
      @@ -3622,8 +3598,8 @@ 

      Used by:

    • actix-http
    • actix-macros
    • actix-router
    • -
    • actix-rt
    • -
    • actix-server
    • +
    • actix-rt
    • +
    • actix-server
    • actix-service
    • actix-utils
    • actix-web
    • @@ -3842,6 +3818,7 @@

      Apache License 2.0

      Used by:

                                       Apache License
                                  Version 2.0, January 2004
      @@ -4488,6 +4465,7 @@ 

      Used by:

    • graphql-parser
    • hex
    • humantime
    • +
    • is_terminal_polyfill
    • quick-error
    • resolv-conf
    • scheduled-thread-pool
    • @@ -4910,182 +4888,182 @@

      Used by:

      -
                                       Apache License
      -                           Version 2.0, January 2004
      -                        http://www.apache.org/licenses/
      -
      -   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
      -
      -   1. Definitions.
      -
      -      "License" shall mean the terms and conditions for use, reproduction,
      -      and distribution as defined by Sections 1 through 9 of this document.
      -
      -      "Licensor" shall mean the copyright owner or entity authorized by
      -      the copyright owner that is granting the License.
      -
      -      "Legal Entity" shall mean the union of the acting entity and all
      -      other entities that control, are controlled by, or are under common
      -      control with that entity. For the purposes of this definition,
      -      "control" means (i) the power, direct or indirect, to cause the
      -      direction or management of such entity, whether by contract or
      -      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      -      outstanding shares, or (iii) beneficial ownership of such entity.
      -
      -      "You" (or "Your") shall mean an individual or Legal Entity
      -      exercising permissions granted by this License.
      -
      -      "Source" form shall mean the preferred form for making modifications,
      -      including but not limited to software source code, documentation
      -      source, and configuration files.
      -
      -      "Object" form shall mean any form resulting from mechanical
      -      transformation or translation of a Source form, including but
      -      not limited to compiled object code, generated documentation,
      -      and conversions to other media types.
      -
      -      "Work" shall mean the work of authorship, whether in Source or
      -      Object form, made available under the License, as indicated by a
      -      copyright notice that is included in or attached to the work
      -      (an example is provided in the Appendix below).
      -
      -      "Derivative Works" shall mean any work, whether in Source or Object
      -      form, that is based on (or derived from) the Work and for which the
      -      editorial revisions, annotations, elaborations, or other modifications
      -      represent, as a whole, an original work of authorship. For the purposes
      -      of this License, Derivative Works shall not include works that remain
      -      separable from, or merely link (or bind by name) to the interfaces of,
      -      the Work and Derivative Works thereof.
      -
      -      "Contribution" shall mean any work of authorship, including
      -      the original version of the Work and any modifications or additions
      -      to that Work or Derivative Works thereof, that is intentionally
      -      submitted to Licensor for inclusion in the Work by the copyright owner
      -      or by an individual or Legal Entity authorized to submit on behalf of
      -      the copyright owner. For the purposes of this definition, "submitted"
      -      means any form of electronic, verbal, or written communication sent
      -      to the Licensor or its representatives, including but not limited to
      -      communication on electronic mailing lists, source code control systems,
      -      and issue tracking systems that are managed by, or on behalf of, the
      -      Licensor for the purpose of discussing and improving the Work, but
      -      excluding communication that is conspicuously marked or otherwise
      -      designated in writing by the copyright owner as "Not a Contribution."
      -
      -      "Contributor" shall mean Licensor and any individual or Legal Entity
      -      on behalf of whom a Contribution has been received by Licensor and
      -      subsequently incorporated within the Work.
      -
      -   2. Grant of Copyright License. Subject to the terms and conditions of
      -      this License, each Contributor hereby grants to You a perpetual,
      -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      -      copyright license to reproduce, prepare Derivative Works of,
      -      publicly display, publicly perform, sublicense, and distribute the
      -      Work and such Derivative Works in Source or Object form.
      -
      -   3. Grant of Patent License. Subject to the terms and conditions of
      -      this License, each Contributor hereby grants to You a perpetual,
      -      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      -      (except as stated in this section) patent license to make, have made,
      -      use, offer to sell, sell, import, and otherwise transfer the Work,
      -      where such license applies only to those patent claims licensable
      -      by such Contributor that are necessarily infringed by their
      -      Contribution(s) alone or by combination of their Contribution(s)
      -      with the Work to which such Contribution(s) was submitted. If You
      -      institute patent litigation against any entity (including a
      -      cross-claim or counterclaim in a lawsuit) alleging that the Work
      -      or a Contribution incorporated within the Work constitutes direct
      -      or contributory patent infringement, then any patent licenses
      -      granted to You under this License for that Work shall terminate
      -      as of the date such litigation is filed.
      -
      -   4. Redistribution. You may reproduce and distribute copies of the
      -      Work or Derivative Works thereof in any medium, with or without
      -      modifications, and in Source or Object form, provided that You
      -      meet the following conditions:
      -
      -      (a) You must give any other recipients of the Work or
      -          Derivative Works a copy of this License; and
      -
      -      (b) You must cause any modified files to carry prominent notices
      -          stating that You changed the files; and
      -
      -      (c) You must retain, in the Source form of any Derivative Works
      -          that You distribute, all copyright, patent, trademark, and
      -          attribution notices from the Source form of the Work,
      -          excluding those notices that do not pertain to any part of
      -          the Derivative Works; and
      -
      -      (d) If the Work includes a "NOTICE" text file as part of its
      -          distribution, then any Derivative Works that You distribute must
      -          include a readable copy of the attribution notices contained
      -          within such NOTICE file, excluding those notices that do not
      -          pertain to any part of the Derivative Works, in at least one
      -          of the following places: within a NOTICE text file distributed
      -          as part of the Derivative Works; within the Source form or
      -          documentation, if provided along with the Derivative Works; or,
      -          within a display generated by the Derivative Works, if and
      -          wherever such third-party notices normally appear. The contents
      -          of the NOTICE file are for informational purposes only and
      -          do not modify the License. You may add Your own attribution
      -          notices within Derivative Works that You distribute, alongside
      -          or as an addendum to the NOTICE text from the Work, provided
      -          that such additional attribution notices cannot be construed
      -          as modifying the License.
      -
      -      You may add Your own copyright statement to Your modifications and
      -      may provide additional or different license terms and conditions
      -      for use, reproduction, or distribution of Your modifications, or
      -      for any such Derivative Works as a whole, provided Your use,
      -      reproduction, and distribution of the Work otherwise complies with
      -      the conditions stated in this License.
      -
      -   5. Submission of Contributions. Unless You explicitly state otherwise,
      -      any Contribution intentionally submitted for inclusion in the Work
      -      by You to the Licensor shall be under the terms and conditions of
      -      this License, without any additional terms or conditions.
      -      Notwithstanding the above, nothing herein shall supersede or modify
      -      the terms of any separate license agreement you may have executed
      -      with Licensor regarding such Contributions.
      -
      -   6. Trademarks. This License does not grant permission to use the trade
      -      names, trademarks, service marks, or product names of the Licensor,
      -      except as required for reasonable and customary use in describing the
      -      origin of the Work and reproducing the content of the NOTICE file.
      -
      -   7. Disclaimer of Warranty. Unless required by applicable law or
      -      agreed to in writing, Licensor provides the Work (and each
      -      Contributor provides its Contributions) on an "AS IS" BASIS,
      -      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      -      implied, including, without limitation, any warranties or conditions
      -      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      -      PARTICULAR PURPOSE. You are solely responsible for determining the
      -      appropriateness of using or redistributing the Work and assume any
      -      risks associated with Your exercise of permissions under this License.
      -
      -   8. Limitation of Liability. In no event and under no legal theory,
      -      whether in tort (including negligence), contract, or otherwise,
      -      unless required by applicable law (such as deliberate and grossly
      -      negligent acts) or agreed to in writing, shall any Contributor be
      -      liable to You for damages, including any direct, indirect, special,
      -      incidental, or consequential damages of any character arising as a
      -      result of this License or out of the use or inability to use the
      -      Work (including but not limited to damages for loss of goodwill,
      -      work stoppage, computer failure or malfunction, or any and all
      -      other commercial damages or losses), even if such Contributor
      -      has been advised of the possibility of such damages.
      -
      -   9. Accepting Warranty or Additional Liability. While redistributing
      -      the Work or Derivative Works thereof, You may choose to offer,
      -      and charge a fee for, acceptance of support, warranty, indemnity,
      -      or other liability obligations and/or rights consistent with this
      -      License. However, in accepting such obligations, You may act only
      -      on Your own behalf and on Your sole responsibility, not on behalf
      -      of any other Contributor, and only if You agree to indemnify,
      -      defend, and hold each Contributor harmless for any liability
      -      incurred by, or claims asserted against, such Contributor by reason
      -      of your accepting any such warranty or additional liability.
      -
      -   END OF TERMS AND CONDITIONS
      +                
      +                                Apache License
      +                           Version 2.0, January 2004
      +                        http://www.apache.org/licenses/
      +
      +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
      +
      +   1. Definitions.
      +
      +      "License" shall mean the terms and conditions for use, reproduction,
      +      and distribution as defined by Sections 1 through 9 of this document.
      +
      +      "Licensor" shall mean the copyright owner or entity authorized by
      +      the copyright owner that is granting the License.
      +
      +      "Legal Entity" shall mean the union of the acting entity and all
      +      other entities that control, are controlled by, or are under common
      +      control with that entity. For the purposes of this definition,
      +      "control" means (i) the power, direct or indirect, to cause the
      +      direction or management of such entity, whether by contract or
      +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      +      outstanding shares, or (iii) beneficial ownership of such entity.
      +
      +      "You" (or "Your") shall mean an individual or Legal Entity
      +      exercising permissions granted by this License.
      +
      +      "Source" form shall mean the preferred form for making modifications,
      +      including but not limited to software source code, documentation
      +      source, and configuration files.
      +
      +      "Object" form shall mean any form resulting from mechanical
      +      transformation or translation of a Source form, including but
      +      not limited to compiled object code, generated documentation,
      +      and conversions to other media types.
      +
      +      "Work" shall mean the work of authorship, whether in Source or
      +      Object form, made available under the License, as indicated by a
      +      copyright notice that is included in or attached to the work
      +      (an example is provided in the Appendix below).
      +
      +      "Derivative Works" shall mean any work, whether in Source or Object
      +      form, that is based on (or derived from) the Work and for which the
      +      editorial revisions, annotations, elaborations, or other modifications
      +      represent, as a whole, an original work of authorship. For the purposes
      +      of this License, Derivative Works shall not include works that remain
      +      separable from, or merely link (or bind by name) to the interfaces of,
      +      the Work and Derivative Works thereof.
      +
      +      "Contribution" shall mean any work of authorship, including
      +      the original version of the Work and any modifications or additions
      +      to that Work or Derivative Works thereof, that is intentionally
      +      submitted to Licensor for inclusion in the Work by the copyright owner
      +      or by an individual or Legal Entity authorized to submit on behalf of
      +      the copyright owner. For the purposes of this definition, "submitted"
      +      means any form of electronic, verbal, or written communication sent
      +      to the Licensor or its representatives, including but not limited to
      +      communication on electronic mailing lists, source code control systems,
      +      and issue tracking systems that are managed by, or on behalf of, the
      +      Licensor for the purpose of discussing and improving the Work, but
      +      excluding communication that is conspicuously marked or otherwise
      +      designated in writing by the copyright owner as "Not a Contribution."
      +
      +      "Contributor" shall mean Licensor and any individual or Legal Entity
      +      on behalf of whom a Contribution has been received by Licensor and
      +      subsequently incorporated within the Work.
      +
      +   2. Grant of Copyright License. Subject to the terms and conditions of
      +      this License, each Contributor hereby grants to You a perpetual,
      +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      +      copyright license to reproduce, prepare Derivative Works of,
      +      publicly display, publicly perform, sublicense, and distribute the
      +      Work and such Derivative Works in Source or Object form.
      +
      +   3. Grant of Patent License. Subject to the terms and conditions of
      +      this License, each Contributor hereby grants to You a perpetual,
      +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      +      (except as stated in this section) patent license to make, have made,
      +      use, offer to sell, sell, import, and otherwise transfer the Work,
      +      where such license applies only to those patent claims licensable
      +      by such Contributor that are necessarily infringed by their
      +      Contribution(s) alone or by combination of their Contribution(s)
      +      with the Work to which such Contribution(s) was submitted. If You
      +      institute patent litigation against any entity (including a
      +      cross-claim or counterclaim in a lawsuit) alleging that the Work
      +      or a Contribution incorporated within the Work constitutes direct
      +      or contributory patent infringement, then any patent licenses
      +      granted to You under this License for that Work shall terminate
      +      as of the date such litigation is filed.
      +
      +   4. Redistribution. You may reproduce and distribute copies of the
      +      Work or Derivative Works thereof in any medium, with or without
      +      modifications, and in Source or Object form, provided that You
      +      meet the following conditions:
      +
      +      (a) You must give any other recipients of the Work or
      +          Derivative Works a copy of this License; and
      +
      +      (b) You must cause any modified files to carry prominent notices
      +          stating that You changed the files; and
      +
      +      (c) You must retain, in the Source form of any Derivative Works
      +          that You distribute, all copyright, patent, trademark, and
      +          attribution notices from the Source form of the Work,
      +          excluding those notices that do not pertain to any part of
      +          the Derivative Works; and
      +
      +      (d) If the Work includes a "NOTICE" text file as part of its
      +          distribution, then any Derivative Works that You distribute must
      +          include a readable copy of the attribution notices contained
      +          within such NOTICE file, excluding those notices that do not
      +          pertain to any part of the Derivative Works, in at least one
      +          of the following places: within a NOTICE text file distributed
      +          as part of the Derivative Works; within the Source form or
      +          documentation, if provided along with the Derivative Works; or,
      +          within a display generated by the Derivative Works, if and
      +          wherever such third-party notices normally appear. The contents
      +          of the NOTICE file are for informational purposes only and
      +          do not modify the License. You may add Your own attribution
      +          notices within Derivative Works that You distribute, alongside
      +          or as an addendum to the NOTICE text from the Work, provided
      +          that such additional attribution notices cannot be construed
      +          as modifying the License.
      +
      +      You may add Your own copyright statement to Your modifications and
      +      may provide additional or different license terms and conditions
      +      for use, reproduction, or distribution of Your modifications, or
      +      for any such Derivative Works as a whole, provided Your use,
      +      reproduction, and distribution of the Work otherwise complies with
      +      the conditions stated in this License.
      +
      +   5. Submission of Contributions. Unless You explicitly state otherwise,
      +      any Contribution intentionally submitted for inclusion in the Work
      +      by You to the Licensor shall be under the terms and conditions of
      +      this License, without any additional terms or conditions.
      +      Notwithstanding the above, nothing herein shall supersede or modify
      +      the terms of any separate license agreement you may have executed
      +      with Licensor regarding such Contributions.
      +
      +   6. Trademarks. This License does not grant permission to use the trade
      +      names, trademarks, service marks, or product names of the Licensor,
      +      except as required for reasonable and customary use in describing the
      +      origin of the Work and reproducing the content of the NOTICE file.
      +
      +   7. Disclaimer of Warranty. Unless required by applicable law or
      +      agreed to in writing, Licensor provides the Work (and each
      +      Contributor provides its Contributions) on an "AS IS" BASIS,
      +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      +      implied, including, without limitation, any warranties or conditions
      +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      +      PARTICULAR PURPOSE. You are solely responsible for determining the
      +      appropriateness of using or redistributing the Work and assume any
      +      risks associated with Your exercise of permissions under this License.
      +
      +   8. Limitation of Liability. In no event and under no legal theory,
      +      whether in tort (including negligence), contract, or otherwise,
      +      unless required by applicable law (such as deliberate and grossly
      +      negligent acts) or agreed to in writing, shall any Contributor be
      +      liable to You for damages, including any direct, indirect, special,
      +      incidental, or consequential damages of any character arising as a
      +      result of this License or out of the use or inability to use the
      +      Work (including but not limited to damages for loss of goodwill,
      +      work stoppage, computer failure or malfunction, or any and all
      +      other commercial damages or losses), even if such Contributor
      +      has been advised of the possibility of such damages.
      +
      +   9. Accepting Warranty or Additional Liability. While redistributing
      +      the Work or Derivative Works thereof, You may choose to offer,
      +      and charge a fee for, acceptance of support, warranty, indemnity,
      +      or other liability obligations and/or rights consistent with this
      +      License. However, in accepting such obligations, You may act only
      +      on Your own behalf and on Your sole responsibility, not on behalf
      +      of any other Contributor, and only if You agree to indemnify,
      +      defend, and hold each Contributor harmless for any liability
      +      incurred by, or claims asserted against, such Contributor by reason
      +      of your accepting any such warranty or additional liability.
      +
      +   END OF TERMS AND CONDITIONS
       
@@ -7389,6 +7367,7 @@

 Used by:

 askama
 askama_derive
 askama_escape
+askama_parser
                              Apache License
                         Version 2.0, January 2004
@@ -8679,7 +8658,8 @@

 Used by:

 debugid
 derivative
 derive_arbitrary
-either
+displaydoc
+either
 env_logger
 envmnt
 equivalent
@@ -8695,6 +8675,7 @@

 Used by:

 fraction
 fsio
 futures-lite
+futures-timer
 gimli
 git2
 glob
@@ -8704,7 +8685,7 @@

 Used by:

 heck
 heck
 hermit-abi
-hermit-abi
+hermit-abi
 httparse
 humantime-serde
 hyper-rustls
@@ -8720,7 +8701,6 @@

 Used by:

 itertools
 itertools
 itertools
-jobserver
 js-sys
 lazy_static
 libfuzzer-sys
@@ -8763,7 +8743,6 @@

 Used by:

 pest_meta
 petgraph
 pkg-config
-proc-macro2
 prost
 prost-build
 prost-derive
@@ -8771,14 +8750,13 @@

 Used by:

 prost-types
 prost-types
 proteus
-quote
 regex
 regex-automata
 regex-lite
 regex-syntax
 regex-syntax
 rowan
-rustc-demangle
+rustc-demangle
 rustc-hash
 rustc_version
 rustc_version
@@ -8788,7 +8766,7 @@

 Used by:

 rustls-native-certs
 rustls-pemfile
 scopeguard
-sct
+sct
 security-framework
 security-framework-sys
 semver
@@ -8804,7 +8782,6 @@

 Used by:

 socket2
 stable_deref_trait
 syn
-syn
 tempfile
 thread_local
 threadpool
@@ -8821,11 +8798,10 @@

 Used by:

 unicode-id
 unicode-normalization
 unicode-width
-unicode-xid
 url
 uuid
 version_check
-waker-fn
+waker-fn
 wasi
 wasi
 wasi
@@ -10491,830 +10467,830 @@

    Used by:

    Apache License 2.0

    Used by:

    -
    -                              Apache License
    -                        Version 2.0, January 2004
    -                     http://www.apache.org/licenses/
    +                
    +                              Apache License
    +                        Version 2.0, January 2004
    +                     http://www.apache.org/licenses/
    +
    +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +1. Definitions.
    +
    +   "License" shall mean the terms and conditions for use, reproduction,
    +   and distribution as defined by Sections 1 through 9 of this document.
    +
    +   "Licensor" shall mean the copyright owner or entity authorized by
    +   the copyright owner that is granting the License.
    +
    +   "Legal Entity" shall mean the union of the acting entity and all
    +   other entities that control, are controlled by, or are under common
    +   control with that entity. For the purposes of this definition,
    +   "control" means (i) the power, direct or indirect, to cause the
    +   direction or management of such entity, whether by contract or
    +   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +   outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +   "You" (or "Your") shall mean an individual or Legal Entity
    +   exercising permissions granted by this License.
    +
    +   "Source" form shall mean the preferred form for making modifications,
    +   including but not limited to software source code, documentation
    +   source, and configuration files.
    +
    +   "Object" form shall mean any form resulting from mechanical
    +   transformation or translation of a Source form, including but
    +   not limited to compiled object code, generated documentation,
    +   and conversions to other media types.
    +
    +   "Work" shall mean the work of authorship, whether in Source or
    +   Object form, made available under the License, as indicated by a
    +   copyright notice that is included in or attached to the work
    +   (an example is provided in the Appendix below).
    +
    +   "Derivative Works" shall mean any work, whether in Source or Object
    +   form, that is based on (or derived from) the Work and for which the
    +   editorial revisions, annotations, elaborations, or other modifications
    +   represent, as a whole, an original work of authorship. For the purposes
    +   of this License, Derivative Works shall not include works that remain
    +   separable from, or merely link (or bind by name) to the interfaces of,
    +   the Work and Derivative Works thereof.
    +
    +   "Contribution" shall mean any work of authorship, including
    +   the original version of the Work and any modifications or additions
    +   to that Work or Derivative Works thereof, that is intentionally
    +   submitted to Licensor for inclusion in the Work by the copyright owner
    +   or by an individual or Legal Entity authorized to submit on behalf of
    +   the copyright owner. For the purposes of this definition, "submitted"
    +   means any form of electronic, verbal, or written communication sent
    +   to the Licensor or its representatives, including but not limited to
    +   communication on electronic mailing lists, source code control systems,
    +   and issue tracking systems that are managed by, or on behalf of, the
    +   Licensor for the purpose of discussing and improving the Work, but
    +   excluding communication that is conspicuously marked or otherwise
    +   designated in writing by the copyright owner as "Not a Contribution."
    +
    +   "Contributor" shall mean Licensor and any individual or Legal Entity
    +   on behalf of whom a Contribution has been received by Licensor and
    +   subsequently incorporated within the Work.
    +
    +2. Grant of Copyright License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   copyright license to reproduce, prepare Derivative Works of,
    +   publicly display, publicly perform, sublicense, and distribute the
    +   Work and such Derivative Works in Source or Object form.
    +
    +3. Grant of Patent License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   (except as stated in this section) patent license to make, have made,
    +   use, offer to sell, sell, import, and otherwise transfer the Work,
    +   where such license applies only to those patent claims licensable
    +   by such Contributor that are necessarily infringed by their
    +   Contribution(s) alone or by combination of their Contribution(s)
    +   with the Work to which such Contribution(s) was submitted. If You
    +   institute patent litigation against any entity (including a
    +   cross-claim or counterclaim in a lawsuit) alleging that the Work
    +   or a Contribution incorporated within the Work constitutes direct
    +   or contributory patent infringement, then any patent licenses
    +   granted to You under this License for that Work shall terminate
    +   as of the date such litigation is filed.
    +
    +4. Redistribution. You may reproduce and distribute copies of the
    +   Work or Derivative Works thereof in any medium, with or without
    +   modifications, and in Source or Object form, provided that You
    +   meet the following conditions:
    +
    +   (a) You must give any other recipients of the Work or
    +       Derivative Works a copy of this License; and
    +
    +   (b) You must cause any modified files to carry prominent notices
    +       stating that You changed the files; and
    +
    +   (c) You must retain, in the Source form of any Derivative Works
    +       that You distribute, all copyright, patent, trademark, and
    +       attribution notices from the Source form of the Work,
    +       excluding those notices that do not pertain to any part of
    +       the Derivative Works; and
    +
    +   (d) If the Work includes a "NOTICE" text file as part of its
    +       distribution, then any Derivative Works that You distribute must
    +       include a readable copy of the attribution notices contained
    +       within such NOTICE file, excluding those notices that do not
    +       pertain to any part of the Derivative Works, in at least one
    +       of the following places: within a NOTICE text file distributed
    +       as part of the Derivative Works; within the Source form or
    +       documentation, if provided along with the Derivative Works; or,
    +       within a display generated by the Derivative Works, if and
    +       wherever such third-party notices normally appear. The contents
    +       of the NOTICE file are for informational purposes only and
    +       do not modify the License. You may add Your own attribution
    +       notices within Derivative Works that You distribute, alongside
    +       or as an addendum to the NOTICE text from the Work, provided
    +       that such additional attribution notices cannot be construed
    +       as modifying the License.
    +
    +   You may add Your own copyright statement to Your modifications and
    +   may provide additional or different license terms and conditions
    +   for use, reproduction, or distribution of Your modifications, or
    +   for any such Derivative Works as a whole, provided Your use,
    +   reproduction, and distribution of the Work otherwise complies with
    +   the conditions stated in this License.
    +
    +5. Submission of Contributions. Unless You explicitly state otherwise,
    +   any Contribution intentionally submitted for inclusion in the Work
    +   by You to the Licensor shall be under the terms and conditions of
    +   this License, without any additional terms or conditions.
    +   Notwithstanding the above, nothing herein shall supersede or modify
    +   the terms of any separate license agreement you may have executed
    +   with Licensor regarding such Contributions.
    +
    +6. Trademarks. This License does not grant permission to use the trade
    +   names, trademarks, service marks, or product names of the Licensor,
    +   except as required for reasonable and customary use in describing the
    +   origin of the Work and reproducing the content of the NOTICE file.
    +
    +7. Disclaimer of Warranty. Unless required by applicable law or
    +   agreed to in writing, Licensor provides the Work (and each
    +   Contributor provides its Contributions) on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +   implied, including, without limitation, any warranties or conditions
    +   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +   PARTICULAR PURPOSE. You are solely responsible for determining the
    +   appropriateness of using or redistributing the Work and assume any
    +   risks associated with Your exercise of permissions under this License.
    +
    +8. Limitation of Liability. In no event and under no legal theory,
    +   whether in tort (including negligence), contract, or otherwise,
    +   unless required by applicable law (such as deliberate and grossly
    +   negligent acts) or agreed to in writing, shall any Contributor be
    +   liable to You for damages, including any direct, indirect, special,
    +   incidental, or consequential damages of any character arising as a
    +   result of this License or out of the use or inability to use the
    +   Work (including but not limited to damages for loss of goodwill,
    +   work stoppage, computer failure or malfunction, or any and all
    +   other commercial damages or losses), even if such Contributor
    +   has been advised of the possibility of such damages.
    +
    +9. Accepting Warranty or Additional Liability. While redistributing
    +   the Work or Derivative Works thereof, You may choose to offer,
    +   and charge a fee for, acceptance of support, warranty, indemnity,
    +   or other liability obligations and/or rights consistent with this
    +   License. However, in accepting such obligations, You may act only
    +   on Your own behalf and on Your sole responsibility, not on behalf
    +   of any other Contributor, and only if You agree to indemnify,
    +   defend, and hold each Contributor harmless for any liability
    +   incurred by, or claims asserted against, such Contributor by reason
    +   of your accepting any such warranty or additional liability.
    +
    +END OF TERMS AND CONDITIONS
    +
     +
     +

    Apache License 2.0

    +

    Used by:

     +
     +                              Apache License
    +                        Version 2.0, January 2004
    +                     http://www.apache.org/licenses/
    +
    +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +1. Definitions.
    +
    +   "License" shall mean the terms and conditions for use, reproduction,
    +   and distribution as defined by Sections 1 through 9 of this document.
    +
    +   "Licensor" shall mean the copyright owner or entity authorized by
    +   the copyright owner that is granting the License.
    +
    +   "Legal Entity" shall mean the union of the acting entity and all
    +   other entities that control, are controlled by, or are under common
    +   control with that entity. For the purposes of this definition,
    +   "control" means (i) the power, direct or indirect, to cause the
    +   direction or management of such entity, whether by contract or
    +   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +   outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +   "You" (or "Your") shall mean an individual or Legal Entity
    +   exercising permissions granted by this License.
    +
    +   "Source" form shall mean the preferred form for making modifications,
    +   including but not limited to software source code, documentation
    +   source, and configuration files.
    +
    +   "Object" form shall mean any form resulting from mechanical
    +   transformation or translation of a Source form, including but
    +   not limited to compiled object code, generated documentation,
    +   and conversions to other media types.
    +
    +   "Work" shall mean the work of authorship, whether in Source or
    +   Object form, made available under the License, as indicated by a
    +   copyright notice that is included in or attached to the work
    +   (an example is provided in the Appendix below).
    +
    +   "Derivative Works" shall mean any work, whether in Source or Object
    +   form, that is based on (or derived from) the Work and for which the
    +   editorial revisions, annotations, elaborations, or other modifications
    +   represent, as a whole, an original work of authorship. For the purposes
    +   of this License, Derivative Works shall not include works that remain
    +   separable from, or merely link (or bind by name) to the interfaces of,
    +   the Work and Derivative Works thereof.
    +
    +   "Contribution" shall mean any work of authorship, including
    +   the original version of the Work and any modifications or additions
    +   to that Work or Derivative Works thereof, that is intentionally
    +   submitted to Licensor for inclusion in the Work by the copyright owner
    +   or by an individual or Legal Entity authorized to submit on behalf of
    +   the copyright owner. For the purposes of this definition, "submitted"
    +   means any form of electronic, verbal, or written communication sent
    +   to the Licensor or its representatives, including but not limited to
    +   communication on electronic mailing lists, source code control systems,
    +   and issue tracking systems that are managed by, or on behalf of, the
    +   Licensor for the purpose of discussing and improving the Work, but
    +   excluding communication that is conspicuously marked or otherwise
    +   designated in writing by the copyright owner as "Not a Contribution."
    +
    +   "Contributor" shall mean Licensor and any individual or Legal Entity
    +   on behalf of whom a Contribution has been received by Licensor and
    +   subsequently incorporated within the Work.
    +
    +2. Grant of Copyright License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   copyright license to reproduce, prepare Derivative Works of,
    +   publicly display, publicly perform, sublicense, and distribute the
    +   Work and such Derivative Works in Source or Object form.
    +
    +3. Grant of Patent License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   (except as stated in this section) patent license to make, have made,
    +   use, offer to sell, sell, import, and otherwise transfer the Work,
    +   where such license applies only to those patent claims licensable
    +   by such Contributor that are necessarily infringed by their
    +   Contribution(s) alone or by combination of their Contribution(s)
    +   with the Work to which such Contribution(s) was submitted. If You
    +   institute patent litigation against any entity (including a
    +   cross-claim or counterclaim in a lawsuit) alleging that the Work
    +   or a Contribution incorporated within the Work constitutes direct
    +   or contributory patent infringement, then any patent licenses
    +   granted to You under this License for that Work shall terminate
    +   as of the date such litigation is filed.
    +
    +4. Redistribution. You may reproduce and distribute copies of the
    +   Work or Derivative Works thereof in any medium, with or without
    +   modifications, and in Source or Object form, provided that You
    +   meet the following conditions:
    +
    +   (a) You must give any other recipients of the Work or
    +       Derivative Works a copy of this License; and
    +
    +   (b) You must cause any modified files to carry prominent notices
    +       stating that You changed the files; and
    +
    +   (c) You must retain, in the Source form of any Derivative Works
    +       that You distribute, all copyright, patent, trademark, and
    +       attribution notices from the Source form of the Work,
    +       excluding those notices that do not pertain to any part of
    +       the Derivative Works; and
    +
    +   (d) If the Work includes a "NOTICE" text file as part of its
    +       distribution, then any Derivative Works that You distribute must
    +       include a readable copy of the attribution notices contained
    +       within such NOTICE file, excluding those notices that do not
    +       pertain to any part of the Derivative Works, in at least one
    +       of the following places: within a NOTICE text file distributed
    +       as part of the Derivative Works; within the Source form or
    +       documentation, if provided along with the Derivative Works; or,
    +       within a display generated by the Derivative Works, if and
    +       wherever such third-party notices normally appear. The contents
    +       of the NOTICE file are for informational purposes only and
    +       do not modify the License. You may add Your own attribution
    +       notices within Derivative Works that You distribute, alongside
    +       or as an addendum to the NOTICE text from the Work, provided
    +       that such additional attribution notices cannot be construed
    +       as modifying the License.
    +
    +   You may add Your own copyright statement to Your modifications and
    +   may provide additional or different license terms and conditions
    +   for use, reproduction, or distribution of Your modifications, or
    +   for any such Derivative Works as a whole, provided Your use,
    +   reproduction, and distribution of the Work otherwise complies with
    +   the conditions stated in this License.
    +
    +5. Submission of Contributions. Unless You explicitly state otherwise,
    +   any Contribution intentionally submitted for inclusion in the Work
    +   by You to the Licensor shall be under the terms and conditions of
    +   this License, without any additional terms or conditions.
    +   Notwithstanding the above, nothing herein shall supersede or modify
    +   the terms of any separate license agreement you may have executed
    +   with Licensor regarding such Contributions.
    +
    +6. Trademarks. This License does not grant permission to use the trade
    +   names, trademarks, service marks, or product names of the Licensor,
    +   except as required for reasonable and customary use in describing the
    +   origin of the Work and reproducing the content of the NOTICE file.
    +
    +7. Disclaimer of Warranty. Unless required by applicable law or
    +   agreed to in writing, Licensor provides the Work (and each
    +   Contributor provides its Contributions) on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +   implied, including, without limitation, any warranties or conditions
    +   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +   PARTICULAR PURPOSE. You are solely responsible for determining the
    +   appropriateness of using or redistributing the Work and assume any
    +   risks associated with Your exercise of permissions under this License.
    +
    +8. Limitation of Liability. In no event and under no legal theory,
    +   whether in tort (including negligence), contract, or otherwise,
    +   unless required by applicable law (such as deliberate and grossly
    +   negligent acts) or agreed to in writing, shall any Contributor be
    +   liable to You for damages, including any direct, indirect, special,
    +   incidental, or consequential damages of any character arising as a
    +   result of this License or out of the use or inability to use the
    +   Work (including but not limited to damages for loss of goodwill,
    +   work stoppage, computer failure or malfunction, or any and all
    +   other commercial damages or losses), even if such Contributor
    +   has been advised of the possibility of such damages.
    +
    +9. Accepting Warranty or Additional Liability. While redistributing
    +   the Work or Derivative Works thereof, You may choose to offer,
    +   and charge a fee for, acceptance of support, warranty, indemnity,
    +   or other liability obligations and/or rights consistent with this
    +   License. However, in accepting such obligations, You may act only
    +   on Your own behalf and on Your sole responsibility, not on behalf
    +   of any other Contributor, and only if You agree to indemnify,
    +   defend, and hold each Contributor harmless for any liability
    +   incurred by, or claims asserted against, such Contributor by reason
    +   of your accepting any such warranty or additional liability.
    +
    +END OF TERMS AND CONDITIONS
    +
    +APPENDIX: How to apply the Apache License to your work.
    +
    +   To apply the Apache License to your work, attach the following
    +   boilerplate notice, with the fields enclosed by brackets "[]"
    +   replaced with your own identifying information. (Don't include
    +   the brackets!)  The text should be enclosed in the appropriate
    +   comment syntax for the file format. We also recommend that a
    +   file or class name and description of purpose be included on the
    +   same "printed page" as the copyright notice for easier
    +   identification within third-party archives.
    +
    +Copyright [yyyy] [name of copyright owner]
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

     +
     +                              Apache License
    +                        Version 2.0, January 2004
    +                     http://www.apache.org/licenses/
    +
    +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +1. Definitions.
    +
    +   "License" shall mean the terms and conditions for use, reproduction,
    +   and distribution as defined by Sections 1 through 9 of this document.
    +
    +   "Licensor" shall mean the copyright owner or entity authorized by
    +   the copyright owner that is granting the License.
    +
    +   "Legal Entity" shall mean the union of the acting entity and all
    +   other entities that control, are controlled by, or are under common
    +   control with that entity. For the purposes of this definition,
    +   "control" means (i) the power, direct or indirect, to cause the
    +   direction or management of such entity, whether by contract or
    +   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +   outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +   "You" (or "Your") shall mean an individual or Legal Entity
    +   exercising permissions granted by this License.
    +
    +   "Source" form shall mean the preferred form for making modifications,
    +   including but not limited to software source code, documentation
    +   source, and configuration files.
    +
    +   "Object" form shall mean any form resulting from mechanical
    +   transformation or translation of a Source form, including but
    +   not limited to compiled object code, generated documentation,
    +   and conversions to other media types.
    +
    +   "Work" shall mean the work of authorship, whether in Source or
    +   Object form, made available under the License, as indicated by a
    +   copyright notice that is included in or attached to the work
    +   (an example is provided in the Appendix below).
    +
    +   "Derivative Works" shall mean any work, whether in Source or Object
    +   form, that is based on (or derived from) the Work and for which the
    +   editorial revisions, annotations, elaborations, or other modifications
    +   represent, as a whole, an original work of authorship. For the purposes
    +   of this License, Derivative Works shall not include works that remain
    +   separable from, or merely link (or bind by name) to the interfaces of,
    +   the Work and Derivative Works thereof.
    +
    +   "Contribution" shall mean any work of authorship, including
    +   the original version of the Work and any modifications or additions
    +   to that Work or Derivative Works thereof, that is intentionally
    +   submitted to Licensor for inclusion in the Work by the copyright owner
    +   or by an individual or Legal Entity authorized to submit on behalf of
    +   the copyright owner. For the purposes of this definition, "submitted"
    +   means any form of electronic, verbal, or written communication sent
    +   to the Licensor or its representatives, including but not limited to
    +   communication on electronic mailing lists, source code control systems,
    +   and issue tracking systems that are managed by, or on behalf of, the
    +   Licensor for the purpose of discussing and improving the Work, but
    +   excluding communication that is conspicuously marked or otherwise
    +   designated in writing by the copyright owner as "Not a Contribution."
    +
    +   "Contributor" shall mean Licensor and any individual or Legal Entity
    +   on behalf of whom a Contribution has been received by Licensor and
    +   subsequently incorporated within the Work.
    +
    +2. Grant of Copyright License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   copyright license to reproduce, prepare Derivative Works of,
    +   publicly display, publicly perform, sublicense, and distribute the
    +   Work and such Derivative Works in Source or Object form.
    +
    +3. Grant of Patent License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   (except as stated in this section) patent license to make, have made,
    +   use, offer to sell, sell, import, and otherwise transfer the Work,
    +   where such license applies only to those patent claims licensable
    +   by such Contributor that are necessarily infringed by their
    +   Contribution(s) alone or by combination of their Contribution(s)
    +   with the Work to which such Contribution(s) was submitted. If You
    +   institute patent litigation against any entity (including a
    +   cross-claim or counterclaim in a lawsuit) alleging that the Work
    +   or a Contribution incorporated within the Work constitutes direct
    +   or contributory patent infringement, then any patent licenses
    +   granted to You under this License for that Work shall terminate
    +   as of the date such litigation is filed.
    +
    +4. Redistribution. You may reproduce and distribute copies of the
    +   Work or Derivative Works thereof in any medium, with or without
    +   modifications, and in Source or Object form, provided that You
    +   meet the following conditions:
    +
    +   (a) You must give any other recipients of the Work or
    +       Derivative Works a copy of this License; and
    +
    +   (b) You must cause any modified files to carry prominent notices
    +       stating that You changed the files; and
    +
    +   (c) You must retain, in the Source form of any Derivative Works
    +       that You distribute, all copyright, patent, trademark, and
    +       attribution notices from the Source form of the Work,
    +       excluding those notices that do not pertain to any part of
    +       the Derivative Works; and
    +
    +   (d) If the Work includes a "NOTICE" text file as part of its
    +       distribution, then any Derivative Works that You distribute must
    +       include a readable copy of the attribution notices contained
    +       within such NOTICE file, excluding those notices that do not
    +       pertain to any part of the Derivative Works, in at least one
    +       of the following places: within a NOTICE text file distributed
    +       as part of the Derivative Works; within the Source form or
    +       documentation, if provided along with the Derivative Works; or,
    +       within a display generated by the Derivative Works, if and
    +       wherever such third-party notices normally appear. The contents
    +       of the NOTICE file are for informational purposes only and
    +       do not modify the License. You may add Your own attribution
    +       notices within Derivative Works that You distribute, alongside
    +       or as an addendum to the NOTICE text from the Work, provided
    +       that such additional attribution notices cannot be construed
    +       as modifying the License.
    +
    +   You may add Your own copyright statement to Your modifications and
    +   may provide additional or different license terms and conditions
    +   for use, reproduction, or distribution of Your modifications, or
    +   for any such Derivative Works as a whole, provided Your use,
    +   reproduction, and distribution of the Work otherwise complies with
    +   the conditions stated in this License.
    +
    +5. Submission of Contributions. Unless You explicitly state otherwise,
    +   any Contribution intentionally submitted for inclusion in the Work
    +   by You to the Licensor shall be under the terms and conditions of
    +   this License, without any additional terms or conditions.
    +   Notwithstanding the above, nothing herein shall supersede or modify
    +   the terms of any separate license agreement you may have executed
    +   with Licensor regarding such Contributions.
    +
    +6. Trademarks. This License does not grant permission to use the trade
    +   names, trademarks, service marks, or product names of the Licensor,
    +   except as required for reasonable and customary use in describing the
    +   origin of the Work and reproducing the content of the NOTICE file.
    +
    +7. Disclaimer of Warranty. Unless required by applicable law or
    +   agreed to in writing, Licensor provides the Work (and each
    +   Contributor provides its Contributions) on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +   implied, including, without limitation, any warranties or conditions
    +   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +   PARTICULAR PURPOSE. You are solely responsible for determining the
    +   appropriateness of using or redistributing the Work and assume any
    +   risks associated with Your exercise of permissions under this License.
    +
    +8. Limitation of Liability. In no event and under no legal theory,
    +   whether in tort (including negligence), contract, or otherwise,
    +   unless required by applicable law (such as deliberate and grossly
    +   negligent acts) or agreed to in writing, shall any Contributor be
    +   liable to You for damages, including any direct, indirect, special,
    +   incidental, or consequential damages of any character arising as a
    +   result of this License or out of the use or inability to use the
    +   Work (including but not limited to damages for loss of goodwill,
    +   work stoppage, computer failure or malfunction, or any and all
    +   other commercial damages or losses), even if such Contributor
    +   has been advised of the possibility of such damages.
    +
    +9. Accepting Warranty or Additional Liability. While redistributing
    +   the Work or Derivative Works thereof, You may choose to offer,
    +   and charge a fee for, acceptance of support, warranty, indemnity,
    +   or other liability obligations and/or rights consistent with this
    +   License. However, in accepting such obligations, You may act only
    +   on Your own behalf and on Your sole responsibility, not on behalf
    +   of any other Contributor, and only if You agree to indemnify,
    +   defend, and hold each Contributor harmless for any liability
    +   incurred by, or claims asserted against, such Contributor by reason
    +   of your accepting any such warranty or additional liability.
    +
    +END OF TERMS AND CONDITIONS
    +
    +APPENDIX: How to apply the Apache License to your work.
    +
    +   To apply the Apache License to your work, attach the following
    +   boilerplate notice, with the fields enclosed by brackets "[]"
    +   replaced with your own identifying information. (Don't include
    +   the brackets!)  The text should be enclosed in the appropriate
    +   comment syntax for the file format. We also recommend that a
    +   file or class name and description of purpose be included on the
    +   same "printed page" as the copyright notice for easier
    +   identification within third-party archives.
    +
    +Copyright [yyyy] [name of copyright owner]
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

     +
     +                              Apache License
    +                        Version 2.0, January 2004
    +                     http://www.apache.org/licenses/
    +
    +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +1. Definitions.
    +
    +   "License" shall mean the terms and conditions for use, reproduction,
    +   and distribution as defined by Sections 1 through 9 of this document.
    +
    +   "Licensor" shall mean the copyright owner or entity authorized by
    +   the copyright owner that is granting the License.
    +
    +   "Legal Entity" shall mean the union of the acting entity and all
    +   other entities that control, are controlled by, or are under common
    +   control with that entity. For the purposes of this definition,
    +   "control" means (i) the power, direct or indirect, to cause the
    +   direction or management of such entity, whether by contract or
    +   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +   outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +   "You" (or "Your") shall mean an individual or Legal Entity
    +   exercising permissions granted by this License.
    +
    +   "Source" form shall mean the preferred form for making modifications,
    +   including but not limited to software source code, documentation
    +   source, and configuration files.
    +
    +   "Object" form shall mean any form resulting from mechanical
    +   transformation or translation of a Source form, including but
    +   not limited to compiled object code, generated documentation,
    +   and conversions to other media types.
    +
    +   "Work" shall mean the work of authorship, whether in Source or
    +   Object form, made available under the License, as indicated by a
    +   copyright notice that is included in or attached to the work
    +   (an example is provided in the Appendix below).
    +
    +   "Derivative Works" shall mean any work, whether in Source or Object
    +   form, that is based on (or derived from) the Work and for which the
    +   editorial revisions, annotations, elaborations, or other modifications
    +   represent, as a whole, an original work of authorship. For the purposes
    +   of this License, Derivative Works shall not include works that remain
    +   separable from, or merely link (or bind by name) to the interfaces of,
    +   the Work and Derivative Works thereof.
    +
    +   "Contribution" shall mean any work of authorship, including
    +   the original version of the Work and any modifications or additions
    +   to that Work or Derivative Works thereof, that is intentionally
    +   submitted to Licensor for inclusion in the Work by the copyright owner
    +   or by an individual or Legal Entity authorized to submit on behalf of
    +   the copyright owner. For the purposes of this definition, "submitted"
    +   means any form of electronic, verbal, or written communication sent
    +   to the Licensor or its representatives, including but not limited to
    +   communication on electronic mailing lists, source code control systems,
    +   and issue tracking systems that are managed by, or on behalf of, the
    +   Licensor for the purpose of discussing and improving the Work, but
    +   excluding communication that is conspicuously marked or otherwise
    +   designated in writing by the copyright owner as "Not a Contribution."
    +
    +   "Contributor" shall mean Licensor and any individual or Legal Entity
    +   on behalf of whom a Contribution has been received by Licensor and
    +   subsequently incorporated within the Work.
    +
    +2. Grant of Copyright License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   copyright license to reproduce, prepare Derivative Works of,
    +   publicly display, publicly perform, sublicense, and distribute the
    +   Work and such Derivative Works in Source or Object form.
    +
    +3. Grant of Patent License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   (except as stated in this section) patent license to make, have made,
    +   use, offer to sell, sell, import, and otherwise transfer the Work,
    +   where such license applies only to those patent claims licensable
    +   by such Contributor that are necessarily infringed by their
    +   Contribution(s) alone or by combination of their Contribution(s)
    +   with the Work to which such Contribution(s) was submitted. If You
    +   institute patent litigation against any entity (including a
    +   cross-claim or counterclaim in a lawsuit) alleging that the Work
    +   or a Contribution incorporated within the Work constitutes direct
    +   or contributory patent infringement, then any patent licenses
    +   granted to You under this License for that Work shall terminate
    +   as of the date such litigation is filed.
    +
    +4. Redistribution. You may reproduce and distribute copies of the
    +   Work or Derivative Works thereof in any medium, with or without
    +   modifications, and in Source or Object form, provided that You
    +   meet the following conditions:
    +
    +   (a) You must give any other recipients of the Work or
    +       Derivative Works a copy of this License; and
    +
    +   (b) You must cause any modified files to carry prominent notices
    +       stating that You changed the files; and
    +
    +   (c) You must retain, in the Source form of any Derivative Works
    +       that You distribute, all copyright, patent, trademark, and
    +       attribution notices from the Source form of the Work,
    +       excluding those notices that do not pertain to any part of
    +       the Derivative Works; and
    +
    +   (d) If the Work includes a "NOTICE" text file as part of its
    +       distribution, then any Derivative Works that You distribute must
    +       include a readable copy of the attribution notices contained
    +       within such NOTICE file, excluding those notices that do not
    +       pertain to any part of the Derivative Works, in at least one
    +       of the following places: within a NOTICE text file distributed
    +       as part of the Derivative Works; within the Source form or
    +       documentation, if provided along with the Derivative Works; or,
    +       within a display generated by the Derivative Works, if and
    +       wherever such third-party notices normally appear. The contents
    +       of the NOTICE file are for informational purposes only and
    +       do not modify the License. You may add Your own attribution
    +       notices within Derivative Works that You distribute, alongside
    +       or as an addendum to the NOTICE text from the Work, provided
    +       that such additional attribution notices cannot be construed
    +       as modifying the License.
    +
    +   You may add Your own copyright statement to Your modifications and
    +   may provide additional or different license terms and conditions
    +   for use, reproduction, or distribution of Your modifications, or
    +   for any such Derivative Works as a whole, provided Your use,
    +   reproduction, and distribution of the Work otherwise complies with
    +   the conditions stated in this License.
    +
    +5. Submission of Contributions. Unless You explicitly state otherwise,
    +   any Contribution intentionally submitted for inclusion in the Work
    +   by You to the Licensor shall be under the terms and conditions of
    +   this License, without any additional terms or conditions.
    +   Notwithstanding the above, nothing herein shall supersede or modify
    +   the terms of any separate license agreement you may have executed
    +   with Licensor regarding such Contributions.
    +
    +6. Trademarks. This License does not grant permission to use the trade
    +   names, trademarks, service marks, or product names of the Licensor,
    +   except as required for reasonable and customary use in describing the
    +   origin of the Work and reproducing the content of the NOTICE file.
    +
    +7. Disclaimer of Warranty. Unless required by applicable law or
    +   agreed to in writing, Licensor provides the Work (and each
    +   Contributor provides its Contributions) on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +   implied, including, without limitation, any warranties or conditions
    +   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +   PARTICULAR PURPOSE. You are solely responsible for determining the
    +   appropriateness of using or redistributing the Work and assume any
    +   risks associated with Your exercise of permissions under this License.
    +
    +8. Limitation of Liability. In no event and under no legal theory,
    +   whether in tort (including negligence), contract, or otherwise,
    +   unless required by applicable law (such as deliberate and grossly
    +   negligent acts) or agreed to in writing, shall any Contributor be
    +   liable to You for damages, including any direct, indirect, special,
    +   incidental, or consequential damages of any character arising as a
    +   result of this License or out of the use or inability to use the
    +   Work (including but not limited to damages for loss of goodwill,
    +   work stoppage, computer failure or malfunction, or any and all
    +   other commercial damages or losses), even if such Contributor
    +   has been advised of the possibility of such damages.
    +
    +9. Accepting Warranty or Additional Liability. While redistributing
    +   the Work or Derivative Works thereof, You may choose to offer,
    +   and charge a fee for, acceptance of support, warranty, indemnity,
    +   or other liability obligations and/or rights consistent with this
    +   License. However, in accepting such obligations, You may act only
    +   on Your own behalf and on Your sole responsibility, not on behalf
    +   of any other Contributor, and only if You agree to indemnify,
    +   defend, and hold each Contributor harmless for any liability
    +   incurred by, or claims asserted against, such Contributor by reason
    +   of your accepting any such warranty or additional liability.
    +
    +END OF TERMS AND CONDITIONS
    +
    +APPENDIX: How to apply the Apache License to your work.
    +
    +   To apply the Apache License to your work, attach the following
    +   boilerplate notice, with the fields enclosed by brackets "[]"
    +   replaced with your own identifying information. (Don't include
    +   the brackets!)  The text should be enclosed in the appropriate
    +   comment syntax for the file format. We also recommend that a
    +   file or class name and description of purpose be included on the
    +   same "printed page" as the copyright notice for easier
    +   identification within third-party archives.
    +
    +Copyright [yyyy] [name of copyright owner]
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +
    +
    +
    +
    +

    Apache License 2.0

    +

    Used by:

    +
    +                                 Apache License
    +                           Version 2.0, January 2004
    +                        http://www.apache.org/licenses/
     
    -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
     
    -1. Definitions.
    -
    -   "License" shall mean the terms and conditions for use, reproduction,
    -   and distribution as defined by Sections 1 through 9 of this document.
    -
    -   "Licensor" shall mean the copyright owner or entity authorized by
    -   the copyright owner that is granting the License.
    -
    -   "Legal Entity" shall mean the union of the acting entity and all
    -   other entities that control, are controlled by, or are under common
    -   control with that entity. For the purposes of this definition,
    -   "control" means (i) the power, direct or indirect, to cause the
    -   direction or management of such entity, whether by contract or
    -   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -   outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -   "You" (or "Your") shall mean an individual or Legal Entity
    -   exercising permissions granted by this License.
    -
    -   "Source" form shall mean the preferred form for making modifications,
    -   including but not limited to software source code, documentation
    -   source, and configuration files.
    -
    -   "Object" form shall mean any form resulting from mechanical
    -   transformation or translation of a Source form, including but
    -   not limited to compiled object code, generated documentation,
    -   and conversions to other media types.
    -
    -   "Work" shall mean the work of authorship, whether in Source or
    -   Object form, made available under the License, as indicated by a
    -   copyright notice that is included in or attached to the work
    -   (an example is provided in the Appendix below).
    -
    -   "Derivative Works" shall mean any work, whether in Source or Object
    -   form, that is based on (or derived from) the Work and for which the
    -   editorial revisions, annotations, elaborations, or other modifications
    -   represent, as a whole, an original work of authorship. For the purposes
    -   of this License, Derivative Works shall not include works that remain
    -   separable from, or merely link (or bind by name) to the interfaces of,
    -   the Work and Derivative Works thereof.
    -
    -   "Contribution" shall mean any work of authorship, including
    -   the original version of the Work and any modifications or additions
    -   to that Work or Derivative Works thereof, that is intentionally
    -   submitted to Licensor for inclusion in the Work by the copyright owner
    -   or by an individual or Legal Entity authorized to submit on behalf of
    -   the copyright owner. For the purposes of this definition, "submitted"
    -   means any form of electronic, verbal, or written communication sent
    -   to the Licensor or its representatives, including but not limited to
    -   communication on electronic mailing lists, source code control systems,
    -   and issue tracking systems that are managed by, or on behalf of, the
    -   Licensor for the purpose of discussing and improving the Work, but
    -   excluding communication that is conspicuously marked or otherwise
    -   designated in writing by the copyright owner as "Not a Contribution."
    -
    -   "Contributor" shall mean Licensor and any individual or Legal Entity
    -   on behalf of whom a Contribution has been received by Licensor and
    -   subsequently incorporated within the Work.
    -
    -2. Grant of Copyright License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   copyright license to reproduce, prepare Derivative Works of,
    -   publicly display, publicly perform, sublicense, and distribute the
    -   Work and such Derivative Works in Source or Object form.
    -
    -3. Grant of Patent License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   (except as stated in this section) patent license to make, have made,
    -   use, offer to sell, sell, import, and otherwise transfer the Work,
    -   where such license applies only to those patent claims licensable
    -   by such Contributor that are necessarily infringed by their
    -   Contribution(s) alone or by combination of their Contribution(s)
    -   with the Work to which such Contribution(s) was submitted. If You
    -   institute patent litigation against any entity (including a
    -   cross-claim or counterclaim in a lawsuit) alleging that the Work
    -   or a Contribution incorporated within the Work constitutes direct
    -   or contributory patent infringement, then any patent licenses
    -   granted to You under this License for that Work shall terminate
    -   as of the date such litigation is filed.
    -
    -4. Redistribution. You may reproduce and distribute copies of the
    -   Work or Derivative Works thereof in any medium, with or without
    -   modifications, and in Source or Object form, provided that You
    -   meet the following conditions:
    -
    -   (a) You must give any other recipients of the Work or
    -       Derivative Works a copy of this License; and
    -
    -   (b) You must cause any modified files to carry prominent notices
    -       stating that You changed the files; and
    -
    -   (c) You must retain, in the Source form of any Derivative Works
    -       that You distribute, all copyright, patent, trademark, and
    -       attribution notices from the Source form of the Work,
    -       excluding those notices that do not pertain to any part of
    -       the Derivative Works; and
    -
    -   (d) If the Work includes a "NOTICE" text file as part of its
    -       distribution, then any Derivative Works that You distribute must
    -       include a readable copy of the attribution notices contained
    -       within such NOTICE file, excluding those notices that do not
    -       pertain to any part of the Derivative Works, in at least one
    -       of the following places: within a NOTICE text file distributed
    -       as part of the Derivative Works; within the Source form or
    -       documentation, if provided along with the Derivative Works; or,
    -       within a display generated by the Derivative Works, if and
    -       wherever such third-party notices normally appear. The contents
    -       of the NOTICE file are for informational purposes only and
    -       do not modify the License. You may add Your own attribution
    -       notices within Derivative Works that You distribute, alongside
    -       or as an addendum to the NOTICE text from the Work, provided
    -       that such additional attribution notices cannot be construed
    -       as modifying the License.
    -
    -   You may add Your own copyright statement to Your modifications and
    -   may provide additional or different license terms and conditions
    -   for use, reproduction, or distribution of Your modifications, or
    -   for any such Derivative Works as a whole, provided Your use,
    -   reproduction, and distribution of the Work otherwise complies with
    -   the conditions stated in this License.
    -
    -5. Submission of Contributions. Unless You explicitly state otherwise,
    -   any Contribution intentionally submitted for inclusion in the Work
    -   by You to the Licensor shall be under the terms and conditions of
    -   this License, without any additional terms or conditions.
    -   Notwithstanding the above, nothing herein shall supersede or modify
    -   the terms of any separate license agreement you may have executed
    -   with Licensor regarding such Contributions.
    -
    -6. Trademarks. This License does not grant permission to use the trade
    -   names, trademarks, service marks, or product names of the Licensor,
    -   except as required for reasonable and customary use in describing the
    -   origin of the Work and reproducing the content of the NOTICE file.
    -
    -7. Disclaimer of Warranty. Unless required by applicable law or
    -   agreed to in writing, Licensor provides the Work (and each
    -   Contributor provides its Contributions) on an "AS IS" BASIS,
    -   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -   implied, including, without limitation, any warranties or conditions
    -   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -   PARTICULAR PURPOSE. You are solely responsible for determining the
    -   appropriateness of using or redistributing the Work and assume any
    -   risks associated with Your exercise of permissions under this License.
    -
    -8. Limitation of Liability. In no event and under no legal theory,
    -   whether in tort (including negligence), contract, or otherwise,
    -   unless required by applicable law (such as deliberate and grossly
    -   negligent acts) or agreed to in writing, shall any Contributor be
    -   liable to You for damages, including any direct, indirect, special,
    -   incidental, or consequential damages of any character arising as a
    -   result of this License or out of the use or inability to use the
    -   Work (including but not limited to damages for loss of goodwill,
    -   work stoppage, computer failure or malfunction, or any and all
    -   other commercial damages or losses), even if such Contributor
    -   has been advised of the possibility of such damages.
    -
    -9. Accepting Warranty or Additional Liability. While redistributing
    -   the Work or Derivative Works thereof, You may choose to offer,
    -   and charge a fee for, acceptance of support, warranty, indemnity,
    -   or other liability obligations and/or rights consistent with this
    -   License. However, in accepting such obligations, You may act only
    -   on Your own behalf and on Your sole responsibility, not on behalf
    -   of any other Contributor, and only if You agree to indemnify,
    -   defend, and hold each Contributor harmless for any liability
    -   incurred by, or claims asserted against, such Contributor by reason
    -   of your accepting any such warranty or additional liability.
    -
    -END OF TERMS AND CONDITIONS
    -
    -
    -
    -

    Apache License 2.0

    -

    Used by:

    -
    -                              Apache License
    -                        Version 2.0, January 2004
    -                     http://www.apache.org/licenses/
    -
    -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -1. Definitions.
    -
    -   "License" shall mean the terms and conditions for use, reproduction,
    -   and distribution as defined by Sections 1 through 9 of this document.
    -
    -   "Licensor" shall mean the copyright owner or entity authorized by
    -   the copyright owner that is granting the License.
    -
    -   "Legal Entity" shall mean the union of the acting entity and all
    -   other entities that control, are controlled by, or are under common
    -   control with that entity. For the purposes of this definition,
    -   "control" means (i) the power, direct or indirect, to cause the
    -   direction or management of such entity, whether by contract or
    -   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -   outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -   "You" (or "Your") shall mean an individual or Legal Entity
    -   exercising permissions granted by this License.
    -
    -   "Source" form shall mean the preferred form for making modifications,
    -   including but not limited to software source code, documentation
    -   source, and configuration files.
    -
    -   "Object" form shall mean any form resulting from mechanical
    -   transformation or translation of a Source form, including but
    -   not limited to compiled object code, generated documentation,
    -   and conversions to other media types.
    -
    -   "Work" shall mean the work of authorship, whether in Source or
    -   Object form, made available under the License, as indicated by a
    -   copyright notice that is included in or attached to the work
    -   (an example is provided in the Appendix below).
    -
    -   "Derivative Works" shall mean any work, whether in Source or Object
    -   form, that is based on (or derived from) the Work and for which the
    -   editorial revisions, annotations, elaborations, or other modifications
    -   represent, as a whole, an original work of authorship. For the purposes
    -   of this License, Derivative Works shall not include works that remain
    -   separable from, or merely link (or bind by name) to the interfaces of,
    -   the Work and Derivative Works thereof.
    -
    -   "Contribution" shall mean any work of authorship, including
    -   the original version of the Work and any modifications or additions
    -   to that Work or Derivative Works thereof, that is intentionally
    -   submitted to Licensor for inclusion in the Work by the copyright owner
    -   or by an individual or Legal Entity authorized to submit on behalf of
    -   the copyright owner. For the purposes of this definition, "submitted"
    -   means any form of electronic, verbal, or written communication sent
    -   to the Licensor or its representatives, including but not limited to
    -   communication on electronic mailing lists, source code control systems,
    -   and issue tracking systems that are managed by, or on behalf of, the
    -   Licensor for the purpose of discussing and improving the Work, but
    -   excluding communication that is conspicuously marked or otherwise
    -   designated in writing by the copyright owner as "Not a Contribution."
    -
    -   "Contributor" shall mean Licensor and any individual or Legal Entity
    -   on behalf of whom a Contribution has been received by Licensor and
    -   subsequently incorporated within the Work.
    -
    -2. Grant of Copyright License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   copyright license to reproduce, prepare Derivative Works of,
    -   publicly display, publicly perform, sublicense, and distribute the
    -   Work and such Derivative Works in Source or Object form.
    -
    -3. Grant of Patent License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   (except as stated in this section) patent license to make, have made,
    -   use, offer to sell, sell, import, and otherwise transfer the Work,
    -   where such license applies only to those patent claims licensable
    -   by such Contributor that are necessarily infringed by their
    -   Contribution(s) alone or by combination of their Contribution(s)
    -   with the Work to which such Contribution(s) was submitted. If You
    -   institute patent litigation against any entity (including a
    -   cross-claim or counterclaim in a lawsuit) alleging that the Work
    -   or a Contribution incorporated within the Work constitutes direct
    -   or contributory patent infringement, then any patent licenses
    -   granted to You under this License for that Work shall terminate
    -   as of the date such litigation is filed.
    -
    -4. Redistribution. You may reproduce and distribute copies of the
    -   Work or Derivative Works thereof in any medium, with or without
    -   modifications, and in Source or Object form, provided that You
    -   meet the following conditions:
    -
    -   (a) You must give any other recipients of the Work or
    -       Derivative Works a copy of this License; and
    -
    -   (b) You must cause any modified files to carry prominent notices
    -       stating that You changed the files; and
    -
    -   (c) You must retain, in the Source form of any Derivative Works
    -       that You distribute, all copyright, patent, trademark, and
    -       attribution notices from the Source form of the Work,
    -       excluding those notices that do not pertain to any part of
    -       the Derivative Works; and
    -
    -   (d) If the Work includes a "NOTICE" text file as part of its
    -       distribution, then any Derivative Works that You distribute must
    -       include a readable copy of the attribution notices contained
    -       within such NOTICE file, excluding those notices that do not
    -       pertain to any part of the Derivative Works, in at least one
    -       of the following places: within a NOTICE text file distributed
    -       as part of the Derivative Works; within the Source form or
    -       documentation, if provided along with the Derivative Works; or,
    -       within a display generated by the Derivative Works, if and
    -       wherever such third-party notices normally appear. The contents
    -       of the NOTICE file are for informational purposes only and
    -       do not modify the License. You may add Your own attribution
    -       notices within Derivative Works that You distribute, alongside
    -       or as an addendum to the NOTICE text from the Work, provided
    -       that such additional attribution notices cannot be construed
    -       as modifying the License.
    -
    -   You may add Your own copyright statement to Your modifications and
    -   may provide additional or different license terms and conditions
    -   for use, reproduction, or distribution of Your modifications, or
    -   for any such Derivative Works as a whole, provided Your use,
    -   reproduction, and distribution of the Work otherwise complies with
    -   the conditions stated in this License.
    -
    -5. Submission of Contributions. Unless You explicitly state otherwise,
    -   any Contribution intentionally submitted for inclusion in the Work
    -   by You to the Licensor shall be under the terms and conditions of
    -   this License, without any additional terms or conditions.
    -   Notwithstanding the above, nothing herein shall supersede or modify
    -   the terms of any separate license agreement you may have executed
    -   with Licensor regarding such Contributions.
    -
    -6. Trademarks. This License does not grant permission to use the trade
    -   names, trademarks, service marks, or product names of the Licensor,
    -   except as required for reasonable and customary use in describing the
    -   origin of the Work and reproducing the content of the NOTICE file.
    -
    -7. Disclaimer of Warranty. Unless required by applicable law or
    -   agreed to in writing, Licensor provides the Work (and each
    -   Contributor provides its Contributions) on an "AS IS" BASIS,
    -   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -   implied, including, without limitation, any warranties or conditions
    -   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -   PARTICULAR PURPOSE. You are solely responsible for determining the
    -   appropriateness of using or redistributing the Work and assume any
    -   risks associated with Your exercise of permissions under this License.
    -
    -8. Limitation of Liability. In no event and under no legal theory,
    -   whether in tort (including negligence), contract, or otherwise,
    -   unless required by applicable law (such as deliberate and grossly
    -   negligent acts) or agreed to in writing, shall any Contributor be
    -   liable to You for damages, including any direct, indirect, special,
    -   incidental, or consequential damages of any character arising as a
    -   result of this License or out of the use or inability to use the
    -   Work (including but not limited to damages for loss of goodwill,
    -   work stoppage, computer failure or malfunction, or any and all
    -   other commercial damages or losses), even if such Contributor
    -   has been advised of the possibility of such damages.
    -
    -9. Accepting Warranty or Additional Liability. While redistributing
    -   the Work or Derivative Works thereof, You may choose to offer,
    -   and charge a fee for, acceptance of support, warranty, indemnity,
    -   or other liability obligations and/or rights consistent with this
    -   License. However, in accepting such obligations, You may act only
    -   on Your own behalf and on Your sole responsibility, not on behalf
    -   of any other Contributor, and only if You agree to indemnify,
    -   defend, and hold each Contributor harmless for any liability
    -   incurred by, or claims asserted against, such Contributor by reason
    -   of your accepting any such warranty or additional liability.
    -
    -END OF TERMS AND CONDITIONS
    -
    -APPENDIX: How to apply the Apache License to your work.
    -
    -   To apply the Apache License to your work, attach the following
    -   boilerplate notice, with the fields enclosed by brackets "[]"
    -   replaced with your own identifying information. (Don't include
    -   the brackets!)  The text should be enclosed in the appropriate
    -   comment syntax for the file format. We also recommend that a
    -   file or class name and description of purpose be included on the
    -   same "printed page" as the copyright notice for easier
    -   identification within third-party archives.
    -
    -Copyright [yyyy] [name of copyright owner]
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -	http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -
    -
    -

    Apache License 2.0

    -

    Used by:

    -
    -                              Apache License
    -                        Version 2.0, January 2004
    -                     http://www.apache.org/licenses/
    -
    -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -1. Definitions.
    -
    -   "License" shall mean the terms and conditions for use, reproduction,
    -   and distribution as defined by Sections 1 through 9 of this document.
    -
    -   "Licensor" shall mean the copyright owner or entity authorized by
    -   the copyright owner that is granting the License.
    -
    -   "Legal Entity" shall mean the union of the acting entity and all
    -   other entities that control, are controlled by, or are under common
    -   control with that entity. For the purposes of this definition,
    -   "control" means (i) the power, direct or indirect, to cause the
    -   direction or management of such entity, whether by contract or
    -   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -   outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -   "You" (or "Your") shall mean an individual or Legal Entity
    -   exercising permissions granted by this License.
    -
    -   "Source" form shall mean the preferred form for making modifications,
    -   including but not limited to software source code, documentation
    -   source, and configuration files.
    -
    -   "Object" form shall mean any form resulting from mechanical
    -   transformation or translation of a Source form, including but
    -   not limited to compiled object code, generated documentation,
    -   and conversions to other media types.
    -
    -   "Work" shall mean the work of authorship, whether in Source or
    -   Object form, made available under the License, as indicated by a
    -   copyright notice that is included in or attached to the work
    -   (an example is provided in the Appendix below).
    -
    -   "Derivative Works" shall mean any work, whether in Source or Object
    -   form, that is based on (or derived from) the Work and for which the
    -   editorial revisions, annotations, elaborations, or other modifications
    -   represent, as a whole, an original work of authorship. For the purposes
    -   of this License, Derivative Works shall not include works that remain
    -   separable from, or merely link (or bind by name) to the interfaces of,
    -   the Work and Derivative Works thereof.
    -
    -   "Contribution" shall mean any work of authorship, including
    -   the original version of the Work and any modifications or additions
    -   to that Work or Derivative Works thereof, that is intentionally
    -   submitted to Licensor for inclusion in the Work by the copyright owner
    -   or by an individual or Legal Entity authorized to submit on behalf of
    -   the copyright owner. For the purposes of this definition, "submitted"
    -   means any form of electronic, verbal, or written communication sent
    -   to the Licensor or its representatives, including but not limited to
    -   communication on electronic mailing lists, source code control systems,
    -   and issue tracking systems that are managed by, or on behalf of, the
    -   Licensor for the purpose of discussing and improving the Work, but
    -   excluding communication that is conspicuously marked or otherwise
    -   designated in writing by the copyright owner as "Not a Contribution."
    -
    -   "Contributor" shall mean Licensor and any individual or Legal Entity
    -   on behalf of whom a Contribution has been received by Licensor and
    -   subsequently incorporated within the Work.
    -
    -2. Grant of Copyright License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   copyright license to reproduce, prepare Derivative Works of,
    -   publicly display, publicly perform, sublicense, and distribute the
    -   Work and such Derivative Works in Source or Object form.
    -
    -3. Grant of Patent License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   (except as stated in this section) patent license to make, have made,
    -   use, offer to sell, sell, import, and otherwise transfer the Work,
    -   where such license applies only to those patent claims licensable
    -   by such Contributor that are necessarily infringed by their
    -   Contribution(s) alone or by combination of their Contribution(s)
    -   with the Work to which such Contribution(s) was submitted. If You
    -   institute patent litigation against any entity (including a
    -   cross-claim or counterclaim in a lawsuit) alleging that the Work
    -   or a Contribution incorporated within the Work constitutes direct
    -   or contributory patent infringement, then any patent licenses
    -   granted to You under this License for that Work shall terminate
    -   as of the date such litigation is filed.
    -
    -4. Redistribution. You may reproduce and distribute copies of the
    -   Work or Derivative Works thereof in any medium, with or without
    -   modifications, and in Source or Object form, provided that You
    -   meet the following conditions:
    -
    -   (a) You must give any other recipients of the Work or
    -       Derivative Works a copy of this License; and
    -
    -   (b) You must cause any modified files to carry prominent notices
    -       stating that You changed the files; and
    -
    -   (c) You must retain, in the Source form of any Derivative Works
    -       that You distribute, all copyright, patent, trademark, and
    -       attribution notices from the Source form of the Work,
    -       excluding those notices that do not pertain to any part of
    -       the Derivative Works; and
    -
    -   (d) If the Work includes a "NOTICE" text file as part of its
    -       distribution, then any Derivative Works that You distribute must
    -       include a readable copy of the attribution notices contained
    -       within such NOTICE file, excluding those notices that do not
    -       pertain to any part of the Derivative Works, in at least one
    -       of the following places: within a NOTICE text file distributed
    -       as part of the Derivative Works; within the Source form or
    -       documentation, if provided along with the Derivative Works; or,
    -       within a display generated by the Derivative Works, if and
    -       wherever such third-party notices normally appear. The contents
    -       of the NOTICE file are for informational purposes only and
    -       do not modify the License. You may add Your own attribution
    -       notices within Derivative Works that You distribute, alongside
    -       or as an addendum to the NOTICE text from the Work, provided
    -       that such additional attribution notices cannot be construed
    -       as modifying the License.
    -
    -   You may add Your own copyright statement to Your modifications and
    -   may provide additional or different license terms and conditions
    -   for use, reproduction, or distribution of Your modifications, or
    -   for any such Derivative Works as a whole, provided Your use,
    -   reproduction, and distribution of the Work otherwise complies with
    -   the conditions stated in this License.
    -
    -5. Submission of Contributions. Unless You explicitly state otherwise,
    -   any Contribution intentionally submitted for inclusion in the Work
    -   by You to the Licensor shall be under the terms and conditions of
    -   this License, without any additional terms or conditions.
    -   Notwithstanding the above, nothing herein shall supersede or modify
    -   the terms of any separate license agreement you may have executed
    -   with Licensor regarding such Contributions.
    -
    -6. Trademarks. This License does not grant permission to use the trade
    -   names, trademarks, service marks, or product names of the Licensor,
    -   except as required for reasonable and customary use in describing the
    -   origin of the Work and reproducing the content of the NOTICE file.
    -
    -7. Disclaimer of Warranty. Unless required by applicable law or
    -   agreed to in writing, Licensor provides the Work (and each
    -   Contributor provides its Contributions) on an "AS IS" BASIS,
    -   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -   implied, including, without limitation, any warranties or conditions
    -   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -   PARTICULAR PURPOSE. You are solely responsible for determining the
    -   appropriateness of using or redistributing the Work and assume any
    -   risks associated with Your exercise of permissions under this License.
    -
    -8. Limitation of Liability. In no event and under no legal theory,
    -   whether in tort (including negligence), contract, or otherwise,
    -   unless required by applicable law (such as deliberate and grossly
    -   negligent acts) or agreed to in writing, shall any Contributor be
    -   liable to You for damages, including any direct, indirect, special,
    -   incidental, or consequential damages of any character arising as a
    -   result of this License or out of the use or inability to use the
    -   Work (including but not limited to damages for loss of goodwill,
    -   work stoppage, computer failure or malfunction, or any and all
    -   other commercial damages or losses), even if such Contributor
    -   has been advised of the possibility of such damages.
    -
    -9. Accepting Warranty or Additional Liability. While redistributing
    -   the Work or Derivative Works thereof, You may choose to offer,
    -   and charge a fee for, acceptance of support, warranty, indemnity,
    -   or other liability obligations and/or rights consistent with this
    -   License. However, in accepting such obligations, You may act only
    -   on Your own behalf and on Your sole responsibility, not on behalf
    -   of any other Contributor, and only if You agree to indemnify,
    -   defend, and hold each Contributor harmless for any liability
    -   incurred by, or claims asserted against, such Contributor by reason
    -   of your accepting any such warranty or additional liability.
    -
    -END OF TERMS AND CONDITIONS
    -
    -APPENDIX: How to apply the Apache License to your work.
    -
    -   To apply the Apache License to your work, attach the following
    -   boilerplate notice, with the fields enclosed by brackets "[]"
    -   replaced with your own identifying information. (Don't include
    -   the brackets!)  The text should be enclosed in the appropriate
    -   comment syntax for the file format. We also recommend that a
    -   file or class name and description of purpose be included on the
    -   same "printed page" as the copyright notice for easier
    -   identification within third-party archives.
    -
    -Copyright [yyyy] [name of copyright owner]
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -	http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -
    -
    -
    -

    Apache License 2.0

    -

    Used by:

    -
    -                              Apache License
    -                        Version 2.0, January 2004
    -                     http://www.apache.org/licenses/
    -
    -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -1. Definitions.
    -
    -   "License" shall mean the terms and conditions for use, reproduction,
    -   and distribution as defined by Sections 1 through 9 of this document.
    -
    -   "Licensor" shall mean the copyright owner or entity authorized by
    -   the copyright owner that is granting the License.
    -
    -   "Legal Entity" shall mean the union of the acting entity and all
    -   other entities that control, are controlled by, or are under common
    -   control with that entity. For the purposes of this definition,
    -   "control" means (i) the power, direct or indirect, to cause the
    -   direction or management of such entity, whether by contract or
    -   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -   outstanding shares, or (iii) beneficial ownership of such entity.
    -
    -   "You" (or "Your") shall mean an individual or Legal Entity
    -   exercising permissions granted by this License.
    -
    -   "Source" form shall mean the preferred form for making modifications,
    -   including but not limited to software source code, documentation
    -   source, and configuration files.
    -
    -   "Object" form shall mean any form resulting from mechanical
    -   transformation or translation of a Source form, including but
    -   not limited to compiled object code, generated documentation,
    -   and conversions to other media types.
    -
    -   "Work" shall mean the work of authorship, whether in Source or
    -   Object form, made available under the License, as indicated by a
    -   copyright notice that is included in or attached to the work
    -   (an example is provided in the Appendix below).
    -
    -   "Derivative Works" shall mean any work, whether in Source or Object
    -   form, that is based on (or derived from) the Work and for which the
    -   editorial revisions, annotations, elaborations, or other modifications
    -   represent, as a whole, an original work of authorship. For the purposes
    -   of this License, Derivative Works shall not include works that remain
    -   separable from, or merely link (or bind by name) to the interfaces of,
    -   the Work and Derivative Works thereof.
    -
    -   "Contribution" shall mean any work of authorship, including
    -   the original version of the Work and any modifications or additions
    -   to that Work or Derivative Works thereof, that is intentionally
    -   submitted to Licensor for inclusion in the Work by the copyright owner
    -   or by an individual or Legal Entity authorized to submit on behalf of
    -   the copyright owner. For the purposes of this definition, "submitted"
    -   means any form of electronic, verbal, or written communication sent
    -   to the Licensor or its representatives, including but not limited to
    -   communication on electronic mailing lists, source code control systems,
    -   and issue tracking systems that are managed by, or on behalf of, the
    -   Licensor for the purpose of discussing and improving the Work, but
    -   excluding communication that is conspicuously marked or otherwise
    -   designated in writing by the copyright owner as "Not a Contribution."
    -
    -   "Contributor" shall mean Licensor and any individual or Legal Entity
    -   on behalf of whom a Contribution has been received by Licensor and
    -   subsequently incorporated within the Work.
    -
    -2. Grant of Copyright License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   copyright license to reproduce, prepare Derivative Works of,
    -   publicly display, publicly perform, sublicense, and distribute the
    -   Work and such Derivative Works in Source or Object form.
    -
    -3. Grant of Patent License. Subject to the terms and conditions of
    -   this License, each Contributor hereby grants to You a perpetual,
    -   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -   (except as stated in this section) patent license to make, have made,
    -   use, offer to sell, sell, import, and otherwise transfer the Work,
    -   where such license applies only to those patent claims licensable
    -   by such Contributor that are necessarily infringed by their
    -   Contribution(s) alone or by combination of their Contribution(s)
    -   with the Work to which such Contribution(s) was submitted. If You
    -   institute patent litigation against any entity (including a
    -   cross-claim or counterclaim in a lawsuit) alleging that the Work
    -   or a Contribution incorporated within the Work constitutes direct
    -   or contributory patent infringement, then any patent licenses
    -   granted to You under this License for that Work shall terminate
    -   as of the date such litigation is filed.
    -
    -4. Redistribution. You may reproduce and distribute copies of the
    -   Work or Derivative Works thereof in any medium, with or without
    -   modifications, and in Source or Object form, provided that You
    -   meet the following conditions:
    -
    -   (a) You must give any other recipients of the Work or
    -       Derivative Works a copy of this License; and
    -
    -   (b) You must cause any modified files to carry prominent notices
    -       stating that You changed the files; and
    -
    -   (c) You must retain, in the Source form of any Derivative Works
    -       that You distribute, all copyright, patent, trademark, and
    -       attribution notices from the Source form of the Work,
    -       excluding those notices that do not pertain to any part of
    -       the Derivative Works; and
    -
    -   (d) If the Work includes a "NOTICE" text file as part of its
    -       distribution, then any Derivative Works that You distribute must
    -       include a readable copy of the attribution notices contained
    -       within such NOTICE file, excluding those notices that do not
    -       pertain to any part of the Derivative Works, in at least one
    -       of the following places: within a NOTICE text file distributed
    -       as part of the Derivative Works; within the Source form or
    -       documentation, if provided along with the Derivative Works; or,
    -       within a display generated by the Derivative Works, if and
    -       wherever such third-party notices normally appear. The contents
    -       of the NOTICE file are for informational purposes only and
    -       do not modify the License. You may add Your own attribution
    -       notices within Derivative Works that You distribute, alongside
    -       or as an addendum to the NOTICE text from the Work, provided
    -       that such additional attribution notices cannot be construed
    -       as modifying the License.
    -
    -   You may add Your own copyright statement to Your modifications and
    -   may provide additional or different license terms and conditions
    -   for use, reproduction, or distribution of Your modifications, or
    -   for any such Derivative Works as a whole, provided Your use,
    -   reproduction, and distribution of the Work otherwise complies with
    -   the conditions stated in this License.
    -
    -5. Submission of Contributions. Unless You explicitly state otherwise,
    -   any Contribution intentionally submitted for inclusion in the Work
    -   by You to the Licensor shall be under the terms and conditions of
    -   this License, without any additional terms or conditions.
    -   Notwithstanding the above, nothing herein shall supersede or modify
    -   the terms of any separate license agreement you may have executed
    -   with Licensor regarding such Contributions.
    -
    -6. Trademarks. This License does not grant permission to use the trade
    -   names, trademarks, service marks, or product names of the Licensor,
    -   except as required for reasonable and customary use in describing the
    -   origin of the Work and reproducing the content of the NOTICE file.
    -
    -7. Disclaimer of Warranty. Unless required by applicable law or
    -   agreed to in writing, Licensor provides the Work (and each
    -   Contributor provides its Contributions) on an "AS IS" BASIS,
    -   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -   implied, including, without limitation, any warranties or conditions
    -   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -   PARTICULAR PURPOSE. You are solely responsible for determining the
    -   appropriateness of using or redistributing the Work and assume any
    -   risks associated with Your exercise of permissions under this License.
    -
    -8. Limitation of Liability. In no event and under no legal theory,
    -   whether in tort (including negligence), contract, or otherwise,
    -   unless required by applicable law (such as deliberate and grossly
    -   negligent acts) or agreed to in writing, shall any Contributor be
    -   liable to You for damages, including any direct, indirect, special,
    -   incidental, or consequential damages of any character arising as a
    -   result of this License or out of the use or inability to use the
    -   Work (including but not limited to damages for loss of goodwill,
    -   work stoppage, computer failure or malfunction, or any and all
    -   other commercial damages or losses), even if such Contributor
    -   has been advised of the possibility of such damages.
    -
    -9. Accepting Warranty or Additional Liability. While redistributing
    -   the Work or Derivative Works thereof, You may choose to offer,
    -   and charge a fee for, acceptance of support, warranty, indemnity,
    -   or other liability obligations and/or rights consistent with this
    -   License. However, in accepting such obligations, You may act only
    -   on Your own behalf and on Your sole responsibility, not on behalf
    -   of any other Contributor, and only if You agree to indemnify,
    -   defend, and hold each Contributor harmless for any liability
    -   incurred by, or claims asserted against, such Contributor by reason
    -   of your accepting any such warranty or additional liability.
    -
    -END OF TERMS AND CONDITIONS
    -
    -APPENDIX: How to apply the Apache License to your work.
    -
    -   To apply the Apache License to your work, attach the following
    -   boilerplate notice, with the fields enclosed by brackets "[]"
    -   replaced with your own identifying information. (Don't include
    -   the brackets!)  The text should be enclosed in the appropriate
    -   comment syntax for the file format. We also recommend that a
    -   file or class name and description of purpose be included on the
    -   same "printed page" as the copyright notice for easier
    -   identification within third-party archives.
    -
    -Copyright [yyyy] [name of copyright owner]
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -	http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    -
    -
    -
-
-

    Apache License 2.0

    -

    Used by:

-
-                                 Apache License
    -                           Version 2.0, January 2004
    -                        http://www.apache.org/licenses/
    -
    -   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -   1. Definitions.
    +   1. Definitions.
     
           "License" shall mean the terms and conditions for use, reproduction,
           and distribution as defined by Sections 1 through 9 of this document.
    @@ -12198,7 +12174,6 @@ 

    Used by:

async-graphql-derive
async-graphql-parser
async-graphql-value
-
deadpool-runtime
deno-proc-macro-rules
deno-proc-macro-rules-macros
dunce
@@ -12324,26 +12299,6 @@

    Used by:

    See the License for the specific language governing permissions and limitations under the License.
    -
-
-

    Apache License 2.0

    -

    Used by:

-
-Copyright 2023 The allocator-api2 project developers
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -	http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
Apache License 2.0

    @@ -12394,7 +12349,6 @@

    Used by:

unicode-id
unicode-normalization
unicode-width
-
unicode-xid
Licensed under the Apache License, Version 2.0
     <LICENSE-APACHE or
    @@ -12621,6 +12575,7 @@ 

    Used by:

subtle
Copyright (c) 2016-2017 Isis Agora Lovecruft, Henry de Valence. All rights reserved.
    +Copyright (c) 2016-2024 Isis Agora Lovecruft. All rights reserved.
     
     Redistribution and use in source and binary forms, with or without
     modification, are permitted provided that the following conditions are
    @@ -12654,41 +12609,41 @@ 

    Used by:

    BSD 3-Clause "New" or "Revised" License

    Used by:

-Copyright (c) 2019, Sébastien Crozet
    -All rights reserved.
    -
    -Redistribution and use in source and binary forms, with or without
    -modification, are permitted provided that the following conditions are met:
    -
    -1. Redistributions of source code must retain the above copyright notice, this
    -   list of conditions and the following disclaimer.
    -
    -2. Redistributions in binary form must reproduce the above copyright notice,
    -   this list of conditions and the following disclaimer in the documentation
    -   and/or other materials provided with the distribution.
    -
    -3. Neither the name of the author nor the names of its contributors may be used
    -   to endorse or promote products derived from this software without specific
    -   prior written permission.
    -
    -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
    -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    -
-
-
-

    BSD 3-Clause "New" or "Revised" License

    -

    Used by:

-
+Copyright (c) 2019, Sébastien Crozet
    +All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are met:
    +
    +1. Redistributions of source code must retain the above copyright notice, this
    +   list of conditions and the following disclaimer.
    +
    +2. Redistributions in binary form must reproduce the above copyright notice,
    +   this list of conditions and the following disclaimer in the documentation
    +   and/or other materials provided with the distribution.
    +
    +3. Neither the name of the author nor the names of its contributors may be used
    +   to endorse or promote products derived from this software without specific
    +   prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
    +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    +
    +
+
+

    BSD 3-Clause "New" or "Revised" License

    +

    Used by:

+Copyright (c) <year> <owner>.
    @@ -12744,46 +12699,46 @@ 

    Used by:

-Creative Commons CC0 1.0 Universal
    -
    -<<beginOptional;name=ccOptionalIntro>> CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER.  <<endOptional>>
    -
    -Statement of Purpose
    -
    -The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work").
    -
    -Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others.
    -
    -For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights.
    -
    -1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following:
    -
    -     i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work;
    -
    -     ii. moral rights retained by the original author(s) and/or performer(s);
    -
    -     iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work;
    -
    -     iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below;
    -
    -     v. rights protecting the extraction, dissemination, use and reuse of data in a Work;
    -
    -     vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and
    -
    -     vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof.
    -
    -2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose.
    -
    -3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose.
    -
    -4. Limitations and Disclaimers.
    -
    -     a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document.
    -
    -     b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law.
    -
    -     c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work.
    -
+                Creative Commons CC0 1.0 Universal
    +
    +<<beginOptional;name=ccOptionalIntro>> CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER.  <<endOptional>>
    +
    +Statement of Purpose
    +
    +The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work").
    +
    +Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others.
    +
    +For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights.
    +
    +1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following:
    +
    +     i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work;
    +
    +     ii. moral rights retained by the original author(s) and/or performer(s);
    +
    +     iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work;
    +
    +     iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below;
    +
    +     v. rights protecting the extraction, dissemination, use and reuse of data in a Work;
    +
    +     vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and
    +
    +     vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof.
    +
    +2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose.
    +
    +3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose.
    +
    +4. Limitations and Disclaimers.
    +
    +     a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document.
    +
    +     b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law.
    +
    +     c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work.
    +
          d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. 
@@ -13234,7 +13189,6 @@

    ISC License

    Used by:

       Copyright 2015-2016 Brian Smith.
     
    @@ -13255,7 +13209,6 @@ 

    ISC License

    Used by:

    /* Copyright (c) 2015, Google Inc.
      *
    @@ -13277,7 +13230,6 @@ 

    ISC License

    Used by:

    // Copyright 2015-2016 Brian Smith.
     //
    @@ -13707,7 +13659,6 @@ 

    MIT License

    Used by:

    Copyright (c) 2015-2016 the fiat-crypto authors (see
     https://github.com/mit-plv/fiat-crypto/blob/master/AUTHORS).
    @@ -13993,34 +13944,6 @@ 

    Used by:

OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-
-

    MIT License

    -

    Used by:

-
-Copyright (c) 2017-2019 Geoffroy Couprie
    -
    -Permission is hereby granted, free of charge, to any person obtaining
    -a copy of this software and associated documentation files (the
    -"Software"), to deal in the Software without restriction, including
    -without limitation the rights to use, copy, modify, merge, publish,
    -distribute, sublicense, and/or sell copies of the Software, and to
    -permit persons to whom the Software is furnished to do so, subject to
    -the following conditions:
    -
    -The above copyright notice and this permission notice shall be
    -included in all copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
    -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     
@@ -14060,10 +13983,9 @@

    Used by:

    MIT License

    Used by:

-Copyright (c) 2018 Sean McArthur
-Copyright (c) 2016 Alex Crichton
+Copyright (c) 2018-2019 Sean McArthur
     
     Permission is hereby granted, free of charge, to any person obtaining a copy
     of this software and associated documentation files (the "Software"), to deal
    @@ -14089,9 +14011,10 @@ 

    Used by:

    MIT License

    Used by:

-Copyright (c) 2018-2019 Sean McArthur
+Copyright (c) 2018-2023 Sean McArthur
+Copyright (c) 2016 Alex Crichton
     
     Permission is hereby granted, free of charge, to any person obtaining a copy
     of this software and associated documentation files (the "Software"), to deal
    @@ -14464,6 +14387,39 @@ 

    Used by:

shall be included in all copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
+
+
+

    MIT License

    +

    Used by:

+
+Copyright (c) 2019-2024 Sean McArthur & Hyper Contributors
    +
    +Permission is hereby granted, free of charge, to any
    +person obtaining a copy of this software and associated
    +documentation files (the "Software"), to deal in the
    +Software without restriction, including without
    +limitation the rights to use, copy, modify, merge,
    +publish, distribute, sublicense, and/or sell copies of
    +the Software, and to permit persons to whom the Software
    +is furnished to do so, subject to the following
    +conditions:
    +
    +The above copyright notice and this permission notice
    +shall be included in all copies or substantial portions
    +of the Software.
    +
     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
     ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
     TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
    @@ -14721,6 +14677,35 @@ 

    Used by:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+
+

    MIT License

    +

    Used by:

+
+MIT License
    +
    +Copyright (c) 2018 The typed-arena developers
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    @@ -15130,6 +15115,35 @@ 

    Used by:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+
+

    MIT License

    +

    Used by:

+
+MIT License
    +
    +Copyright (c) 2023 4lDO2
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    @@ -15145,6 +15159,7 @@ 

    Used by:

base64-simd
convert_case
+
cookie-factory
crunchy
deno_console
deno_core
@@ -15153,7 +15168,6 @@

      Used by:

deno_web
deno_webidl
difflib
-
http-body
jsonschema
lazy-regex-proc_macros
number_prefix
@@ -15265,26 +15279,26 @@

      Used by:

-MIT License
      -
      -Copyright (c) 2019 Daniel Augusto Rizzi Salvadori
      -
      -Permission is hereby granted, free of charge, to any person obtaining a copy
      -of this software and associated documentation files (the "Software"), to deal
      -in the Software without restriction, including without limitation the rights
      -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
      -copies of the Software, and to permit persons to whom the Software is
      -furnished to do so, subject to the following conditions:
      -
      -The above copyright notice and this permission notice shall be included in all
      -copies or substantial portions of the Software.
      -
      -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+                MIT License
      +
      +Copyright (c) 2019 Daniel Augusto Rizzi Salvadori
      +
      +Permission is hereby granted, free of charge, to any person obtaining a copy
      +of this software and associated documentation files (the "Software"), to deal
      +in the Software without restriction, including without limitation the rights
      +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
      +copies of the Software, and to permit persons to whom the Software is
      +furnished to do so, subject to the following conditions:
      +
      +The above copyright notice and this permission notice shall be included in all
      +copies or substantial portions of the Software.
      +
      +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
       SOFTWARE.
@@ -15342,6 +15356,7 @@

      MIT License

      Used by:

      Permission is hereby granted, free of charge, to any person obtaining
       a copy of this software and associated documentation files (the
      @@ -15457,7 +15472,6 @@ 

      MIT License

      Used by:

      The MIT License (MIT)
       
      @@ -15576,36 +15590,6 @@ 

      Used by:

LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-
-

      MIT License

      -

      Used by:

-
-The MIT License (MIT)
      -
      -Copyright (c) 2015 Austin Bonander
      -
      -Permission is hereby granted, free of charge, to any person obtaining a copy
      -of this software and associated documentation files (the "Software"), to deal
      -in the Software without restriction, including without limitation the rights
      -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
      -copies of the Software, and to permit persons to whom the Software is
      -furnished to do so, subject to the following conditions:
      -
      -The above copyright notice and this permission notice shall be included in all
      -copies or substantial portions of the Software.
      -
      -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
      -SOFTWARE.
      -
       
@@ -16012,6 +15996,36 @@

      Used by:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+
+

      MIT License

      +

      Used by:

+
+The MIT License (MIT)
      +
      +Copyright (c) 2015 Austin Bonander
      +
      +Permission is hereby granted, free of charge, to any person obtaining a copy
      +of this software and associated documentation files (the "Software"), to deal
      +in the Software without restriction, including without limitation the rights
      +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
      +copies of the Software, and to permit persons to whom the Software is
      +furnished to do so, subject to the following conditions:
      +
      +The above copyright notice and this permission notice shall be included in all
      +copies or substantial portions of the Software.
      +
      +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
      +SOFTWARE.
      +
       
@@ -16020,26 +16034,26 @@

      Used by:

-The MIT License (MIT)
      -
      -Copyright (c) 2015 Bartłomiej Kamiński
      -
      -Permission is hereby granted, free of charge, to any person obtaining a copy
      -of this software and associated documentation files (the "Software"), to deal
      -in the Software without restriction, including without limitation the rights
      -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
      -copies of the Software, and to permit persons to whom the Software is
      -furnished to do so, subject to the following conditions:
      -
      -The above copyright notice and this permission notice shall be included in all
      -copies or substantial portions of the Software.
      -
      -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+                The MIT License (MIT)
      +
      +Copyright (c) 2015 Bartłomiej Kamiński
      +
      +Permission is hereby granted, free of charge, to any person obtaining a copy
      +of this software and associated documentation files (the "Software"), to deal
      +in the Software without restriction, including without limitation the rights
      +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
      +copies of the Software, and to permit persons to whom the Software is
      +furnished to do so, subject to the following conditions:
      +
      +The above copyright notice and this permission notice shall be included in all
      +copies or substantial portions of the Software.
      +
      +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
       SOFTWARE.
@@ -16048,28 +16062,28 @@

      Used by:

-The MIT License (MIT)
      -
      -Copyright (c) 2015 Markus Westerlind
      -
      -Permission is hereby granted, free of charge, to any person obtaining a copy
      -of this software and associated documentation files (the "Software"), to deal
      -in the Software without restriction, including without limitation the rights
      -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
      -copies of the Software, and to permit persons to whom the Software is
      -furnished to do so, subject to the following conditions:
      -
      -The above copyright notice and this permission notice shall be included in
      -all copies or substantial portions of the Software.
      -
      -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
      -THE SOFTWARE.
      -
+                The MIT License (MIT)
      +
      +Copyright (c) 2015 Markus Westerlind
      +
      +Permission is hereby granted, free of charge, to any person obtaining a copy
      +of this software and associated documentation files (the "Software"), to deal
      +in the Software without restriction, including without limitation the rights
      +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
      +copies of the Software, and to permit persons to whom the Software is
      +furnished to do so, subject to the following conditions:
      +
      +The above copyright notice and this permission notice shall be included in
      +all copies or substantial portions of the Software.
      +
      +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
      +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
      +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
      +THE SOFTWARE.
      +
       
@@ -17280,7 +17294,6 @@

      OpenSSL License

      Used by:

      /* ====================================================================
        * Copyright (c) 1998-2011 The OpenSSL Project.  All rights reserved.
      diff --git a/scripts/install.sh b/scripts/install.sh
      index f2e7e7a0f4..de9ed49dae 100755
      --- a/scripts/install.sh
      +++ b/scripts/install.sh
      @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa
       
       # Router version defined in apollo-router's Cargo.toml
       # Note: Change this line manually during the release steps.
      -PACKAGE_VERSION="v1.51.0"
      +PACKAGE_VERSION="v1.52.0"
       
       download_binary() {
           downloader --check
      diff --git a/xtask/Cargo.lock b/xtask/Cargo.lock
      index 665233fda9..37b00586cd 100644
      --- a/xtask/Cargo.lock
      +++ b/xtask/Cargo.lock
      @@ -91,9 +91,9 @@ dependencies = [
       
       [[package]]
       name = "anyhow"
      -version = "1.0.75"
      +version = "1.0.86"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6"
      +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
       
       [[package]]
       name = "ascii"
      @@ -124,9 +124,9 @@ dependencies = [
       
       [[package]]
       name = "base64"
      -version = "0.21.2"
      +version = "0.21.7"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d"
      +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
       
       [[package]]
       name = "bitflags"
      @@ -169,9 +169,9 @@ checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be"
       
       [[package]]
       name = "camino"
      -version = "1.1.6"
      +version = "1.1.7"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c"
      +checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239"
       dependencies = [
        "serde",
       ]
      @@ -216,9 +216,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
       
       [[package]]
       name = "chrono"
      -version = "0.4.34"
      +version = "0.4.38"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b"
      +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401"
       dependencies = [
        "android-tzdata",
        "iana-time-zone",
      @@ -228,9 +228,9 @@ dependencies = [
       
       [[package]]
       name = "clap"
      -version = "4.5.1"
      +version = "4.5.9"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "c918d541ef2913577a0f9566e9ce27cb35b6df072075769e0b26cb5a554520da"
      +checksum = "64acc1846d54c1fe936a78dc189c34e28d3f5afc348403f28ecf53660b9b8462"
       dependencies = [
        "clap_builder",
        "clap_derive",
      @@ -238,9 +238,9 @@ dependencies = [
       
       [[package]]
       name = "clap_builder"
      -version = "4.5.1"
      +version = "4.5.9"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "9f3e7391dad68afb0c2ede1bf619f579a3dc9c2ec67f089baa397123a2f3d1eb"
      +checksum = "6fb8393d67ba2e7bfaf28a23458e4e2b543cc73a99595511eb207fdb8aede942"
       dependencies = [
        "anstream",
        "anstyle",
      @@ -250,11 +250,11 @@ dependencies = [
       
       [[package]]
       name = "clap_derive"
      -version = "4.5.0"
      +version = "4.5.8"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47"
      +checksum = "2bac35c6dafb060fd4d275d9a4ffae97917c13a6327903a8be2153cd964f7085"
       dependencies = [
      - "heck",
      + "heck 0.5.0",
        "proc-macro2",
        "quote",
        "syn 2.0.48",
      @@ -407,9 +407,9 @@ dependencies = [
       
       [[package]]
       name = "fastrand"
      -version = "2.0.0"
      +version = "2.1.0"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764"
      +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a"
       
       [[package]]
       name = "filetime"
      @@ -419,15 +419,15 @@ checksum = "d4029edd3e734da6fe05b6cd7bd2960760a616bd2ddd0d59a0124746d6272af0"
       dependencies = [
        "cfg-if",
        "libc",
      - "redox_syscall",
      + "redox_syscall 0.3.5",
        "windows-sys 0.48.0",
       ]
       
       [[package]]
       name = "flate2"
      -version = "1.0.27"
      +version = "1.0.30"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010"
      +checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae"
       dependencies = [
        "crc32fast",
        "miniz_oxide",
      @@ -574,7 +574,7 @@ checksum = "a40f793251171991c4eb75bd84bc640afa8b68ff6907bc89d3b712a22f700506"
       dependencies = [
        "graphql-introspection-query",
        "graphql-parser",
      - "heck",
      + "heck 0.4.1",
        "lazy_static",
        "proc-macro2",
        "quote",
      @@ -625,11 +625,17 @@ version = "0.4.1"
       source = "registry+https://github.com/rust-lang/crates.io-index"
       checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
       
      +[[package]]
      +name = "heck"
      +version = "0.5.0"
      +source = "registry+https://github.com/rust-lang/crates.io-index"
      +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
      +
       [[package]]
       name = "hermit-abi"
      -version = "0.3.2"
      +version = "0.3.9"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b"
      +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
       
       [[package]]
       name = "home"
      @@ -757,9 +763,9 @@ dependencies = [
       
       [[package]]
       name = "insta"
      -version = "1.35.1"
      +version = "1.39.0"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "7c985c1bef99cf13c58fade470483d81a2bfe846ebde60ed28cc2dddec2df9e2"
      +checksum = "810ae6042d48e2c9e9215043563a58a80b877bc863228a74cf10c49d4620a6f5"
       dependencies = [
        "console",
        "lazy_static",
      @@ -768,7 +774,6 @@ dependencies = [
        "pest_derive",
        "serde",
        "similar",
      - "yaml-rust",
       ]
       
       [[package]]
      @@ -809,9 +814,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
       
       [[package]]
       name = "libc"
      -version = "0.2.153"
      +version = "0.2.155"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
      +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
       
       [[package]]
       name = "linked-hash-map"
      @@ -825,6 +830,16 @@ version = "0.4.13"
       source = "registry+https://github.com/rust-lang/crates.io-index"
       checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c"
       
      +[[package]]
      +name = "lock_api"
      +version = "0.4.12"
      +source = "registry+https://github.com/rust-lang/crates.io-index"
      +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
      +dependencies = [
      + "autocfg",
      + "scopeguard",
      +]
      +
       [[package]]
       name = "log"
       version = "0.4.20"
      @@ -911,9 +926,9 @@ dependencies = [
       
       [[package]]
       name = "once_cell"
      -version = "1.18.0"
      +version = "1.19.0"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
      +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
       
       [[package]]
       name = "openssl-probe"
      @@ -921,6 +936,29 @@ version = "0.1.5"
       source = "registry+https://github.com/rust-lang/crates.io-index"
       checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
       
      +[[package]]
      +name = "parking_lot"
      +version = "0.12.3"
      +source = "registry+https://github.com/rust-lang/crates.io-index"
      +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27"
      +dependencies = [
      + "lock_api",
      + "parking_lot_core",
      +]
      +
      +[[package]]
      +name = "parking_lot_core"
      +version = "0.9.10"
      +source = "registry+https://github.com/rust-lang/crates.io-index"
      +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
      +dependencies = [
      + "cfg-if",
      + "libc",
      + "redox_syscall 0.5.2",
      + "smallvec",
      + "windows-targets 0.52.0",
      +]
      +
       [[package]]
       name = "percent-encoding"
       version = "2.3.0"
      @@ -1057,11 +1095,20 @@ dependencies = [
        "bitflags 1.3.2",
       ]
       
      +[[package]]
      +name = "redox_syscall"
      +version = "0.5.2"
      +source = "registry+https://github.com/rust-lang/crates.io-index"
      +checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd"
      +dependencies = [
      + "bitflags 2.4.0",
      +]
      +
       [[package]]
       name = "regex"
      -version = "1.10.3"
      +version = "1.10.5"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15"
      +checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f"
       dependencies = [
        "aho-corasick",
        "memchr",
      @@ -1088,9 +1135,9 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
       
       [[package]]
       name = "reqwest"
      -version = "0.11.18"
      +version = "0.11.27"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55"
      +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62"
       dependencies = [
        "base64",
        "bytes",
      @@ -1115,6 +1162,8 @@ dependencies = [
        "serde",
        "serde_json",
        "serde_urlencoded",
      + "sync_wrapper",
      + "system-configuration",
        "tokio",
        "tokio-rustls",
        "tower-service",
      @@ -1227,6 +1276,12 @@ dependencies = [
        "windows-sys 0.48.0",
       ]
       
      +[[package]]
      +name = "scopeguard"
      +version = "1.2.0"
      +source = "registry+https://github.com/rust-lang/crates.io-index"
      +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
      +
       [[package]]
       name = "sct"
       version = "0.7.0"
      @@ -1271,18 +1326,18 @@ dependencies = [
       
       [[package]]
       name = "serde"
      -version = "1.0.197"
      +version = "1.0.204"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2"
      +checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12"
       dependencies = [
        "serde_derive",
       ]
       
       [[package]]
       name = "serde_derive"
      -version = "1.0.197"
      +version = "1.0.204"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
      +checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222"
       dependencies = [
        "proc-macro2",
        "quote",
      @@ -1291,9 +1346,9 @@ dependencies = [
       
       [[package]]
       name = "serde_json"
      -version = "1.0.105"
      +version = "1.0.120"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360"
      +checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5"
       dependencies = [
        "itoa",
        "ryu",
      @@ -1329,6 +1384,15 @@ version = "1.1.0"
       source = "registry+https://github.com/rust-lang/crates.io-index"
       checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde"
       
      +[[package]]
      +name = "signal-hook-registry"
      +version = "1.4.2"
      +source = "registry+https://github.com/rust-lang/crates.io-index"
      +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1"
      +dependencies = [
      + "libc",
      +]
      +
       [[package]]
       name = "similar"
       version = "2.2.1"
      @@ -1344,6 +1408,12 @@ dependencies = [
        "autocfg",
       ]
       
      +[[package]]
      +name = "smallvec"
      +version = "1.13.2"
      +source = "registry+https://github.com/rust-lang/crates.io-index"
      +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
      +
       [[package]]
       name = "socket2"
       version = "0.4.9"
      @@ -1398,11 +1468,38 @@ dependencies = [
        "unicode-ident",
       ]
       
      +[[package]]
      +name = "sync_wrapper"
      +version = "0.1.2"
      +source = "registry+https://github.com/rust-lang/crates.io-index"
      +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
      +
      +[[package]]
      +name = "system-configuration"
      +version = "0.5.1"
      +source = "registry+https://github.com/rust-lang/crates.io-index"
      +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7"
      +dependencies = [
      + "bitflags 1.3.2",
      + "core-foundation",
      + "system-configuration-sys",
      +]
      +
      +[[package]]
      +name = "system-configuration-sys"
      +version = "0.5.0"
      +source = "registry+https://github.com/rust-lang/crates.io-index"
      +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9"
      +dependencies = [
      + "core-foundation-sys",
      + "libc",
      +]
      +
       [[package]]
       name = "tar"
      -version = "0.4.40"
      +version = "0.4.41"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "b16afcea1f22891c49a00c751c7b63b2233284064f11a200fc624137c51e2ddb"
      +checksum = "cb797dad5fb5b76fcf519e702f4a589483b5ef06567f160c392832c1f5e44909"
       dependencies = [
        "filetime",
        "libc",
      @@ -1411,15 +1508,14 @@ dependencies = [
       
       [[package]]
       name = "tempfile"
      -version = "3.8.0"
      +version = "3.10.1"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef"
      +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1"
       dependencies = [
        "cfg-if",
        "fastrand",
      - "redox_syscall",
        "rustix",
      - "windows-sys 0.48.0",
      + "windows-sys 0.52.0",
       ]
       
       [[package]]
      @@ -1469,20 +1565,34 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
       
       [[package]]
       name = "tokio"
      -version = "1.36.0"
      +version = "1.38.1"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931"
      +checksum = "eb2caba9f80616f438e09748d5acda951967e1ea58508ef53d9c6402485a46df"
       dependencies = [
        "backtrace",
        "bytes",
        "libc",
        "mio",
        "num_cpus",
      + "parking_lot",
        "pin-project-lite",
      + "signal-hook-registry",
        "socket2 0.5.5",
      + "tokio-macros",
        "windows-sys 0.48.0",
       ]
       
      +[[package]]
      +name = "tokio-macros"
      +version = "2.3.0"
      +source = "registry+https://github.com/rust-lang/crates.io-index"
      +checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a"
      +dependencies = [
      + "proc-macro2",
      + "quote",
      + "syn 2.0.48",
      +]
      +
       [[package]]
       name = "tokio-rustls"
       version = "0.24.1"
      @@ -1624,9 +1734,9 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
       
       [[package]]
       name = "walkdir"
      -version = "2.4.0"
      +version = "2.5.0"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee"
      +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
       dependencies = [
        "same-file",
        "winapi-util",
      @@ -1729,24 +1839,11 @@ dependencies = [
        "wasm-bindgen",
       ]
       
      -[[package]]
      -name = "webpki"
      -version = "0.22.1"
      -source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e"
      -dependencies = [
      - "ring",
      - "untrusted",
      -]
      -
       [[package]]
       name = "webpki-roots"
      -version = "0.22.6"
      +version = "0.25.4"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87"
      -dependencies = [
      - "webpki",
      -]
      +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1"
       
       [[package]]
       name = "which"
      @@ -1934,11 +2031,12 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
       
       [[package]]
       name = "winreg"
      -version = "0.10.1"
      +version = "0.50.0"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d"
      +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1"
       dependencies = [
      - "winapi",
      + "cfg-if",
      + "windows-sys 0.48.0",
       ]
       
       [[package]]
      @@ -1949,13 +2047,30 @@ checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904"
       
       [[package]]
       name = "xattr"
      -version = "1.0.1"
      +version = "1.3.1"
       source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "f4686009f71ff3e5c4dbcf1a282d0a44db3f021ba69350cd42086b3e5f1c6985"
      +checksum = "8da84f1a25939b27f6820d92aed108f83ff920fdf11a7b19366c27c4cda81d4f"
       dependencies = [
        "libc",
      + "linux-raw-sys",
      + "rustix",
       ]
       
      +[[package]]
      +name = "xshell"
      +version = "0.2.6"
      +source = "registry+https://github.com/rust-lang/crates.io-index"
      +checksum = "6db0ab86eae739efd1b054a8d3d16041914030ac4e01cd1dca0cf252fd8b6437"
      +dependencies = [
      + "xshell-macros",
      +]
      +
      +[[package]]
      +name = "xshell-macros"
      +version = "0.2.6"
      +source = "registry+https://github.com/rust-lang/crates.io-index"
      +checksum = "9d422e8e38ec76e2f06ee439ccc765e9c6a9638b9e7c9f2e8255e4d41e8bd852"
      +
       [[package]]
       name = "xtask"
       version = "1.5.0"
      @@ -1986,18 +2101,10 @@ dependencies = [
        "tokio",
        "walkdir",
        "which",
      + "xshell",
        "zip",
       ]
       
      -[[package]]
      -name = "yaml-rust"
      -version = "0.4.5"
      -source = "registry+https://github.com/rust-lang/crates.io-index"
      -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85"
      -dependencies = [
      - "linked-hash-map",
      -]
      -
       [[package]]
       name = "zeroize"
       version = "1.6.0"
      diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml
      index bbeed4d395..7efbefff1b 100644
      --- a/xtask/Cargo.toml
      +++ b/xtask/Cargo.toml
      @@ -37,9 +37,10 @@ serde_json = "1"
       tar = "0.4"
       tempfile = "3"
       tinytemplate = "1.2.1"
      -tokio = "1.36.0"
      +tokio = { version = "1.36.0", features = ["full"] }
       which = "6.0.1"
       walkdir = "2.4.0"
      +xshell = "0.2.6"
       
       [target.'cfg(target_os = "macos")'.dependencies]
       base64 = "0.21"
      diff --git a/xtask/README.md b/xtask/README.md
      index d54e0f1e1c..0bd6883a87 100644
      --- a/xtask/README.md
      +++ b/xtask/README.md
@@ -4,6 +4,20 @@ The Apollo Router project uses [xtask](https://github.com/matklad/cargo-xtask) t
       
       You can run `cargo xtask --help` to see the usage. Generally we recommend that you continue to use the default cargo commands like `cargo fmt`, `cargo clippy`, and `cargo test`, but if you are debugging something that is happening in CI it can be useful to run the xtask commands that we run [in CI](../.github/workflows).
       
      +## xtask dev
      +
+`xtask dev` runs all the checks, linting, and tests that we also run in CI. Run it locally before creating a PR to check your work.
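+
+For example:
+
+```
+cargo xtask dev
+```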
      +
      +## xtask lint
      +
      +`xtask lint` runs code formatting checks and clippy. Use `cargo xtask lint --fmt` to fix formatting issues.
      +
       ## xtask test
       
       You can run `cargo xtask test` to run tests with the same configuration as our CI systems. If you are on GNU Linux, it will also run the e2e tests set up in [apollographql/supergraph-demo](https://github.com/apollographql/supergraph-demo).
@@ -12,8 +26,18 @@ You can run `cargo xtask test` to run tests with the same configuration as our C
       
       You can run `cargo xtask dist` to build the Apollo Router's binary like it would be built in CI. It will automatically build from the source that you've checked out and for your local machine's architecture. If you would like to build a specific version of Router, you can pass `--version v0.1.5` where `v0.1.5` is the version you'd like to build.
       
      -## xtask prep
      +## xtask release
      +
+This command group prepares the Apollo Router for a new release. A step-by-step guide is in the [release checklist](../RELEASE_CHECKLIST.md).
      +
      +## xtask fed-flame
      +
+`cargo xtask fed-flame` is a helper for producing flame graphs of query planning. It's useful for investigating the performance of a query that you know is slow to plan. Typical usage:
       
      -The most important xtask command you'll need to run locally is `cargo xtask prep`. This command prepares the Apollo Router for a new release. You'll want to update the version in `Cargo.toml`, and run `cargo xtask prep` as a part of making the PR for a new release. 
      +```
      +cargo xtask fed-flame plan query.graphql schema.graphql
      +```
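+
+Any arguments after `fed-flame` are passed through to the `apollo-federation-cli` binary, which is built with the `profiling` profile and run under `samply`. If `samply` is not installed, the command will suggest `cargo install samply`.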
       
      -These steps are outlined in more detail in the [release checklist](../RELEASE_CHECKLIST.md).
+For query planner developers, further guidance on using flame graphs is available in the Federation Confluence space.
      diff --git a/xtask/src/commands/flame.rs b/xtask/src/commands/flame.rs
      new file mode 100644
      index 0000000000..09b78ed319
      --- /dev/null
      +++ b/xtask/src/commands/flame.rs
@@ -0,0 +1,37 @@
      +use anyhow::Result;
      +use xshell::*;
      +use xtask::*;
      +
      +const PROJECT_NAME: &str = "apollo-federation-cli";
      +
      +#[derive(Debug, clap::Parser)]
      +pub struct Flame {
+    subargs: Vec<String>,
      +}
      +
      +impl Flame {
      +    pub fn run(&self) -> Result<()> {
      +        let shell = Shell::new()?;
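+        // Make sure samply is available before doing any work.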
      +        match which::which("samply") {
      +            Err(which::Error::CannotFindBinaryPath) => {
+                anyhow::bail!("samply binary not found. Try running: cargo install samply")
      +            }
      +            Err(err) => anyhow::bail!("{err}"),
      +            Ok(_) => (),
      +        }
      +
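+        // Build with the `profiling` profile (release + debug info) so the recorded stacks have symbols.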
      +        cargo!(["build", "--profile", "profiling", "-p", PROJECT_NAME]);
      +
      +        let subargs = &self.subargs;
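+        // Record a profile of the CLI run, forwarding any user-supplied arguments.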
      +        cmd!(
      +            shell,
      +            "samply record ./target/profiling/{PROJECT_NAME} {subargs...}"
      +        )
      +        .run()?;
      +
      +        Ok(())
      +    }
      +}
      diff --git a/xtask/src/commands/mod.rs b/xtask/src/commands/mod.rs
      index d8234b919a..64c02325ed 100644
      --- a/xtask/src/commands/mod.rs
      +++ b/xtask/src/commands/mod.rs
      @@ -3,6 +3,7 @@ pub(crate) mod changeset;
       pub(crate) mod compliance;
       pub(crate) mod dev;
       pub(crate) mod dist;
      +pub(crate) mod flame;
       pub(crate) mod licenses;
       pub(crate) mod lint;
       pub(crate) mod package;
      @@ -13,6 +14,7 @@ pub(crate) use all::All;
       pub(crate) use compliance::Compliance;
       pub(crate) use dev::Dev;
       pub(crate) use dist::Dist;
      +pub(crate) use flame::Flame;
       pub(crate) use licenses::Licenses;
       pub(crate) use lint::Lint;
       pub(crate) use package::Package;
      diff --git a/xtask/src/main.rs b/xtask/src/main.rs
      index 38036e66df..e52e383d3e 100644
      --- a/xtask/src/main.rs
      +++ b/xtask/src/main.rs
      @@ -37,6 +37,10 @@ pub enum Command {
           /// Locally run all the checks required before a PR is merged.
           Dev(commands::Dev),
       
      +    /// Run the apollo-federation CLI and generate a flame graph.
      +    #[command(name = "fed-flame")]
      +    Flame(commands::Flame),
      +
           /// Run linters for Router.
           Lint(commands::Lint),
       
      @@ -62,6 +66,7 @@ impl Xtask {
                   Command::CheckCompliance(command) => command.run(),
                   Command::Dist(command) => command.run(),
                   Command::Dev(command) => command.run(),
      +            Command::Flame(command) => command.run(),
                   Command::Lint(command) => command.run(),
                   Command::Licenses(command) => command.run(),
                   Command::Test(command) => command.run(),