diff --git a/.circleci/config.yml b/.circleci/config.yml index 67f8183606..dddd8828b2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -734,7 +734,9 @@ jobs: value: << pipeline.project.git_url >> steps: - setup_remote_docker: - version: 20.10.11 + # CircleCI Image Policy + # https://circleci.com/docs/remote-docker-images-support-policy/ + version: 24.0.9 docker_layer_caching: true - run: name: Docker build @@ -872,7 +874,9 @@ jobs: steps: - checkout - setup_remote_docker: - version: 20.10.11 + # CircleCI Image Policy + # https://circleci.com/docs/remote-docker-images-support-policy/ + version: 20.10.24 docker_layer_caching: true - attach_workspace: at: artifacts diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index d7c12aae40..6d9f87ef81 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,4 +1,5 @@ /docs/ @apollographql/docs +/.changesets/ @apollographql/docs /apollo-federation/ @dariuszkuc @sachindshinde @goto-bus-stop @SimonSapin @lrlna @TylerBloom @duckki /apollo-federation/src/sources/connect/json_selection @benjamn /apollo-router/ @apollographql/polaris @apollographql/atlas diff --git a/CHANGELOG.md b/CHANGELOG.md index 8fc01da455..987aa1a79c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,426 @@ All notable changes to Router will be documented in this file. This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html). +# [1.53.0] - 2024-08-28 + +> [!IMPORTANT] +> If you have enabled [Distributed query plan caching](https://www.apollographql.com/docs/router/configuration/distributed-caching/#distributed-query-plan-caching), this release changes the hashing algorithm used for the cache keys. On account of this, you should anticipate additional cache regeneration cost when updating between these versions while the new hashing algorithm comes into service. + +## 🚀 Features + +### Support demand control directives ([PR #5777](https://github.com/apollographql/router/pull/5777)) + +> ⚠️ This is a [GraphOS Router feature](https://www.apollographql.com/graphos-router). + +The router supports two new demand control directives, `@cost` and `@listSize`, that you can use to provide more accurate estimates of GraphQL operation costs to the router's demand control plugin. + +Use the `@cost` directive to customize the weights of operation cost calculations, particularly for expensive resolvers. + +```graphql +type Product { + id: ID! + name: String + expensiveField: Int @cost(weight: 20) +} +``` + +Use the `@listSize` directive to provide a more accurate estimate for the size of a specific list field, particularly for those that differ greatly from the global list size estimate. + +```graphql +type Magazine { + # This is assumed to always return 5 items + headlines: [Article] @listSize(assumedSize: 5) + + # This is estimated to return as many items as are requested by the parameter named "first" + getPage(first: Int!, after: ID!): [Article] + @listSize(slicingArguments: ["first"]) +} +``` + +To learn more, go to [Demand Control](https://www.apollographql.com/docs/router/executing-operations/demand-control/) docs. + +By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5777 + +### General Availability (GA) of Demand Control ([PR #5868](https://github.com/apollographql/router/pull/5868)) + +Demand control in the router is now a generally available (GA) feature. 
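+
+As a quick orientation, a minimal GA configuration might look like the following (a sketch only; the option values are illustrative, see the demand control docs for the full set):
+
+```yaml
+demand_control:
+  enabled: true
+  # `measure` only records cost; switch to `enforce` to reject over-budget operations.
+  mode: measure
+  strategy:
+    static_estimated:
+      list_size: 10
+      max: 1000
+```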
+
+**GA compatibility update**: if you used demand control during its preview, you must update your configuration from `preview_demand_control` to `demand_control` to use it in GA.
+
+To learn more, go to [Demand Control](https://www.apollographql.com/docs/router/executing-operations/demand-control/) docs.
+
+By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5868
+
+### Enable native query planner to run in the background ([PR #5790](https://github.com/apollographql/router/pull/5790), [PR #5811](https://github.com/apollographql/router/pull/5811), [PR #5771](https://github.com/apollographql/router/pull/5771), [PR #5860](https://github.com/apollographql/router/pull/5860))
+
+The router now schedules background jobs that run the native (Rust) query planner and compare its results to the legacy implementation. These comparisons help ascertain the native planner's correctness before a decision is made to switch over from the legacy query planner.
+
+To learn more, go to [Experimental Query Planner Mode](https://www.apollographql.com/docs/router/configuration/configuration/experimental_query_planner_mode) docs.
+
+The router continues to use the legacy query planner to plan and execute operations, so there is no effect on the hot path.
+
+To disable running background comparisons with the native query planner, you can configure the router to enable only the `legacy` query planner:
+
+```yaml
+experimental_query_planner_mode: legacy
+```
+
+By [@SimonSapin](https://github.com/SimonSapin) in [PR #5790](https://github.com/apollographql/router/pull/5790), [PR #5811](https://github.com/apollographql/router/pull/5811), [PR #5771](https://github.com/apollographql/router/pull/5771), and [PR #5860](https://github.com/apollographql/router/pull/5860)
+
+### Add warnings for invalid configuration of custom telemetry ([PR #5759](https://github.com/apollographql/router/pull/5759))
+
+The router now logs warnings when running with telemetry that may have invalid custom configurations.
+
+For example, you may customize telemetry using invalid conditions or inaccessible statuses:
+
+```yaml
+telemetry:
+  instrumentation:
+    events:
+      subgraph:
+        my.event:
+          message: "Auditing Router Event"
+          level: info
+          on: request
+          attributes:
+            subgraph.response.status: code
+          # Warning: use the selector `subgraph_name: true` instead of comparing the strings `subgraph_name` and `product`
+          condition:
+            eq:
+              - subgraph_name
+              - product
+```
+
+Although the configuration is syntactically correct, its customization is invalid, and the router now outputs warnings for such invalid configurations.
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5759
+
+### Add V8 heap usage metrics ([PR #5781](https://github.com/apollographql/router/pull/5781))
+
+The router supports new gauge metrics for tracking heap memory usage of the V8 JavaScript engine:
+- `apollo.router.v8.heap.used`: heap memory used by V8, in bytes
+- `apollo.router.v8.heap.total`: total heap allocated by V8, in bytes
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5781
+
+### Update Federation to v2.9.0 ([PR #5902](https://github.com/apollographql/router/pull/5902))
+
+This updates the router to Federation v2.9.0.
+
+By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5902
+
+### Helm: Support `maxSurge` and `maxUnavailable` for rolling updates ([Issue #5664](https://github.com/apollographql/router/issues/5664))
+
+The router Helm chart now supports the configuration of `maxSurge` and `maxUnavailable` for the `RollingUpdate` deployment strategy.
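+
+A minimal `values.yaml` sketch (the `rollingUpdate` key name is assumed here; verify it against your chart version's values schema):
+
+```yaml
+rollingUpdate:
+  maxSurge: 25%
+  maxUnavailable: 1
+```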
+
+By [Jon Christiansen](https://github.com/theJC) in https://github.com/apollographql/router/pull/5665
+
+### Support new telemetry trace ID format ([PR #5735](https://github.com/apollographql/router/pull/5735))
+
+The router supports a new UUID format for telemetry trace IDs.
+
+The following formats are supported in router configuration for trace IDs:
+
+* `open_telemetry`
+* `hexadecimal` (same as `open_telemetry`)
+* `decimal`
+* `datadog`
+* `uuid` (may contain dashes)
+
+You can configure router logging to display the formatted trace ID with `display_trace_id`:
+
+```yaml
+telemetry:
+  exporters:
+    logging:
+      stdout:
+        format:
+          json:
+            display_trace_id: (true|false|open_telemetry|hexadecimal|decimal|datadog|uuid)
+```
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5735
+
+### Add `format` for trace ID propagation ([PR #5803](https://github.com/apollographql/router/pull/5803))
+
+The router now supports specifying the format of trace IDs that are propagated to subgraphs via headers.
+
+You can configure the format with the `format` option:
+
+```yaml
+telemetry:
+  exporters:
+    tracing:
+      propagation:
+        request:
+          header_name: "my_header"
+          # Must be in UUID form, with or without dashes
+          format: uuid
+```
+
+Note that incoming trace IDs must be in some form of UUID, either with or without dashes.
+
+To learn about supported formats, go to the [`request` configuration reference](https://apollographql.com/docs/router/configuration/telemetry/exporters/tracing/overview#request-configuration-reference) docs.
+
+By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5803
+
+### New `apollo.router.cache.storage.estimated_size` gauge ([PR #5770](https://github.com/apollographql/router/pull/5770))
+
+The router supports the new metric `apollo.router.cache.storage.estimated_size` that helps users understand and monitor the amount of memory that query planner cache entries consume.
+
+The `apollo.router.cache.storage.estimated_size` metric gives an estimated size in bytes of a cache entry. It has the following attributes:
+- `kind`: `query planner`.
+- `storage`: `memory`.
+
+Before using the estimate to decide whether to update the cache, users should validate that the estimate correlates with their pod's memory usage.
+
+To learn how to troubleshoot with this metric, see the [Pods terminating due to memory pressure](https://www.apollographql.com/docs/router/containerization/kubernetes#pods-terminating-due-to-memory-pressure) guide in docs.
+
+By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5770
+
+## 🐛 Fixes
+
+### Fix GraphQL query directives validation bug ([PR #5753](https://github.com/apollographql/router/pull/5753))
+
+The router now supports GraphQL queries where a variable is used in a directive on the same operation where the variable is declared.
+
+For example, the following query both declares and uses `$var`:
+
+```graphql
+query GetSomething($var: Int!) @someDirective(argument: $var) {
+  something
+}
+```
+
+By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/5753
+
+### Evaluate selectors in response stage when possible ([PR #5725](https://github.com/apollographql/router/pull/5725))
+
+The router now supports having various supergraph selectors on response events.
+
+Because `events` are triggered at a specific stage (`request`|`response`|`error`), you usually have only one condition for a related event. You can, however, have selectors that apply at several stages, like `subgraph_name` to get the subgraph name.
+
+The following example logs an event with the raw subgraph response only for a subgraph named `products`; this previously didn't work:
+
+```yaml
+telemetry:
+  instrumentation:
+    events:
+      subgraph:
+        response:
+          level: info
+          condition:
+            eq:
+              - subgraph_name: true
+              - "products"
+```
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5725
+
+### Fix trace propagation via header ([PR #5802](https://github.com/apollographql/router/pull/5802))
+
+The router now correctly propagates trace IDs when using the `propagation.request.header_name` configuration option.
+
+```yaml
+telemetry:
+  exporters:
+    tracing:
+      propagation:
+        request:
+          header_name: "id_from_header"
+```
+
+Previously, trace IDs weren't transferred to the root span of the request, causing spans to be incorrectly attributed to new traces.
+
+By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5802
+
+### Add argument cost to type cost in demand control scoring algorithm ([PR #5740](https://github.com/apollographql/router/pull/5740))
+
+The router's operation scoring algorithm for demand control now includes field arguments in the type cost.
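+
+As a hypothetical illustration (this schema is not from the router docs), the argument on a field like the one below now contributes to the operation's scored type cost, where previously only the field's return type was counted:
+
+```graphql
+type Query {
+  # `filter` is an input value; its cost is now added to this field's score.
+  search(filter: String!): [Result]
+}
+```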
+
+By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5740
+
+### Support `gt`/`lt` conditions for parsing string selectors to numbers ([PR #5758](https://github.com/apollographql/router/pull/5758))
+
+The router now supports greater than (`gt`) and less than (`lt`) conditions for header selectors.
+
+The following example sets an attribute on a span if the `content-length` header is greater than 100:
+
+```yaml
+telemetry:
+  instrumentation:
+    spans:
+      mode: spec_compliant
+      router:
+        attributes:
+          trace_id: true
+          payload_is_too_big: # Set this attribute to true if the value of the content-length header is > 100
+            static: true
+            condition:
+              gt:
+                - request_header: "content-length"
+                - 100
+```
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5758
+
+### Set subgraph error path if not present ([PR #5773](https://github.com/apollographql/router/pull/5773))
+
+The router now sets the error path in all cases during subgraph response conversion. Previously the router's subgraph service didn't set the error path for some network-level errors.
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5773
+
+### Fix cost result filtering for custom metrics ([PR #5838](https://github.com/apollographql/router/pull/5838))
+
+The router can now filter for custom metrics that use demand control cost information in their conditions. This allows a telemetry config such as the following:
+
+```yaml
+telemetry:
+  instrumentation:
+    instruments:
+      supergraph:
+        cost.rejected.operations:
+          type: histogram
+          value:
+            cost: estimated
+          description: "Estimated cost per rejected operation."
+          unit: delta
+          condition:
+            eq:
+              - cost: result
+              - "COST_ESTIMATED_TOO_EXPENSIVE"
+```
+
+This also fixes an issue where attribute comparisons would fail silently when comparing integers to float values. Users can now write integer values in conditions that compare against selectors that select floats:
+
+```yaml
+telemetry:
+  instrumentation:
+    instruments:
+      supergraph:
+        cost.rejected.operations:
+          type: histogram
+          value:
+            cost: actual
+          description: "Actual cost per rejected operation."
+          unit: delta
+          condition:
+            gt:
+              - cost: delta
+              - 1
+```
+
+By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5838
+
+### Fix missing `apollo_router_cache_size` metric ([PR #5770](https://github.com/apollographql/router/pull/5770))
+
+Previously, if the in-memory cache wasn't mutated, the `apollo_router_cache_size` metric wouldn't be available. This has been fixed in this release.
+
+By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5770
+
+### Interrupted subgraph connections trigger error responses and subgraph service hook points ([PR #5859](https://github.com/apollographql/router/pull/5859))
+
+The router now returns a proper subgraph response, with an error if necessary, when a subgraph connection is closed or returns an error.
+
+Previously, this issue prevented the subgraph response service from being triggered in coprocessors or Rhai scripts.
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5859
+
+### Fix `exists` condition for custom telemetry events ([Issue #5702](https://github.com/apollographql/router/issues/5702))
+
+The router now properly handles the `exists` condition for events. The following configuration now works as intended:
+
+```yaml
+telemetry:
+  instrumentation:
+    events:
+      supergraph:
+        my.event:
+          message: "Auditing Router Event"
+          level: info
+          on: request
+          attributes:
+            graphql.operation.name: true
+          condition:
+            exists:
+              operation_name: string
+```
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5759
+
+### Fix Datadog underreporting APM metrics ([PR #5780](https://github.com/apollographql/router/pull/5780))
+
+The previous [PR #5703](https://github.com/apollographql/router/pull/5703) has been reverted in this release because it caused Datadog to underreport APM span metrics.
+
+By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/5780
+
+### Fix inconsistent `type` attribute in `apollo.router.uplink.fetch.duration` metric ([PR #5816](https://github.com/apollographql/router/pull/5816))
+
+The router now always reports a short name in the `type` attribute for the `apollo.router.uplink.fetch.duration` metric, instead of sometimes using a fully-qualified Rust path and sometimes using a short name.
+
+By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/5816
+
+### Enable progressive override with Federation 2.7 and above ([PR #5754](https://github.com/apollographql/router/pull/5754))
+
+The progressive override feature is now available when using Federation v2.7 and above.
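+
+As a refresher, progressive override is driven by the `label` argument of `@override` in a subgraph schema; a sketch (the subgraph name and percentage are illustrative):
+
+```graphql
+type Product @key(fields: "id") {
+  id: ID!
+  # Resolve this field here for 1% of requests, overriding the "inventory" subgraph.
+  inStock: Boolean @override(from: "inventory", label: "percent(1)")
+}
+```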
+
+By [@o0ignition0o](https://github.com/o0ignition0o) in https://github.com/apollographql/router/pull/5754
+
+### Support supergraph query selector for events ([PR #5764](https://github.com/apollographql/router/pull/5764))
+
+The router now supports the `query: root_fields` selector for `event_response`. Previously the selector worked for `response` stage events but didn't work for `event_response`.
+
+The following configuration for a `query: root_fields` on an `event_response` now works:
+
+```yaml
+telemetry:
+  instrumentation:
+    events:
+      supergraph:
+        OPERATION_LIMIT_INFO:
+          message: operation limit info
+          on: event_response
+          level: info
+          attributes:
+            graphql.operation.name: true
+            query.root_fields:
+              query: root_fields
+```
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5764
+
+### Fix session counting and the reporting of file handle shortage ([PR #5834](https://github.com/apollographql/router/pull/5834))
+
+The router previously gave incorrect warnings about file handle shortages because session counting incorrectly included health-check connections and other non-GraphQL connections. This is now corrected so that only connections to the main GraphQL port are counted, and file handle shortages are now handled correctly as a global resource.
+
+Also, the router's port listening logic had its own custom rate-limiting of log notifications. This has been removed and replaced by the [standard router log rate limiting configuration](https://www.apollographql.com/docs/router/configuration/telemetry/exporters/logging/stdout/#rate_limit).
+
+By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/5834
+
+## 📃 Configuration
+
+### Increase default Redis timeout ([PR #5795](https://github.com/apollographql/router/pull/5795))
+
+The default Redis command timeout was increased from 2ms to 500ms to accommodate common production use cases.
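+
+If 500ms is not appropriate for your deployment, the timeout can still be set explicitly. A sketch for the distributed query plan cache (the URL is illustrative):
+
+```yaml
+supergraph:
+  query_planning:
+    cache:
+      redis:
+        urls: ["redis://localhost:6379"]
+        timeout: 100ms
+```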
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5795
+
+## 🛠 Maintenance
+
+### Improve performance by optimizing telemetry meter and instrument creation ([PR #5629](https://github.com/apollographql/router/pull/5629))
+
+The router's performance has been improved by moving telemetry meter and instrument creation out of the critical path: instruments are now created once when the telemetry plugin starts, rather than in every service.
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/5629
+
+## 📚 Documentation
+
+### Add sections on using `@cost` and `@listSize` to demand control docs ([PR #5839](https://github.com/apollographql/router/pull/5839))
+
+Updates the demand control documentation to include details on `@cost` and `@listSize` for more accurate cost estimation.
+ +By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/5839 + # [1.52.1] - 2024-08-27 > [!IMPORTANT] diff --git a/Cargo.lock b/Cargo.lock index c74849f9a4..8a99dd3deb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -24,242 +24,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "actix" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de7fa236829ba0841304542f7614c42b80fca007455315c45c785ccfa873a85b" -dependencies = [ - "actix-macros", - "actix-rt", - "actix_derive", - "bitflags 2.6.0", - "bytes", - "crossbeam-channel", - "futures-core", - "futures-sink", - "futures-task", - "futures-util", - "log", - "once_cell", - "parking_lot", - "pin-project-lite", - "smallvec", - "tokio", - "tokio-util", -] - -[[package]] -name = "actix-codec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" -dependencies = [ - "bitflags 2.6.0", - "bytes", - "futures-core", - "futures-sink", - "memchr", - "pin-project-lite", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "actix-http" -version = "3.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ae682f693a9cd7b058f2b0b5d9a6d7728a8555779bedbbc35dd88528611d020" -dependencies = [ - "actix-codec", - "actix-rt", - "actix-service", - "actix-utils", - "ahash", - "base64 0.22.1", - "bitflags 2.6.0", - "brotli 6.0.0", - "bytes", - "bytestring", - "derive_more", - "encoding_rs", - "flate2", - "futures-core", - "h2", - "http 0.2.12", - "httparse", - "httpdate", - "itoa", - "language-tags", - "local-channel", - "mime", - "percent-encoding", - "pin-project-lite", - "rand 0.8.5", - "sha1", - "smallvec", - "tokio", - "tokio-util", - "tracing", - "zstd", -] - -[[package]] -name = "actix-macros" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" -dependencies = [ - "quote", - "syn 2.0.71", -] - -[[package]] -name = "actix-router" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d324164c51f63867b57e73ba5936ea151b8a41a1d23d1031eeb9f70d0236f8" -dependencies = [ - "bytestring", - "cfg-if 1.0.0", - "http 0.2.12", - "regex", - "regex-lite", - "serde", - "tracing", -] - -[[package]] -name = "actix-rt" -version = "2.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eda4e2a6e042aa4e55ac438a2ae052d3b5da0ecf83d7411e1a368946925208" -dependencies = [ - "futures-core", - "tokio", -] - -[[package]] -name = "actix-server" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b02303ce8d4e8be5b855af6cf3c3a08f3eff26880faad82bab679c22d3650cb5" -dependencies = [ - "actix-rt", - "actix-service", - "actix-utils", - "futures-core", - "futures-util", - "mio", - "socket2 0.5.7", - "tokio", - "tracing", -] - -[[package]] -name = "actix-service" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b894941f818cfdc7ccc4b9e60fa7e53b5042a2e8567270f9147d5591893373a" -dependencies = [ - "futures-core", - "paste", - "pin-project-lite", -] - -[[package]] -name = "actix-utils" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88a1dcdff1466e3c2488e1cb5c36a71822750ad43839937f85d2f4d9f8b705d8" -dependencies = [ - "local-waker", - 
"pin-project-lite", -] - -[[package]] -name = "actix-web" -version = "4.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1988c02af8d2b718c05bc4aeb6a66395b7cdf32858c2c71131e5637a8c05a9ff" -dependencies = [ - "actix-codec", - "actix-http", - "actix-macros", - "actix-router", - "actix-rt", - "actix-server", - "actix-service", - "actix-utils", - "actix-web-codegen", - "ahash", - "bytes", - "bytestring", - "cfg-if 1.0.0", - "cookie 0.16.2", - "derive_more", - "encoding_rs", - "futures-core", - "futures-util", - "itoa", - "language-tags", - "log", - "mime", - "once_cell", - "pin-project-lite", - "regex", - "regex-lite", - "serde", - "serde_json", - "serde_urlencoded", - "smallvec", - "socket2 0.5.7", - "time", - "url", -] - -[[package]] -name = "actix-web-actors" -version = "4.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420b001bb709d8510c3e2659dae046e54509ff9528018d09c78381e765a1f9fa" -dependencies = [ - "actix", - "actix-codec", - "actix-http", - "actix-web", - "bytes", - "bytestring", - "futures-core", - "pin-project-lite", - "tokio", - "tokio-util", -] - -[[package]] -name = "actix-web-codegen" -version = "4.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f591380e2e68490b5dfaf1dd1aa0ebe78d84ba7067078512b4ea6e4492d622b8" -dependencies = [ - "actix-router", - "proc-macro2", - "quote", - "syn 2.0.71", -] - -[[package]] -name = "actix_derive" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c7db3d5a9718568e4cf4a537cfd7070e6e6ff7481510d0237fb529ac850f6d3" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.71", -] - [[package]] name = "add-timestamp-header" version = "0.1.0" @@ -395,9 +159,9 @@ checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" [[package]] name = "apollo-compiler" -version = "1.0.0-beta.19" +version = "1.0.0-beta.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b21b81064ebf506f5a4073f5ef7a3a9cfdba29904814fa3f42612b9055b37f2" +checksum = "07961541ebb5c85cc02ea0f08357e31b30537674bbca818884f1fc658fa99116" dependencies = [ "ahash", "apollo-parser", @@ -412,19 +176,9 @@ dependencies = [ "uuid", ] -[[package]] -name = "apollo-encoder" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee9f27b20841d14923dd5f0714a79f86360b23492d2f98ab5d1651471a56b7a4" -dependencies = [ - "apollo-parser", - "thiserror", -] - [[package]] name = "apollo-federation" -version = "1.52.1" +version = "1.53.0" dependencies = [ "apollo-compiler", "derive_more", @@ -436,6 +190,7 @@ dependencies = [ "multimap 0.10.0", "nom", "petgraph", + "ron", "serde", "serde_json", "serde_json_bytes", @@ -456,13 +211,16 @@ dependencies = [ "apollo-compiler", "apollo-federation", "clap", + "insta", + "serde", + "serde_json", ] [[package]] name = "apollo-parser" -version = "0.7.7" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb7c8a9776825e5524b5ab3a7f478bf091a054180f244dff85814452cb87d90" +checksum = "f17a43dc64e71ca7140e646b99bf86ae721ebb801d2aec44e29a654c4d035ab8" dependencies = [ "memchr", "rowan", @@ -471,7 +229,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.52.1" +version = "1.53.0" dependencies = [ "access-json", "ahash", @@ -500,7 +258,7 @@ dependencies = [ "clap", "console", "console-subscriber", - "cookie 0.18.1", + "cookie", "crossbeam-channel", "dashmap", "derivative", @@ 
-640,11 +398,11 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.52.1" +version = "1.53.0" dependencies = [ "apollo-parser", "apollo-router", - "apollo-smith 0.5.0", + "apollo-smith", "arbitrary", "criterion", "memory-stats", @@ -656,7 +414,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.52.1" +version = "1.53.0" dependencies = [ "anyhow", "cargo-scaffold", @@ -688,22 +446,9 @@ dependencies = [ [[package]] name = "apollo-smith" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "441a51f1055d2eebcda41b55066de925502e11c97097c6d1bab0da5bdeb5c70f" -dependencies = [ - "apollo-encoder", - "apollo-parser", - "arbitrary", - "once_cell", - "thiserror", -] - -[[package]] -name = "apollo-smith" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae8c0ec27715028b24a0a98ac53e88ac4a980e6d519cdb37265d2f2c76c864a" +checksum = "84ef0a8fba05f32a14d03eb3ff74f556cecca820012d5846770b839c75332b38" dependencies = [ "apollo-compiler", "apollo-parser", @@ -888,9 +633,9 @@ dependencies = [ [[package]] name = "async-graphql" -version = "5.0.10" +version = "6.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35ef8f9be23ee30fe1eb1cf175c689bc33517c6c6d0fd0669dade611e5ced7f" +checksum = "298a5d587d6e6fdb271bf56af2dc325a80eb291fd0fc979146584b9a05494a8c" dependencies = [ "async-graphql-derive", "async-graphql-parser", @@ -904,7 +649,7 @@ dependencies = [ "futures-util", "handlebars 4.5.0", "http 0.2.12", - "indexmap 1.9.3", + "indexmap 2.2.6", "mime", "multer", "num-traits", @@ -920,28 +665,28 @@ dependencies = [ ] [[package]] -name = "async-graphql-actix-web" -version = "5.0.10" +name = "async-graphql-axum" +version = "6.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75e3d335639e722213bdd120f77a66f531bde8bbcff1b19ab8e542f82aed7f48" +checksum = "01a1c20a2059bffbc95130715b23435a05168c518fba9709c81fa2a38eed990c" dependencies = [ - "actix", - "actix-http", - "actix-web", - "actix-web-actors", - "async-channel 1.9.0", "async-graphql", - "futures-channel", + "async-trait", + "axum", + "bytes", "futures-util", "serde_json", - "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "tower-service", ] [[package]] name = "async-graphql-derive" -version = "5.0.10" +version = "6.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a0f6ceed3640b4825424da70a5107e79d48d9b2bc6318dfc666b2fc4777f8c4" +checksum = "c7f329c7eb9b646a72f70c9c4b516c70867d356ec46cb00dcac8ad343fd006b0" dependencies = [ "Inflector", "async-graphql-parser", @@ -949,15 +694,16 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "strum 0.25.0", + "syn 2.0.71", "thiserror", ] [[package]] name = "async-graphql-parser" -version = "5.0.10" +version = "6.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc308cd3bc611ee86c9cf19182d2b5ee583da40761970e41207f088be3db18f" +checksum = "6139181845757fd6a73fbb8839f3d036d7150b798db0e9bb3c6e83cdd65bd53b" dependencies = [ "async-graphql-value", "pest", @@ -967,12 +713,12 @@ dependencies = [ [[package]] name = "async-graphql-value" -version = "5.0.10" +version = "6.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d461325bfb04058070712296601dfe5e5bd6cdff84780a0a8c569ffb15c87eb3" +checksum = "323a5143f5bdd2030f45e3f2e0c821c9b1d36e79cf382129c64299c50a7f3750" 
dependencies = [ "bytes", - "indexmap 1.9.3", + "indexmap 2.2.6", "serde", "serde_json", ] @@ -1143,17 +889,6 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - [[package]] name = "auth-git2" version = "0.5.4" @@ -1616,6 +1351,9 @@ name = "bitflags" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +dependencies = [ + "serde", +] [[package]] name = "block-buffer" @@ -1763,15 +1501,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bytestring" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d80203ea6b29df88012294f62733de21cfeab47f17b41af3a38bc30a03ee72" -dependencies = [ - "bytes", -] - [[package]] name = "cache-control" version = "0.1.0" @@ -1922,7 +1651,7 @@ dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.1", + "strsim", ] [[package]] @@ -2094,17 +1823,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" -[[package]] -name = "cookie" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e859cd57d0710d9e06c381b550c06e76992472a8c6d527aecd2fc673dcc231fb" -dependencies = [ - "percent-encoding", - "time", - "version_check", -] - [[package]] name = "cookie" version = "0.18.1" @@ -2339,9 +2057,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.14.4" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ "darling_core", "darling_macro", @@ -2349,27 +2067,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.14.4" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn 1.0.109", + "strsim", + "syn 2.0.71", ] [[package]] name = "darling_macro" -version = "0.14.4" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 1.0.109", + "syn 2.0.71", ] [[package]] @@ -2805,19 +2523,6 @@ dependencies = [ "syn 2.0.71", ] -[[package]] -name = "env_logger" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - [[package]] name = "env_logger" version = "0.10.2" @@ -2923,10 +2628,10 @@ dependencies = [ name = "everything-subgraph" version = "0.1.0" dependencies = [ - "actix-web", 
"async-graphql", - "async-graphql-actix-web", - "env_logger 0.9.3", + "async-graphql-axum", + "axum", + "env_logger", "futures", "lazy_static", "log", @@ -2934,6 +2639,7 @@ dependencies = [ "rand 0.8.5", "serde_json", "tokio", + "tower", ] [[package]] @@ -3615,15 +3321,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - [[package]] name = "hermit-abi" version = "0.3.9" @@ -3909,7 +3606,6 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", - "serde", ] [[package]] @@ -4216,12 +3912,6 @@ dependencies = [ "log", ] -[[package]] -name = "language-tags" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4345964bb142484797b161f473a503a434de77149dd8c7427788c6e13379388" - [[package]] name = "lazy-regex" version = "2.5.0" @@ -4387,23 +4077,6 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" -[[package]] -name = "local-channel" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6cbc85e69b8df4b8bb8b89ec634e7189099cea8927a276b7384ce5488e53ec8" -dependencies = [ - "futures-core", - "futures-sink", - "local-waker", -] - -[[package]] -name = "local-waker" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d873d7c67ce09b42110d801813efbc9364414e356be9935700d368351657487" - [[package]] name = "lock_api" version = "0.4.12" @@ -6094,11 +5767,23 @@ dependencies = [ "paste", ] +[[package]] +name = "ron" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" +dependencies = [ + "base64 0.21.7", + "bitflags 2.6.0", + "serde", + "serde_derive", +] + [[package]] name = "router-bridge" -version = "0.5.31+v2.8.5" +version = "0.6.0+v2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "672901b1ec6fd110ac41d61ca5e1754319d0edf39546a089a114ab865d42ae97" +checksum = "96ef4910ade6753863c8437a76e88e236ab91688dcfe739d73417ae7848f3b92" dependencies = [ "anyhow", "async-channel 1.9.0", @@ -6126,9 +5811,9 @@ dependencies = [ "apollo-compiler", "apollo-parser", "apollo-router", - "apollo-smith 0.9.0", + "apollo-smith", "async-trait", - "env_logger 0.10.2", + "env_logger", "http 0.2.12", "libfuzzer-sys", "log", @@ -6841,12 +6526,6 @@ dependencies = [ "regex", ] -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - [[package]] name = "strsim" version = "0.11.1" @@ -7339,6 +7018,7 @@ checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", + "futures-io", "futures-sink", "pin-project-lite", "slab", diff --git a/Cargo.toml b/Cargo.toml index 760349ac52..2d5abeb92d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,9 +49,9 @@ debug = 1 # Dependencies used in more than one place are specified here in order to keep versions in sync: # https://doc.rust-lang.org/cargo/reference/workspaces.html#the-dependencies-table [workspace.dependencies] -apollo-compiler = 
"=1.0.0-beta.19" -apollo-parser = "0.7.6" -apollo-smith = { version = "0.5.0", features = ["parser-impl"] } +apollo-compiler = "=1.0.0-beta.20" +apollo-parser = "0.8.0" +apollo-smith = "0.10.0" async-trait = "0.1.77" hex = { version = "0.4.3", features = ["serde"] } http = "0.2.11" @@ -75,4 +75,4 @@ serde_json_bytes = { version = "0.2.4", features = ["preserve_order"] } sha1 = "0.10.6" tempfile = "3.10.1" tokio = { version = "1.36.0", features = ["full"] } -tower = { version = "0.4.13", features = ["full"] } +tower = { version = "0.4.13", features = ["full"] } \ No newline at end of file diff --git a/apollo-federation/Cargo.toml b/apollo-federation/Cargo.toml index bb3bb7a84d..220c25b371 100644 --- a/apollo-federation/Cargo.toml +++ b/apollo-federation/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-federation" -version = "1.52.1" +version = "1.53.0" authors = ["The Apollo GraphQL Contributors"] edition = "2021" description = "Apollo Federation" @@ -14,7 +14,7 @@ autotests = false # Integration tests are m # logging statements that capture serialized versions of key data structures. # This logging is gated behind a feature to avoid any unnecessary (even if # small) runtime costs where this data will not be desired. -snapshot_tracing = [] +snapshot_tracing = ["ron"] [dependencies] apollo-compiler.workspace = true @@ -36,6 +36,7 @@ strum_macros = "0.26.0" thiserror = "1.0" url = "2" tracing = "0.1.40" +ron = { version = "0.8.1", optional = true } [dev-dependencies] hex.workspace = true diff --git a/apollo-federation/cli/Cargo.toml b/apollo-federation/cli/Cargo.toml index b64cc7a03c..6512e73ac8 100644 --- a/apollo-federation/cli/Cargo.toml +++ b/apollo-federation/cli/Cargo.toml @@ -7,3 +7,10 @@ edition = "2021" apollo-compiler.workspace = true apollo-federation = { path = ".." } clap = { version = "4.5.1", features = ["derive"] } + +[dev-dependencies] +insta = { version = "1.38.0", features = ["json", "redactions"] } +serde = { version = "1.0.197", features = ["derive"] } +serde_json = { version = "1.0.114", features = [ + "preserve_order", +] } \ No newline at end of file diff --git a/apollo-federation/cli/fixtures/queries/topproducts.graphql b/apollo-federation/cli/fixtures/queries/topproducts.graphql new file mode 100644 index 0000000000..6d6676a990 --- /dev/null +++ b/apollo-federation/cli/fixtures/queries/topproducts.graphql @@ -0,0 +1,11 @@ +query TopProducts($first: Int) { + topProducts(first: $first) { + upc + name + reviews { + id + product { name } + author { id name } + } + } +} \ No newline at end of file diff --git a/apollo-federation/cli/fixtures/queries/topproducts2.graphql b/apollo-federation/cli/fixtures/queries/topproducts2.graphql new file mode 100644 index 0000000000..92df02561d --- /dev/null +++ b/apollo-federation/cli/fixtures/queries/topproducts2.graphql @@ -0,0 +1,6 @@ +query TopProduct2($first: Int) { + topProducts(first: $first) { + upc + name + } +} \ No newline at end of file diff --git a/apollo-federation/cli/fixtures/starstuff.graphql b/apollo-federation/cli/fixtures/starstuff.graphql new file mode 100644 index 0000000000..504fbbaafb --- /dev/null +++ b/apollo-federation/cli/fixtures/starstuff.graphql @@ -0,0 +1,98 @@ +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query + mutation: Mutation +} + +directive @join__enumValue(graph: join__Graph!) 
repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +scalar join__FieldSet + +enum join__Graph { + ACCOUNTS @join__graph(name: "accounts", url: "https://accounts.demo.starstuff.dev/") + INVENTORY @join__graph(name: "inventory", url: "https://inventory.demo.starstuff.dev/") + PRODUCTS @join__graph(name: "products", url: "https://products.demo.starstuff.dev/") + REVIEWS @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev/") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Mutation + @join__type(graph: PRODUCTS) + @join__type(graph: REVIEWS) +{ + createProduct(upc: ID!, name: String): Product @join__field(graph: PRODUCTS) + createReview(upc: ID!, id: ID!, body: String): Review @join__field(graph: REVIEWS) +} + +type Product + @join__type(graph: ACCOUNTS, key: "upc", extension: true) + @join__type(graph: INVENTORY, key: "upc") + @join__type(graph: PRODUCTS, key: "upc") + @join__type(graph: REVIEWS, key: "upc") +{ + upc: String! + weight: Int @join__field(graph: INVENTORY, external: true) @join__field(graph: PRODUCTS) + price: Int @join__field(graph: INVENTORY, external: true) @join__field(graph: PRODUCTS) + inStock: Boolean @join__field(graph: INVENTORY) + shippingEstimate: Int @join__field(graph: INVENTORY, requires: "price weight") + name: String @join__field(graph: PRODUCTS) + reviews: [Review] @join__field(graph: REVIEWS) + reviewsForAuthor(authorID: ID!): [Review] @join__field(graph: REVIEWS) +} + +type Query + @join__type(graph: ACCOUNTS) + @join__type(graph: INVENTORY) + @join__type(graph: PRODUCTS) + @join__type(graph: REVIEWS) +{ + me: User @join__field(graph: ACCOUNTS) + recommendedProducts: [Product] @join__field(graph: ACCOUNTS) + topProducts(first: Int = 5): [Product] @join__field(graph: PRODUCTS) +} + +type Review + @join__type(graph: REVIEWS, key: "id") +{ + id: ID! + body: String + author: User @join__field(graph: REVIEWS, provides: "username") + product: Product +} + +type User + @join__type(graph: ACCOUNTS, key: "id") + @join__type(graph: REVIEWS, key: "id") +{ + id: ID! 
+ name: String @join__field(graph: ACCOUNTS) + username: String @join__field(graph: ACCOUNTS) @join__field(graph: REVIEWS, external: true) + reviews: [Review] @join__field(graph: REVIEWS) +} diff --git a/apollo-federation/cli/src/bench.rs b/apollo-federation/cli/src/bench.rs new file mode 100644 index 0000000000..c672137982 --- /dev/null +++ b/apollo-federation/cli/src/bench.rs @@ -0,0 +1,118 @@ +use std::fmt::Display; +use std::path::PathBuf; +use std::time::Instant; + +use apollo_compiler::ExecutableDocument; +use apollo_federation::error::FederationError; +use apollo_federation::query_plan::query_planner::QueryPlanner; +use apollo_federation::query_plan::query_planner::QueryPlannerConfig; +use apollo_federation::Supergraph; + +pub(crate) fn run_bench( + supergraph: Supergraph, + queries_dir: &PathBuf, + config: QueryPlannerConfig, +) -> Result, FederationError> { + let planner = QueryPlanner::new(&supergraph, config.clone()).expect("Invalid planner"); + + let mut entries = std::fs::read_dir(queries_dir) + .unwrap() + .map(|res| res.map(|e| e.path())) + .collect::, std::io::Error>>() + .unwrap(); + + entries.sort(); + + let mut results = Vec::with_capacity(entries.len()); + + for query_path in entries.into_iter() { + let query_string = std::fs::read_to_string(query_path.clone()).unwrap(); + + let file_name = query_path + .file_name() + .to_owned() + .unwrap() + .to_string_lossy() + .to_string(); + + let document = match ExecutableDocument::parse_and_validate( + supergraph.schema.schema(), + query_string, + "query", + ) { + Ok(document) => document, + Err(_) => { + results.push(BenchOutput { + query_name: file_name.split('-').next().unwrap().to_string(), + file_name, + timing: 0.0, + eval_plans: None, + error: Some("error".to_string()), + }); + + continue; + } + }; + let now = Instant::now(); + let plan = planner.build_query_plan(&document, None); + let elapsed = now.elapsed().as_secs_f64() * 1000.0; + let mut eval_plans = None; + let mut error = None; + if let Ok(p) = plan { + eval_plans = Some(p.statistics.evaluated_plan_count.into_inner().to_string()); + } else { + error = Some("error".to_string()); + }; + + results.push(BenchOutput { + query_name: file_name.split('-').next().unwrap().to_string(), + file_name, + timing: elapsed, + eval_plans, + error, + }); + } + + // totally arbitrary + results.sort_by(|a, b| a.partial_cmp(b).unwrap_or(a.query_name.cmp(&b.query_name))); + Ok(results) +} + +#[derive(Debug)] +#[cfg_attr(test, derive(serde::Serialize))] +pub(crate) struct BenchOutput { + file_name: String, + query_name: String, + timing: f64, + eval_plans: Option, + error: Option, +} + +impl PartialEq for BenchOutput { + fn eq(&self, other: &Self) -> bool { + self.timing == other.timing + } +} + +impl PartialOrd for BenchOutput { + fn partial_cmp(&self, other: &Self) -> Option { + match other.timing.partial_cmp(&self.timing) { + Some(core::cmp::Ordering::Equal) => Some(core::cmp::Ordering::Equal), + ord => ord, + } + } +} + +impl Display for BenchOutput { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "| [{}](queries/{}) | {} | {} | {} |", + self.query_name, + self.file_name, + self.timing, + self.eval_plans.clone().unwrap_or(" ".to_string()), + self.error.clone().unwrap_or(" ".to_string()) + ) + } +} diff --git a/apollo-federation/cli/src/main.rs b/apollo-federation/cli/src/main.rs index 415b7efb7b..ab42f16151 100644 --- a/apollo-federation/cli/src/main.rs +++ b/apollo-federation/cli/src/main.rs @@ -1,5 +1,6 @@ use std::fs; use std::io; +use 
std::num::NonZeroU32; use std::path::Path; use std::path::PathBuf; use std::process::ExitCode; @@ -11,8 +12,38 @@ use apollo_federation::query_graph; use apollo_federation::query_plan::query_planner::QueryPlanner; use apollo_federation::query_plan::query_planner::QueryPlannerConfig; use apollo_federation::subgraph; +use bench::BenchOutput; use clap::Parser; +mod bench; +use bench::run_bench; + +#[derive(Parser)] +struct QueryPlannerArgs { + /// Enable @defer support. + #[arg(long, default_value_t = false)] + enable_defer: bool, + /// Reuse fragments to compress subgraph queries. + #[arg(long, default_value_t = false)] + reuse_fragments: bool, + /// Generate fragments to compress subgraph queries. + #[arg(long, default_value_t = false)] + generate_fragments: bool, + /// Run GraphQL validation check on generated subgraph queries. (default: true) + #[arg(long, default_missing_value = "true", require_equals = true, num_args = 0..=1)] + subgraph_validation: Option, + /// Set the `debug.max_evaluated_plans` option. + #[arg(long)] + max_evaluated_plans: Option, + /// Set the `debug.paths_limit` option. + #[arg(long)] + paths_limit: Option, + /// If the supergraph only represents a single subgraph, pass through queries directly without + /// planning. + #[arg(long, default_value_t = false)] + single_subgraph_passthrough: bool, +} + /// CLI arguments. See #[derive(Parser)] struct Args { @@ -26,6 +57,9 @@ enum Command { Api { /// Path(s) to one supergraph schema file, `-` for stdin or multiple subgraph schemas. schemas: Vec, + /// Enable @defer support. + #[arg(long, default_value_t = false)] + enable_defer: bool, }, /// Outputs the query graph from a supergraph schema or subgraph schemas QueryGraph { @@ -42,6 +76,8 @@ enum Command { query: PathBuf, /// Path(s) to one supergraph schema file, `-` for stdin or multiple subgraph schemas. 
schemas: Vec, + #[command(flatten)] + planner: QueryPlannerArgs, }, /// Validate one supergraph schema file or multiple subgraph schemas Validate { @@ -60,21 +96,64 @@ enum Command { /// The output directory for the extracted subgraph schemas destination_dir: Option, }, + Bench { + /// The path to the supergraph schema file + supergraph_schema: PathBuf, + /// The path to the directory that contains all operations to run against + operations_dir: PathBuf, + #[command(flatten)] + planner: QueryPlannerArgs, + }, +} + +impl QueryPlannerArgs { + fn apply(&self, config: &mut QueryPlannerConfig) { + config.incremental_delivery.enable_defer = self.enable_defer; + // --generate-fragments trumps --reuse-fragments + config.reuse_query_fragments = self.reuse_fragments && !self.generate_fragments; + config.generate_query_fragments = self.generate_fragments; + config.subgraph_graphql_validation = self.subgraph_validation.unwrap_or(true); + if let Some(max_evaluated_plans) = self.max_evaluated_plans { + config.debug.max_evaluated_plans = max_evaluated_plans; + } + config.debug.paths_limit = self.paths_limit; + config.debug.bypass_planner_for_single_subgraph = self.single_subgraph_passthrough; + } +} + +impl From for QueryPlannerConfig { + fn from(value: QueryPlannerArgs) -> Self { + let mut config = QueryPlannerConfig::default(); + value.apply(&mut config); + config + } } fn main() -> ExitCode { let args = Args::parse(); let result = match args.command { - Command::Api { schemas } => to_api_schema(&schemas), - Command::QueryGraph { schemas } => dot_query_graph(&schemas), - Command::FederatedGraph { schemas } => dot_federated_graph(&schemas), - Command::Plan { query, schemas } => plan(&query, &schemas), + Command::Api { + schemas, + enable_defer, + } => cmd_api_schema(&schemas, enable_defer), + Command::QueryGraph { schemas } => cmd_query_graph(&schemas), + Command::FederatedGraph { schemas } => cmd_federated_graph(&schemas), + Command::Plan { + query, + schemas, + planner, + } => cmd_plan(&query, &schemas, planner), Command::Validate { schemas } => cmd_validate(&schemas), Command::Compose { schemas } => cmd_compose(&schemas), Command::Extract { supergraph_schema, destination_dir, } => cmd_extract(&supergraph_schema, destination_dir.as_ref()), + Command::Bench { + supergraph_schema, + operations_dir, + planner, + } => cmd_bench(&supergraph_schema, &operations_dir, planner), }; match result { Err(error) => { @@ -94,10 +173,10 @@ fn read_input(input_path: &Path) -> String { } } -fn to_api_schema(file_paths: &[PathBuf]) -> Result<(), FederationError> { +fn cmd_api_schema(file_paths: &[PathBuf], enable_defer: bool) -> Result<(), FederationError> { let supergraph = load_supergraph(file_paths)?; let api_schema = supergraph.to_api_schema(apollo_federation::ApiSchemaOptions { - include_defer: true, + include_defer: enable_defer, include_stream: false, })?; println!("{}", api_schema.schema()); @@ -140,7 +219,7 @@ fn load_supergraph( } } -fn dot_query_graph(file_paths: &[PathBuf]) -> Result<(), FederationError> { +fn cmd_query_graph(file_paths: &[PathBuf]) -> Result<(), FederationError> { let supergraph = load_supergraph(file_paths)?; let name: &str = if file_paths.len() == 1 { file_paths[0].file_stem().unwrap().to_str().unwrap() @@ -153,7 +232,7 @@ fn dot_query_graph(file_paths: &[PathBuf]) -> Result<(), FederationError> { Ok(()) } -fn dot_federated_graph(file_paths: &[PathBuf]) -> Result<(), FederationError> { +fn cmd_federated_graph(file_paths: &[PathBuf]) -> Result<(), FederationError> { let supergraph = 
load_supergraph(file_paths)?; let api_schema = supergraph.to_api_schema(Default::default())?; let query_graph = @@ -162,14 +241,19 @@ fn dot_federated_graph(file_paths: &[PathBuf]) -> Result<(), FederationError> { Ok(()) } -fn plan(query_path: &Path, schema_paths: &[PathBuf]) -> Result<(), FederationError> { +fn cmd_plan( + query_path: &Path, + schema_paths: &[PathBuf], + planner: QueryPlannerArgs, +) -> Result<(), FederationError> { let query = read_input(query_path); let supergraph = load_supergraph(schema_paths)?; - let query_doc = - ExecutableDocument::parse_and_validate(supergraph.schema.schema(), query, query_path)?; - // TODO: add CLI parameters for config as needed - let config = QueryPlannerConfig::default(); + + let config = QueryPlannerConfig::from(planner); let planner = QueryPlanner::new(&supergraph, config)?; + + let query_doc = + ExecutableDocument::parse_and_validate(planner.api_schema().schema(), query, query_path)?; print!("{}", planner.build_query_plan(&query_doc, None)?); Ok(()) } @@ -210,3 +294,38 @@ fn cmd_extract(file_path: &Path, dest: Option<&PathBuf>) -> Result<(), Federatio } Ok(()) } + +fn _cmd_bench( + file_path: &Path, + operations_dir: &PathBuf, + config: QueryPlannerConfig, +) -> Result, FederationError> { + let supergraph = load_supergraph_file(file_path)?; + run_bench(supergraph, operations_dir, config) +} + +fn cmd_bench( + file_path: &Path, + operations_dir: &PathBuf, + planner: QueryPlannerArgs, +) -> Result<(), FederationError> { + let results = _cmd_bench(file_path, operations_dir, planner.into())?; + println!("| operation_name | time (ms) | evaluated_plans (max 10000) | error |"); + println!("|----------------|----------------|-----------|-----------------------------|"); + for r in results { + println!("{}", r); + } + Ok(()) +} + +#[test] +fn test_bench() { + insta::assert_json_snapshot!( + _cmd_bench( + Path::new("./fixtures/starstuff.graphql"), + &PathBuf::from("./fixtures/queries"), + Default::default(), + ).unwrap(), + { "[].timing" => 1.234 }, + ); +} diff --git a/apollo-federation/cli/src/snapshots/apollo_federation_cli__bench.snap b/apollo-federation/cli/src/snapshots/apollo_federation_cli__bench.snap new file mode 100644 index 0000000000..40588f3640 --- /dev/null +++ b/apollo-federation/cli/src/snapshots/apollo_federation_cli__bench.snap @@ -0,0 +1,20 @@ +--- +source: apollo-federation/cli/src/main.rs +expression: "_cmd_bench(&Path::new(\"./test/starstuff.graphql\"),\n &PathBuf::from(\"./test/queries\")).unwrap()" +--- +[ + { + "file_name": "topproducts.graphql", + "query_name": "topproducts.graphql", + "timing": 1.234, + "eval_plans": "1", + "error": null + }, + { + "file_name": "topproducts2.graphql", + "query_name": "topproducts2.graphql", + "timing": 1.234, + "eval_plans": "1", + "error": null + } +] diff --git a/apollo-federation/src/compat.rs b/apollo-federation/src/compat.rs index 4af8e90010..701337714c 100644 --- a/apollo-federation/src/compat.rs +++ b/apollo-federation/src/compat.rs @@ -9,10 +9,13 @@ use apollo_compiler::ast::Value; use apollo_compiler::collections::IndexMap; +use apollo_compiler::executable; use apollo_compiler::schema::Directive; use apollo_compiler::schema::ExtendedType; use apollo_compiler::schema::InputValueDefinition; use apollo_compiler::schema::Type; +use apollo_compiler::validation::Valid; +use apollo_compiler::ExecutableDocument; use apollo_compiler::Name; use apollo_compiler::Node; use apollo_compiler::Schema; @@ -281,6 +284,86 @@ pub fn coerce_schema_default_values(schema: &mut Schema) { } } +fn 
coerce_directive_application_values( + schema: &Valid, + directives: &mut executable::DirectiveList, +) { + for directive in directives { + let Some(definition) = schema.directive_definitions.get(&directive.name) else { + continue; + }; + let directive = directive.make_mut(); + for arg in &mut directive.arguments { + let Some(definition) = definition.argument_by_name(&arg.name) else { + continue; + }; + let arg = arg.make_mut(); + _ = coerce_value(&schema.types, &mut arg.value, &definition.ty); + } + } +} + +fn coerce_selection_set_values( + schema: &Valid, + selection_set: &mut executable::SelectionSet, +) { + for selection in &mut selection_set.selections { + match selection { + executable::Selection::Field(field) => { + let definition = field.definition.clone(); // Clone so we can mutate `field`. + let field = field.make_mut(); + for arg in &mut field.arguments { + let Some(definition) = definition.argument_by_name(&arg.name) else { + continue; + }; + let arg = arg.make_mut(); + _ = coerce_value(&schema.types, &mut arg.value, &definition.ty); + } + coerce_directive_application_values(schema, &mut field.directives); + coerce_selection_set_values(schema, &mut field.selection_set); + } + executable::Selection::FragmentSpread(frag) => { + let frag = frag.make_mut(); + coerce_directive_application_values(schema, &mut frag.directives); + } + executable::Selection::InlineFragment(frag) => { + let frag = frag.make_mut(); + coerce_directive_application_values(schema, &mut frag.directives); + coerce_selection_set_values(schema, &mut frag.selection_set); + } + } + } +} + +fn coerce_operation_values(schema: &Valid, operation: &mut Node) { + let operation = operation.make_mut(); + + for variable in &mut operation.variables { + let variable = variable.make_mut(); + let Some(default_value) = &mut variable.default_value else { + continue; + }; + + // On error, the default value is invalid. This would have been caught by validation. + // In schemas, we explicitly remove the default value if it's invalid, to match the JS + // query planner behaviour. + // In queries, I hope we can just reject queries with invalid default values instead of + // silently doing the wrong thing :) + _ = coerce_value(&schema.types, default_value, &variable.ty); + } + + coerce_selection_set_values(schema, &mut operation.selection_set); +} + +pub fn coerce_executable_values(schema: &Valid, document: &mut ExecutableDocument) { + if let Some(operation) = &mut document.operations.anonymous { + coerce_operation_values(schema, operation); + } + for operation in document.operations.named.values_mut() { + coerce_operation_values(schema, operation); + } +} + /// Applies default value coercion and removes non-semantic directives so that /// the apollo-rs serialized output of the schema matches the result of /// `printSchema(buildSchema()` in graphql-js. 
@@ -288,3 +371,46 @@ pub fn make_print_schema_compatible(schema: &mut Schema) {
     remove_non_semantic_directives(schema);
     coerce_schema_default_values(schema);
 }
+
+#[cfg(test)]
+mod tests {
+    use apollo_compiler::validation::Valid;
+    use apollo_compiler::ExecutableDocument;
+    use apollo_compiler::Schema;
+
+    use super::coerce_executable_values;
+
+    fn parse_and_coerce(schema: &Valid<Schema>, input: &str) -> String {
+        let mut document = ExecutableDocument::parse(schema, input, "test.graphql").unwrap();
+        coerce_executable_values(schema, &mut document);
+        document.to_string()
+    }
+
+    #[test]
+    fn coerces_list_values() {
+        let schema = Schema::parse_and_validate(
+            r#"
+            type Query {
+                test(
+                    bools: [Boolean],
+                    ints: [Int],
+                    strings: [String],
+                    floats: [Float],
+                ): Int
+            }
+            "#,
+            "schema.graphql",
+        )
+        .unwrap();
+
+        insta::assert_snapshot!(parse_and_coerce(&schema, r#"
+        {
+            test(bools: true, ints: 1, strings: "string", floats: 2.0)
+        }
+        "#), @r#"
+        {
+          test(bools: [true], ints: [1], strings: ["string"], floats: [2.0])
+        }
+        "#);
+    }
+}
diff --git a/apollo-federation/src/error/mod.rs b/apollo-federation/src/error/mod.rs
index f13ec59757..555d1a4339 100644
--- a/apollo-federation/src/error/mod.rs
+++ b/apollo-federation/src/error/mod.rs
@@ -29,6 +29,18 @@ impl From<FederationError> for String {
     }
 }
 
+#[derive(Clone, Debug, strum_macros::Display, PartialEq, Eq)]
+pub enum UnsupportedFeatureKind {
+    #[strum(to_string = "progressive overrides")]
+    ProgressiveOverrides,
+    #[strum(to_string = "defer")]
+    Defer,
+    #[strum(to_string = "context")]
+    Context,
+    #[strum(to_string = "alias")]
+    Alias,
+}
+
 #[derive(Debug, Clone, thiserror::Error)]
 pub enum SingleFederationError {
     #[error(
@@ -185,7 +197,10 @@ pub enum SingleFederationError {
     #[error("{message}")]
     OverrideOnInterface { message: String },
     #[error("{message}")]
-    UnsupportedFeature { message: String },
+    UnsupportedFeature {
+        message: String,
+        kind: UnsupportedFeatureKind,
+    },
     #[error("{message}")]
     InvalidFederationSupergraph { message: String },
     #[error("{message}")]
diff --git a/apollo-federation/src/lib.rs b/apollo-federation/src/lib.rs
index 4eb4afa5be..ed02b15ada 100644
--- a/apollo-federation/src/lib.rs
+++ b/apollo-federation/src/lib.rs
@@ -29,6 +29,7 @@ pub mod query_graph;
 pub mod query_plan;
 pub mod schema;
 pub mod subgraph;
+pub(crate) mod supergraph;
 pub(crate) mod utils;
 
 use apollo_compiler::ast::NamedType;
@@ -46,10 +47,10 @@ use crate::link::spec::Identity;
 use crate::link::spec_definition::SpecDefinitions;
 use crate::merge::merge_subgraphs;
 use crate::merge::MergeFailure;
-pub use crate::query_graph::extract_subgraphs_from_supergraph::ValidFederationSubgraph;
-pub use crate::query_graph::extract_subgraphs_from_supergraph::ValidFederationSubgraphs;
 use crate::schema::ValidFederationSchema;
 use crate::subgraph::ValidSubgraph;
+pub use crate::supergraph::ValidFederationSubgraph;
+pub use crate::supergraph::ValidFederationSubgraphs;
 
 pub(crate) type SupergraphSpecs = (&'static LinkSpecDefinition, &'static JoinSpecDefinition);
 
@@ -128,10 +129,7 @@ impl Supergraph {
     }
 
     pub fn extract_subgraphs(&self) -> Result<ValidFederationSubgraphs, FederationError> {
-        crate::query_graph::extract_subgraphs_from_supergraph::extract_subgraphs_from_supergraph(
-            &self.schema,
-            None,
-        )
+        supergraph::extract_subgraphs_from_supergraph(&self.schema, None)
     }
 }
 
diff --git a/apollo-federation/src/link/cost_spec_definition.rs b/apollo-federation/src/link/cost_spec_definition.rs
new file mode 100644
index 0000000000..db49185b04
--- /dev/null
+++ b/apollo-federation/src/link/cost_spec_definition.rs
@@ -0,0
+1,192 @@ +use apollo_compiler::ast::Argument; +use apollo_compiler::ast::Directive; +use apollo_compiler::collections::IndexMap; +use apollo_compiler::name; +use apollo_compiler::schema::Component; +use apollo_compiler::schema::EnumType; +use apollo_compiler::schema::ObjectType; +use apollo_compiler::schema::ScalarType; +use apollo_compiler::Name; +use apollo_compiler::Node; +use lazy_static::lazy_static; + +use crate::error::FederationError; +use crate::link::spec::Identity; +use crate::link::spec::Url; +use crate::link::spec::Version; +use crate::link::spec_definition::SpecDefinition; +use crate::link::spec_definition::SpecDefinitions; +use crate::schema::position::EnumTypeDefinitionPosition; +use crate::schema::position::ObjectTypeDefinitionPosition; +use crate::schema::position::ScalarTypeDefinitionPosition; +use crate::schema::FederationSchema; + +pub(crate) const COST_DIRECTIVE_NAME_IN_SPEC: Name = name!("cost"); +pub(crate) const COST_DIRECTIVE_NAME_DEFAULT: Name = name!("federation__cost"); + +pub(crate) const LIST_SIZE_DIRECTIVE_NAME_IN_SPEC: Name = name!("listSize"); +pub(crate) const LIST_SIZE_DIRECTIVE_NAME_DEFAULT: Name = name!("federation__listSize"); + +#[derive(Clone)] +pub(crate) struct CostSpecDefinition { + url: Url, + minimum_federation_version: Option, +} + +macro_rules! propagate_demand_control_directives { + ($func_name:ident, $directives_ty:ty, $wrap_ty:expr) => { + pub(crate) fn $func_name( + &self, + subgraph_schema: &FederationSchema, + source: &$directives_ty, + dest: &mut $directives_ty, + original_directive_names: &IndexMap, + ) -> Result<(), FederationError> { + let cost_directive_name = original_directive_names.get(&COST_DIRECTIVE_NAME_IN_SPEC); + let cost_directive = cost_directive_name.and_then(|name| source.get(name.as_str())); + if let Some(cost_directive) = cost_directive { + dest.push($wrap_ty(self.cost_directive( + subgraph_schema, + cost_directive.arguments.clone(), + )?)); + } + + let list_size_directive_name = + original_directive_names.get(&LIST_SIZE_DIRECTIVE_NAME_IN_SPEC); + let list_size_directive = + list_size_directive_name.and_then(|name| source.get(name.as_str())); + if let Some(list_size_directive) = list_size_directive { + dest.push($wrap_ty(self.list_size_directive( + subgraph_schema, + list_size_directive.arguments.clone(), + )?)); + } + + Ok(()) + } + }; +} + +macro_rules! 
propagate_demand_control_directives_to_position { + ($func_name:ident, $source_ty:ty, $dest_ty:ty) => { + pub(crate) fn $func_name( + &self, + subgraph_schema: &mut FederationSchema, + source: &Node<$source_ty>, + dest: &$dest_ty, + original_directive_names: &IndexMap, + ) -> Result<(), FederationError> { + let cost_directive_name = original_directive_names.get(&COST_DIRECTIVE_NAME_IN_SPEC); + let cost_directive = + cost_directive_name.and_then(|name| source.directives.get(name.as_str())); + if let Some(cost_directive) = cost_directive { + dest.insert_directive( + subgraph_schema, + Component::from( + self.cost_directive(subgraph_schema, cost_directive.arguments.clone())?, + ), + )?; + } + + let list_size_directive_name = + original_directive_names.get(&LIST_SIZE_DIRECTIVE_NAME_IN_SPEC); + let list_size_directive = + list_size_directive_name.and_then(|name| source.directives.get(name.as_str())); + if let Some(list_size_directive) = list_size_directive { + dest.insert_directive( + subgraph_schema, + Component::from(self.list_size_directive( + subgraph_schema, + list_size_directive.arguments.clone(), + )?), + )?; + } + + Ok(()) + } + }; +} + +impl CostSpecDefinition { + pub(crate) fn new(version: Version, minimum_federation_version: Option) -> Self { + Self { + url: Url { + identity: Identity::cost_identity(), + version, + }, + minimum_federation_version, + } + } + + pub(crate) fn cost_directive( + &self, + schema: &FederationSchema, + arguments: Vec>, + ) -> Result { + let name = self + .directive_name_in_schema(schema, &COST_DIRECTIVE_NAME_IN_SPEC)? + .unwrap_or(COST_DIRECTIVE_NAME_DEFAULT); + + Ok(Directive { name, arguments }) + } + + pub(crate) fn list_size_directive( + &self, + schema: &FederationSchema, + arguments: Vec>, + ) -> Result { + let name = self + .directive_name_in_schema(schema, &LIST_SIZE_DIRECTIVE_NAME_IN_SPEC)? + .unwrap_or(LIST_SIZE_DIRECTIVE_NAME_DEFAULT); + + Ok(Directive { name, arguments }) + } + + propagate_demand_control_directives!( + propagate_demand_control_directives, + apollo_compiler::ast::DirectiveList, + Node::new + ); + propagate_demand_control_directives!( + propagate_demand_control_schema_directives, + apollo_compiler::schema::DirectiveList, + Component::from + ); + + propagate_demand_control_directives_to_position!( + propagate_demand_control_directives_for_enum, + EnumType, + EnumTypeDefinitionPosition + ); + propagate_demand_control_directives_to_position!( + propagate_demand_control_directives_for_object, + ObjectType, + ObjectTypeDefinitionPosition + ); + propagate_demand_control_directives_to_position!( + propagate_demand_control_directives_for_scalar, + ScalarType, + ScalarTypeDefinitionPosition + ); +} + +impl SpecDefinition for CostSpecDefinition { + fn url(&self) -> &Url { + &self.url + } + + fn minimum_federation_version(&self) -> Option<&Version> { + self.minimum_federation_version.as_ref() + } +} + +lazy_static! 
{
+    pub(crate) static ref COST_VERSIONS: SpecDefinitions<CostSpecDefinition> = {
+        let mut definitions = SpecDefinitions::new(Identity::cost_identity());
+        definitions.add(CostSpecDefinition::new(
+            Version { major: 0, minor: 1 },
+            Some(Version { major: 2, minor: 9 }),
+        ));
+        definitions
+    };
+}
diff --git a/apollo-federation/src/link/database.rs b/apollo-federation/src/link/database.rs
index d96c8ecc4b..94ea7ba0ff 100644
--- a/apollo-federation/src/link/database.rs
+++ b/apollo-federation/src/link/database.rs
@@ -1,9 +1,9 @@
 use std::borrow::Cow;
-use std::collections::HashMap;
 use std::sync::Arc;
 
 use apollo_compiler::ast::Directive;
 use apollo_compiler::ast::DirectiveLocation;
+use apollo_compiler::collections::IndexMap;
 use apollo_compiler::schema::DirectiveDefinition;
 use apollo_compiler::ty;
 use apollo_compiler::Schema;
@@ -46,10 +46,10 @@ pub fn links_metadata(schema: &Schema) -> Result<Option<LinksMetadata>, LinkError> {
     // all of the @link usages (starting with the bootstrapping one) and extract their metadata.
     let link_name_in_schema = &bootstrap_directive.name;
     let mut links = Vec::new();
-    let mut by_identity = HashMap::new();
-    let mut by_name_in_schema = HashMap::new();
-    let mut types_by_imported_name = HashMap::new();
-    let mut directives_by_imported_name = HashMap::new();
+    let mut by_identity = IndexMap::default();
+    let mut by_name_in_schema = IndexMap::default();
+    let mut types_by_imported_name = IndexMap::default();
+    let mut directives_by_imported_name = IndexMap::default();
     let link_applications = schema
         .schema_definition
         .directives
diff --git a/apollo-federation/src/link/federation_spec_definition.rs b/apollo-federation/src/link/federation_spec_definition.rs
index b62fb7d762..67f181ec8b 100644
--- a/apollo-federation/src/link/federation_spec_definition.rs
+++ b/apollo-federation/src/link/federation_spec_definition.rs
@@ -13,6 +13,8 @@ use crate::error::FederationError;
 use crate::error::SingleFederationError;
 use crate::link::argument::directive_optional_boolean_argument;
 use crate::link::argument::directive_required_string_argument;
+use crate::link::cost_spec_definition::CostSpecDefinition;
+use crate::link::cost_spec_definition::COST_VERSIONS;
 use crate::link::spec::Identity;
 use crate::link::spec::Url;
 use crate::link::spec::Version;
@@ -387,6 +389,17 @@ impl FederationSpecDefinition {
             arguments,
         })
     }
+
+    pub(crate) fn get_cost_spec_definition(
+        &self,
+        schema: &FederationSchema,
+    ) -> Option<&'static CostSpecDefinition> {
+        schema
+            .metadata()
+            .and_then(|metadata| metadata.for_identity(&Identity::cost_identity()))
+            .and_then(|link| COST_VERSIONS.find(&link.url.version))
+            .or_else(|| COST_VERSIONS.find_for_federation_version(self.version()))
+    }
 }
 
 impl SpecDefinition for FederationSpecDefinition {
@@ -426,6 +439,22 @@ lazy_static! {
         major: 2,
         minor: 5,
     }));
+    definitions.add(FederationSpecDefinition::new(Version {
+        major: 2,
+        minor: 6,
+    }));
+    definitions.add(FederationSpecDefinition::new(Version {
+        major: 2,
+        minor: 7,
+    }));
+    definitions.add(FederationSpecDefinition::new(Version {
+        major: 2,
+        minor: 8,
+    }));
+    definitions.add(FederationSpecDefinition::new(Version {
+        major: 2,
+        minor: 9,
+    }));
     definitions
 };
 }
diff --git a/apollo-federation/src/link/mod.rs b/apollo-federation/src/link/mod.rs
index 272c5f4adc..96473e59db 100644
--- a/apollo-federation/src/link/mod.rs
+++ b/apollo-federation/src/link/mod.rs
@@ -1,14 +1,16 @@
-use std::collections::HashMap;
 use std::fmt;
 use std::str;
 use std::sync::Arc;
 
 use apollo_compiler::ast::Directive;
 use apollo_compiler::ast::Value;
+use apollo_compiler::collections::IndexMap;
 use apollo_compiler::name;
+use apollo_compiler::schema::Component;
 use apollo_compiler::InvalidNameError;
 use apollo_compiler::Name;
 use apollo_compiler::Node;
+use apollo_compiler::Schema;
 use thiserror::Error;
 
 use crate::error::FederationError;
@@ -20,6 +22,7 @@ use crate::link::spec::Identity;
 use crate::link::spec::Url;
 
 pub(crate) mod argument;
+pub(crate) mod cost_spec_definition;
 pub mod database;
 pub(crate) mod federation_spec_definition;
 pub(crate) mod graphql_definition;
@@ -329,6 +332,24 @@ impl Link {
             purpose,
         })
     }
+
+    pub fn for_identity<'schema>(
+        schema: &'schema Schema,
+        identity: &Identity,
+    ) -> Option<(Self, &'schema Component<Directive>)> {
+        schema
+            .schema_definition
+            .directives
+            .iter()
+            .find_map(|directive| {
+                let link = Link::from_directive_application(directive).ok()?;
+                if link.url.identity == *identity {
+                    Some((link, directive))
+                } else {
+                    None
+                }
+            })
+    }
 }
 
 impl fmt::Display for Link {
@@ -366,10 +387,10 @@ pub struct LinkedElement {
 
 #[derive(Default, Eq, PartialEq, Debug)]
 pub struct LinksMetadata {
     pub(crate) links: Vec<Arc<Link>>,
-    pub(crate) by_identity: HashMap<Identity, Arc<Link>>,
-    pub(crate) by_name_in_schema: HashMap<Name, Arc<Link>>,
-    pub(crate) types_by_imported_name: HashMap<Name, (Arc<Link>, Arc<Import>)>,
-    pub(crate) directives_by_imported_name: HashMap<Name, (Arc<Link>, Arc<Import>)>,
+    pub(crate) by_identity: IndexMap<Identity, Arc<Link>>,
+    pub(crate) by_name_in_schema: IndexMap<Name, Arc<Link>>,
+    pub(crate) types_by_imported_name: IndexMap<Name, (Arc<Link>, Arc<Import>)>,
+    pub(crate) directives_by_imported_name: IndexMap<Name, (Arc<Link>, Arc<Import>)>,
 }
 
 impl LinksMetadata {
diff --git a/apollo-federation/src/link/spec.rs b/apollo-federation/src/link/spec.rs
index 25dd24c4b2..5c1386644b 100644
--- a/apollo-federation/src/link/spec.rs
+++ b/apollo-federation/src/link/spec.rs
@@ -88,6 +88,13 @@ impl Identity {
             name: name!("inaccessible"),
         }
     }
+
+    pub fn cost_identity() -> Identity {
+        Identity {
+            domain: APOLLO_SPEC_DOMAIN.to_string(),
+            name: name!("cost"),
+        }
+    }
 }
 
 /// The version of a `@link` specification, in the form of a major and minor version numbers.
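Taken together with `get_cost_spec_definition` above, version resolution is a two-step chain: an exact match on the version from an explicit `@link`, then a fallback keyed on the federation version in use. A self-contained sketch of that chain, using a plain map as a stand-in for `SpecDefinitions` (the names here are illustrative, not the crate's API):

```rust
use std::collections::BTreeMap;

type Version = (u64, u64); // (major, minor)

/// Toy stand-in for the COST_VERSIONS registry: cost spec version ->
/// (spec label, minimum federation version the spec targets).
struct Registry {
    specs: BTreeMap<Version, (&'static str, Version)>,
}

impl Registry {
    /// Exact match on the version taken from an explicit `@link` URL.
    fn find(&self, v: &Version) -> Option<&'static str> {
        self.specs.get(v).map(|(spec, _)| *spec)
    }

    /// Fallback keyed on the federation version, mirroring the comparison
    /// in `find_for_federation_version` shown below.
    fn find_for_federation_version(&self, fed: &Version) -> Option<&'static str> {
        self.specs
            .values()
            .find(|(_, min_fed)| min_fed >= fed)
            .map(|(spec, _)| *spec)
    }
}

fn main() {
    let mut specs = BTreeMap::new();
    specs.insert((0, 1), ("cost/v0.1", (2, 9)));
    let registry = Registry { specs };

    // No explicit `@link` to the cost spec: fall back to the spec
    // associated with the running federation version.
    let linked_version: Option<Version> = None;
    let spec = linked_version
        .and_then(|v| registry.find(&v))
        .or_else(|| registry.find_for_federation_version(&(2, 9)));
    assert_eq!(spec, Some("cost/v0.1"));
}
```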
diff --git a/apollo-federation/src/link/spec_definition.rs b/apollo-federation/src/link/spec_definition.rs index 5826f8f4d9..1fb084afe5 100644 --- a/apollo-federation/src/link/spec_definition.rs +++ b/apollo-federation/src/link/spec_definition.rs @@ -182,6 +182,17 @@ impl SpecDefinitions { self.definitions.get(requested) } + pub(crate) fn find_for_federation_version(&self, federation_version: &Version) -> Option<&T> { + for definition in self.definitions.values() { + if let Some(minimum_federation_version) = definition.minimum_federation_version() { + if minimum_federation_version >= federation_version { + return Some(definition); + } + } + } + None + } + pub(crate) fn versions(&self) -> Keys { self.definitions.keys() } diff --git a/apollo-federation/src/merge.rs b/apollo-federation/src/merge.rs index 0aed048c60..5c5531c5ec 100644 --- a/apollo-federation/src/merge.rs +++ b/apollo-federation/src/merge.rs @@ -1,4 +1,3 @@ -use std::collections::HashSet; use std::fmt::Debug; use std::fmt::Formatter; use std::iter; @@ -128,6 +127,7 @@ impl Merger { needs_inaccessible: false, } } + fn merge(&mut self, subgraphs: ValidFederationSubgraphs) -> Result { let mut subgraphs = subgraphs .into_iter() @@ -534,20 +534,33 @@ impl Merger { ); for arg in field.arguments.iter() { - let arguments = &mut supergraph_field.make_mut().arguments; - if let Some(index) = arguments.iter().position(|a| a.name == arg.name) { - if let Some(existing_arg) = arguments.get_mut(index) { - // TODO add args - let mutable_arg = existing_arg.make_mut(); - self.add_inaccessible( - directive_names, - &mut mutable_arg.directives, - &arg.directives, - ); - } else { - // TODO mismatch no args - } - } + let arguments_to_merge = &mut supergraph_field.make_mut().arguments; + let argument_to_merge = arguments_to_merge + .iter_mut() + .find_map(|a| (a.name == arg.name).then(|| a.make_mut())); + + if let Some(argument) = argument_to_merge { + self.add_inaccessible( + directive_names, + &mut argument.directives, + &arg.directives, + ); + } else { + let mut argument = InputValueDefinition { + name: arg.name.clone(), + description: arg.description.clone(), + directives: Default::default(), + ty: arg.ty.clone(), + default_value: arg.default_value.clone(), + }; + + self.add_inaccessible( + directive_names, + &mut argument.directives, + &arg.directives, + ); + arguments_to_merge.push(argument.into()); + }; } let requires_directive_option = field @@ -704,19 +717,6 @@ impl Merger { } } -fn filter_directives<'a, D, I, O>(deny_list: &IndexSet, directives: D) -> O -where - D: IntoIterator, - I: 'a + AsRef + Clone, - O: FromIterator, -{ - directives - .into_iter() - .filter(|d| !deny_list.contains(&d.as_ref().name)) - .cloned() - .collect() -} - struct DirectiveNames { key: Name, requires: Name, @@ -1581,8 +1581,8 @@ fn add_core_feature_inaccessible(supergraph: &mut Schema) { // TODO use apollo_compiler::executable::FieldSet fn parse_keys<'a>( directives: impl Iterator> + Sized, -) -> HashSet<&'a str> { - HashSet::from_iter( +) -> IndexSet<&'a str> { + IndexSet::from_iter( directives .flat_map(|k| { let field_set = directive_string_arg_value(k, &name!("fields")).unwrap(); diff --git a/apollo-federation/src/operation/contains.rs b/apollo-federation/src/operation/contains.rs index d947a8faf2..e69f978b3f 100644 --- a/apollo-federation/src/operation/contains.rs +++ b/apollo-federation/src/operation/contains.rs @@ -1,8 +1,4 @@ -use std::collections::HashMap; - use apollo_compiler::executable; -use apollo_compiler::Name; -use apollo_compiler::Node; use 
super::FieldSelection; use super::FragmentSpreadSelection; @@ -11,202 +7,6 @@ use super::InlineFragmentSelection; use super::Selection; use super::SelectionSet; -/// Compare two input values, with two special cases for objects: assuming no duplicate keys, -/// and order-independence. -/// -/// This comes from apollo-rs: https://github.com/apollographql/apollo-rs/blob/6825be88fe13cd0d67b83b0e4eb6e03c8ab2555e/crates/apollo-compiler/src/validation/selection.rs#L160-L188 -/// Hopefully we can do this more easily in the future! -fn same_value(left: &executable::Value, right: &executable::Value) -> bool { - use apollo_compiler::executable::Value; - match (left, right) { - (Value::Null, Value::Null) => true, - (Value::Enum(left), Value::Enum(right)) => left == right, - (Value::Variable(left), Value::Variable(right)) => left == right, - (Value::String(left), Value::String(right)) => left == right, - (Value::Float(left), Value::Float(right)) => left == right, - (Value::Int(left), Value::Int(right)) => left == right, - (Value::Boolean(left), Value::Boolean(right)) => left == right, - (Value::List(left), Value::List(right)) if left.len() == right.len() => left - .iter() - .zip(right.iter()) - .all(|(left, right)| same_value(left, right)), - (Value::Object(left), Value::Object(right)) if left.len() == right.len() => { - left.iter().all(|(key, value)| { - right - .iter() - .find(|(other_key, _)| key == other_key) - .is_some_and(|(_, other_value)| same_value(value, other_value)) - }) - } - _ => false, - } -} - -/// Sort an input value, which means specifically sorting their object values by keys (assuming no -/// duplicates). This is used for hashing input values in a way consistent with [same_value()]. -fn sort_value(value: &mut executable::Value) { - use apollo_compiler::executable::Value; - match value { - Value::List(elems) => { - elems - .iter_mut() - .for_each(|value| sort_value(value.make_mut())); - } - Value::Object(pairs) => { - pairs - .iter_mut() - .for_each(|(_, value)| sort_value(value.make_mut())); - pairs.sort_by(|left, right| left.0.cmp(&right.0)); - } - _ => {} - } -} - -/// Compare sorted input values, which means specifically establishing an order between the variants -/// of input values, and comparing values for the same variants accordingly. This is used for -/// hashing directives in a way consistent with [same_directives()]. -/// -/// Note that Floats and Ints are compared textually and not parsed numerically. This is fine for -/// the purposes of hashing. For object comparison semantics, see [compare_sorted_object_pairs()]. 
-fn compare_sorted_value(left: &executable::Value, right: &executable::Value) -> std::cmp::Ordering { - use apollo_compiler::executable::Value; - fn discriminant(value: &Value) -> u8 { - match value { - Value::Null => 0, - Value::Enum(_) => 1, - Value::Variable(_) => 2, - Value::String(_) => 3, - Value::Float(_) => 4, - Value::Int(_) => 5, - Value::Boolean(_) => 6, - Value::List(_) => 7, - Value::Object(_) => 8, - } - } - match (left, right) { - (Value::Null, Value::Null) => std::cmp::Ordering::Equal, - (Value::Enum(left), Value::Enum(right)) => left.cmp(right), - (Value::Variable(left), Value::Variable(right)) => left.cmp(right), - (Value::String(left), Value::String(right)) => left.cmp(right), - (Value::Float(left), Value::Float(right)) => left.as_str().cmp(right.as_str()), - (Value::Int(left), Value::Int(right)) => left.as_str().cmp(right.as_str()), - (Value::Boolean(left), Value::Boolean(right)) => left.cmp(right), - (Value::List(left), Value::List(right)) => left.len().cmp(&right.len()).then_with(|| { - left.iter() - .zip(right) - .map(|(left, right)| compare_sorted_value(left, right)) - .find(|o| o.is_ne()) - .unwrap_or(std::cmp::Ordering::Equal) - }), - (Value::Object(left), Value::Object(right)) => compare_sorted_name_value_pairs( - left.iter().map(|pair| &pair.0), - left.iter().map(|pair| &pair.1), - right.iter().map(|pair| &pair.0), - right.iter().map(|pair| &pair.1), - ), - _ => discriminant(left).cmp(&discriminant(right)), - } -} - -/// Compare the (name, value) pair iterators, which are assumed to be sorted by name and have sorted -/// values. This is used for hashing objects/arguments in a way consistent with [same_directives()]. -/// -/// Note that pair iterators are compared by length, then lexicographically by name, then finally -/// recursively by value. This is intended to compute an ordering quickly for hashing. -fn compare_sorted_name_value_pairs<'doc>( - left_names: impl ExactSizeIterator, - left_values: impl ExactSizeIterator>, - right_names: impl ExactSizeIterator, - right_values: impl ExactSizeIterator>, -) -> std::cmp::Ordering { - left_names - .len() - .cmp(&right_names.len()) - .then_with(|| left_names.cmp(right_names)) - .then_with(|| { - left_values - .zip(right_values) - .map(|(left, right)| compare_sorted_value(left, right)) - .find(|o| o.is_ne()) - .unwrap_or(std::cmp::Ordering::Equal) - }) -} - -/// Returns true if two argument lists are equivalent. -/// -/// The arguments and values must be the same, independent of order. -fn same_arguments( - left: &[Node], - right: &[Node], -) -> bool { - if left.len() != right.len() { - return false; - } - - let right = right - .iter() - .map(|arg| (&arg.name, arg)) - .collect::>(); - - left.iter().all(|arg| { - right - .get(&arg.name) - .is_some_and(|right_arg| same_value(&arg.value, &right_arg.value)) - }) -} - -/// Sort arguments, which means specifically sorting arguments by names and object values by keys -/// (assuming no duplicates). This is used for hashing arguments in a way consistent with -/// [same_arguments()]. -pub(super) fn sort_arguments(arguments: &mut [Node]) { - arguments - .iter_mut() - .for_each(|arg| sort_value(arg.make_mut().value.make_mut())); - arguments.sort_by(|left, right| left.name.cmp(&right.name)); -} - -/// Compare sorted arguments; see [compare_sorted_name_value_pairs()] for semantics. This is used -/// for hashing directives in a way consistent with [same_directives()]. 
-fn compare_sorted_arguments( - left: &[Node], - right: &[Node], -) -> std::cmp::Ordering { - compare_sorted_name_value_pairs( - left.iter().map(|arg| &arg.name), - left.iter().map(|arg| &arg.value), - right.iter().map(|arg| &arg.name), - right.iter().map(|arg| &arg.value), - ) -} - -/// Returns true if two directive lists are equivalent, independent of order. -fn same_directives(left: &executable::DirectiveList, right: &executable::DirectiveList) -> bool { - if left.len() != right.len() { - return false; - } - - left.iter().all(|left_directive| { - right.iter().any(|right_directive| { - left_directive.name == right_directive.name - && same_arguments(&left_directive.arguments, &right_directive.arguments) - }) - }) -} - -/// Sort directives, which means specifically sorting their arguments, sorting the directives by -/// name, and then breaking directive-name ties by comparing sorted arguments. This is used for -/// hashing arguments in a way consistent with [same_directives()]. -pub(super) fn sort_directives(directives: &mut executable::DirectiveList) { - directives - .iter_mut() - .for_each(|directive| sort_arguments(&mut directive.make_mut().arguments)); - directives.sort_by(|left, right| { - left.name - .cmp(&right.name) - .then_with(|| compare_sorted_arguments(&left.arguments, &right.arguments)) - }); -} - pub(super) fn is_deferred_selection(directives: &executable::DirectiveList) -> bool { directives.has("defer") } @@ -278,8 +78,8 @@ impl FieldSelection { pub fn containment(&self, other: &FieldSelection, options: ContainmentOptions) -> Containment { if self.field.name() != other.field.name() || self.field.alias != other.field.alias - || !same_arguments(&self.field.arguments, &other.field.arguments) - || !same_directives(&self.field.directives, &other.field.directives) + || self.field.arguments != other.field.arguments + || self.field.directives != other.field.directives { return Containment::NotContained; } diff --git a/apollo-federation/src/operation/directive_list.rs b/apollo-federation/src/operation/directive_list.rs new file mode 100644 index 0000000000..913a1184e6 --- /dev/null +++ b/apollo-federation/src/operation/directive_list.rs @@ -0,0 +1,410 @@ +use std::fmt; +use std::fmt::Display; +use std::hash::BuildHasher; +use std::hash::Hash; +use std::hash::Hasher; +use std::ops::Deref; +use std::sync::Arc; +use std::sync::OnceLock; + +use apollo_compiler::executable; +use apollo_compiler::Name; +use apollo_compiler::Node; + +use super::sort_arguments; + +/// Compare sorted input values, which means specifically establishing an order between the variants +/// of input values, and comparing values for the same variants accordingly. +/// +/// Note that Floats and Ints are compared textually and not parsed numerically. This is fine for +/// the purposes of hashing. +fn compare_sorted_value(left: &executable::Value, right: &executable::Value) -> std::cmp::Ordering { + use apollo_compiler::executable::Value; + /// Returns an arbitrary index for each value type so values of different types are sorted consistently. 
+    fn discriminant(value: &Value) -> u8 {
+        match value {
+            Value::Null => 0,
+            Value::Enum(_) => 1,
+            Value::Variable(_) => 2,
+            Value::String(_) => 3,
+            Value::Float(_) => 4,
+            Value::Int(_) => 5,
+            Value::Boolean(_) => 6,
+            Value::List(_) => 7,
+            Value::Object(_) => 8,
+        }
+    }
+    match (left, right) {
+        (Value::Null, Value::Null) => std::cmp::Ordering::Equal,
+        (Value::Enum(left), Value::Enum(right)) => left.cmp(right),
+        (Value::Variable(left), Value::Variable(right)) => left.cmp(right),
+        (Value::String(left), Value::String(right)) => left.cmp(right),
+        (Value::Float(left), Value::Float(right)) => left.as_str().cmp(right.as_str()),
+        (Value::Int(left), Value::Int(right)) => left.as_str().cmp(right.as_str()),
+        (Value::Boolean(left), Value::Boolean(right)) => left.cmp(right),
+        (Value::List(left), Value::List(right)) => left.len().cmp(&right.len()).then_with(|| {
+            left.iter()
+                .zip(right)
+                .map(|(left, right)| compare_sorted_value(left, right))
+                .find(|o| o.is_ne())
+                .unwrap_or(std::cmp::Ordering::Equal)
+        }),
+        (Value::Object(left), Value::Object(right)) => compare_sorted_name_value_pairs(
+            left.iter().map(|pair| &pair.0),
+            left.iter().map(|pair| &pair.1),
+            right.iter().map(|pair| &pair.0),
+            right.iter().map(|pair| &pair.1),
+        ),
+        _ => discriminant(left).cmp(&discriminant(right)),
+    }
+}
+
+/// Compare the (name, value) pair iterators, which are assumed to be sorted by name and have sorted
+/// values. This is used for hashing objects/arguments in a way consistent with [same_directives()].
+///
+/// Note that pair iterators are compared by length, then lexicographically by name, then finally
+/// recursively by value. This is intended to compute an ordering quickly for hashing.
+fn compare_sorted_name_value_pairs<'doc>(
+    left_names: impl ExactSizeIterator<Item = &'doc Name>,
+    left_values: impl ExactSizeIterator<Item = &'doc Node<executable::Value>>,
+    right_names: impl ExactSizeIterator<Item = &'doc Name>,
+    right_values: impl ExactSizeIterator<Item = &'doc Node<executable::Value>>,
+) -> std::cmp::Ordering {
+    left_names
+        .len()
+        .cmp(&right_names.len())
+        .then_with(|| left_names.cmp(right_names))
+        .then_with(|| {
+            left_values
+                .zip(right_values)
+                .map(|(left, right)| compare_sorted_value(left, right))
+                .find(|o| o.is_ne())
+                .unwrap_or(std::cmp::Ordering::Equal)
+        })
+}
+
+/// Compare sorted arguments; see [compare_sorted_name_value_pairs()] for semantics. This is used
+/// for hashing directives in a way consistent with [same_directives()].
+fn compare_sorted_arguments(
+    left: &[Node<executable::Argument>],
+    right: &[Node<executable::Argument>],
+) -> std::cmp::Ordering {
+    compare_sorted_name_value_pairs(
+        left.iter().map(|arg| &arg.name),
+        left.iter().map(|arg| &arg.value),
+        right.iter().map(|arg| &arg.name),
+        right.iter().map(|arg| &arg.value),
+    )
+}
+
+/// An empty apollo-compiler directive list that we can return a reference to when a
+/// [`DirectiveList`] is in the empty state.
+static EMPTY_DIRECTIVE_LIST: executable::DirectiveList = executable::DirectiveList(vec![]);
+
+/// Contents for a non-empty directive list.
+#[derive(Debug, Clone)]
+struct DirectiveListInner {
+    // Cached hash: hashing may be expensive with deeply nested values or very many directives,
+    // so we only want to do it once.
+    // The hash is eagerly precomputed because we expect to, most of the time, hash a DirectiveList
+    // at least once (when inserting its selection into a selection map).
+    hash: u64,
+    // Mutable access to the underlying directive list should not be handed out because `sort_order`
+    // may get out of sync.
+    directives: executable::DirectiveList,
+    sort_order: Vec<usize>,
+}
+
+impl PartialEq for DirectiveListInner {
+    fn eq(&self, other: &Self) -> bool {
+        self.hash == other.hash
+            && self
+                .iter_sorted()
+                .zip(other.iter_sorted())
+                .all(|(left, right)| {
+                    // We can just use `Eq` because the arguments are sorted recursively
+                    left.name == right.name && left.arguments == right.arguments
+                })
+    }
+}
+
+impl Eq for DirectiveListInner {}
+
+impl DirectiveListInner {
+    fn rehash(&mut self) {
+        static SHARED_RANDOM: OnceLock<std::hash::RandomState> = OnceLock::new();
+
+        let mut state = SHARED_RANDOM.get_or_init(Default::default).build_hasher();
+        self.len().hash(&mut state);
+        // Hash in sorted order
+        for d in self.iter_sorted() {
+            d.hash(&mut state);
+        }
+        self.hash = state.finish();
+    }
+
+    fn len(&self) -> usize {
+        self.directives.len()
+    }
+
+    fn iter_sorted(&self) -> DirectiveIterSorted<'_> {
+        DirectiveIterSorted {
+            directives: &self.directives.0,
+            inner: self.sort_order.iter(),
+        }
+    }
+}
+
+/// A list of directives, with order-independent hashing and equality.
+///
+/// Original order of directive applications is stored but is not part of hashing,
+/// so it may not be maintained exactly when round-tripping several directive lists
+/// through a HashSet for example.
+///
+/// Arguments and input object values provided to directives are all sorted and the
+/// original order is not tracked.
+///
+/// This list is cheaply cloneable, but not intended for frequent mutations.
+/// When the list is empty, it does not require an allocation.
+#[derive(Debug, Clone, PartialEq, Eq, Default)]
+pub(crate) struct DirectiveList {
+    inner: Option<Arc<DirectiveListInner>>,
+}
+
+impl Deref for DirectiveList {
+    type Target = executable::DirectiveList;
+    fn deref(&self) -> &Self::Target {
+        self.inner
+            .as_ref()
+            .map_or(&EMPTY_DIRECTIVE_LIST, |inner| &inner.directives)
+    }
+}
+
+impl Hash for DirectiveList {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        state.write_u64(self.inner.as_ref().map_or(0, |inner| inner.hash))
+    }
+}
+
+impl Display for DirectiveList {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if let Some(inner) = &self.inner {
+            inner.directives.fmt(f)
+        } else {
+            Ok(())
+        }
+    }
+}
+
+impl From<executable::DirectiveList> for DirectiveList {
+    fn from(mut directives: executable::DirectiveList) -> Self {
+        if directives.is_empty() {
+            return Self::new();
+        }
+
+        // Sort directives, which means specifically sorting their arguments, sorting the directives by
+        // name, and then breaking directive-name ties by comparing sorted arguments. This is used for
+        // hashing arguments in a way consistent with [same_directives()].
+
+        for directive in directives.iter_mut() {
+            sort_arguments(&mut directive.make_mut().arguments);
+        }
+
+        let mut sort_order = (0usize..directives.len()).collect::<Vec<usize>>();
+        sort_order.sort_by(|left, right| {
+            let left = &directives[*left];
+            let right = &directives[*right];
+            left.name
+                .cmp(&right.name)
+                .then_with(|| compare_sorted_arguments(&left.arguments, &right.arguments))
+        });
+
+        let mut partially_initialized = DirectiveListInner {
+            hash: 0,
+            directives,
+            sort_order,
+        };
+        partially_initialized.rehash();
+        Self {
+            inner: Some(Arc::new(partially_initialized)),
+        }
+    }
+}
+
+impl FromIterator<Node<executable::Directive>> for DirectiveList {
+    fn from_iter<T: IntoIterator<Item = Node<executable::Directive>>>(iter: T) -> Self {
+        Self::from(executable::DirectiveList::from_iter(iter))
+    }
+}
+
+impl FromIterator<executable::Directive> for DirectiveList {
+    fn from_iter<T: IntoIterator<Item = executable::Directive>>(iter: T) -> Self {
+        Self::from(executable::DirectiveList::from_iter(iter))
+    }
+}
+
+impl DirectiveList {
+    /// Create an empty directive list.
+    pub(crate) const fn new() -> Self {
+        Self { inner: None }
+    }
+
+    /// Create a directive list with a single directive.
+    ///
+    /// This sorts arguments and input object values provided to the directive.
+    pub(crate) fn one(directive: impl Into<Node<executable::Directive>>) -> Self {
+        std::iter::once(directive.into()).collect()
+    }
+
+    #[cfg(test)]
+    pub(crate) fn parse(input: &str) -> Self {
+        use apollo_compiler::ast;
+        let input = format!(
+            r#"query {{ field
+# Directive input:
+{input}
+#
+}}"#
+        );
+        let mut parser = apollo_compiler::parser::Parser::new();
+        let document = parser
+            .parse_ast(&input, "DirectiveList::parse.graphql")
+            .unwrap();
+        let Some(ast::Definition::OperationDefinition(operation)) = document.definitions.first()
+        else {
+            unreachable!();
+        };
+        let Some(ast::Selection::Field(field)) = operation.selection_set.first() else {
+            unreachable!();
+        };
+        field.directives.clone().into()
+    }
+
+    /// Iterate the directives in their original order.
+    pub(crate) fn iter(&self) -> impl ExactSizeIterator<Item = &Node<executable::Directive>> {
+        self.inner
+            .as_ref()
+            .map_or(&EMPTY_DIRECTIVE_LIST, |inner| &inner.directives)
+            .iter()
+    }
+
+    /// Iterate the directives in a consistent sort order.
+    pub(crate) fn iter_sorted(&self) -> DirectiveIterSorted<'_> {
+        self.inner
+            .as_ref()
+            .map_or_else(DirectiveIterSorted::empty, |inner| inner.iter_sorted())
+    }
+
+    /// Remove one directive application by name.
+    ///
+    /// To remove a repeatable directive, you may need to call this multiple times.
+    pub(crate) fn remove_one(&mut self, name: &str) -> Option<Node<executable::Directive>> {
+        let Some(inner) = self.inner.as_mut() else {
+            // Nothing to do on an empty list
+            return None;
+        };
+        let Some(index) = inner.directives.iter().position(|dir| dir.name == name) else {
+            return None;
+        };
+
+        // The directive exists and is the only directive: switch to the empty representation
+        if inner.len() == 1 {
+            // The index is guaranteed to exist so we can safely use the panicky [] syntax.
+            let item = inner.directives[index].clone();
+            self.inner = None;
+            return Some(item);
+        }
+
+        // The directive exists: clone the inner structure if necessary.
+        let inner = Arc::make_mut(inner);
+        let sort_index = inner
+            .sort_order
+            .iter()
+            .position(|sorted| *sorted == index)
+            .expect("index must exist in sort order");
+        let item = inner.directives.remove(index);
+        inner.sort_order.remove(sort_index);
+
+        for order in &mut inner.sort_order {
+            if *order > index {
+                *order -= 1;
+            }
+        }
+        inner.rehash();
+        Some(item)
+    }
+}
+
+/// Iterate over a [`DirectiveList`] in a consistent sort order.
+pub(crate) struct DirectiveIterSorted<'a> { + directives: &'a [Node], + inner: std::slice::Iter<'a, usize>, +} +impl<'a> Iterator for DirectiveIterSorted<'a> { + type Item = &'a Node; + + fn next(&mut self) -> Option { + self.inner.next().map(|index| &self.directives[*index]) + } +} + +impl ExactSizeIterator for DirectiveIterSorted<'_> { + fn len(&self) -> usize { + self.inner.len() + } +} + +impl DirectiveIterSorted<'_> { + fn empty() -> Self { + Self { + directives: &[], + inner: [].iter(), + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + + use super::*; + + #[test] + fn consistent_hash() { + let mut set = HashSet::new(); + + assert!(set.insert(DirectiveList::new())); + assert!(!set.insert(DirectiveList::new())); + + assert!(set.insert(DirectiveList::parse("@a @b"))); + assert!(!set.insert(DirectiveList::parse("@b @a"))); + } + + #[test] + fn order_independent_equality() { + assert_eq!(DirectiveList::new(), DirectiveList::new()); + assert_eq!( + DirectiveList::parse("@a @b"), + DirectiveList::parse("@b @a"), + "equality should be order independent" + ); + + assert_eq!( + DirectiveList::parse("@a(arg1: true, arg2: false) @b(arg2: false, arg1: true)"), + DirectiveList::parse("@b(arg1: true, arg2: false) @a(arg1: true, arg2: false)"), + "arguments should be order independent" + ); + + assert_eq!( + DirectiveList::parse("@nested(object: { a: 1, b: 2, c: 3 })"), + DirectiveList::parse("@nested(object: { b: 2, c: 3, a: 1 })"), + "input objects should be order independent" + ); + + assert_eq!( + DirectiveList::parse("@nested(object: [true, { a: 1, b: 2, c: { a: 3 } }])"), + DirectiveList::parse("@nested(object: [true, { b: 2, c: { a: 3 }, a: 1 }])"), + "input objects should be order independent" + ); + } +} diff --git a/apollo-federation/src/operation/merging.rs b/apollo-federation/src/operation/merging.rs new file mode 100644 index 0000000000..4c2b31cbd3 --- /dev/null +++ b/apollo-federation/src/operation/merging.rs @@ -0,0 +1,398 @@ +//! Provides methods for recursively merging selections and selection sets. +use std::sync::Arc; + +use apollo_compiler::collections::IndexMap; + +use super::selection_map; +use super::FieldSelection; +use super::FieldSelectionValue; +use super::FragmentSpreadSelection; +use super::FragmentSpreadSelectionValue; +use super::HasSelectionKey as _; +use super::InlineFragmentSelection; +use super::InlineFragmentSelectionValue; +use super::NamedFragments; +use super::Selection; +use super::SelectionSet; +use super::SelectionValue; +use crate::error::FederationError; + +impl<'a> FieldSelectionValue<'a> { + /// Merges the given field selections into this one. + /// + /// # Preconditions + /// All selections must have the same selection key (alias + directives). Otherwise + /// this function produces invalid output. + /// + /// # Errors + /// Returns an error if: + /// - The parent type or schema of any selection does not match `self`'s. + /// - Any selection does not select the same field position as `self`. 
+ fn merge_into<'op>( + &mut self, + others: impl Iterator, + ) -> Result<(), FederationError> { + let self_field = &self.get().field; + let mut selection_sets = vec![]; + for other in others { + let other_field = &other.field; + if other_field.schema != self_field.schema { + return Err(FederationError::internal( + "Cannot merge field selections from different schemas", + )); + } + if other_field.field_position != self_field.field_position { + return Err(FederationError::internal(format!( + "Cannot merge field selection for field \"{}\" into a field selection for field \"{}\"", + other_field.field_position, + self_field.field_position, + ))); + } + if self.get().selection_set.is_some() { + let Some(other_selection_set) = &other.selection_set else { + return Err(FederationError::internal(format!( + "Field \"{}\" has composite type but not a selection set", + other_field.field_position, + ))); + }; + selection_sets.push(other_selection_set); + } else if other.selection_set.is_some() { + return Err(FederationError::internal(format!( + "Field \"{}\" has non-composite type but also has a selection set", + other_field.field_position, + ))); + } + } + if let Some(self_selection_set) = self.get_selection_set_mut() { + self_selection_set.merge_into(selection_sets.into_iter())?; + } + Ok(()) + } +} + +impl<'a> InlineFragmentSelectionValue<'a> { + /// Merges the given normalized inline fragment selections into this one. + /// + /// # Preconditions + /// All selections must have the same selection key (directives). Otherwise this function + /// produces invalid output. + /// + /// # Errors + /// Returns an error if the parent type or schema of any selection does not match `self`'s. + fn merge_into<'op>( + &mut self, + others: impl Iterator, + ) -> Result<(), FederationError> { + let self_inline_fragment = &self.get().inline_fragment; + let mut selection_sets = vec![]; + for other in others { + let other_inline_fragment = &other.inline_fragment; + if other_inline_fragment.schema != self_inline_fragment.schema { + return Err(FederationError::internal( + "Cannot merge inline fragment from different schemas", + )); + } + if other_inline_fragment.parent_type_position + != self_inline_fragment.parent_type_position + { + return Err(FederationError::internal( + format!( + "Cannot merge inline fragment of parent type \"{}\" into an inline fragment of parent type \"{}\"", + other_inline_fragment.parent_type_position, + self_inline_fragment.parent_type_position, + ), + )); + } + selection_sets.push(&other.selection_set); + } + self.get_selection_set_mut() + .merge_into(selection_sets.into_iter())?; + Ok(()) + } +} + +impl<'a> FragmentSpreadSelectionValue<'a> { + /// Merges the given normalized fragment spread selections into this one. + /// + /// # Preconditions + /// All selections must have the same selection key (fragment name + directives). + /// Otherwise this function produces invalid output. + /// + /// # Errors + /// Returns an error if the parent type or schema of any selection does not match `self`'s. + fn merge_into<'op>( + &mut self, + others: impl Iterator, + ) -> Result<(), FederationError> { + let self_fragment_spread = &self.get().spread; + for other in others { + let other_fragment_spread = &other.spread; + if other_fragment_spread.schema != self_fragment_spread.schema { + return Err(FederationError::internal( + "Cannot merge fragment spread from different schemas", + )); + } + // Nothing to do since the fragment spread is already part of the selection set. 
+ // Fragment spreads are uniquely identified by fragment name and applied directives. + // Since there is already an entry for the same fragment spread, there is no point + // in attempting to merge its sub-selections, as the underlying entry should be + // exactly the same as the currently processed one. + } + Ok(()) + } +} + +impl SelectionSet { + /// NOTE: This is a private API and should be used with care, use `add_selection_set` instead. + /// + /// Merges the given normalized selection sets into this one. + /// + /// # Errors + /// Returns an error if the parent type or schema of any selection does not match `self`'s. + /// + /// Returns an error if any selection contains invalid GraphQL that prevents the merge. + fn merge_into<'op>( + &mut self, + others: impl Iterator, + ) -> Result<(), FederationError> { + let mut selections_to_merge = vec![]; + for other in others { + if other.schema != self.schema { + return Err(FederationError::internal( + "Cannot merge selection sets from different schemas", + )); + } + if other.type_position != self.type_position { + return Err(FederationError::internal( + format!( + "Cannot merge selection set for type \"{}\" into a selection set for type \"{}\"", + other.type_position, + self.type_position, + ), + )); + } + selections_to_merge.extend(other.selections.values()); + } + self.merge_selections_into(selections_to_merge.into_iter()) + } + + /// NOTE: This is a private API and should be used with care, use `add_selection` instead. + /// + /// A helper function for merging the given selections into this one. + /// + /// # Errors + /// Returns an error if the parent type or schema of any selection does not match `self`'s. + /// + /// Returns an error if any selection contains invalid GraphQL that prevents the merge. 
+ pub(super) fn merge_selections_into<'op>( + &mut self, + others: impl Iterator, + ) -> Result<(), FederationError> { + let mut fields = IndexMap::default(); + let mut fragment_spreads = IndexMap::default(); + let mut inline_fragments = IndexMap::default(); + let target = Arc::make_mut(&mut self.selections); + for other_selection in others { + let other_key = other_selection.key(); + match target.entry(other_key.clone()) { + selection_map::Entry::Occupied(existing) => match existing.get() { + Selection::Field(self_field_selection) => { + let Selection::Field(other_field_selection) = other_selection else { + return Err(FederationError::internal( + format!( + "Field selection key for field \"{}\" references non-field selection", + self_field_selection.field.field_position, + ), + )); + }; + fields + .entry(other_key) + .or_insert_with(Vec::new) + .push(other_field_selection); + } + Selection::FragmentSpread(self_fragment_spread_selection) => { + let Selection::FragmentSpread(other_fragment_spread_selection) = + other_selection + else { + return Err(FederationError::internal( + format!( + "Fragment spread selection key for fragment \"{}\" references non-field selection", + self_fragment_spread_selection.spread.fragment_name, + ), + )); + }; + fragment_spreads + .entry(other_key) + .or_insert_with(Vec::new) + .push(other_fragment_spread_selection); + } + Selection::InlineFragment(self_inline_fragment_selection) => { + let Selection::InlineFragment(other_inline_fragment_selection) = + other_selection + else { + return Err(FederationError::internal( + format!( + "Inline fragment selection key under parent type \"{}\" {}references non-field selection", + self_inline_fragment_selection.inline_fragment.parent_type_position, + self_inline_fragment_selection.inline_fragment.type_condition_position.clone() + .map_or_else( + String::new, + |cond| format!("(type condition: {}) ", cond), + ), + ), + )); + }; + inline_fragments + .entry(other_key) + .or_insert_with(Vec::new) + .push(other_inline_fragment_selection); + } + }, + selection_map::Entry::Vacant(vacant) => { + vacant.insert(other_selection.clone())?; + } + } + } + + for (key, self_selection) in target.iter_mut() { + match self_selection { + SelectionValue::Field(mut self_field_selection) => { + if let Some(other_field_selections) = fields.shift_remove(key) { + self_field_selection.merge_into( + other_field_selections.iter().map(|selection| &***selection), + )?; + } + } + SelectionValue::FragmentSpread(mut self_fragment_spread_selection) => { + if let Some(other_fragment_spread_selections) = + fragment_spreads.shift_remove(key) + { + self_fragment_spread_selection.merge_into( + other_fragment_spread_selections + .iter() + .map(|selection| &***selection), + )?; + } + } + SelectionValue::InlineFragment(mut self_inline_fragment_selection) => { + if let Some(other_inline_fragment_selections) = + inline_fragments.shift_remove(key) + { + self_inline_fragment_selection.merge_into( + other_inline_fragment_selections + .iter() + .map(|selection| &***selection), + )?; + } + } + } + } + + Ok(()) + } + + /// Inserts a `Selection` into the inner map. Should a selection with the same key already + /// exist in the map, the existing selection and the given selection are merged, replacing the + /// + /// existing selection while keeping the same insertion index. + /// + /// # Preconditions + /// The provided selection must have the same schema and type position as `self`. Rebase your + /// selection first if it may not meet that precondition. 
+ /// + /// # Errors + /// Returns an error if either `self` or the selection contain invalid GraphQL that prevents the merge. + pub(crate) fn add_local_selection( + &mut self, + selection: &Selection, + ) -> Result<(), FederationError> { + debug_assert_eq!( + &self.schema, + selection.schema(), + "In order to add selection it needs to point to the same schema" + ); + self.merge_selections_into(std::iter::once(selection)) + } + + /// Inserts a `SelectionSet` into the inner map. Should any sub selection with the same key already + /// exist in the map, the existing selection and the given selection are merged, replacing the + /// existing selection while keeping the same insertion index. + /// + /// # Preconditions + /// The provided selection set must have the same schema and type position as `self`. Use + /// [`SelectionSet::add_selection_set`] if your selection set may not meet that precondition. + /// + /// # Errors + /// Returns an error if either selection set contains invalid GraphQL that prevents the merge. + pub(crate) fn add_local_selection_set( + &mut self, + selection_set: &SelectionSet, + ) -> Result<(), FederationError> { + debug_assert_eq!( + self.schema, selection_set.schema, + "In order to add selection set it needs to point to the same schema." + ); + debug_assert_eq!( + self.type_position, selection_set.type_position, + "In order to add selection set it needs to point to the same type position" + ); + self.merge_into(std::iter::once(selection_set)) + } + + /// Rebase given `SelectionSet` on self and then inserts it into the inner map. Assumes that given + /// selection set does not reference ANY named fragments. If it does, Use `add_selection_set_with_fragments` + /// instead. + /// + /// Should any sub selection with the same key already exist in the map, the existing selection + /// and the given selection are merged, replacing the existing selection while keeping the same + /// insertion index. + /// + /// # Errors + /// Returns an error if either selection set contains invalid GraphQL that prevents the merge. + pub(crate) fn add_selection_set( + &mut self, + selection_set: &SelectionSet, + ) -> Result<(), FederationError> { + self.add_selection_set_with_fragments(selection_set, &Default::default()) + } + + /// Rebase given `SelectionSet` on self with the specified fragments and then inserts it into the + /// inner map. + /// + /// Should any sub selection with the same key already exist in the map, the existing selection + /// and the given selection are merged, replacing the existing selection while keeping the same + /// insertion index. + /// + /// # Errors + /// Returns an error if either selection set contains invalid GraphQL that prevents the merge. + pub(crate) fn add_selection_set_with_fragments( + &mut self, + selection_set: &SelectionSet, + named_fragments: &NamedFragments, + ) -> Result<(), FederationError> { + let rebased = + selection_set.rebase_on(&self.type_position, named_fragments, &self.schema)?; + self.add_local_selection_set(&rebased) + } +} + +/// # Preconditions +/// There must be at least one selection set. +/// The selection sets must all have the same schema and type position. +/// +/// # Errors +/// Returns an error if any selection set contains invalid GraphQL that prevents the merge. 
+pub(crate) fn merge_selection_sets(
+    mut selection_sets: Vec<SelectionSet>,
+) -> Result<SelectionSet, FederationError> {
+    let Some((first, remainder)) = selection_sets.split_first_mut() else {
+        return Err(FederationError::internal(
+            "merge_selection_sets(): must have at least one selection set",
+        ));
+    };
+    first.merge_into(remainder.iter())?;
+
+    // Take ownership of the first element and discard the rest;
+    // we can unwrap because `split_first_mut()` guarantees at least one element will be yielded
+    Ok(selection_sets.into_iter().next().unwrap())
+}
diff --git a/apollo-federation/src/operation/mod.rs b/apollo-federation/src/operation/mod.rs
index 468d1e88d4..71b4aece8b 100644
--- a/apollo-federation/src/operation/mod.rs
+++ b/apollo-federation/src/operation/mod.rs
@@ -13,15 +13,12 @@
 //! [`Field`], and the selection type is [`FieldSelection`].
 
 use std::borrow::Cow;
-use std::collections::HashMap;
-use std::collections::HashSet;
 use std::fmt::Display;
 use std::fmt::Formatter;
 use std::hash::Hash;
 use std::ops::Deref;
 use std::sync::atomic;
 use std::sync::Arc;
-use std::sync::OnceLock;
 
 use apollo_compiler::collections::IndexMap;
 use apollo_compiler::collections::IndexSet;
@@ -32,6 +29,7 @@ use apollo_compiler::Name;
 use apollo_compiler::Node;
 use serde::Serialize;
 
+use crate::compat::coerce_executable_values;
 use crate::error::FederationError;
 use crate::error::SingleFederationError;
 use crate::error::SingleFederationError::Internal;
@@ -49,6 +47,8 @@ use crate::schema::position::SchemaRootDefinitionKind;
 use crate::schema::ValidFederationSchema;
 
 mod contains;
+mod directive_list;
+mod merging;
 mod optimize;
 mod rebase;
 mod simplify;
@@ -56,6 +56,8 @@ mod simplify;
 mod tests;
 
 pub(crate) use contains::*;
+pub(crate) use directive_list::DirectiveList;
+pub(crate) use merging::*;
 pub(crate) use rebase::*;
 
 pub(crate) const TYPENAME_FIELD: Name = name!("__typename");
@@ -67,7 +69,10 @@ static NEXT_ID: atomic::AtomicUsize = atomic::AtomicUsize::new(1);
 ///
 /// Note that we shouldn't add `derive(Serialize, Deserialize)` to this without changing the types
 /// to be something like UUIDs.
-#[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize)]
+#[derive(Clone, Debug, Eq, PartialEq, Hash)]
+// NOTE(@TylerBloom): This feature gate can be removed once the condition in the comment above is
+// met. Note that there are `serde(skip)` statements that should be removed once this is removed.
+#[cfg_attr(feature = "snapshot_tracing", derive(Serialize))]
 pub(crate) struct SelectionId(usize);
 
 impl SelectionId {
@@ -77,6 +82,101 @@ impl SelectionId {
     }
 }
 
+/// A list of arguments to a field or directive.
+///
+/// All arguments and input object values are sorted in a consistent order.
+///
+/// This type is immutable and cheaply cloneable.
+#[derive(Clone, PartialEq, Eq, Default)]
+pub(crate) struct ArgumentList {
+    /// The inner list *must* be sorted with `sort_arguments`.
+    inner: Option<Arc<[Node<executable::Argument>]>>,
+}
+
+impl std::fmt::Debug for ArgumentList {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        // Print the slice representation.
+        self.deref().fmt(f)
+    }
+}
+
+/// Sort an input value, which means specifically sorting their object values by keys (assuming no
+/// duplicates).
+///
+/// After sorting, hashing and plain-Rust equality have the expected result for values that are
+/// spec-equivalent.
+fn sort_value(value: &mut executable::Value) {
+    use apollo_compiler::executable::Value;
+    match value {
+        Value::List(elems) => {
+            elems
+                .iter_mut()
+                .for_each(|value| sort_value(value.make_mut()));
+        }
+        Value::Object(pairs) => {
+            pairs
+                .iter_mut()
+                .for_each(|(_, value)| sort_value(value.make_mut()));
+            pairs.sort_by(|left, right| left.0.cmp(&right.0));
+        }
+        _ => {}
+    }
+}
+
+/// Sort arguments, which means specifically sorting arguments by names and object values by keys
+/// (assuming no duplicates).
+///
+/// After sorting, hashing and plain-Rust equality have the expected result for lists that are
+/// spec-equivalent.
+fn sort_arguments(arguments: &mut [Node<executable::Argument>]) {
+    arguments
+        .iter_mut()
+        .for_each(|arg| sort_value(arg.make_mut().value.make_mut()));
+    arguments.sort_by(|left, right| left.name.cmp(&right.name));
+}
+
+impl From<Vec<Node<executable::Argument>>> for ArgumentList {
+    fn from(mut arguments: Vec<Node<executable::Argument>>) -> Self {
+        if arguments.is_empty() {
+            return Self::new();
+        }
+
+        sort_arguments(&mut arguments);
+
+        Self {
+            inner: Some(Arc::from(arguments)),
+        }
+    }
+}
+
+impl FromIterator<Node<executable::Argument>> for ArgumentList {
+    fn from_iter<T: IntoIterator<Item = Node<executable::Argument>>>(iter: T) -> Self {
+        Self::from(Vec::from_iter(iter))
+    }
+}
+
+impl Deref for ArgumentList {
+    type Target = [Node<executable::Argument>];
+
+    fn deref(&self) -> &Self::Target {
+        self.inner.as_deref().unwrap_or_default()
+    }
+}
+
+impl ArgumentList {
+    /// Create an empty argument list.
+    pub(crate) const fn new() -> Self {
+        Self { inner: None }
+    }
+
+    /// Create an argument list with a single argument.
+    ///
+    /// This sorts any input object values provided to the argument.
+    pub(crate) fn one(argument: impl Into<Node<executable::Argument>>) -> Self {
+        Self::from(vec![argument.into()])
+    }
+}
+
 /// An analogue of the apollo-compiler type `Operation` with these changes:
 /// - Stores the schema that the operation is queried against.
 /// - Swaps `operation_type` with `root_kind` (using the analogous apollo-federation type).
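The `sort_value`/`sort_arguments` pair is what lets `ArgumentList` (and `DirectiveList` earlier in this PR) rely on plain derived `Eq` and `Hash` while remaining order-independent: normalize once at construction, then compare structurally. A self-contained sketch of that normalize-then-compare pattern on a toy value type (not the crate's types):

```rust
use std::collections::HashSet;

// Toy stand-in for GraphQL input values; the crate applies the same idea to
// apollo-compiler's `Value` via `sort_value` and `sort_arguments`.
#[derive(Clone, PartialEq, Eq, Hash)]
enum Value {
    Int(i64),
    Object(Vec<(String, Value)>),
}

/// Recursively sort object fields by key so spec-equivalent values become
/// structurally identical, making derived `Eq`/`Hash` order-independent.
fn sort_value(value: &mut Value) {
    if let Value::Object(pairs) = value {
        pairs.iter_mut().for_each(|(_, v)| sort_value(v));
        pairs.sort_by(|l, r| l.0.cmp(&r.0));
    }
}

fn normalized(mut v: Value) -> Value {
    sort_value(&mut v);
    v
}

fn main() {
    let a = normalized(Value::Object(vec![
        ("a".into(), Value::Int(1)),
        ("b".into(), Value::Int(2)),
    ]));
    let b = normalized(Value::Object(vec![
        ("b".into(), Value::Int(2)),
        ("a".into(), Value::Int(1)),
    ]));
    assert_eq!(a, b);

    let mut set = HashSet::new();
    set.insert(a);
    // The differently-ordered-but-equivalent value is deduplicated.
    assert!(!set.insert(b));
}
```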
@@ -89,7 +189,7 @@ pub struct Operation { pub(crate) root_kind: SchemaRootDefinitionKind, pub(crate) name: Option, pub(crate) variables: Arc>>, - pub(crate) directives: Arc, + pub(crate) directives: DirectiveList, pub(crate) selection_set: SelectionSet, pub(crate) named_fragments: NamedFragments, } @@ -97,7 +197,7 @@ pub struct Operation { pub(crate) struct NormalizedDefer { pub operation: Operation, pub has_defers: bool, - pub assigned_defer_labels: HashSet, + pub assigned_defer_labels: IndexSet, pub defer_conditions: IndexMap>, } @@ -134,7 +234,7 @@ impl Operation { root_kind: operation.operation_type.into(), name: operation.name.clone(), variables: Arc::new(operation.variables.clone()), - directives: Arc::new(operation.directives.clone()), + directives: operation.directives.clone().into(), selection_set, named_fragments, }) @@ -146,7 +246,7 @@ impl Operation { NormalizedDefer { operation: self, has_defers: false, - assigned_defer_labels: HashSet::new(), + assigned_defer_labels: IndexSet::default(), defer_conditions: IndexMap::default(), } // TODO(@TylerBloom): Once defer is implement, the above statement needs to be replaced @@ -158,7 +258,7 @@ impl Operation { NormalizedDefer { operation: self, has_defers: false, - assigned_defer_labels: HashSet::new(), + assigned_defer_labels: IndexSet::default(), defer_conditions: IndexMap::default(), } } @@ -211,7 +311,6 @@ mod selection_map { use std::sync::Arc; use apollo_compiler::collections::IndexMap; - use apollo_compiler::executable; use serde::Serialize; use crate::error::FederationError; @@ -219,6 +318,7 @@ mod selection_map { use crate::operation::field_selection::FieldSelection; use crate::operation::fragment_spread_selection::FragmentSpreadSelection; use crate::operation::inline_fragment_selection::InlineFragmentSelection; + use crate::operation::DirectiveList; use crate::operation::HasSelectionKey; use crate::operation::Selection; use crate::operation::SelectionKey; @@ -430,7 +530,7 @@ mod selection_map { } } - pub(super) fn get_directives_mut(&mut self) -> &mut Arc { + pub(super) fn get_directives_mut(&mut self) -> &mut DirectiveList { match self { Self::Field(field) => field.get_directives_mut(), Self::FragmentSpread(spread) => spread.get_directives_mut(), @@ -463,7 +563,7 @@ mod selection_map { Arc::make_mut(self.0).field.sibling_typename_mut() } - pub(super) fn get_directives_mut(&mut self) -> &mut Arc { + pub(super) fn get_directives_mut(&mut self) -> &mut DirectiveList { Arc::make_mut(self.0).field.directives_mut() } @@ -480,7 +580,7 @@ mod selection_map { Self(fragment_spread_selection) } - pub(super) fn get_directives_mut(&mut self) -> &mut Arc { + pub(super) fn get_directives_mut(&mut self) -> &mut DirectiveList { Arc::make_mut(self.0).spread.directives_mut() } @@ -505,7 +605,7 @@ mod selection_map { self.0 } - pub(super) fn get_directives_mut(&mut self) -> &mut Arc { + pub(super) fn get_directives_mut(&mut self) -> &mut DirectiveList { Arc::make_mut(self.0).inline_fragment.directives_mut() } @@ -613,24 +713,25 @@ pub(crate) enum SelectionKey { response_name: Name, /// directives applied on the field #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] - directives: Arc, + directives: DirectiveList, }, FragmentSpread { /// The name of the fragment. fragment_name: Name, /// Directives applied on the fragment spread (does not contain @defer). 
#[serde(serialize_with = "crate::display_helpers::serialize_as_string")] - directives: Arc, + directives: DirectiveList, }, InlineFragment { /// The optional type condition of the fragment. type_condition: Option, /// Directives applied on the fragment spread (does not contain @defer). #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] - directives: Arc, + directives: DirectiveList, }, Defer { /// Unique selection ID used to distinguish deferred fragment spreads that cannot be merged. + #[cfg_attr(not(feature = "snapshot_tracing"), serde(skip))] deferred_id: SelectionId, }, } @@ -741,7 +842,7 @@ impl Selection { } } - fn directives(&self) -> &Arc { + fn directives(&self) -> &DirectiveList { match self { Selection::Field(field_selection) => &field_selection.field.directives, Selection::FragmentSpread(fragment_spread_selection) => { @@ -835,25 +936,6 @@ impl Selection { } } - fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { - match self { - Selection::Field(field_selection) => { - if let Some(s) = field_selection.selection_set.clone() { - s.collect_used_fragment_names(aggregator) - } - } - Selection::InlineFragment(inline) => { - inline.selection_set.collect_used_fragment_names(aggregator); - } - Selection::FragmentSpread(fragment) => { - let current_count = aggregator - .entry(fragment.spread.fragment_name.clone()) - .or_default(); - *current_count += 1; - } - } - } - pub(crate) fn with_updated_selection_set( &self, selection_set: Option, @@ -890,7 +972,7 @@ impl Selection { pub(crate) fn with_updated_directives( &self, - directives: executable::DirectiveList, + directives: impl Into, ) -> Result { match self { Selection::Field(field) => Ok(Selection::Field(Arc::new( @@ -936,22 +1018,6 @@ impl Selection { } } } - - pub(crate) fn for_each_element( - &self, - parent_type_position: CompositeTypeDefinitionPosition, - callback: &mut impl FnMut(OpPathElement) -> Result<(), FederationError>, - ) -> Result<(), FederationError> { - match self { - Selection::Field(field_selection) => field_selection.for_each_element(callback), - Selection::InlineFragment(inline_fragment_selection) => { - inline_fragment_selection.for_each_element(callback) - } - Selection::FragmentSpread(fragment_spread_selection) => { - fragment_spread_selection.for_each_element(parent_type_position, callback) - } - } - } } impl From for Selection { @@ -997,7 +1063,7 @@ pub(crate) struct Fragment { pub(crate) schema: ValidFederationSchema, pub(crate) name: Name, pub(crate) type_condition_position: CompositeTypeDefinitionPosition, - pub(crate) directives: Arc, + pub(crate) directives: DirectiveList, pub(crate) selection_set: SelectionSet, } @@ -1013,7 +1079,7 @@ impl Fragment { type_condition_position: schema .get_type(fragment.type_condition().clone())? 
.try_into()?, - directives: Arc::new(fragment.directives.clone()), + directives: fragment.directives.clone().into(), selection_set: SelectionSet::from_selection_set( &fragment.selection_set, named_fragments, @@ -1022,18 +1088,6 @@ impl Fragment { }) } - // PORT NOTE: in JS code this is stored on the fragment - pub(crate) fn fragment_usages(&self) -> HashMap { - let mut usages = HashMap::new(); - self.selection_set.collect_used_fragment_names(&mut usages); - usages - } - - // PORT NOTE: in JS code this is stored on the fragment - pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { - self.selection_set.collect_used_fragment_names(aggregator) - } - fn has_defer(&self) -> bool { self.selection_set.has_defer() } @@ -1043,17 +1097,14 @@ mod field_selection { use std::hash::Hash; use std::hash::Hasher; use std::ops::Deref; - use std::sync::Arc; use apollo_compiler::ast; - use apollo_compiler::executable; use apollo_compiler::Name; - use apollo_compiler::Node; use serde::Serialize; use crate::error::FederationError; - use crate::operation::sort_arguments; - use crate::operation::sort_directives; + use crate::operation::ArgumentList; + use crate::operation::DirectiveList; use crate::operation::HasSelectionKey; use crate::operation::SelectionKey; use crate::operation::SelectionSet; @@ -1096,10 +1147,7 @@ mod field_selection { } } - pub(crate) fn with_updated_directives( - &self, - directives: executable::DirectiveList, - ) -> Self { + pub(crate) fn with_updated_directives(&self, directives: impl Into) -> Self { Self { field: self.field.with_updated_directives(directives), selection_set: self.selection_set.clone(), @@ -1123,8 +1171,6 @@ mod field_selection { pub(crate) struct Field { data: FieldData, key: SelectionKey, - #[serde(serialize_with = "crate::display_helpers::serialize_as_debug_string")] - sorted_arguments: Arc>>, } impl std::fmt::Debug for Field { @@ -1137,7 +1183,7 @@ mod field_selection { fn eq(&self, other: &Self) -> bool { self.data.field_position.field_name() == other.data.field_position.field_name() && self.key == other.key - && self.sorted_arguments == other.sorted_arguments + && self.data.arguments == other.data.arguments } } @@ -1147,7 +1193,7 @@ mod field_selection { fn hash(&self, state: &mut H) { self.data.field_position.field_name().hash(state); self.key.hash(state); - self.sorted_arguments.hash(state); + self.data.arguments.hash(state); } } @@ -1161,11 +1207,8 @@ mod field_selection { impl Field { pub(crate) fn new(data: FieldData) -> Self { - let mut arguments = data.arguments.as_ref().clone(); - sort_arguments(&mut arguments); Self { key: data.key(), - sorted_arguments: Arc::new(arguments), data, } } @@ -1245,7 +1288,7 @@ mod field_selection { &self.data } - pub(super) fn directives_mut(&mut self) -> &mut Arc { + pub(super) fn directives_mut(&mut self) -> &mut DirectiveList { &mut self.data.directives } @@ -1259,10 +1302,10 @@ mod field_selection { pub(crate) fn with_updated_directives( &self, - directives: executable::DirectiveList, + directives: impl Into, ) -> Field { let mut data = self.data.clone(); - data.directives = Arc::new(directives); + data.directives = directives.into(); Self::new(data) } @@ -1302,9 +1345,9 @@ mod field_selection { pub(crate) field_position: FieldDefinitionPosition, pub(crate) alias: Option, #[serde(serialize_with = "crate::display_helpers::serialize_as_debug_string")] - pub(crate) arguments: Arc>>, + pub(crate) arguments: ArgumentList, #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] - 
pub(crate) directives: Arc, + pub(crate) directives: DirectiveList, pub(crate) sibling_typename: Option, } @@ -1353,11 +1396,9 @@ mod field_selection { impl HasSelectionKey for FieldData { fn key(&self) -> SelectionKey { - let mut directives = self.directives.as_ref().clone(); - sort_directives(&mut directives); SelectionKey::Field { response_name: self.response_name(), - directives: Arc::new(directives), + directives: self.directives.clone(), } } } @@ -1370,14 +1411,12 @@ pub(crate) use field_selection::SiblingTypename; mod fragment_spread_selection { use std::ops::Deref; - use std::sync::Arc; - use apollo_compiler::executable; use apollo_compiler::Name; use serde::Serialize; use crate::operation::is_deferred_selection; - use crate::operation::sort_directives; + use crate::operation::DirectiveList; use crate::operation::HasSelectionKey; use crate::operation::SelectionId; use crate::operation::SelectionKey; @@ -1440,7 +1479,7 @@ mod fragment_spread_selection { &self.data } - pub(super) fn directives_mut(&mut self) -> &mut Arc { + pub(super) fn directives_mut(&mut self) -> &mut DirectiveList { &mut self.data.directives } } @@ -1459,14 +1498,15 @@ mod fragment_spread_selection { pub(crate) type_condition_position: CompositeTypeDefinitionPosition, // directives applied on the fragment spread selection #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] - pub(crate) directives: Arc, + pub(crate) directives: DirectiveList, // directives applied within the fragment definition // // PORT_NOTE: The JS codebase combined the fragment spread's directives with the fragment // definition's directives. This was invalid GraphQL as those directives may not be applicable // on different locations. While we now keep track of those references, they are currently ignored. 
#[serde(serialize_with = "crate::display_helpers::serialize_as_string")] - pub(crate) fragment_directives: Arc, + pub(crate) fragment_directives: DirectiveList, + #[cfg_attr(not(feature = "snapshot_tracing"), serde(skip))] pub(crate) selection_id: SelectionId, } @@ -1477,11 +1517,9 @@ mod fragment_spread_selection { deferred_id: self.selection_id.clone(), } } else { - let mut directives = self.directives.as_ref().clone(); - sort_directives(&mut directives); SelectionKey::FragmentSpread { fragment_name: self.fragment_name.clone(), - directives: Arc::new(directives), + directives: self.directives.clone(), } } } @@ -1568,22 +1606,6 @@ impl FragmentSpreadSelection { } self.selection_set.any_element(predicate) } - - pub(crate) fn for_each_element( - &self, - parent_type_position: CompositeTypeDefinitionPosition, - callback: &mut impl FnMut(OpPathElement) -> Result<(), FederationError>, - ) -> Result<(), FederationError> { - let inline_fragment = InlineFragment::new(InlineFragmentData { - schema: self.spread.schema.clone(), - parent_type_position, - type_condition_position: Some(self.spread.type_condition_position.clone()), - directives: self.spread.directives.clone(), - selection_id: self.spread.selection_id.clone(), - }); - callback(inline_fragment.into())?; - self.selection_set.for_each_element(callback) - } } impl FragmentSpreadData { @@ -1595,7 +1617,7 @@ impl FragmentSpreadData { schema: fragment.schema.clone(), fragment_name: fragment.name.clone(), type_condition_position: fragment.type_condition_position.clone(), - directives: Arc::new(spread_directives.clone()), + directives: spread_directives.clone().into(), fragment_directives: fragment.directives.clone(), selection_id: SelectionId::new(), } @@ -1606,16 +1628,14 @@ mod inline_fragment_selection { use std::hash::Hash; use std::hash::Hasher; use std::ops::Deref; - use std::sync::Arc; - use apollo_compiler::executable; use serde::Serialize; use crate::error::FederationError; use crate::link::graphql_definition::defer_directive_arguments; use crate::link::graphql_definition::DeferDirectiveArguments; use crate::operation::is_deferred_selection; - use crate::operation::sort_directives; + use crate::operation::DirectiveList; use crate::operation::HasSelectionKey; use crate::operation::SelectionId; use crate::operation::SelectionKey; @@ -1646,10 +1666,7 @@ mod inline_fragment_selection { } } - pub(crate) fn with_updated_directives( - &self, - directives: executable::DirectiveList, - ) -> Self { + pub(crate) fn with_updated_directives(&self, directives: impl Into) -> Self { Self { inline_fragment: self.inline_fragment.with_updated_directives(directives), selection_set: self.selection_set.clone(), @@ -1715,7 +1732,7 @@ mod inline_fragment_selection { &self.data } - pub(super) fn directives_mut(&mut self) -> &mut Arc { + pub(super) fn directives_mut(&mut self) -> &mut DirectiveList { &mut self.data.directives } @@ -1729,10 +1746,10 @@ mod inline_fragment_selection { } pub(crate) fn with_updated_directives( &self, - directives: executable::DirectiveList, + directives: impl Into, ) -> InlineFragment { let mut data = self.data().clone(); - data.directives = Arc::new(directives); + data.directives = directives.into(); Self::new(data) } @@ -1758,7 +1775,8 @@ mod inline_fragment_selection { pub(crate) parent_type_position: CompositeTypeDefinitionPosition, pub(crate) type_condition_position: Option, #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] - pub(crate) directives: Arc, + pub(crate) directives: DirectiveList, + 
#[cfg_attr(not(feature = "snapshot_tracing"), serde(skip))] pub(crate) selection_id: SelectionId, } @@ -1787,14 +1805,12 @@ mod inline_fragment_selection { deferred_id: self.selection_id.clone(), } } else { - let mut directives = self.directives.as_ref().clone(); - sort_directives(&mut directives); SelectionKey::InlineFragment { type_condition: self .type_condition_position .as_ref() .map(|pos| pos.type_name().clone()), - directives: Arc::new(directives), + directives: self.directives.clone(), } } } @@ -2051,7 +2067,7 @@ impl SelectionSet { // if we don't expand fragments, we need to normalize it let normalized_fragment_spread = FragmentSpreadSelection::from_fragment_spread( fragment_spread_selection, - &fragment, + fragment, )?; destination.push(Selection::FragmentSpread(Arc::new( normalized_fragment_spread, @@ -2099,144 +2115,6 @@ impl SelectionSet { Ok(()) } - /// NOTE: This is a private API and should be used with care, use `add_selection_set` instead. - /// - /// Merges the given normalized selection sets into this one. - fn merge_into<'op>( - &mut self, - others: impl Iterator, - ) -> Result<(), FederationError> { - let mut selections_to_merge = vec![]; - for other in others { - if other.schema != self.schema { - return Err(FederationError::internal( - "Cannot merge selection sets from different schemas", - )); - } - if other.type_position != self.type_position { - return Err(FederationError::internal( - format!( - "Cannot merge selection set for type \"{}\" into a selection set for type \"{}\"", - other.type_position, - self.type_position, - ), - )); - } - selections_to_merge.extend(other.selections.values()); - } - self.merge_selections_into(selections_to_merge.into_iter()) - } - - /// NOTE: This is a private API and should be used with care, use `add_selection` instead. - /// - /// A helper function for merging the given selections into this one. 
- fn merge_selections_into<'op>( - &mut self, - others: impl Iterator, - ) -> Result<(), FederationError> { - let mut fields = IndexMap::default(); - let mut fragment_spreads = IndexMap::default(); - let mut inline_fragments = IndexMap::default(); - let target = Arc::make_mut(&mut self.selections); - for other_selection in others { - let other_key = other_selection.key(); - match target.entry(other_key.clone()) { - selection_map::Entry::Occupied(existing) => match existing.get() { - Selection::Field(self_field_selection) => { - let Selection::Field(other_field_selection) = other_selection else { - return Err(Internal { - message: format!( - "Field selection key for field \"{}\" references non-field selection", - self_field_selection.field.field_position, - ), - }.into()); - }; - fields - .entry(other_key) - .or_insert_with(Vec::new) - .push(other_field_selection); - } - Selection::FragmentSpread(self_fragment_spread_selection) => { - let Selection::FragmentSpread(other_fragment_spread_selection) = - other_selection - else { - return Err(Internal { - message: format!( - "Fragment spread selection key for fragment \"{}\" references non-field selection", - self_fragment_spread_selection.spread.fragment_name, - ), - }.into()); - }; - fragment_spreads - .entry(other_key) - .or_insert_with(Vec::new) - .push(other_fragment_spread_selection); - } - Selection::InlineFragment(self_inline_fragment_selection) => { - let Selection::InlineFragment(other_inline_fragment_selection) = - other_selection - else { - return Err(Internal { - message: format!( - "Inline fragment selection key under parent type \"{}\" {}references non-field selection", - self_inline_fragment_selection.inline_fragment.parent_type_position, - self_inline_fragment_selection.inline_fragment.type_condition_position.clone() - .map_or_else( - String::new, - |cond| format!("(type condition: {}) ", cond), - ), - ), - }.into()); - }; - inline_fragments - .entry(other_key) - .or_insert_with(Vec::new) - .push(other_inline_fragment_selection); - } - }, - selection_map::Entry::Vacant(vacant) => { - vacant.insert(other_selection.clone())?; - } - } - } - - for (key, self_selection) in target.iter_mut() { - match self_selection { - SelectionValue::Field(mut self_field_selection) => { - if let Some(other_field_selections) = fields.shift_remove(key) { - self_field_selection.merge_into( - other_field_selections.iter().map(|selection| &***selection), - )?; - } - } - SelectionValue::FragmentSpread(mut self_fragment_spread_selection) => { - if let Some(other_fragment_spread_selections) = - fragment_spreads.shift_remove(key) - { - self_fragment_spread_selection.merge_into( - other_fragment_spread_selections - .iter() - .map(|selection| &***selection), - )?; - } - } - SelectionValue::InlineFragment(mut self_inline_fragment_selection) => { - if let Some(other_inline_fragment_selections) = - inline_fragments.shift_remove(key) - { - self_inline_fragment_selection.merge_into( - other_inline_fragment_selections - .iter() - .map(|selection| &***selection), - )?; - } - } - } - } - - Ok(()) - } - pub(crate) fn expand_all_fragments(&self) -> Result { let mut expanded_selections = vec![]; SelectionSet::expand_selection_set(&mut expanded_selections, self)?; @@ -2632,6 +2510,8 @@ impl SelectionSet { ) -> Result { let mut selection_map = SelectionMap::new(); if let Some(parent) = parent_type_if_abstract { + // XXX(@goto-bus-stop): if the selection set has an *alias* named __typename for some + // other field, this doesn't work right. is that allowed? 
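The `merge_selections_into` helper removed above follows a classic merge-by-key shape — vacant keys insert the incoming selection as-is, occupied keys bucket it for a recursive merge of sub-selections — and, judging by the new `mod merging` declaration earlier in this diff, that is where it now lives. A reduced sketch of the shape with toy types (one selection kind instead of field/spread/inline-fragment, `BTreeMap` instead of the crate's ordered `SelectionMap`):

```rust
use std::collections::BTreeMap;

// Toy "selection": a response name plus child selections.
#[derive(Clone, Debug, PartialEq)]
struct Selection {
    name: String,
    children: BTreeMap<String, Selection>,
}

// Merge `others` into `target`, keyed by name: new keys are inserted,
// existing keys merge their sub-selections recursively (mirroring the
// occupied/vacant split above, minus the per-kind error checks).
fn merge_into(target: &mut BTreeMap<String, Selection>, others: Vec<Selection>) {
    for other in others {
        match target.entry(other.name.clone()) {
            std::collections::btree_map::Entry::Vacant(vacant) => {
                vacant.insert(other);
            }
            std::collections::btree_map::Entry::Occupied(mut existing) => {
                let children: Vec<Selection> = other.children.into_values().collect();
                merge_into(&mut existing.get_mut().children, children);
            }
        }
    }
}

fn main() {
    let leaf = |name: &str| Selection { name: name.into(), children: BTreeMap::new() };
    let mut target = BTreeMap::new();
    merge_into(&mut target, vec![Selection {
        name: "user".into(),
        children: BTreeMap::from([("id".into(), leaf("id"))]),
    }]);
    merge_into(&mut target, vec![Selection {
        name: "user".into(),
        children: BTreeMap::from([("name".into(), leaf("name"))]),
    }]);
    // `user` was merged, not duplicated; its children are the union.
    assert_eq!(target["user"].children.len(), 2);
}
```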
if !self.has_top_level_typename_field() { let typename_selection = Selection::from_field( Field::new_introspection_typename(&self.schema, &parent.into(), None), @@ -2666,84 +2546,12 @@ impl SelectionSet { } fn has_top_level_typename_field(&self) -> bool { - // Needs to be behind a OnceLock because `Arc::new` is non-const. - // XXX(@goto-bus-stop): Note this does *not* count `__typename @include(if: true)`. - // This seems wrong? But it's what JS does, too. - static TYPENAME_KEY: OnceLock = OnceLock::new(); - let key = TYPENAME_KEY.get_or_init(|| SelectionKey::Field { + const TYPENAME_KEY: SelectionKey = SelectionKey::Field { response_name: TYPENAME_FIELD, - directives: Arc::new(Default::default()), - }); - - self.selections.contains_key(key) - } - - /// Inserts a `Selection` into the inner map. Should a selection with the same key already - /// exist in the map, the existing selection and the given selection are merged, replacing the - /// existing selection while keeping the same insertion index. - /// - /// NOTE: This method assumes selection already points to the correct schema and parent type. - pub(crate) fn add_local_selection( - &mut self, - selection: &Selection, - ) -> Result<(), FederationError> { - debug_assert_eq!( - &self.schema, - selection.schema(), - "In order to add selection it needs to point to the same schema" - ); - self.merge_selections_into(std::iter::once(selection)) - } - - /// Inserts a `SelectionSet` into the inner map. Should any sub selection with the same key already - /// exist in the map, the existing selection and the given selection are merged, replacing the - /// existing selection while keeping the same insertion index. - /// - /// NOTE: This method assumes the target selection set already points to the same schema and type - /// position. Use `add_selection_set` instead if you need to rebase the selection set. - pub(crate) fn add_local_selection_set( - &mut self, - selection_set: &SelectionSet, - ) -> Result<(), FederationError> { - debug_assert_eq!( - self.schema, selection_set.schema, - "In order to add selection set it needs to point to the same schema." - ); - debug_assert_eq!( - self.type_position, selection_set.type_position, - "In order to add selection set it needs to point to the same type position" - ); - self.merge_into(std::iter::once(selection_set)) - } + directives: DirectiveList::new(), + }; - /// Rebase given `SelectionSet` on self and then inserts it into the inner map. Assumes that given - /// selection set does not reference ANY named fragments. If it does, Use `add_selection_set_with_fragments` - /// instead. - /// - /// Should any sub selection with the same key already exist in the map, the existing selection - /// and the given selection are merged, replacing the existing selection while keeping the same - /// insertion index. - pub(crate) fn add_selection_set( - &mut self, - selection_set: &SelectionSet, - ) -> Result<(), FederationError> { - self.add_selection_set_with_fragments(selection_set, &NamedFragments::default()) - } - - /// Rebase given `SelectionSet` on self with the specified fragments and then inserts it into the - /// inner map. - /// - /// Should any sub selection with the same key already exist in the map, the existing selection - /// and the given selection are merged, replacing the existing selection while keeping the same - /// insertion index. 
- pub(crate) fn add_selection_set_with_fragments( - &mut self, - selection_set: &SelectionSet, - named_fragments: &NamedFragments, - ) -> Result<(), FederationError> { - let rebased = - selection_set.rebase_on(&self.type_position, named_fragments, &self.schema)?; - self.add_local_selection_set(&rebased) + self.selections.contains_key(&TYPENAME_KEY) } /// Adds a path, and optional some selections following that path, to this selection map. @@ -2854,16 +2662,12 @@ impl SelectionSet { Ok(()) } - pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { - self.selections - .iter() - .for_each(|(_, s)| s.collect_used_fragment_names(aggregator)); - } - /// Removes the @defer directive from all selections without removing that selection. fn without_defer(&mut self) { for (_key, mut selection) in Arc::make_mut(&mut self.selections).iter_mut() { - Arc::make_mut(selection.get_directives_mut()).retain(|dir| dir.name != name!("defer")); + // TODO(@goto-bus-stop): doing this changes the key of the selection! + // We have to rebuild the selection map. + selection.get_directives_mut().remove_one("defer"); if let Some(set) = selection.get_selection_set_mut() { set.without_defer(); } @@ -2918,7 +2722,8 @@ impl SelectionSet { return Ok(self.clone()); } - let mut at_current_level: HashMap = HashMap::new(); + let mut at_current_level: IndexMap = + IndexMap::default(); let mut remaining: Vec<&FieldToAlias> = Vec::new(); for alias in aliases { @@ -3114,19 +2919,6 @@ impl SelectionSet { } Ok(false) } - - /// Runs the given callback for all elements in the selection set and their descendants. Note - /// that fragment spread selections are converted to inline fragment elements, and their - /// fragment selection sets are recursed into. - pub(crate) fn for_each_element( - &self, - callback: &mut impl FnMut(OpPathElement) -> Result<(), FederationError>, - ) -> Result<(), FederationError> { - for selection in self.selections.values() { - selection.for_each_element(self.type_position.clone(), callback)? - } - Ok(()) - } } impl IntoIterator for SelectionSet { @@ -3208,7 +3000,7 @@ fn compute_aliases_for_non_merging_fields( alias_collector: &mut Vec, schema: &ValidFederationSchema, ) -> Result<(), FederationError> { - let mut seen_response_names: HashMap = HashMap::new(); + let mut seen_response_names: IndexMap = IndexMap::default(); // - `s.selections` must be fragment-spread-free. 
fn rebased_fields_in_set(s: &SelectionSetAtPath) -> impl Iterator + '_ { @@ -3340,7 +3132,7 @@ fn compute_aliases_for_non_merging_fields( Ok(()) } -fn gen_alias_name(base_name: &Name, unavailable_names: &HashMap) -> Name { +fn gen_alias_name(base_name: &Name, unavailable_names: &IndexMap) -> Name { let mut counter = 0usize; loop { if let Ok(name) = Name::try_from(format!("{base_name}__alias_{counter}")) { @@ -3401,8 +3193,8 @@ impl FieldSelection { schema: schema.clone(), field_position, alias: field.alias.clone(), - arguments: Arc::new(field.arguments.clone()), - directives: Arc::new(field.directives.clone()), + arguments: field.arguments.clone().into(), + directives: field.directives.clone().into(), sibling_typename: None, }), selection_set: if is_composite { @@ -3442,71 +3234,6 @@ impl FieldSelection { } Ok(false) } - - pub(crate) fn for_each_element( - &self, - callback: &mut impl FnMut(OpPathElement) -> Result<(), FederationError>, - ) -> Result<(), FederationError> { - callback(self.field.clone().into())?; - if let Some(selection_set) = &self.selection_set { - selection_set.for_each_element(callback)?; - } - Ok(()) - } -} - -impl<'a> FieldSelectionValue<'a> { - /// Merges the given normalized field selections into this one (this method assumes the keys - /// already match). - pub(crate) fn merge_into<'op>( - &mut self, - others: impl Iterator, - ) -> Result<(), FederationError> { - let self_field = &self.get().field; - let mut selection_sets = vec![]; - for other in others { - let other_field = &other.field; - if other_field.schema != self_field.schema { - return Err(Internal { - message: "Cannot merge field selections from different schemas".to_owned(), - } - .into()); - } - if other_field.field_position != self_field.field_position { - return Err(Internal { - message: format!( - "Cannot merge field selection for field \"{}\" into a field selection for field \"{}\"", - other_field.field_position, - self_field.field_position, - ), - }.into()); - } - if self.get().selection_set.is_some() { - let Some(other_selection_set) = &other.selection_set else { - return Err(Internal { - message: format!( - "Field \"{}\" has composite type but not a selection set", - other_field.field_position, - ), - } - .into()); - }; - selection_sets.push(other_selection_set); - } else if other.selection_set.is_some() { - return Err(Internal { - message: format!( - "Field \"{}\" has non-composite type but also has a selection set", - other_field.field_position, - ), - } - .into()); - } - } - if let Some(self_selection_set) = self.get_selection_set_mut() { - self_selection_set.merge_into(selection_sets.into_iter())?; - } - Ok(()) - } } impl Field { @@ -3530,32 +3257,6 @@ impl Field { } } -impl<'a> FragmentSpreadSelectionValue<'a> { - /// Merges the given normalized fragment spread selections into this one (this method assumes - /// the keys already match). - pub(crate) fn merge_into<'op>( - &mut self, - others: impl Iterator, - ) -> Result<(), FederationError> { - let self_fragment_spread = &self.get().spread; - for other in others { - let other_fragment_spread = &other.spread; - if other_fragment_spread.schema != self_fragment_spread.schema { - return Err(Internal { - message: "Cannot merge fragment spread from different schemas".to_owned(), - } - .into()); - } - // Nothing to do since the fragment spread is already part of the selection set. - // Fragment spreads are uniquely identified by fragment name and applied directives. 
- // Since there is already an entry for the same fragment spread, there is no point - // in attempting to merge its sub-selections, as the underlying entry should be - // exactly the same as the currently processed one. - } - Ok(()) - } -} - impl InlineFragmentSelection { pub(crate) fn new(inline_fragment: InlineFragment, selection_set: SelectionSet) -> Self { debug_assert_eq!( @@ -3608,7 +3309,7 @@ impl InlineFragmentSelection { schema: schema.clone(), parent_type_position: parent_type_position.clone(), type_condition_position, - directives: Arc::new(inline_fragment.directives.clone()), + directives: inline_fragment.directives.clone().into(), selection_id: SelectionId::new(), }); Ok(InlineFragmentSelection::new( @@ -3647,7 +3348,7 @@ impl InlineFragmentSelection { pub(crate) fn from_selection_set( parent_type_position: CompositeTypeDefinitionPosition, selection_set: SelectionSet, - directives: Arc, + directives: DirectiveList, ) -> Self { let inline_fragment_data = InlineFragmentData { schema: selection_set.schema.clone(), @@ -3696,66 +3397,6 @@ impl InlineFragmentSelection { } self.selection_set.any_element(predicate) } - - pub(crate) fn for_each_element( - &self, - callback: &mut impl FnMut(OpPathElement) -> Result<(), FederationError>, - ) -> Result<(), FederationError> { - callback(self.inline_fragment.clone().into())?; - self.selection_set.for_each_element(callback) - } -} - -impl<'a> InlineFragmentSelectionValue<'a> { - /// Merges the given normalized inline fragment selections into this one (this method assumes - /// the keys already match). - pub(crate) fn merge_into<'op>( - &mut self, - others: impl Iterator, - ) -> Result<(), FederationError> { - let self_inline_fragment = &self.get().inline_fragment; - let mut selection_sets = vec![]; - for other in others { - let other_inline_fragment = &other.inline_fragment; - if other_inline_fragment.schema != self_inline_fragment.schema { - return Err(Internal { - message: "Cannot merge inline fragment from different schemas".to_owned(), - } - .into()); - } - if other_inline_fragment.parent_type_position - != self_inline_fragment.parent_type_position - { - return Err(Internal { - message: format!( - "Cannot merge inline fragment of parent type \"{}\" into an inline fragment of parent type \"{}\"", - other_inline_fragment.parent_type_position, - self_inline_fragment.parent_type_position, - ), - }.into()); - } - selection_sets.push(&other.selection_set); - } - self.get_selection_set_mut() - .merge_into(selection_sets.into_iter())?; - Ok(()) - } -} - -pub(crate) fn merge_selection_sets( - mut selection_sets: Vec, -) -> Result { - let Some((first, remainder)) = selection_sets.split_first_mut() else { - return Err(Internal { - message: "".to_owned(), - } - .into()); - }; - first.merge_into(remainder.iter())?; - - // Take ownership of the first element and discard the rest; - // we can unwrap because `split_first_mut()` guarantees at least one element will be yielded - Ok(selection_sets.into_iter().next().unwrap()) } /// This uses internal copy-on-write optimization to make `Clone` cheap. 
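The "internal copy-on-write optimization to make `Clone` cheap" mentioned in the doc comment above is the idiom visible throughout this diff: data sits behind an `Arc`, and mutation goes through `Arc::make_mut`, which copies the payload only when the allocation is actually shared. A minimal sketch of that behavior (toy container, std only):

```rust
use std::sync::Arc;

// Toy copy-on-write container in the spirit of `NamedFragments`: cloning
// shares the inner allocation; the first mutation after a clone copies it.
#[derive(Clone, Default)]
struct Fragments {
    inner: Arc<Vec<String>>,
}

impl Fragments {
    fn insert(&mut self, name: &str) {
        // `Arc::make_mut` clones the Vec only if another handle shares it,
        // so mutating an unshared value stays allocation-free.
        Arc::make_mut(&mut self.inner).push(name.to_string());
    }
}

fn main() {
    let mut original = Fragments::default();
    original.insert("UserFields");

    let snapshot = original.clone(); // cheap: bumps a refcount
    assert!(Arc::ptr_eq(&original.inner, &snapshot.inner));

    original.insert("ProductFields"); // triggers the copy, exactly once
    assert!(!Arc::ptr_eq(&original.inner, &snapshot.inner));
    assert_eq!(snapshot.inner.len(), 1); // the earlier clone is unaffected
    assert_eq!(original.inner.len(), 2);
}
```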
@@ -3784,7 +3425,7 @@ impl NamedFragments { self.fragments.len() == 0 } - pub(crate) fn size(&self) -> usize { + pub(crate) fn len(&self) -> usize { self.fragments.len() } @@ -3821,25 +3462,14 @@ impl NamedFragments { } } - pub(crate) fn get(&self, name: &Name) -> Option> { - self.fragments.get(name).cloned() + pub(crate) fn get(&self, name: &str) -> Option<&Node> { + self.fragments.get(name) } - pub(crate) fn contains(&self, name: &Name) -> bool { + pub(crate) fn contains(&self, name: &str) -> bool { self.fragments.contains_key(name) } - /** - * Collect the usages of fragments that are used within the selection of other fragments. - */ - pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut HashMap) { - for fragment in self.fragments.values() { - fragment - .selection_set - .collect_used_fragment_names(aggregator); - } - } - /// JS PORT NOTE: In JS implementation this method was named mapInDependencyOrder and accepted a lambda to /// apply transformation on the fragments. It was called when rebasing/filtering/expanding selection sets. /// JS PORT NOTE: In JS implementation this method was potentially returning `undefined`. In order to simplify the code @@ -3860,7 +3490,7 @@ impl NamedFragments { // the outcome of `map_to_expanded_selection_sets`. let mut fragments_map: IndexMap = IndexMap::default(); for fragment in fragments.values() { - let mut fragment_usages: HashMap = HashMap::new(); + let mut fragment_usages = IndexMap::default(); NamedFragments::collect_fragment_usages(&fragment.selection_set, &mut fragment_usages); let usages: Vec = fragment_usages.keys().cloned().collect::>(); fragments_map.insert( @@ -3872,7 +3502,7 @@ impl NamedFragments { ); } - let mut removed_fragments: HashSet = HashSet::new(); + let mut removed_fragments: IndexSet = IndexSet::default(); let mut mapped_fragments = NamedFragments::default(); while !fragments_map.is_empty() { // Note that graphQL specifies that named fragments cannot have cycles (https://spec.graphql.org/draft/#sec-Fragment-spreads-must-not-form-cycles) @@ -3890,7 +3520,7 @@ impl NamedFragments { // JS code has methods for // * add and throw exception if entry already there // * add_if_not_exists - // Rust HashMap exposes insert (that overwrites) and try_insert (that throws) + // Rust IndexMap exposes insert (that overwrites) and try_insert (that throws) mapped_fragments.insert(normalized); } else { removed_fragments.insert(name.clone()); @@ -3903,10 +3533,10 @@ impl NamedFragments { mapped_fragments } - // JS PORT - we need to calculate those for both executable::SelectionSet and SelectionSet + /// Just like our `SelectionSet::used_fragments`, but with apollo-compiler types fn collect_fragment_usages( selection_set: &executable::SelectionSet, - aggregator: &mut HashMap, + aggregator: &mut IndexMap, ) { selection_set.selections.iter().for_each(|s| match s { executable::Selection::Field(f) => { @@ -3950,174 +3580,174 @@ impl NamedFragments { } } -/// Tracks fragments from the original operation, along with versions rebased on other subgraphs. -// XXX(@goto-bus-stop): improve/replace/reduce this structure. My notes: -// This gets cloned only in recursive query planning. Then whenever `.for_subgraph()` ends up being -// called, it always clones the `rebased_fragments` map. `.for_subgraph()` is called whenever the -// plan is turned into plan nodes by the FetchDependencyGraphToQueryPlanProcessor. -// This suggests that we can remove the Arc wrapper for `rebased_fragments` because we end up cloning the inner data anyways. 
-//
-// This data structure is also used as an argument in several `crate::operation` functions. This
-// seems wrong. The only useful method on this structure is `.for_subgraph()`, which is only used
-// by the fetch dependency graph when creating plan nodes. That necessarily implies that all other
-// uses of this structure only access `.original_fragments`. In that case, we should pass around
-// the `NamedFragments` itself, not this wrapper structure.
-//
-// `.for_subgraph()` also requires a mutable reference to fill in the data. But
-// `.rebased_fragments` is really a cache, so requiring a mutable reference isn't an ideal API.
-// Conceptually you are just computing something and getting the result. Perhaps we can use a
-// concurrent map, or prepopulate the HashMap for all subgraphs, or precompute the whole thing for
-// all subgraphs (or precompute a hash map of subgraph names to OnceLocks).
-#[derive(Clone)]
-pub(crate) struct RebasedFragments {
-    pub(crate) original_fragments: NamedFragments,
-    // JS PORT NOTE: In JS implementation values were optional
-    /// Map key: subgraph name
-    rebased_fragments: Arc<HashMap<Arc<str>, NamedFragments>>,
+// Collect fragment usages from operation types.
+
+impl Selection {
+    fn collect_used_fragment_names(&self, aggregator: &mut IndexMap<Name, u32>) {
+        match self {
+            Selection::Field(field_selection) => {
+                if let Some(s) = &field_selection.selection_set {
+                    s.collect_used_fragment_names(aggregator)
+                }
+            }
+            Selection::InlineFragment(inline) => {
+                inline.selection_set.collect_used_fragment_names(aggregator);
+            }
+            Selection::FragmentSpread(fragment) => {
+                let current_count = aggregator
+                    .entry(fragment.spread.fragment_name.clone())
+                    .or_default();
+                *current_count += 1;
+            }
+        }
+    }
 }
 
-impl RebasedFragments {
-    pub(crate) fn new(fragments: NamedFragments) -> Self {
-        Self {
-            original_fragments: fragments,
-            rebased_fragments: Arc::new(HashMap::new()),
+impl SelectionSet {
+    pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut IndexMap<Name, u32>) {
+        for s in self.selections.values() {
+            s.collect_used_fragment_names(aggregator);
         }
     }
 
-    pub(crate) fn for_subgraph(
-        &mut self,
-        subgraph_name: impl Into<Arc<str>>,
-        subgraph_schema: &ValidFederationSchema,
-    ) -> &NamedFragments {
-        Arc::make_mut(&mut self.rebased_fragments)
-            .entry(subgraph_name.into())
-            .or_insert_with(|| {
-                self.original_fragments
-                    .rebase_on(subgraph_schema)
-                    .unwrap_or_default()
-            })
+    pub(crate) fn used_fragments(&self) -> IndexMap<Name, u32> {
+        let mut usages = IndexMap::default();
+        self.collect_used_fragment_names(&mut usages);
+        usages
+    }
+}
+
+impl Fragment {
+    pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut IndexMap<Name, u32>) {
+        self.selection_set.collect_used_fragment_names(aggregator)
+    }
+}
+
+impl NamedFragments {
+    /// Collect the usages of fragments that are used within the selection of other fragments.
+    pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut IndexMap<Name, u32>) {
+        for fragment in self.fragments.values() {
+            fragment
+                .selection_set
+                .collect_used_fragment_names(aggregator);
+        }
+    }
 }
 
 // Collect used variables from operation types.
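Before moving on to variables: the relocated `collect_used_fragment_names` implementations just above all funnel into one counting walk — recurse through sub-selections and bump a per-fragment counter at each spread. A self-contained sketch of that core (a toy selection tree, with `HashMap` standing in for the code's `IndexMap<Name, u32>`):

```rust
use std::collections::HashMap;

// Toy selection tree: fields have children, spreads reference a fragment.
enum Selection {
    Field(Vec<Selection>),
    FragmentSpread(String),
}

// Mirrors the counting walk: entry().or_default() then += 1 for every
// spread reachable from the selection set.
fn collect_usages(selections: &[Selection], counts: &mut HashMap<String, u32>) {
    for s in selections {
        match s {
            Selection::Field(children) => collect_usages(children, counts),
            Selection::FragmentSpread(name) => *counts.entry(name.clone()).or_default() += 1,
        }
    }
}

fn main() {
    let op = vec![
        Selection::FragmentSpread("UserFields".into()),
        Selection::Field(vec![Selection::FragmentSpread("UserFields".into())]),
    ];
    let mut usages = HashMap::new();
    collect_usages(&op, &mut usages);
    assert_eq!(usages["UserFields"], 2);
}
```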
-fn collect_variables_from_value<'selection>( - value: &'selection executable::Value, - variables: &mut HashSet<&'selection Name>, -) { - match value { - executable::Value::Variable(v) => { - variables.insert(v); +pub(crate) struct VariableCollector<'s> { + variables: IndexSet<&'s Name>, +} + +impl<'s> VariableCollector<'s> { + pub(crate) fn new() -> Self { + Self { + variables: Default::default(), } - executable::Value::List(list) => { - for value in list { - collect_variables_from_value(value, variables); + } + + fn visit_value(&mut self, value: &'s executable::Value) { + match value { + executable::Value::Variable(v) => { + self.variables.insert(v); } - } - executable::Value::Object(object) => { - for (_key, value) in object { - collect_variables_from_value(value, variables); + executable::Value::List(list) => { + for value in list { + self.visit_value(value); + } } + executable::Value::Object(object) => { + for (_key, value) in object { + self.visit_value(value); + } + } + _ => {} } - _ => {} } -} -fn collect_variables_from_directive<'selection>( - directive: &'selection executable::Directive, - variables: &mut HashSet<&'selection Name>, -) { - for arg in directive.arguments.iter() { - collect_variables_from_value(&arg.value, variables) + fn visit_directive(&mut self, directive: &'s executable::Directive) { + for arg in directive.arguments.iter() { + self.visit_value(&arg.value); + } } -} -impl Field { - fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { - for arg in self.arguments.iter() { - collect_variables_from_value(&arg.value, variables) + pub(crate) fn visit_directive_list(&mut self, directives: &'s executable::DirectiveList) { + for dir in directives.iter() { + self.visit_directive(dir); } - for dir in self.directives.iter() { - collect_variables_from_directive(dir, variables) + } + + fn visit_field(&mut self, field: &'s Field) { + for arg in field.arguments.iter() { + self.visit_value(&arg.value); } + self.visit_directive_list(&field.directives); } -} -impl FieldSelection { - /// # Errors - /// Returns an error if the selection contains a named fragment spread. - fn collect_variables<'selection>( - &'selection self, - variables: &mut HashSet<&'selection Name>, - ) -> Result<(), FederationError> { - self.field.collect_variables(variables); - if let Some(set) = &self.selection_set { - set.collect_variables(variables)? 
+ fn visit_field_selection(&mut self, selection: &'s FieldSelection) { + self.visit_field(&selection.field); + if let Some(set) = &selection.selection_set { + self.visit_selection_set(set); } - Ok(()) } -} -impl InlineFragment { - fn collect_variables<'selection>(&'selection self, variables: &mut HashSet<&'selection Name>) { - for dir in self.directives.iter() { - collect_variables_from_directive(dir, variables) + fn visit_inline_fragment(&mut self, fragment: &'s InlineFragment) { + self.visit_directive_list(&fragment.directives); + } + + fn visit_inline_fragment_selection(&mut self, selection: &'s InlineFragmentSelection) { + self.visit_inline_fragment(&selection.inline_fragment); + self.visit_selection_set(&selection.selection_set); + } + + fn visit_fragment_spread(&mut self, fragment: &'s FragmentSpread) { + self.visit_directive_list(&fragment.directives); + self.visit_directive_list(&fragment.fragment_directives); + } + + fn visit_fragment_spread_selection(&mut self, selection: &'s FragmentSpreadSelection) { + self.visit_fragment_spread(&selection.spread); + self.visit_selection_set(&selection.selection_set); + } + + fn visit_selection(&mut self, selection: &'s Selection) { + match selection { + Selection::Field(field) => self.visit_field_selection(field), + Selection::InlineFragment(frag) => self.visit_inline_fragment_selection(frag), + Selection::FragmentSpread(frag) => self.visit_fragment_spread_selection(frag), } } -} -impl InlineFragmentSelection { - /// # Errors - /// Returns an error if the selection contains a named fragment spread. - fn collect_variables<'selection>( - &'selection self, - variables: &mut HashSet<&'selection Name>, - ) -> Result<(), FederationError> { - self.inline_fragment.collect_variables(variables); - self.selection_set.collect_variables(variables) + pub(crate) fn visit_selection_set(&mut self, selection_set: &'s SelectionSet) { + for selection in selection_set.iter() { + self.visit_selection(selection); + } + } + + /// Consume the collector and return the collected names. + pub(crate) fn into_inner(self) -> IndexSet<&'s Name> { + self.variables } } -impl Selection { - /// # Errors - /// Returns an error if the selection contains a named fragment spread. - fn collect_variables<'selection>( - &'selection self, - variables: &mut HashSet<&'selection Name>, - ) -> Result<(), FederationError> { - match self { - Selection::Field(field) => field.collect_variables(variables), - Selection::InlineFragment(inline_fragment) => { - inline_fragment.collect_variables(variables) - } - Selection::FragmentSpread(_) => Err(FederationError::internal( - "collect_variables(): unexpected fragment spread", - )), - } +impl Fragment { + /// Returns the variable names that are used by this fragment. + pub(crate) fn used_variables(&self) -> IndexSet<&'_ Name> { + let mut collector = VariableCollector::new(); + collector.visit_directive_list(&self.directives); + collector.visit_selection_set(&self.selection_set); + collector.into_inner() } } impl SelectionSet { - /// Returns the variable names that are used by this selection set. - /// - /// # Errors - /// Returns an error if the selection set contains a named fragment spread. - pub(crate) fn used_variables(&self) -> Result, FederationError> { - let mut variables = HashSet::new(); - self.collect_variables(&mut variables)?; - Ok(variables) - } - - /// # Errors - /// Returns an error if the selection set contains a named fragment spread. 
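The new `VariableCollector` above replaces the removed free functions with a visitor: one collector owns the result set, every `visit_*` method feeds it, and because fragment spreads are visited rather than rejected, the traversal no longer needs to return a `Result`. A reduced sketch of the same shape (toy `Value` type; `BTreeSet<&str>` standing in for `IndexSet<&'s Name>`):

```rust
use std::collections::BTreeSet;

// Toy value type; the real visitor also walks directives and selections.
enum Value {
    Variable(String),
    List(Vec<Value>),
    Object(Vec<(String, Value)>),
    Int(i64),
}

// One collector owns the set. Borrowed names (`&'s str`) avoid cloning,
// like the real collector's `IndexSet<&'s Name>`.
struct VariableCollector<'s> {
    variables: BTreeSet<&'s str>,
}

impl<'s> VariableCollector<'s> {
    fn new() -> Self {
        Self { variables: BTreeSet::new() }
    }

    fn visit_value(&mut self, value: &'s Value) {
        match value {
            Value::Variable(v) => {
                self.variables.insert(v);
            }
            Value::List(list) => list.iter().for_each(|v| self.visit_value(v)),
            Value::Object(fields) => fields.iter().for_each(|(_, v)| self.visit_value(v)),
            Value::Int(_) => {}
        }
    }

    // Consume the collector and return the collected names.
    fn into_inner(self) -> BTreeSet<&'s str> {
        self.variables
    }
}

fn main() {
    let value = Value::Object(vec![
        ("first".into(), Value::Variable("limit".into())),
        ("rest".into(), Value::List(vec![Value::Int(1), Value::Variable("cursor".into())])),
    ]);
    let mut collector = VariableCollector::new();
    collector.visit_value(&value);
    let vars = collector.into_inner();
    assert!(vars.contains("limit") && vars.contains("cursor"));
}
```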
- fn collect_variables<'selection>( - &'selection self, - variables: &mut HashSet<&'selection Name>, - ) -> Result<(), FederationError> { - for selection in self.selections.values() { - selection.collect_variables(variables)? - } - Ok(()) + /// Returns the variable names that are used by this selection set, including through fragment + /// spreads. + pub(crate) fn used_variables(&self) -> IndexSet<&'_ Name> { + let mut collector = VariableCollector::new(); + collector.visit_selection_set(self); + collector.into_inner() } } @@ -4132,7 +3762,7 @@ impl TryFrom<&Operation> for executable::Operation { operation_type, name: normalized_operation.name.clone(), variables: normalized_operation.variables.deref().clone(), - directives: normalized_operation.directives.deref().clone(), + directives: normalized_operation.directives.iter().cloned().collect(), selection_set: (&normalized_operation.selection_set).try_into()?, }) } @@ -4144,7 +3774,7 @@ impl TryFrom<&Fragment> for executable::Fragment { fn try_from(normalized_fragment: &Fragment) -> Result { Ok(Self { name: normalized_fragment.name.clone(), - directives: normalized_fragment.directives.deref().clone(), + directives: normalized_fragment.directives.iter().cloned().collect(), selection_set: (&normalized_fragment.selection_set).try_into()?, }) } @@ -4215,7 +3845,7 @@ impl TryFrom<&Field> for executable::Field { alias: normalized_field.alias.to_owned(), name: normalized_field.name().to_owned(), arguments: normalized_field.arguments.deref().to_owned(), - directives: normalized_field.directives.deref().to_owned(), + directives: normalized_field.directives.iter().cloned().collect(), selection_set, }) } @@ -4249,7 +3879,11 @@ impl TryFrom<&InlineFragment> for executable::InlineFragment { }); Ok(Self { type_condition, - directives: normalized_inline_fragment.directives.deref().to_owned(), + directives: normalized_inline_fragment + .directives + .iter() + .cloned() + .collect(), selection_set: executable::SelectionSet { ty, selections: Vec::new(), @@ -4274,7 +3908,11 @@ impl From<&FragmentSpreadSelection> for executable::FragmentSpread { let normalized_fragment_spread = &val.spread; Self { fragment_name: normalized_fragment_spread.fragment_name.to_owned(), - directives: normalized_fragment_spread.directives.deref().to_owned(), + directives: normalized_fragment_spread + .directives + .iter() + .cloned() + .collect(), } } } @@ -4299,6 +3937,7 @@ impl TryFrom for Valid { let mut document = executable::ExecutableDocument::new(); document.fragments = fragments; document.operations.insert(operation); + coerce_executable_values(value.schema.schema(), &mut document); Ok(document.validate(value.schema.schema())?) } } @@ -4457,7 +4096,7 @@ pub(crate) fn normalize_operation( root_kind: operation.operation_type.into(), name: operation.name.clone(), variables: Arc::new(operation.variables.clone()), - directives: Arc::new(operation.directives.clone()), + directives: operation.directives.clone().into(), selection_set: normalized_selection_set, named_fragments, }; diff --git a/apollo-federation/src/operation/optimize.rs b/apollo-federation/src/operation/optimize.rs index 12d1642158..2bd4262e88 100644 --- a/apollo-federation/src/operation/optimize.rs +++ b/apollo-federation/src/operation/optimize.rs @@ -35,17 +35,18 @@ //! ## `reuse_fragments` methods (putting everything together) //! Recursive optimization of selection and selection sets. 
-use std::collections::HashMap;
-use std::collections::HashSet;
-use std::ops::Not;
 use std::sync::Arc;
 
+use apollo_compiler::collections::IndexMap;
+use apollo_compiler::collections::IndexSet;
 use apollo_compiler::executable;
+use apollo_compiler::executable::VariableDefinition;
 use apollo_compiler::Name;
 use apollo_compiler::Node;
 
 use super::Containment;
 use super::ContainmentOptions;
+use super::DirectiveList;
 use super::Field;
 use super::FieldSelection;
 use super::Fragment;
@@ -54,13 +55,41 @@ use super::InlineFragmentSelection;
 use super::NamedFragments;
 use super::Operation;
 use super::Selection;
-use super::SelectionKey;
 use super::SelectionMapperReturn;
 use super::SelectionOrSet;
 use super::SelectionSet;
 use crate::error::FederationError;
+use crate::operation::FragmentSpread;
+use crate::operation::FragmentSpreadData;
+use crate::operation::SelectionValue;
 use crate::schema::position::CompositeTypeDefinitionPosition;
 
+#[derive(Debug)]
+struct ReuseContext<'a> {
+    fragments: &'a NamedFragments,
+    operation_variables: Option<IndexSet<&'a Name>>,
+}
+
+impl<'a> ReuseContext<'a> {
+    fn for_fragments(fragments: &'a NamedFragments) -> Self {
+        Self {
+            fragments,
+            operation_variables: None,
+        }
+    }
+
+    // Taking two separate parameters so the caller can still mutate the operation's selection set.
+    fn for_operation(
+        fragments: &'a NamedFragments,
+        operation_variables: &'a [Node<VariableDefinition>],
+    ) -> Self {
+        Self {
+            fragments,
+            operation_variables: Some(operation_variables.iter().map(|var| &var.name).collect()),
+        }
+    }
+}
+
 //=============================================================================
 // Add __typename field for abstract types in named fragment definitions
 
@@ -86,7 +115,7 @@ impl NamedFragments {
         )?;
         let mut mapped_selection_set = mapper(&expanded_selection_set)?;
         // `mapped_selection_set` must be fragment-spread-free.
-        mapped_selection_set.reuse_fragments(&result)?;
+        mapped_selection_set.reuse_fragments(&ReuseContext::for_fragments(&result))?;
         let updated = Fragment {
             selection_set: mapped_selection_set,
             schema: fragment.schema.clone(),
@@ -171,7 +200,7 @@ impl NamedFragments {
         // PORT_NOTE: The JS version asserts if `updated` is empty or not. But, we really want to
         // check the `updated` has the same set of fragments. To avoid performance hit, only the
         // size is checked here.
-        if updated.size() != self.size() {
+        if updated.len() != self.len() {
             return Err(FederationError::internal(
                 "Unexpected change in the number of fragments",
             ));
@@ -363,7 +392,7 @@ impl NamedFragments {
 //   `Option`. However, `None` validator makes it clearer that validation is
 //   unnecessary.
struct FieldsConflictValidator { - by_response_name: HashMap>>>, + by_response_name: IndexMap>>>, } impl FieldsConflictValidator { @@ -377,7 +406,8 @@ impl FieldsConflictValidator { fn for_level<'a>(level: &[&'a SelectionSet]) -> Self { // Group `level`'s fields by the response-name/field - let mut at_level: HashMap>> = HashMap::new(); + let mut at_level: IndexMap>> = + IndexMap::default(); for selection_set in level { for field_selection in selection_set.field_selections() { let response_name = field_selection.field.response_name(); @@ -392,10 +422,10 @@ impl FieldsConflictValidator { } // Collect validators per response-name/field - let mut by_response_name = HashMap::new(); + let mut by_response_name = IndexMap::default(); for (response_name, fields) in at_level { - let mut at_response_name: HashMap>> = - HashMap::new(); + let mut at_response_name: IndexMap>> = + IndexMap::default(); for (field, selection_sets) in fields { if selection_sets.is_empty() { at_response_name.insert(field, None); @@ -602,7 +632,7 @@ struct FragmentRestrictionAtType { #[derive(Default)] struct FragmentRestrictionAtTypeCache { - map: HashMap<(Name, CompositeTypeDefinitionPosition), Arc>, + map: IndexMap<(Name, CompositeTypeDefinitionPosition), Arc>, } impl FragmentRestrictionAtTypeCache { @@ -615,8 +645,8 @@ impl FragmentRestrictionAtTypeCache { // the lifetime does not really want to work out. // (&'cache mut self) -> Result<&'cache FragmentRestrictionAtType> match self.map.entry((fragment.name.clone(), ty.clone())) { - std::collections::hash_map::Entry::Occupied(entry) => Ok(Arc::clone(entry.get())), - std::collections::hash_map::Entry::Vacant(entry) => Ok(Arc::clone( + indexmap::map::Entry::Occupied(entry) => Ok(Arc::clone(entry.get())), + indexmap::map::Entry::Vacant(entry) => Ok(Arc::clone( entry.insert(Arc::new(fragment.expanded_selection_set_at_type(ty)?)), )), } @@ -665,7 +695,7 @@ impl Fragment { ty: &CompositeTypeDefinitionPosition, ) -> Result { let expanded_selection_set = self.selection_set.expand_all_fragments()?; - let normalized_selection_set = expanded_selection_set.flatten_unnecessary_fragments( + let selection_set = expanded_selection_set.flatten_unnecessary_fragments( ty, /*named_fragments*/ &Default::default(), &self.schema, @@ -677,7 +707,7 @@ impl Fragment { // Thus, we have to use the full validator in this case. (see // https://github.com/graphql/graphql-spec/issues/1085 for details.) return Ok(FragmentRestrictionAtType::new( - normalized_selection_set.clone(), + selection_set.clone(), Some(FieldsConflictValidator::from_selection_set( &expanded_selection_set, )), @@ -693,13 +723,11 @@ impl Fragment { // validator because we know the non-trimmed parts cannot create field conflict issues so // we're trying to build a smaller validator, but it's ok if trimmed is not as small as it // theoretically can be. 
- let trimmed = expanded_selection_set.minus(&normalized_selection_set)?; - let validator = trimmed - .is_empty() - .not() - .then(|| FieldsConflictValidator::from_selection_set(&trimmed)); + let trimmed = expanded_selection_set.minus(&selection_set)?; + let validator = + (!trimmed.is_empty()).then(|| FieldsConflictValidator::from_selection_set(&trimmed)); Ok(FragmentRestrictionAtType::new( - normalized_selection_set.clone(), + selection_set.clone(), validator, )) } @@ -721,10 +749,10 @@ impl Fragment { return false; } - self.selection_set.selections.iter().any(|(selection_key, _)| { + self.selection_set.selections.iter().any(|(_, selection)| { matches!( - selection_key, - SelectionKey::FragmentSpread {fragment_name, directives: _} if fragment_name == other_fragment_name, + selection, + Selection::FragmentSpread(fragment) if fragment.spread.fragment_name == *other_fragment_name ) }) } @@ -735,7 +763,7 @@ enum FullMatchingFragmentCondition<'a> { ForInlineFragmentSelection { // the type condition and directives on an inline fragment selection. type_condition_position: &'a CompositeTypeDefinitionPosition, - directives: &'a Arc, + directives: &'a DirectiveList, }, } @@ -782,7 +810,8 @@ enum SelectionSetOrFragment { } impl SelectionSet { - /// Reduce the list of applicable fragments by eliminating ones that are subsumed by another. + /// Reduce the list of applicable fragments by eliminating fragments that directly include + /// another fragment. // // We have found the list of fragments that applies to some subset of sub-selection. In // general, we want to now produce the selection set with spread for those fragments plus @@ -838,7 +867,7 @@ impl SelectionSet { ) { // Note: It's not possible for two fragments to include each other. So, we don't need to // worry about inclusion cycles. - let included_fragments: HashSet = applicable_fragments + let included_fragments: IndexSet = applicable_fragments .iter() .filter(|(fragment, _)| { applicable_fragments @@ -861,7 +890,7 @@ impl SelectionSet { fn try_apply_fragments( &self, parent_type: &CompositeTypeDefinitionPosition, - fragments: &NamedFragments, + context: &ReuseContext<'_>, validator: &mut FieldsConflictMultiBranchValidator, fragments_at_type: &mut FragmentRestrictionAtTypeCache, full_match_condition: FullMatchingFragmentCondition, @@ -873,7 +902,9 @@ impl SelectionSet { // fragment whose type _is_ the fragment condition (at which point, this // `can_apply_directly_at_type` method will apply. Also note that this is because we have // this restriction that calling `expanded_selection_set_at_type` is ok. - let candidates = fragments.get_all_may_apply_directly_at_type(parent_type); + let candidates = context + .fragments + .get_all_may_apply_directly_at_type(parent_type); // First, we check which of the candidates do apply inside the selection set, if any. If we // find a candidate that applies to the whole selection set, then we stop and only return @@ -888,6 +919,27 @@ impl SelectionSet { continue; } + // I don't love this, but fragments may introduce new fields to the operation, including + // fields that use variables that are not declared in the operation. There are two ways + // to work around this: adjusting the fragments so they only list the fields that we + // actually need, or excluding fragments that introduce variable references from reuse. + // The former would be ideal, as we would not execute more fields than required. It's + // also much trickier to do. 
The latter fixes this particular issue but leaves the + output in a less-than-ideal state. + // The consideration here is: `generate_query_fragments` has significant advantages + // over fragment reuse, and so we do not want to invest a lot of time into improving + // fragment reuse. We do the simple, less-than-ideal thing. + if let Some(variable_definitions) = &context.operation_variables { + let fragment_variables = candidate.used_variables(); + if fragment_variables + .difference(variable_definitions) + .next() + .is_some() + { + continue; + } + } + // As we check inclusion, we ignore the case where the fragment queries __typename // but the `self` does not. The rationale is that querying `__typename` // unnecessarily is mostly harmless (it always works and it's super cheap) so we @@ -1080,21 +1132,19 @@ impl NamedFragments { selection_set: &SelectionSet, min_usage_to_optimize: u32, ) -> Result<SelectionSet, FederationError> { - let min_usage_to_optimize: i32 = min_usage_to_optimize.try_into().unwrap_or(i32::MAX); - // Call `reduce_inner` repeatedly until we reach a fix-point, since the newly computed // selection set may drop some fragment references due to normalization, which could lead // to further reduction. // - It is hard to avoid this chain reaction, since we need to account for the effects of // normalization. - let mut last_size = self.size(); + let mut last_size = self.len(); let mut last_selection_set = selection_set.clone(); while last_size > 0 { let new_selection_set = self.reduce_inner(&last_selection_set, min_usage_to_optimize)?; // Reached a fix-point => stop - if self.size() == last_size { + if self.len() == last_size { // Assumes that `new_selection_set` is the same as `last_selection_set` in this // case. break; @@ -1113,27 +1163,23 @@ impl NamedFragments { // case without additional complexity. // Prepare the next iteration - last_size = self.size(); + last_size = self.len(); last_selection_set = new_selection_set; } Ok(last_selection_set) } /// The inner loop body of the `reduce` method. - /// - Takes i32 `min_usage_to_optimize` since `collect_used_fragment_names` counts usages in - /// i32. fn reduce_inner( &mut self, selection_set: &SelectionSet, - min_usage_to_optimize: i32, + min_usage_to_optimize: u32, ) -> Result<SelectionSet, FederationError> { - // Initial computation of fragment usages in `selection_set`. - let mut usages = HashMap::new(); - selection_set.collect_used_fragment_names(&mut usages); + let mut usages = selection_set.used_fragments(); // Short-circuiting: Nothing was used => Drop everything (selection_set is unchanged). if usages.is_empty() { - self.retain(|_, _| false); + *self = Default::default(); return Ok(selection_set.clone()); } @@ -1149,7 +1195,7 @@ impl NamedFragments { // - We take advantage of the fact that `NamedFragments` is already sorted in dependency // order. // PORT_NOTE: The `computeFragmentsToKeep` function is implemented here. - let original_size = self.size(); + let original_size = self.len(); for fragment in self.iter_rev() { let usage_count = usages.get(&fragment.name).copied().unwrap_or_default(); if usage_count >= min_usage_to_optimize { @@ -1167,7 +1213,7 @@ impl NamedFragments { }); // Short-circuiting: Nothing was dropped (fully used) => Nothing to change.
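Conceptually, the counting-and-retaining logic above reduces to keeping only fragments used at least `min_usage_to_optimize` times (the default is 2, per `DEFAULT_MIN_USAGES_TO_OPTIMIZE` below). A standalone sketch of that pruning step, with plain `&str` names standing in for `Name` and no nested-fragment bookkeeping:

```rust
use apollo_compiler::collections::IndexMap;

fn fragments_to_keep<'a>(
    usages: &IndexMap<&'a str, u32>,
    min_usage_to_optimize: u32,
) -> Vec<&'a str> {
    usages
        .iter()
        .filter(|&(_, &count)| count >= min_usage_to_optimize)
        .map(|(&name, _)| name)
        .collect()
}

fn main() {
    let mut usages: IndexMap<&str, u32> = IndexMap::default();
    usages.insert("CommonFields", 3);
    usages.insert("RarelyUsed", 1);
    // With the default threshold of 2, only `CommonFields` survives; the
    // lone `RarelyUsed` spread is expanded back into the operation instead.
    assert_eq!(fragments_to_keep(&usages, 2), vec!["CommonFields"]);
}
```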
- if self.size() == original_size { + if self.len() == original_size { return Ok(selection_set.clone()); } @@ -1204,8 +1250,12 @@ impl NamedFragments { ) } - fn update_usages(usages: &mut HashMap, fragment: &Node, usage_count: i32) { - let mut inner_usages = HashMap::new(); + fn update_usages( + usages: &mut IndexMap, + fragment: &Node, + usage_count: u32, + ) { + let mut inner_usages = IndexMap::default(); fragment.collect_used_fragment_names(&mut inner_usages); for (name, inner_count) in inner_usages { @@ -1220,17 +1270,17 @@ impl NamedFragments { impl Selection { fn reuse_fragments_inner( &self, - fragments: &NamedFragments, + context: &ReuseContext<'_>, validator: &mut FieldsConflictMultiBranchValidator, fragments_at_type: &mut FragmentRestrictionAtTypeCache, ) -> Result { match self { Selection::Field(field) => Ok(field - .reuse_fragments_inner(fragments, validator, fragments_at_type)? + .reuse_fragments_inner(context, validator, fragments_at_type)? .into()), Selection::FragmentSpread(_) => Ok(self.clone()), // Do nothing Selection::InlineFragment(inline_fragment) => Ok(inline_fragment - .reuse_fragments_inner(fragments, validator, fragments_at_type)? + .reuse_fragments_inner(context, validator, fragments_at_type)? .into()), } } @@ -1239,7 +1289,7 @@ impl Selection { impl FieldSelection { fn reuse_fragments_inner( &self, - fragments: &NamedFragments, + context: &ReuseContext<'_>, validator: &mut FieldsConflictMultiBranchValidator, fragments_at_type: &mut FragmentRestrictionAtTypeCache, ) -> Result { @@ -1257,28 +1307,24 @@ impl FieldSelection { // First, see if we can reuse fragments for the selection of this field. let opt = selection_set.try_apply_fragments( &base_composite_type, - fragments, + context, &mut field_validator, fragments_at_type, FullMatchingFragmentCondition::ForFieldSelection, )?; - let mut optimized; - match opt { + let mut optimized = match opt { SelectionSetOrFragment::Fragment(fragment) => { let fragment_selection = FragmentSpreadSelection::from_fragment( &fragment, /*directives*/ &Default::default(), ); - optimized = - SelectionSet::from_selection(base_composite_type, fragment_selection.into()); + SelectionSet::from_selection(base_composite_type, fragment_selection.into()) } - SelectionSetOrFragment::SelectionSet(selection_set) => { - optimized = selection_set; - } - } + SelectionSetOrFragment::SelectionSet(selection_set) => selection_set, + }; optimized = - optimized.reuse_fragments_inner(fragments, &mut field_validator, fragments_at_type)?; + optimized.reuse_fragments_inner(context, &mut field_validator, fragments_at_type)?; Ok(self.with_updated_selection_set(Some(optimized))) } } @@ -1303,17 +1349,17 @@ impl From for Selection { impl InlineFragmentSelection { fn reuse_fragments_inner( &self, - fragments: &NamedFragments, + context: &ReuseContext<'_>, validator: &mut FieldsConflictMultiBranchValidator, fragments_at_type: &mut FragmentRestrictionAtTypeCache, ) -> Result { - let mut optimized = self.selection_set.clone(); + let optimized; let type_condition_position = &self.inline_fragment.type_condition_position; if let Some(type_condition_position) = type_condition_position { let opt = self.selection_set.try_apply_fragments( type_condition_position, - fragments, + context, validator, fragments_at_type, FullMatchingFragmentCondition::ForInlineFragmentSelection { @@ -1358,34 +1404,37 @@ impl InlineFragmentSelection { ) .into(), ); - // fall-through } } SelectionSetOrFragment::SelectionSet(selection_set) => { optimized = selection_set; - // fall-through } } + } 
else { + optimized = self.selection_set.clone(); } - // Then, recurse inside the field sub-selection (note that if we matched some fragments - // above, this recursion will "ignore" those as `FragmentSpreadSelection`'s - // `reuse_fragments()` is a no-op). - optimized = optimized.reuse_fragments_inner(fragments, validator, fragments_at_type)?; - Ok(InlineFragmentSelection::new(self.inline_fragment.clone(), optimized).into()) + Ok(InlineFragmentSelection::new( + self.inline_fragment.clone(), + // Then, recurse inside the field sub-selection (note that if we matched some fragments + // above, this recursion will "ignore" those as `FragmentSpreadSelection`'s + // `reuse_fragments()` is a no-op). + optimized.reuse_fragments_inner(context, validator, fragments_at_type)?, + ) + .into()) } } impl SelectionSet { fn reuse_fragments_inner( &self, - fragments: &NamedFragments, + context: &ReuseContext<'_>, validator: &mut FieldsConflictMultiBranchValidator, fragments_at_type: &mut FragmentRestrictionAtTypeCache, ) -> Result<SelectionSet, FederationError> { - self.lazy_map(fragments, |selection| { + self.lazy_map(context.fragments, |selection| { Ok(selection - .reuse_fragments_inner(fragments, validator, fragments_at_type)? + .reuse_fragments_inner(context, validator, fragments_at_type)? .into()) }) } @@ -1402,16 +1451,16 @@ /// ## Errors /// Returns an error if the selection set contains a named fragment spread. - fn reuse_fragments(&mut self, fragments: &NamedFragments) -> Result<(), FederationError> { - if fragments.is_empty() { + fn reuse_fragments(&mut self, context: &ReuseContext<'_>) -> Result<(), FederationError> { + if context.fragments.is_empty() { return Ok(()); } if self.contains_fragment_spread() { - return Err(FederationError::internal("optimize() must only be used on selection sets that do not contain named fragment spreads")); + return Err(FederationError::internal("reuse_fragments() must only be used on selection sets that do not contain named fragment spreads")); } - // Calling optimize() will not match a fragment that would have expanded at + // Calling reuse_fragments() will not match a fragment that would have expanded at // top-level. That is, say we have the selection set `{ x y }` for a top-level `Query`, and // we have a fragment // ``` @@ -1420,12 +1469,12 @@ // y // } // ``` - // then calling `self.optimize(fragments)` would only apply check if F apply to + // then calling `self.reuse_fragments(fragments)` would only check whether F applies to // `x` and then `y`. // // To ensure the fragment matches in this case, we "wrap" the selection into a trivial // fragment of the selection parent, so in the example above, we create selection `... on - // Query { x y}`. With that, `optimize` will correctly match on the `on Query` + // Query { x y }`. With that, `reuse_fragments` will correctly match on the `on Query` // fragment; after which we can unpack the final result. let wrapped = InlineFragmentSelection::from_selection_set( self.type_position.clone(), // parent type @@ -1436,7 +1485,7 @@ FieldsConflictValidator::from_selection_set(self), ); let optimized = wrapped.reuse_fragments_inner( - fragments, + context, &mut validator, &mut FragmentRestrictionAtTypeCache::default(), )?; @@ -1456,7 +1505,7 @@ } impl Operation { - // PORT_NOTE: The JS version of `optimize` takes an optional `minUsagesToOptimize` argument. + // PORT_NOTE: The JS version of `reuse_fragments` takes an optional `minUsagesToOptimize` argument.
// However, it's only used in tests. So, it's removed in the Rust version. const DEFAULT_MIN_USAGES_TO_OPTIMIZE: u32 = 2; @@ -1473,7 +1522,8 @@ impl Operation { // Optimize the operation's selection set by re-using existing fragments. let before_optimization = self.selection_set.clone(); - self.selection_set.reuse_fragments(fragments)?; + self.selection_set + .reuse_fragments(&ReuseContext::for_operation(fragments, &self.variables))?; if before_optimization == self.selection_set { return Ok(()); } @@ -1501,6 +1551,25 @@ impl Operation { self.reuse_fragments_inner(fragments, Self::DEFAULT_MIN_USAGES_TO_OPTIMIZE) } + /// Optimize the parsed size of the operation by generating fragments based on the selections + /// in the operation. + pub(crate) fn generate_fragments(&mut self) -> Result<(), FederationError> { + // Currently, this method simply pulls out every inline fragment into a named fragment. If + // multiple inline fragments are the same, they use the same named fragment. + // + // This method can generate named fragments that are only used once. It's not ideal, but it + // also doesn't seem that bad. Avoiding this is possible but more work, and keeping this + // as simple as possible is a big benefit for now. + // + // When we have more advanced correctness testing, we can add more features to fragment + // generation, like factoring out partial repeated slices of selection sets or only + // introducing named fragments for patterns that occur more than once. + let mut generator = FragmentGenerator::default(); + generator.visit_selection_set(&mut self.selection_set)?; + self.named_fragments = generator.into_inner(); + Ok(()) + } + /// Used by legacy roundtrip tests. /// - This lowers `min_usages_to_optimize` to `1` in order to make it easier to write unit tests. #[cfg(test)] @@ -1531,6 +1600,197 @@ impl Operation { } } +/// Returns a consistent GraphQL name for the given index. +fn fragment_name(mut index: usize) -> Name { + /// https://spec.graphql.org/draft/#NameContinue + const NAME_CHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"; + /// https://spec.graphql.org/draft/#NameStart + const NAME_START_CHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"; + + if index < NAME_START_CHARS.len() { + Name::new_static_unchecked(&NAME_START_CHARS[index..index + 1]) + } else { + let mut s = String::new(); + + let i = index % NAME_START_CHARS.len(); + s.push(NAME_START_CHARS.as_bytes()[i].into()); + index /= NAME_START_CHARS.len(); + + while index > 0 { + let i = index % NAME_CHARS.len(); + s.push(NAME_CHARS.as_bytes()[i].into()); + index /= NAME_CHARS.len(); + } + + Name::new_unchecked(&s) + } +} + +#[derive(Debug, Default)] +struct FragmentGenerator { + fragments: NamedFragments, + // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! + names: IndexMap<(String, usize), usize>, +} + +impl FragmentGenerator { + fn next_name(&self) -> Name { + fragment_name(self.fragments.len()) + } + + // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! + // In the future, we will just use `.next_name()`. 
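For intuition, here is `fragment_name(100)` worked by hand using the constants above (`NAME_START_CHARS` has 53 characters, `NAME_CHARS` has 63); the result matches the `generated_fragment_names` test further down in this file:

```rust
fn main() {
    const NAME_START_CHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_";
    const NAME_CHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";

    // fragment_name(100):
    //   100 % 53 == 47  ->  NAME_START_CHARS[47] == 'V'
    //   100 / 53 == 1   ->  1 % 63 == 1 -> NAME_CHARS[1] == 'b'; 1 / 63 == 0 ends the loop
    // so index 100 produces the name "Vb".
    assert_eq!(NAME_START_CHARS.len(), 53);
    assert_eq!(NAME_CHARS.len(), 63);
    assert_eq!(&NAME_START_CHARS[47..48], "V");
    assert_eq!(&NAME_CHARS[1..2], "b");
}
```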
+ fn generate_name(&mut self, frag: &InlineFragmentSelection) -> Name { + use std::fmt::Write as _; + + let type_condition = frag + .inline_fragment + .type_condition_position + .as_ref() + .map_or_else( + || "undefined".to_string(), + |condition| condition.to_string(), + ); + let selections = frag.selection_set.selections.len(); + let mut name = format!("_generated_on{type_condition}{selections}"); + + let key = (type_condition, selections); + let index = self + .names + .entry(key) + .and_modify(|index| *index += 1) + .or_default(); + _ = write!(&mut name, "_{index}"); + + Name::new_unchecked(&name) + } + + /// Is a selection set worth using for a newly generated named fragment? + fn is_worth_using(selection_set: &SelectionSet) -> bool { + let mut iter = selection_set.iter(); + let Some(first) = iter.next() else { + // An empty selection is not worth using (and invalid!) + return false; + }; + let Selection::Field(field) = first else { + return true; + }; + // If there's more than one selection, or one selection with a subselection, + // it's probably worth using + iter.next().is_some() || field.selection_set.is_some() + } + + /// Modify the selection set so that eligible inline fragments are moved to named fragment spreads. + fn visit_selection_set( + &mut self, + selection_set: &mut SelectionSet, + ) -> Result<(), FederationError> { + let mut new_selection_set = SelectionSet::empty( + selection_set.schema.clone(), + selection_set.type_position.clone(), + ); + + for (_key, selection) in Arc::make_mut(&mut selection_set.selections).iter_mut() { + match selection { + SelectionValue::Field(mut field) => { + if let Some(selection_set) = field.get_selection_set_mut() { + self.visit_selection_set(selection_set)?; + } + new_selection_set + .add_local_selection(&Selection::Field(Arc::clone(field.get())))?; + } + SelectionValue::FragmentSpread(frag) => { + new_selection_set + .add_local_selection(&Selection::FragmentSpread(Arc::clone(frag.get())))?; + } + SelectionValue::InlineFragment(frag) + if !Self::is_worth_using(&frag.get().selection_set) => + { + new_selection_set + .add_local_selection(&Selection::InlineFragment(Arc::clone(frag.get())))?; + } + SelectionValue::InlineFragment(mut candidate) => { + self.visit_selection_set(candidate.get_selection_set_mut())?; + + let directives = &candidate.get().inline_fragment.directives; + let skip_include = directives + .iter() + .map(|directive| match directive.name.as_str() { + "skip" | "include" => Ok(directive.clone()), + _ => Err(()), + }) + .collect::>(); + + // If there are any directives *other* than @skip and @include, + // we can't just transfer them to the generated fragment spread, + // so we have to keep this inline fragment. + let Ok(skip_include) = skip_include else { + new_selection_set.add_local_selection(&Selection::InlineFragment( + Arc::clone(candidate.get()), + ))?; + continue; + }; + + // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! + // JS does not special-case @skip and @include. It never extracts a fragment if + // there's any directives on it. 
This code duplicates the body from the + // previous condition so it's very easy to remove when we're ready :) + if !skip_include.is_empty() { + new_selection_set.add_local_selection(&Selection::InlineFragment( + Arc::clone(candidate.get()), + ))?; + continue; + } + + let existing = self.fragments.iter().find(|existing| { + existing.type_condition_position + == candidate.get().inline_fragment.casted_type() + && existing.selection_set == candidate.get().selection_set + }); + + let existing = if let Some(existing) = existing { + existing + } else { + // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! + // This should be reverted to `self.next_name();` when we're ready. + let name = self.generate_name(candidate.get()); + self.fragments.insert(Fragment { + schema: selection_set.schema.clone(), + name: name.clone(), + type_condition_position: candidate.get().inline_fragment.casted_type(), + directives: Default::default(), + selection_set: candidate.get().selection_set.clone(), + }); + self.fragments.get(&name).unwrap() + }; + new_selection_set.add_local_selection(&Selection::from( + FragmentSpreadSelection { + spread: FragmentSpread::new(FragmentSpreadData { + schema: selection_set.schema.clone(), + fragment_name: existing.name.clone(), + type_condition_position: existing.type_condition_position.clone(), + directives: skip_include.into(), + fragment_directives: existing.directives.clone(), + selection_id: crate::operation::SelectionId::new(), + }), + selection_set: existing.selection_set.clone(), + }, + ))?; + } + } + } + + *selection_set = new_selection_set; + + Ok(()) + } + + /// Consumes the generator and returns the fragments it generated. + fn into_inner(self) -> NamedFragments { + self.fragments + } +} + //============================================================================= // Tests @@ -1558,6 +1818,13 @@ mod tests { }}; } + #[test] + fn generated_fragment_names() { + assert_eq!(fragment_name(0), "a"); + assert_eq!(fragment_name(100), "Vb"); + assert_eq!(fragment_name(usize::MAX), "oS5Uz8g3Iqw"); + } + #[test] fn duplicate_fragment_spreads_after_fragment_expansion() { // This is a regression test for FED-290, making sure `make_select` method can handle @@ -2939,8 +3206,8 @@ mod tests { /// #[test] - #[should_panic(expected = "directive cannot be used on FRAGMENT_DEFINITION")] - // TODO: Investigate this restriction on query document in Rust version. + #[should_panic(expected = "directive is not supported for FRAGMENT_DEFINITION")] + // XXX(@goto-bus-stop): this test does not make sense, we should remove this feature fn reuse_fragments_with_same_directive_on_the_fragment() { let schema_doc = r#" type Query { @@ -3148,6 +3415,89 @@ mod tests { "###); } + #[test] + fn reuse_fragments_with_non_intersecting_types() { + let schema = r#" + type Query { + t: T + s: S + s2: S + i: I + } + + interface I { + a: Int + b: Int + } + + type T implements I { + a: Int + b: Int + + c: Int + d: Int + } + type S implements I { + a: Int + b: Int + + f: Int + g: Int + } + "#; + let query = r#" + query A ($if: Boolean!) { + t { ...x } + s { ...x } + i { ...x } + } + query B { + s { + # this matches fragment x once it is flattened, + # because the `...on T` condition does not intersect with our + # current type `S` + __typename + a b + } + s2 { + # same snippet to get it to use the fragment + __typename + a b + } + } + fragment x on I { + __typename + a + b + ... 
on T { c d @include(if: $if) } + } + "#; + let schema = parse_schema(schema); + let query = ExecutableDocument::parse_and_validate(schema.schema(), query, "query.graphql") + .unwrap(); + + let operation_a = + Operation::from_operation_document(schema.clone(), &query, Some("A")).unwrap(); + let operation_b = + Operation::from_operation_document(schema.clone(), &query, Some("B")).unwrap(); + let expanded_b = operation_b.expand_all_fragments_and_normalize().unwrap(); + + assert_optimized!(expanded_b, operation_a.named_fragments, @r###" + query B { + s { + __typename + a + b + } + s2 { + __typename + a + b + } + } + "###); + } + /// /// empty branches removal /// @@ -3156,6 +3506,7 @@ mod tests { use apollo_compiler::name; use super::*; + use crate::operation::SelectionKey; const TEST_SCHEMA_FOR_EMPTY_BRANCH_REMOVAL: &str = r#" type Query { diff --git a/apollo-federation/src/operation/rebase.rs b/apollo-federation/src/operation/rebase.rs index 858d931b3f..99c770aa74 100644 --- a/apollo-federation/src/operation/rebase.rs +++ b/apollo-federation/src/operation/rebase.rs @@ -403,7 +403,7 @@ impl FragmentSpread { &self.schema, ) { Ok(FragmentSpread::new(FragmentSpreadData::from_fragment( - &named_fragment, + named_fragment, &self.directives, ))) } else { @@ -500,7 +500,7 @@ impl FragmentSpreadSelection { } let spread = FragmentSpread::new(FragmentSpreadData::from_fragment( - &named_fragment, + named_fragment, &self.spread.directives, )); Ok(FragmentSpreadSelection { @@ -1261,7 +1261,7 @@ type T { assert!(rebased_fragments.is_ok()); let rebased_fragments = rebased_fragments.unwrap(); // F1 reduces to nothing, and F2 reduces to just __typename so we shouldn't keep them. - assert_eq!(1, rebased_fragments.size()); + assert_eq!(1, rebased_fragments.len()); assert!(rebased_fragments.contains(&name!("F3"))); let rebased_fragment = rebased_fragments.fragments.get("F3").unwrap(); @@ -1337,7 +1337,7 @@ type T { assert!(rebased_fragments.is_ok()); let rebased_fragments = rebased_fragments.unwrap(); // F1 reduces to nothing, and F2 reduces to just __typename so we shouldn't keep them. - assert_eq!(1, rebased_fragments.size()); + assert_eq!(1, rebased_fragments.len()); assert!(rebased_fragments.contains(&name!("TheQuery"))); let rebased_fragment = rebased_fragments.fragments.get("TheQuery").unwrap(); @@ -1414,7 +1414,7 @@ type T { assert!(rebased_fragments.is_ok()); let rebased_fragments = rebased_fragments.unwrap(); // F1 reduces to nothing, and F2 reduces to just __typename so we shouldn't keep them. - assert_eq!(1, rebased_fragments.size()); + assert_eq!(1, rebased_fragments.len()); assert!(rebased_fragments.contains(&name!("TQuery"))); let rebased_fragment = rebased_fragments.fragments.get("TQuery").unwrap(); diff --git a/apollo-federation/src/operation/simplify.rs b/apollo-federation/src/operation/simplify.rs index 95ae8ad243..8555b241a2 100644 --- a/apollo-federation/src/operation/simplify.rs +++ b/apollo-federation/src/operation/simplify.rs @@ -2,9 +2,9 @@ use std::sync::Arc; use apollo_compiler::executable; use apollo_compiler::name; -use apollo_compiler::Node; use super::runtime_types_intersect; +use super::DirectiveList; use super::Field; use super::FieldData; use super::FieldSelection; @@ -83,22 +83,18 @@ impl FieldSelection { // sub-selection is empty. Which suggest something may be wrong with this part of the query // intent, but the query was valid while keeping an empty sub-selection isn't. So in that // case, we just add some "non-included" __typename field just to keep the query valid. 
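The hunk below replaces a verbose directive construction with the new `DirectiveList::one` helper. For reference, the `@include(if: false)` directive being built can be written with plain apollo-compiler types roughly like this (a sketch; `non_included_directive` is a hypothetical name):

```rust
use apollo_compiler::ast::{Argument, Directive, Value};
use apollo_compiler::{name, Node};

// The directive attached to the injected `__typename` field so that the
// field is never actually included in a response.
fn non_included_directive() -> Directive {
    Directive {
        name: name!("include"),
        arguments: vec![Node::new(Argument {
            name: name!("if"),
            value: Node::new(Value::Boolean(false)),
        })],
    }
}

fn main() {
    assert_eq!(non_included_directive().name.as_str(), "include");
}
```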
- let directives = - executable::DirectiveList(vec![Node::new(executable::Directive { - name: name!("include"), - arguments: vec![Node::new(executable::Argument { - name: name!("if"), - value: Node::new(executable::Value::Boolean(false)), - })], - })]); + let directives = DirectiveList::one(executable::Directive { + name: name!("include"), + arguments: vec![(name!("if"), false).into()], + }); let non_included_typename = Selection::from_field( Field::new(FieldData { schema: schema.clone(), field_position: field_composite_type_position .introspection_typename_field(), alias: None, - arguments: Arc::new(vec![]), - directives: Arc::new(directives), + arguments: Default::default(), + directives, sibling_typename: None, }), None, @@ -158,12 +154,12 @@ impl InlineFragmentSelection { named_fragments: &NamedFragments, schema: &ValidFederationSchema, ) -> Result, FederationError> { - let this_condition = self.inline_fragment.type_condition_position.clone(); + let this_condition = self.inline_fragment.type_condition_position.as_ref(); // This method assumes by contract that `parent_type` runtimes intersects `self.inline_fragment.parent_type_position`'s, // but `parent_type` runtimes may be a subset. So first check if the selection should not be discarded on that account (that // is, we should not keep the selection if its condition runtimes don't intersect at all with those of // `parent_type` as that would ultimately make an invalid selection set). - if let Some(ref type_condition) = this_condition { + if let Some(type_condition) = this_condition { if (self.inline_fragment.schema != *schema || self.inline_fragment.parent_type_position != *parent_type) && !runtime_types_intersect(type_condition, parent_type, schema) @@ -182,7 +178,7 @@ impl InlineFragmentSelection { // cannot be restricting things further (it's typically a less precise interface/union). let useless_fragment = match this_condition { None => true, - Some(ref c) => self.inline_fragment.schema == *schema && c == parent_type, + Some(c) => self.inline_fragment.schema == *schema && c == parent_type, }; if useless_fragment || parent_type.is_object_type() { // Try to skip this fragment and flatten_unnecessary_fragments self.selection_set with `parent_type`, @@ -224,14 +220,10 @@ impl InlineFragmentSelection { // We should be able to rebase, or there is a bug, so error if that is the case. // If we rebased successfully then we add "non-included" __typename field selection // just to keep the query valid. 
- let directives = - executable::DirectiveList(vec![Node::new(executable::Directive { - name: name!("include"), - arguments: vec![Node::new(executable::Argument { - name: name!("if"), - value: Node::new(executable::Value::Boolean(false)), - })], - })]); + let directives = DirectiveList::one(executable::Directive { + name: name!("include"), + arguments: vec![(name!("if"), false).into()], + }); let parent_typename_field = if let Some(condition) = this_condition { condition.introspection_typename_field() } else { @@ -242,8 +234,8 @@ impl InlineFragmentSelection { schema: schema.clone(), field_position: parent_typename_field, alias: None, - arguments: Arc::new(vec![]), - directives: Arc::new(directives), + arguments: Default::default(), + directives, sibling_typename: None, }), None, diff --git a/apollo-federation/src/operation/tests/mod.rs b/apollo-federation/src/operation/tests/mod.rs index b2081d0bcc..ed69e54d71 100644 --- a/apollo-federation/src/operation/tests/mod.rs +++ b/apollo-federation/src/operation/tests/mod.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use apollo_compiler::collections::IndexSet; use apollo_compiler::name; use apollo_compiler::schema::Schema; @@ -40,27 +38,7 @@ pub(super) fn parse_schema(schema_doc: &str) -> ValidFederationSchema { } pub(super) fn parse_operation(schema: &ValidFederationSchema, query: &str) -> Operation { - let executable_document = apollo_compiler::ExecutableDocument::parse_and_validate( - schema.schema(), - query, - "query.graphql", - ) - .unwrap(); - let operation = executable_document.operations.get(None).unwrap(); - let named_fragments = NamedFragments::new(&executable_document.fragments, schema); - let selection_set = - SelectionSet::from_selection_set(&operation.selection_set, &named_fragments, schema) - .unwrap(); - - Operation { - schema: schema.clone(), - root_kind: operation.operation_type.into(), - name: operation.name.clone(), - variables: Arc::new(operation.variables.clone()), - directives: Arc::new(operation.directives.clone()), - selection_set, - named_fragments, - } + Operation::parse(schema.clone(), query, "query.graphql", None).unwrap() } /// Parse and validate the query similarly to `parse_operation`, but does not construct the @@ -1614,7 +1592,6 @@ fn used_variables() { let mut variables = operation .selection_set .used_variables() - .unwrap() .into_iter() .collect::>(); variables.sort(); @@ -1633,7 +1610,6 @@ fn used_variables() { .as_ref() .unwrap() .used_variables() - .unwrap() .into_iter() .collect::>(); variables.sort(); diff --git a/apollo-federation/src/query_graph/build_query_graph.rs b/apollo-federation/src/query_graph/build_query_graph.rs index 929e4b8d0e..3dd7abbcd6 100644 --- a/apollo-federation/src/query_graph/build_query_graph.rs +++ b/apollo-federation/src/query_graph/build_query_graph.rs @@ -21,7 +21,6 @@ use crate::link::federation_spec_definition::KeyDirectiveArguments; use crate::operation::merge_selection_sets; use crate::operation::Selection; use crate::operation::SelectionSet; -use crate::query_graph::extract_subgraphs_from_supergraph::extract_subgraphs_from_supergraph; use crate::query_graph::QueryGraph; use crate::query_graph::QueryGraphEdge; use crate::query_graph::QueryGraphEdgeTransition; @@ -41,6 +40,7 @@ use crate::schema::position::SchemaRootDefinitionPosition; use crate::schema::position::TypeDefinitionPosition; use crate::schema::position::UnionTypeDefinitionPosition; use crate::schema::ValidFederationSchema; +use crate::supergraph::extract_subgraphs_from_supergraph; /// Builds a "federated" query 
graph based on the provided supergraph and API schema. /// @@ -1339,8 +1339,9 @@ impl FederatedQueryGraphBuilder { let application = subgraph_data .federation_spec_definition .requires_directive_arguments(directive)?; + // @requires field set is validated against the supergraph let conditions = parse_field_set( - schema, + &self.supergraph_schema, field_definition_position.parent().type_name().clone(), application.fields, )?; @@ -1902,7 +1903,8 @@ impl FederatedQueryGraphBuilder { } .into()); }; - if conditions.selections == followup_conditions.selections { + + if conditions == followup_conditions { continue; } } @@ -1926,7 +1928,7 @@ impl FederatedQueryGraphBuilder { // since we can do "start of query" -> C and that's always better. if matches!( followup_edge_weight.transition, - QueryGraphEdgeTransition::SubgraphEnteringTransition + QueryGraphEdgeTransition::RootTypeResolution { .. } ) { continue; } diff --git a/apollo-federation/src/query_graph/graph_path.rs b/apollo-federation/src/query_graph/graph_path.rs index b65736311d..b9486f8434 100644 --- a/apollo-federation/src/query_graph/graph_path.rs +++ b/apollo-federation/src/query_graph/graph_path.rs @@ -11,7 +11,6 @@ use std::sync::Arc; use apollo_compiler::ast::Value; use apollo_compiler::collections::IndexMap; use apollo_compiler::collections::IndexSet; -use apollo_compiler::executable::DirectiveList; use itertools::Itertools; use petgraph::graph::EdgeIndex; use petgraph::graph::NodeIndex; @@ -30,6 +29,7 @@ use crate::link::graphql_definition::BooleanOrVariable; use crate::link::graphql_definition::DeferDirectiveArguments; use crate::link::graphql_definition::OperationConditional; use crate::link::graphql_definition::OperationConditionalKind; +use crate::operation::DirectiveList; use crate::operation::Field; use crate::operation::FieldData; use crate::operation::HasSelectionKey; @@ -227,6 +227,9 @@ pub(crate) struct SubgraphEnteringEdgeInfo { /// Note that we shouldn't add `derive(Serialize, Deserialize)` to this without changing the types /// to be something like UUIDs. #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +// NOTE(@TylerBloom): This feature gate can be removed once the condition in the comment above is +// met. +#[cfg_attr(feature = "snapshot_tracing", derive(serde::Serialize))] pub(crate) struct OverrideId(usize); /// Global storage for the counter used to allocate `OverrideId`s. @@ -307,7 +310,7 @@ impl HasSelectionKey for OpPathElement { } impl OpPathElement { - pub(crate) fn directives(&self) -> &Arc { + pub(crate) fn directives(&self) -> &DirectiveList { match self { OpPathElement::Field(field) => &field.directives, OpPathElement::InlineFragment(inline_fragment) => &inline_fragment.directives, @@ -424,6 +427,7 @@ impl OpPathElement { match self { Self::Field(_) => Some(self.clone()), // unchanged Self::InlineFragment(inline_fragment) => { + // TODO(@goto-bus-stop): is this not exactly the wrong way around? let updated_directives: DirectiveList = inline_fragment .directives .get_all("defer") @@ -877,6 +881,11 @@ where let mut edges = self.edges.clone(); let mut edge_triggers = self.edge_triggers.clone(); let mut edge_conditions = self.edge_conditions.clone(); + let mut last_subgraph_entering_edge_info = if defer.is_none() { + self.last_subgraph_entering_edge_info.clone() + } else { + None + }; let Some(new_edge) = edge.into() else { edges.push(edge); @@ -892,11 +901,7 @@ where // We clear `last_subgraph_entering_edge_info` as we enter a `@defer`. 
That is // because `last_subgraph_entering_edge_info` is used to eliminate some non-optimal // paths, but we don't want those optimizations to bypass a `@defer` application. - last_subgraph_entering_edge_info: if defer.is_some() { - None - } else { - self.last_subgraph_entering_edge_info.clone() - }, + last_subgraph_entering_edge_info, own_path_ids: self.own_path_ids.clone(), overriding_path_ids: self.overriding_path_ids.clone(), runtime_types_of_tail: Arc::new( @@ -1074,6 +1079,12 @@ where edges.push(edge); edge_triggers.push(Arc::new(trigger)); edge_conditions.push(condition_path_tree); + if defer.is_none() && self.graph.is_cross_subgraph_edge(new_edge)? { + last_subgraph_entering_edge_info = Some(SubgraphEnteringEdgeInfo { + index: self.edges.len() - 1, + conditions_cost: condition_cost, + }); + } return Ok(GraphPath { graph: self.graph.clone(), head: self.head, @@ -1086,16 +1097,7 @@ where // // PORT_NOTE: In the JS codebase, the information for the last subgraph-entering // is set incorrectly, in that the index is off by one. We fix that bug here. - last_subgraph_entering_edge_info: if defer.is_none() - && self.graph.is_cross_subgraph_edge(new_edge)? - { - Some(SubgraphEnteringEdgeInfo { - index: self.edges.len() - 1, - conditions_cost: condition_cost, - }) - } else { - None - }, + last_subgraph_entering_edge_info, own_path_ids: self.own_path_ids.clone(), overriding_path_ids: self.overriding_path_ids.clone(), runtime_types_of_tail: Arc::new(self.graph.advance_possible_runtime_types( @@ -1112,6 +1114,12 @@ where edges.push(edge); edge_triggers.push(Arc::new(trigger)); edge_conditions.push(condition_path_tree); + if defer.is_none() && self.graph.is_cross_subgraph_edge(new_edge)? { + last_subgraph_entering_edge_info = Some(SubgraphEnteringEdgeInfo { + index: self.edges.len(), + conditions_cost: condition_cost, + }); + } Ok(GraphPath { graph: self.graph.clone(), head: self.head, @@ -1121,16 +1129,7 @@ where edge_conditions, // Again, we don't want to set `last_subgraph_entering_edge_info` if we're entering a // `@defer` (see above). - last_subgraph_entering_edge_info: if defer.is_none() - && self.graph.is_cross_subgraph_edge(new_edge)? - { - Some(SubgraphEnteringEdgeInfo { - index: self.edges.len(), - conditions_cost: condition_cost, - }) - } else { - None - }, + last_subgraph_entering_edge_info, own_path_ids: self.own_path_ids.clone(), overriding_path_ids: self.overriding_path_ids.clone(), runtime_types_of_tail: Arc::new( @@ -1458,10 +1457,12 @@ where heap.push(HeapElement(self.clone())); while let Some(HeapElement(to_advance)) = heap.pop() { - let span = debug_span!("From {to_advance:?}"); + debug!("From {to_advance:?}"); + let span = debug_span!(" |"); let _guard = span.enter(); for edge in to_advance.next_edges()? 
{ - let span = debug_span!("Testing edge {edge:?}"); + debug!("Testing edge {edge:?}"); + let span = debug_span!(" |"); let _guard = span.enter(); let edge_weight = self.graph.edge_weight(edge)?; if edge_weight.transition.collect_operation_elements() { @@ -1540,7 +1541,8 @@ where continue; } - let span = debug_span!("Validating conditions {edge_weight}"); + debug!("Validating conditions {edge_weight}"); + let span = debug_span!(" |"); let guard = span.enter(); // As we validate the condition for this edge, it might be necessary to jump to // another subgraph, but if for that we need to jump to the same subgraph we're @@ -1656,7 +1658,7 @@ where let last_subgraph_entering_edge_head_weight = self.graph.node_weight(last_subgraph_entering_edge_head)?; last_subgraph_entering_edge_head_weight.source - == last_subgraph_entering_edge_tail_weight.source + == edge_tail_weight.source }; let direct_path_end_node = @@ -1668,7 +1670,7 @@ where "Edge tail is unexpectedly a federated root", )); }; - self.check_direct_path_from_node( + to_advance.check_direct_path_from_node( last_subgraph_entering_edge_info.index + 1, direct_path_start_node, edge_tail_type_pos, @@ -1747,7 +1749,6 @@ where edge_tail_weight.source.clone(), Some((updated_path.clone(), cost)), ); - debug!("Using edge, advance path: {updated_path:?}"); // It can be necessary to "chain" keys, because different subgraphs may have // different keys exposed, and so we when we took a key, we want to check if // there is a new key we can now use that takes us to other subgraphs. For other @@ -2628,10 +2629,9 @@ impl OpGraphPath { debug!("Casting into requested type {field_parent_pos}"); Arc::new(IndexSet::from_iter([field_parent_pos.clone()])) } else { - if interface_path.is_some() { - debug!("No direct edge: type exploding interface {tail_weight} into possible runtime types {:?}", self.runtime_types_of_tail); - } else { - debug!("Type exploding interface {tail_weight} into possible runtime types {:?} as 2nd option", self.runtime_types_of_tail); + match &interface_path { + Some(_) => debug!("No direct edge: type exploding interface {tail_weight} into possible runtime types {:?}", self.runtime_types_of_tail), + None => debug!("Type exploding interface {tail_weight} into possible runtime types {:?} as 2nd option", self.runtime_types_of_tail), } self.runtime_types_of_tail.clone() }; @@ -2641,8 +2641,8 @@ impl OpGraphPath { // any gives us empty options, we bail. let mut options_for_each_implementation = vec![]; for implementation_type_pos in implementations.as_ref() { - let span = - debug_span!("Handling implementation {implementation_type_pos}"); + debug!("Handling implementation {implementation_type_pos}"); + let span = debug_span!(" |"); let guard = span.enter(); let implementation_inline_fragment = InlineFragment::new(InlineFragmentData { @@ -3091,7 +3091,7 @@ impl OpGraphPath { // account (it may very well be that whatever comes after `u` is not in `A`, for instance). let self_tail_weight = self.graph.node_weight(self.tail)?; let other_tail_weight = self.graph.node_weight(other.tail)?; - if self_tail_weight.source == other_tail_weight.source { + if self_tail_weight.source != other_tail_weight.source { // As described above, we want to know if one of the paths has no jumps at all (after // the common prefix) while the other has some. 
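The logging rewrites in this file all follow the same pattern. In `tracing`, a span's name is static metadata, so the old `debug_span!("Testing edge {edge:?}")` created a span literally named `Testing edge {edge:?}` with no interpolation. The diff instead logs the dynamic text as an event and opens a fixed-name `" |"` span purely for nesting. A sketch of the pattern, assuming the `tracing` and `tracing-subscriber` crates:

```rust
use tracing::{debug, debug_span};

fn visit_edge(edge: usize) {
    // Not interpolated: a span named this way would keep the placeholder
    // text "{edge:?}" verbatim in its name.
    // let _span = debug_span!("Testing edge {edge:?}");

    // Instead, emit the dynamic part as an event, then enter a cheap
    // fixed-name span so nested events are grouped under it.
    debug!("Testing edge {edge:?}");
    let span = debug_span!(" |");
    let _guard = span.enter();
    debug!("validating conditions");
}

fn main() {
    tracing_subscriber::fmt().with_max_level(tracing::Level::DEBUG).init();
    visit_edge(3);
}
```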
self.compare_subgraph_jumps_after_last_common_node(other) @@ -3179,9 +3179,9 @@ impl SimultaneousPaths { product.saturating_mul(options.len()) }); if num_options > 1_000_000 { - return Err(FederationError::internal( - "flat_cartesian_product: excessive number of combinations: {num_options}", - )); + return Err(FederationError::internal(format!( + "flat_cartesian_product: excessive number of combinations: {num_options}" + ))); } let mut product = Vec::with_capacity(num_options); @@ -3233,9 +3233,9 @@ impl SimultaneousPaths { match (self.0.as_slice(), other.0.as_slice()) { ([a], [b]) => a.compare_single_path_options_complexity_out_of_context(b), ([a], _) => a.compare_single_vs_multi_path_options_complexity_out_of_context(other), - (_, [b]) => Ok(b - .compare_single_vs_multi_path_options_complexity_out_of_context(self)? - .reverse()), + (_, [b]) => b + .compare_single_vs_multi_path_options_complexity_out_of_context(self) + .map(Ordering::reverse), _ => Ok(Ordering::Equal), } } @@ -3346,8 +3346,12 @@ impl SimultaneousPathsWithLazyIndirectPaths { operation_element: &OpPathElement, condition_resolver: &mut impl ConditionResolver, ) -> Result>, FederationError> { - let span = debug_span!("Trying to advance paths for operation", paths = %self.paths, operation = %operation_element); - let _gaurd = span.enter(); + debug!( + "Trying to advance paths for operation: path = {}, operation = {operation_element}", + self.paths + ); + let span = debug_span!(" |"); + let _guard = span.enter(); let updated_context = self.context.with_context_of(operation_element)?; let mut options_for_each_path = vec![]; @@ -3355,13 +3359,15 @@ impl SimultaneousPathsWithLazyIndirectPaths { // references to `self`, which means cloning these paths when iterating. let paths = self.paths.0.clone(); for (path_index, path) in paths.iter().enumerate() { - let span = debug_span!("Computing options for {path}"); + debug!("Computing options for {path}"); + let span = debug_span!(" |"); let gaurd = span.enter(); let mut options = None; let should_reenter_subgraph = path.defer_on_tail.is_some() && matches!(operation_element, OpPathElement::Field(_)); if !should_reenter_subgraph { - let span = debug_span!("Direct options"); + debug!("Direct options"); + let span = debug_span!(" |"); let gaurd = span.enter(); let (advance_options, has_only_type_exploded_results) = path .advance_with_operation_element( @@ -3414,8 +3420,6 @@ impl SimultaneousPathsWithLazyIndirectPaths { // defer), that's ok, we'll just try with non-collecting edges. let mut options = options.unwrap_or_else(Vec::new); if let OpPathElement::Field(operation_field) = operation_element { - let span = debug_span!("Computing indirect paths:"); - let _gaurd = span.enter(); // Add whatever options can be obtained by taking some non-collecting edges first. let paths_with_non_collecting_edges = self .indirect_options(&updated_context, path_index, condition_resolver)? 
@@ -3425,13 +3429,11 @@ impl SimultaneousPathsWithLazyIndirectPaths { "{} indirect paths", paths_with_non_collecting_edges.paths.len() ); - let span = debug_span!("Validating indirect options:"); - let _gaurd = span.enter(); for paths_with_non_collecting_edges in paths_with_non_collecting_edges.paths.iter() { - let span = - debug_span!("For indirect path {paths_with_non_collecting_edges}:"); + debug!("For indirect path {paths_with_non_collecting_edges}:"); + let span = debug_span!(" |"); let _gaurd = span.enter(); let (advance_options, _) = paths_with_non_collecting_edges .advance_with_operation_element( @@ -3676,18 +3678,16 @@ impl OpPath { } pub(crate) fn conditional_directives(&self) -> DirectiveList { - DirectiveList( - self.0 - .iter() - .flat_map(|path_element| { - path_element - .directives() - .iter() - .filter(|d| d.name == "include" || d.name == "skip") - }) - .cloned() - .collect(), - ) + self.0 + .iter() + .flat_map(|path_element| { + path_element + .directives() + .iter() + .filter(|d| d.name == "include" || d.name == "skip") + }) + .cloned() + .collect() } /// Filter any fragment element in the provided path whose type condition does not exist in the provided schema. @@ -3836,7 +3836,6 @@ fn is_useless_followup_element( mod tests { use std::sync::Arc; - use apollo_compiler::executable::DirectiveList; use apollo_compiler::Name; use apollo_compiler::Schema; use petgraph::stable_graph::EdgeIndex; @@ -3849,7 +3848,6 @@ mod tests { use crate::query_graph::graph_path::OpGraphPath; use crate::query_graph::graph_path::OpGraphPathTrigger; use crate::query_graph::graph_path::OpPathElement; - use crate::schema::position::FieldDefinitionPosition; use crate::schema::position::ObjectFieldDefinitionPosition; use crate::schema::ValidFederationSchema; @@ -3880,14 +3878,7 @@ mod tests { type_name: Name::new("T").unwrap(), field_name: Name::new("t").unwrap(), }; - let data = FieldData { - schema: schema.clone(), - field_position: FieldDefinitionPosition::Object(pos), - alias: None, - arguments: Arc::new(Vec::new()), - directives: Arc::new(DirectiveList::new()), - sibling_typename: None, - }; + let data = FieldData::from_position(&schema, pos.into()); let trigger = OpGraphPathTrigger::OpPathElement(OpPathElement::Field(Field::new(data))); let path = path .add( @@ -3905,14 +3896,7 @@ mod tests { type_name: Name::new("ID").unwrap(), field_name: Name::new("id").unwrap(), }; - let data = FieldData { - schema, - field_position: FieldDefinitionPosition::Object(pos), - alias: None, - arguments: Arc::new(Vec::new()), - directives: Arc::new(DirectiveList::new()), - sibling_typename: None, - }; + let data = FieldData::from_position(&schema, pos.into()); let trigger = OpGraphPathTrigger::OpPathElement(OpPathElement::Field(Field::new(data))); let path = path .add( diff --git a/apollo-federation/src/query_graph/mod.rs b/apollo-federation/src/query_graph/mod.rs index e77d191efa..15e83f49f9 100644 --- a/apollo-federation/src/query_graph/mod.rs +++ b/apollo-federation/src/query_graph/mod.rs @@ -30,7 +30,6 @@ use crate::schema::ValidFederationSchema; pub mod build_query_graph; pub(crate) mod condition_resolver; -pub(crate) mod extract_subgraphs_from_supergraph; pub(crate) mod graph_path; pub mod output; pub(crate) mod path_tree; diff --git a/apollo-federation/src/query_graph/path_tree.rs b/apollo-federation/src/query_graph/path_tree.rs index 3411458f89..02812159a3 100644 --- a/apollo-federation/src/query_graph/path_tree.rs +++ b/apollo-federation/src/query_graph/path_tree.rs @@ -464,7 +464,6 @@ where mod tests { 
use std::sync::Arc; - use apollo_compiler::executable::DirectiveList; use apollo_compiler::ExecutableDocument; use petgraph::stable_graph::NodeIndex; use petgraph::visit::EdgeRef; @@ -542,8 +541,8 @@ mod tests { schema: query_graph.schema().unwrap().clone(), field_position: field_def.clone(), alias: None, - arguments: Arc::new(Vec::new()), - directives: Arc::new(DirectiveList::new()), + arguments: Default::default(), + directives: Default::default(), sibling_typename: None, }; let trigger = OpGraphPathTrigger::OpPathElement(OpPathElement::Field(Field::new(data))); diff --git a/apollo-federation/src/query_plan/conditions.rs b/apollo-federation/src/query_plan/conditions.rs index ab84d2d8ca..d4a84b9f49 100644 --- a/apollo-federation/src/query_plan/conditions.rs +++ b/apollo-federation/src/query_plan/conditions.rs @@ -2,7 +2,6 @@ use std::sync::Arc; use apollo_compiler::ast::Directive; use apollo_compiler::collections::IndexMap; -use apollo_compiler::executable::DirectiveList; use apollo_compiler::executable::Value; use apollo_compiler::Name; use apollo_compiler::Node; @@ -10,6 +9,7 @@ use indexmap::map::Entry; use serde::Serialize; use crate::error::FederationError; +use crate::operation::DirectiveList; use crate::operation::Selection; use crate::operation::SelectionMap; use crate::operation::SelectionSet; @@ -93,7 +93,7 @@ impl Conditions { pub(crate) fn from_directives(directives: &DirectiveList) -> Result { let mut variables = IndexMap::default(); - for directive in directives { + for directive in directives.iter_sorted() { let negated = match directive.name.as_str() { "include" => false, "skip" => true, @@ -285,8 +285,8 @@ pub(crate) fn remove_unneeded_top_level_fragment_directives( } // We can skip some of the fragment directives directive. - let final_selection = - inline_fragment.with_updated_directives(DirectiveList(needed_directives)); + let final_selection = inline_fragment + .with_updated_directives(DirectiveList::from_iter(needed_directives)); selection_map.insert(Selection::InlineFragment(Arc::new(final_selection))); } } @@ -308,19 +308,17 @@ fn remove_conditions_of_element( element: OpPathElement, conditions: &VariableConditions, ) -> OpPathElement { - let updated_directives: DirectiveList = DirectiveList( - element - .directives() - .iter() - .filter(|d| { - !matches_condition_for_kind(d, conditions, ConditionKind::Include) - && !matches_condition_for_kind(d, conditions, ConditionKind::Skip) - }) - .cloned() - .collect(), - ); + let updated_directives: DirectiveList = element + .directives() + .iter() + .filter(|d| { + !matches_condition_for_kind(d, conditions, ConditionKind::Include) + && !matches_condition_for_kind(d, conditions, ConditionKind::Skip) + }) + .cloned() + .collect(); - if updated_directives.0.len() == element.directives().len() { + if updated_directives.len() == element.directives().len() { element } else { element.with_updated_directives(updated_directives) diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index d5425e2187..dfc862e23b 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -1,5 +1,3 @@ -use std::collections::HashMap; -use std::collections::HashSet; use std::fmt::Write as _; use std::iter; use std::ops::Deref; @@ -28,24 +26,25 @@ use petgraph::visit::EdgeRef; use petgraph::visit::IntoNodeReferences; use serde::Serialize; +use super::query_planner::SubgraphOperationCompression; use 
crate::error::FederationError; use crate::error::SingleFederationError; use crate::link::graphql_definition::DeferDirectiveArguments; +use crate::operation::ArgumentList; use crate::operation::ContainmentOptions; +use crate::operation::DirectiveList; use crate::operation::Field; use crate::operation::FieldData; use crate::operation::InlineFragment; use crate::operation::InlineFragmentData; use crate::operation::InlineFragmentSelection; use crate::operation::Operation; -use crate::operation::RebasedFragments; use crate::operation::Selection; use crate::operation::SelectionId; use crate::operation::SelectionMap; use crate::operation::SelectionSet; +use crate::operation::VariableCollector; use crate::operation::TYPENAME_FIELD; -use crate::query_graph::extract_subgraphs_from_supergraph::FEDERATION_REPRESENTATIONS_ARGUMENTS_NAME; -use crate::query_graph::extract_subgraphs_from_supergraph::FEDERATION_REPRESENTATIONS_VAR_NAME; use crate::query_graph::graph_path::concat_op_paths; use crate::query_graph::graph_path::concat_paths_in_parents; use crate::query_graph::graph_path::OpGraphPathContext; @@ -74,6 +73,8 @@ use crate::schema::position::TypeDefinitionPosition; use crate::schema::ValidFederationSchema; use crate::subgraph::spec::ANY_SCALAR_NAME; use crate::subgraph::spec::ENTITIES_QUERY; +use crate::supergraph::FEDERATION_REPRESENTATIONS_ARGUMENTS_NAME; +use crate::supergraph::FEDERATION_REPRESENTATIONS_VAR_NAME; use crate::utils::logging::snapshot; /// Represents the value of a `@defer(label:)` argument. @@ -666,7 +667,9 @@ impl FetchDependencyGraph { // keeping nodes separated when they have a different path in their parent // allows to keep that "path in parent" more precisely, // which is important for some case of @requires). - for existing_id in self.children_of(parent.parent_node_id) { + for existing_id in + FetchDependencyGraph::sorted_nodes(self.children_of(parent.parent_node_id)) + { let existing = self.node_weight(existing_id)?; // we compare the subgraph names last because on average it improves performance if existing.merge_at.as_deref() == Some(merge_at) @@ -727,6 +730,7 @@ impl FetchDependencyGraph { /// Adds another node as a parent of `child`, /// meaning that this fetch should happen after the provided one. + /// Assumption: The parent node is not a descendant of the child. fn add_parent(&mut self, child_id: NodeIndex, parent_relation: ParentRelation) { let ParentRelation { parent_node_id, @@ -736,8 +740,8 @@ impl FetchDependencyGraph { return; } assert!( - !self.graph.contains_edge(child_id, parent_node_id), - "Node {parent_node_id:?} is a child of {child_id:?}: \ + !self.is_descendant_of(parent_node_id, child_id), + "Node {parent_node_id:?} is a descendant of {child_id:?}: \ adding it as parent would create a cycle" ); self.on_modification(); @@ -792,15 +796,7 @@ impl FetchDependencyGraph { } fn is_descendant_of(&self, node_id: NodeIndex, maybe_ancestor_id: NodeIndex) -> bool { - if node_id == maybe_ancestor_id { - return true; - } - for child_id in self.children_of(node_id) { - if self.is_descendant_of(child_id, maybe_ancestor_id) { - return true; - } - } - false + petgraph::algo::has_path_connecting(&self.graph, maybe_ancestor_id, node_id, None) } /// Returns whether `node_id` is both a child of `maybe_parent_id` but also if we can show that the @@ -893,6 +889,27 @@ impl FetchDependencyGraph { }) } + /// By default, petgraph iterates over the nodes in the order of their node indices, but if + /// we retrieve node iterator based on the edges (e.g. 
children of/parents of), then the resulting + iteration order is unspecified. In practice, it appears that edges are iterated in the + *reverse* iteration order. + /// + /// Since this behavior can affect the query plans, we can use this method to explicitly sort + /// the iterator to ensure we consistently follow the node index order. + /// + /// NOTE: In the JS implementation, whenever we remove/merge nodes, we always shift the remaining + /// nodes left so there are no gaps in the node IDs and the newly created nodes are always created + /// with the largest IDs. The Rust implementation behaves differently: whenever nodes are removed, + /// their IDs are later reused by petgraph, so we no longer have a guarantee that the node with the + /// largest ID is the last node that was created. Due to the above, sorting by node IDs may still + /// result in a different iteration order than the JS code, but in practice it might be enough to + /// ensure correct plans. + fn sorted_nodes<'graph>( + nodes: impl Iterator<Item = NodeIndex> + 'graph, + ) -> impl Iterator<Item = NodeIndex> + 'graph { + nodes.sorted_by_key(|n| n.index()) + } + fn type_for_fetch_inputs( &self, type_name: &Name, @@ -903,8 +920,14 @@ .try_into()?) } - /// Find redundant edges coming out of a node. See `remove_redundant_edges`. - fn collect_redundant_edges(&self, node_index: NodeIndex, acc: &mut HashSet<EdgeIndex>) { + /// Find redundant edges coming out of a node. See `remove_redundant_edges`. This method assumes + /// that the underlying graph does not have any cycles between nodes. + /// + /// PORT_NOTE: The JS implementation performs in-place removal of edges when finding the redundant + /// edges. In the Rust implementation we first collect the edges and then remove them. This has the side + /// effect that if we ever end up with a cycle in the graph (which is an invalid state), this method + /// may result in an infinite loop. + fn collect_redundant_edges(&self, node_index: NodeIndex, acc: &mut IndexSet<EdgeIndex>) { let mut stack = vec![]; for start_index in self.children_of(node_index) { stack.extend(self.children_of(start_index)); @@ -912,7 +935,6 @@ impl FetchDependencyGraph { for edge in self.graph.edges_connecting(node_index, v) { acc.insert(edge.id()); } - stack.extend(self.children_of(v)); } } @@ -923,9 +945,12 @@ impl FetchDependencyGraph { /// If any deeply nested child of this node has an edge to any direct child of this node, the /// direct child is removed, as we know it is also reachable through the deeply nested route. fn remove_redundant_edges(&mut self, node_index: NodeIndex) { - let mut redundant_edges = HashSet::new(); + let mut redundant_edges = IndexSet::default(); self.collect_redundant_edges(node_index, &mut redundant_edges); + if !redundant_edges.is_empty() { + self.on_modification(); + } for edge in redundant_edges { self.graph.remove_edge(edge); } @@ -979,14 +1004,16 @@ impl FetchDependencyGraph { // Two phases for mutability reasons: first all redundant edges coming out of all nodes are // collected and then they are all removed. - let mut redundant_edges = HashSet::new(); + let mut redundant_edges = IndexSet::default(); for node_index in self.graph.node_indices() { self.collect_redundant_edges(node_index, &mut redundant_edges); } - for edge in redundant_edges { - // PORT_NOTE: JS version calls `FetchGroup.removeChild`, which calls onModification.
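The petgraph changes in this file (and the `toposort` call a bit further down, when merging equivalent siblings) replace hand-rolled recursion with stock graph algorithms. A small self-contained sketch of the three pieces, assuming the `petgraph` and `itertools` crates this file already uses:

```rust
use itertools::Itertools;
use petgraph::algo::{has_path_connecting, toposort};
use petgraph::graph::{DiGraph, NodeIndex};

fn main() {
    let mut graph: DiGraph<&str, ()> = DiGraph::new();
    let root = graph.add_node("root");
    let a = graph.add_node("A");
    let b = graph.add_node("B");
    graph.add_edge(root, a, ());
    graph.add_edge(a, b, ());

    // `is_descendant_of(b, root)` as plain reachability (cycle-safe, no
    // manual recursion): is there a path root -> b?
    assert!(has_path_connecting(&graph, root, b, None));

    // Neighbor iteration order is unspecified, so sort by node index for
    // deterministic traversal, as `sorted_nodes` does above.
    let children: Vec<NodeIndex> = graph.neighbors(root).sorted_by_key(|n| n.index()).collect();
    assert_eq!(children, vec![a]);

    // Topological order never visits a descendant before its ancestor,
    // which is what makes merging a node into an earlier one safe.
    let order = toposort(&graph, None).expect("fetch graphs must stay acyclic");
    assert_eq!(order, vec![root, a, b]);
}
```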
+ if !redundant_edges.is_empty() { self.on_modification(); + } + for edge in redundant_edges { self.graph.remove_edge(edge); } @@ -1035,7 +1062,7 @@ impl FetchDependencyGraph { node.selection_set.selection_set.selections.is_empty() && !self.is_root_node(node_index, node) }; - let to_remove: HashSet<NodeIndex> = self + let to_remove: IndexSet<NodeIndex> = self .graph .node_references() .filter_map(|(node_index, node)| is_removable(node_index, node).then_some(node_index)) @@ -1044,6 +1071,9 @@ if to_remove.is_empty() { return; // unchanged } + // Note: We remove empty nodes without relocating their children. The invariant is that + // the children of empty nodes (if any) must be accessible from the root via another path. + // Otherwise, they would've become inaccessible orphan nodes. self.retain_nodes(|node_index| !to_remove.contains(node_index)); } @@ -1271,7 +1301,7 @@ impl FetchDependencyGraph { .any(|input| input.contains(selection))); }; - let impl_type_names: HashSet<_> = self + let impl_type_names: IndexSet<_> = self .supergraph_schema .possible_runtime_types(condition_in_supergraph.clone().into())? .iter() @@ -1422,8 +1452,17 @@ impl FetchDependencyGraph { // generate a simple string key from each node subgraph name and mergeAt. We do "sanitize" // subgraph name, but have no worries for `mergeAt` since it contains either numbers or // field names, and the latter is restricted by GraphQL so as to not be an issue. + // PORT_NOTE: The JS version iterates over the nodes in their index order, which is also + // the insertion order. The Rust version uses a topological sort to ensure that we never + // merge an ancestor node into a descendant node. The JS version's insertion order is almost + // topologically sorted, thanks to the way the graph is constructed from the root. However, + // it's not exactly topologically sorted. So, it's unclear whether that is 100% safe. + // Note: MultiMap preserves insertion order for values of the same key. Thus, the values + // of the same key in `by_subgraphs` will be topologically sorted as well. let mut by_subgraphs = MultiMap::new(); - for node_index in self.graph.node_indices() { + let sorted_nodes = petgraph::algo::toposort(&self.graph, None) + .map_err(|_| FederationError::internal("Failed to sort nodes due to cycle(s)"))?; + for node_index in sorted_nodes { let node = self.node_weight(node_index)?; // We exclude nodes without inputs because that's what we look for. In practice, this // mostly just excludes root nodes, which we don't really want to bother with anyway. @@ -1440,7 +1479,7 @@ } // Create disjoint sets of the nodes. - // buckets: an array where each entry is a "bucket" of groups that can all be merge together. + // buckets: an array where each entry is a "bucket" of nodes that can all be merged together. let mut buckets: Vec<(NodeIndex, Vec<NodeIndex>)> = Vec::new(); let has_equal_inputs = |a: NodeIndex, b: NodeIndex| { let a_node = self.node_weight(a)?; @@ -1473,9 +1512,12 @@ continue; }; - // We pick the head for the group and merge all others into it. Note that which - // group we choose shouldn't matter since the merging preserves all the + // We pick the head for the nodes and merge all others into it. Note that which + // node we choose shouldn't matter since the merging preserves all the // dependencies of each group (both parents and children). + // However, we must not merge an ancestor node into a descendant node.
+            // we choose the head as the first node in the bucket, which is also the earliest
+            // in the topo-sorted order.
             for node in rest {
                 self.merge_in_with_all_dependencies(*head, *node)?;
             }
@@ -1726,7 +1768,7 @@ impl FetchDependencyGraph {
         let handled_defers_in_current = defers_in_current
             .iter()
             .map(|info| info.label.clone())
-            .collect::<HashSet<_>>();
+            .collect::<IndexSet<_>>();
         let unhandled_defer_nodes = all_deferred_nodes
             .keys()
             .filter(|label| !handled_defers_in_current.contains(*label))
@@ -1987,6 +2029,7 @@ impl FetchDependencyGraph {
         Ok(())
     }

+    /// Assumption: merged_id is not an ancestor of node_id in the graph.
     fn merge_in_internal(
         &mut self,
         node_id: NodeIndex,
@@ -2041,6 +2084,7 @@
     // - node_id's defer_ref == merged_id's defer_ref
     // - node_id's subgraph_name == merged_id's subgraph_name
     // - node_id's merge_at == merged_id's merge_at
+    // - merged_id is not an ancestor of node_id in the graph.
     fn merge_in_with_all_dependencies(
         &mut self,
         node_id: NodeIndex,
@@ -2061,7 +2105,7 @@
         merged_id: NodeIndex,
         path_in_this: &OpPath,
     ) {
-        let mut new_parent_relations = HashMap::new();
+        let mut new_parent_relations = IndexMap::default();
         for child_id in self.children_of(merged_id) {
             // This could already be a child of `this`. Typically, we can have a case where we have:
             // 1
@@ -2128,18 +2172,17 @@
         let node = self.node_weight(node_id)?;
         let parent = self.node_weight(parent_relation.parent_node_id)?;
         let Some(parent_op_path) = &parent_relation.path_in_parent else {
-            return Err(FederationError::internal("Parent operation path is empty"));
+            return Ok(false);
         };
         let type_at_path = self.type_at_path(
             &parent.selection_set.selection_set.type_position,
             &parent.selection_set.selection_set.schema,
             parent_op_path,
         )?;
-        let new_node_is_unneeded = parent_relation.path_in_parent.is_some()
-            && node
-                .selection_set
-                .selection_set
-                .can_rebase_on(&type_at_path, &parent.selection_set.selection_set.schema)?;
+        let new_node_is_unneeded = node
+            .selection_set
+            .selection_set
+            .can_rebase_on(&type_at_path, &parent.selection_set.selection_set.schema)?;
         Ok(new_node_is_unneeded)
     }
@@ -2241,6 +2284,46 @@ impl std::fmt::Display for FetchDependencyGraph {
     }
 }

+// Necessary for `petgraph::dot::Dot::with_attr_getters` calls to compile, but not executed.
+impl std::fmt::Display for FetchDependencyGraphNode {
+    fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        Err(std::fmt::Error)
+    }
+}
+
+// Necessary for `petgraph::dot::Dot::with_attr_getters` calls to compile, but not executed.
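The ancestor-before-descendant ordering that the bucket merging above relies on can be reproduced in isolation. A minimal sketch, again with an illustrative `petgraph` graph rather than the planner's real types:

```rust
use petgraph::algo::toposort;
use petgraph::stable_graph::StableDiGraph;

fn main() {
    // Parent -> child edges, mirroring the fetch dependency graph's direction.
    let mut graph: StableDiGraph<&str, u32> = StableDiGraph::new();
    let root = graph.add_node("root");
    let mid = graph.add_node("mid");
    let leaf = graph.add_node("leaf");
    graph.add_edge(root, mid, 1);
    graph.add_edge(mid, leaf, 1);

    // `toposort` yields every node before all of its descendants, so picking
    // the first node of a bucket as the merge head can never fold an ancestor
    // into its own descendant. A cycle (an invalid state here) is an error.
    match toposort(&graph, None) {
        Ok(order) => {
            let names: Vec<&str> = order.iter().map(|n| graph[*n]).collect();
            println!("{names:?}"); // ["root", "mid", "leaf"]
        }
        Err(_) => eprintln!("cycle detected"),
    }
}
```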
+impl std::fmt::Display for FetchDependencyGraphEdge { + fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + Err(std::fmt::Error) + } +} + +impl FetchDependencyGraph { + // GraphViz output for FetchDependencyGraph + pub fn to_dot(&self) -> String { + fn label_node(node_id: NodeIndex, node: &FetchDependencyGraphNode) -> String { + let label_str = node.multiline_display(node_id).to_string(); + format!("label=\"{}\"", label_str.replace('"', "\\\"")) + } + + fn label_edge(edge_id: EdgeIndex) -> String { + format!("label=\"{}\"", edge_id.index()) + } + + let config = [ + petgraph::dot::Config::NodeNoLabel, + petgraph::dot::Config::EdgeNoLabel, + ]; + petgraph::dot::Dot::with_attr_getters( + &self.graph, + &config, + &(|_, er| label_edge(er.id())), + &(|_, (node_id, node)| label_node(node_id, node)), + ) + .to_string() + } +} + impl FetchDependencyGraphNode { pub(crate) fn selection_set_mut(&mut self) -> &mut FetchSelectionSet { self.cached_cost = None; @@ -2325,7 +2408,8 @@ impl FetchDependencyGraphNode { query_graph: &QueryGraph, handled_conditions: &Conditions, variable_definitions: &[Node], - fragments: Option<&mut RebasedFragments>, + operation_directives: &DirectiveList, + operation_compression: &mut SubgraphOperationCompression, operation_name: Option, ) -> Result, FederationError> { if self.selection_set.selection_set.selections.is_empty() { @@ -2346,18 +2430,34 @@ impl FetchDependencyGraphNode { .transpose()?; let subgraph_schema = query_graph.schema_by_source(&self.subgraph_name)?; + // Narrow down the variable definitions to only the ones used in the subgraph operation. + let variable_definitions = { + let mut collector = VariableCollector::new(); + collector.visit_directive_list(operation_directives); + collector.visit_selection_set(&selection); + let used_variables = collector.into_inner(); + + variable_definitions + .iter() + .filter(|variable| used_variables.contains(&variable.name)) + .cloned() + .collect::>() + }; let variable_usages = { - let set = selection.used_variables()?; - let mut list = set.into_iter().cloned().collect::>(); + let mut list = variable_definitions + .iter() + .map(|var_def| var_def.name.clone()) + .collect::>(); list.sort(); list }; - let mut operation = if self.is_entity_fetch { + let operation = if self.is_entity_fetch { operation_for_entities_fetch( subgraph_schema, selection, variable_definitions, + operation_directives, &operation_name, )? } else { @@ -2366,14 +2466,12 @@ impl FetchDependencyGraphNode { self.root_kind, selection, variable_definitions, + operation_directives, &operation_name, )? }; - if let Some(fragments) = fragments - .map(|rebased| rebased.for_subgraph(self.subgraph_name.clone(), subgraph_schema)) - { - operation.reuse_fragments(fragments)?; - } + let operation = + operation_compression.compress(&self.subgraph_name, subgraph_schema, operation)?; let operation_document = operation.try_into()?; let node = super::PlanNode::Fetch(Box::new(super::FetchNode { @@ -2511,6 +2609,81 @@ impl FetchDependencyGraphNode { FetchDependencyNodeDisplay { node: self, index } } + // A variation of `fn display` with multiline output, which is more suitable for + // GraphViz output. 
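For reference, the `to_dot` helper above follows petgraph's `Dot::with_attr_getters` pattern. A minimal standalone sketch (toy graph; note that `Dot` still requires `Display` on the node and edge weights, which is what the stub impls satisfy):

```rust
use petgraph::dot::{Config, Dot};
use petgraph::stable_graph::StableDiGraph;
use petgraph::visit::EdgeRef;

fn main() {
    let mut graph: StableDiGraph<&str, u32> = StableDiGraph::new();
    let query = graph.add_node("Query");
    let reviews = graph.add_node("reviews");
    graph.add_edge(query, reviews, 1);

    // Suppress the default labels, then provide our own through the getters,
    // the same shape `to_dot` uses above.
    let config = [Config::NodeNoLabel, Config::EdgeNoLabel];
    let dot = Dot::with_attr_getters(
        &graph,
        &config,
        &|_, edge| format!("label=\"{}\"", edge.id().index()),
        &|_, (node_id, weight)| format!("label=\"[{}] {}\"", node_id.index(), weight),
    );
    println!("{dot}");
}
```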
+    pub fn multiline_display(&self, index: NodeIndex) -> impl std::fmt::Display + '_ {
+        use std::fmt;
+        use std::fmt::Display;
+        use std::fmt::Formatter;
+
+        struct DisplayList<'a, T: Display>(&'a [T]);
+        impl<T: Display> Display for DisplayList<'_, T> {
+            fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+                let mut iter = self.0.iter();
+                if let Some(x) = iter.next() {
+                    write!(f, "{x}")?;
+                }
+                for x in iter {
+                    write!(f, "::{x}")?;
+                }
+                Ok(())
+            }
+        }
+
+        struct FetchDependencyNodeDisplay<'a> {
+            node: &'a FetchDependencyGraphNode,
+            index: NodeIndex,
+        }
+
+        impl Display for FetchDependencyNodeDisplay<'_> {
+            fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+                write!(f, "[{}]", self.index.index())?;
+                if self.node.defer_ref.is_some() {
+                    write!(f, "(deferred)")?;
+                }
+                if let Some(&id) = self.node.id.get() {
+                    write!(f, "{{id: {id}}}")?;
+                }
+
+                write!(f, " {}", self.node.subgraph_name)?;
+
+                match (self.node.merge_at.as_deref(), self.node.inputs.as_deref()) {
+                    (Some(merge_at), Some(inputs)) => {
+                        write!(
+                            f,
+                            // @(path::to::*::field)[{input1,input2} => { id }]
+                            "\n@({})\n{}\n=>\n{}\n",
+                            DisplayList(merge_at),
+                            inputs,
+                            self.node.selection_set.selection_set
+                        )?;
+                    }
+                    (Some(merge_at), None) => {
+                        write!(
+                            f,
+                            // @(path::to::*::field)[{} => { id }]
+                            "\n@({})\n{{}}\n=>\n{}\n",
+                            DisplayList(merge_at),
+                            self.node.selection_set.selection_set
+                        )?;
+                    }
+                    (None, _) => {
+                        // [(type){ id }]
+                        write!(
+                            f,
+                            "\n({})\n{}",
+                            self.node.parent_type, self.node.selection_set.selection_set
+                        )?;
+                    }
+                }
+
+                Ok(())
+            }
+        }
+
+        FetchDependencyNodeDisplay { node: self, index }
+    }
+
     // PORT_NOTE: In JS version, this value is memoized on the node struct.
     fn subgraph_and_merge_at_key(&self) -> Option<String> {
         // PORT_NOTE: In JS version, this hash value is defined as below.
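`display` and `multiline_display` both follow a common adapter pattern: return an opaque `impl Display` that borrows the node and formats it lazily. A reduced sketch with toy types:

```rust
use std::fmt::{self, Display, Formatter};

struct FetchNode {
    subgraph: String,
}

impl FetchNode {
    fn multiline_display(&self, index: usize) -> impl Display + '_ {
        struct NodeDisplay<'a> {
            node: &'a FetchNode,
            index: usize,
        }
        impl Display for NodeDisplay<'_> {
            fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
                // One piece of state per line, which suits GraphViz labels.
                write!(f, "[{}]\n{}", self.index, self.node.subgraph)
            }
        }
        NodeDisplay { node: self, index }
    }
}

fn main() {
    let node = FetchNode { subgraph: "reviews".to_string() };
    println!("{}", node.multiline_display(0));
}
```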
@@ -2535,19 +2708,11 @@ impl FetchDependencyGraphNode { fn operation_for_entities_fetch( subgraph_schema: &ValidFederationSchema, selection_set: SelectionSet, - all_variable_definitions: &[Node], + mut variable_definitions: Vec>, + operation_directives: &DirectiveList, operation_name: &Option, ) -> Result { - let mut variable_definitions: Vec> = - Vec::with_capacity(all_variable_definitions.len() + 1); - variable_definitions.push(representations_variable_definition(subgraph_schema)?); - let used_variables = selection_set.used_variables()?; - variable_definitions.extend( - all_variable_definitions - .iter() - .filter(|definition| used_variables.contains(&definition.name)) - .cloned(), - ); + variable_definitions.insert(0, representations_variable_definition(subgraph_schema)?); let query_type_name = subgraph_schema.schema().root_operation(OperationType::Query).ok_or_else(|| SingleFederationError::InvalidSubgraph { @@ -2582,11 +2747,10 @@ fn operation_for_entities_fetch( schema: subgraph_schema.clone(), field_position: entities, alias: None, - arguments: Arc::new(vec![executable::Argument { - name: FEDERATION_REPRESENTATIONS_ARGUMENTS_NAME, - value: executable::Value::Variable(FEDERATION_REPRESENTATIONS_VAR_NAME).into(), - } - .into()]), + arguments: ArgumentList::one(( + FEDERATION_REPRESENTATIONS_ARGUMENTS_NAME, + executable::Value::Variable(FEDERATION_REPRESENTATIONS_VAR_NAME), + )), directives: Default::default(), sibling_typename: None, })), @@ -2611,7 +2775,7 @@ fn operation_for_entities_fetch( root_kind: SchemaRootDefinitionKind::Query, name: operation_name.clone(), variables: Arc::new(variable_definitions), - directives: Default::default(), + directives: operation_directives.clone(), selection_set, named_fragments: Default::default(), }) @@ -2621,22 +2785,16 @@ fn operation_for_query_fetch( subgraph_schema: &ValidFederationSchema, root_kind: SchemaRootDefinitionKind, selection_set: SelectionSet, - variable_definitions: &[Node], + variable_definitions: Vec>, + operation_directives: &DirectiveList, operation_name: &Option, ) -> Result { - let used_variables = selection_set.used_variables()?; - let variable_definitions = variable_definitions - .iter() - .filter(|definition| used_variables.contains(&definition.name)) - .cloned() - .collect(); - Ok(Operation { schema: subgraph_schema.clone(), root_kind, name: operation_name.clone(), variables: Arc::new(variable_definitions), - directives: Default::default(), + directives: operation_directives.clone(), selection_set, named_fragments: Default::default(), }) @@ -3557,7 +3715,7 @@ fn wrap_selection_with_type_and_conditions( schema: supergraph_schema.clone(), parent_type_position: wrapping_type.clone(), type_condition_position: Some(type_condition.clone()), - directives: Arc::new([directive].into_iter().collect()), + directives: [directive].into_iter().collect(), selection_id: SelectionId::new(), }), acc, diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs b/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs index 4ee9b57da0..ab126dbcc6 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph_processor.rs @@ -1,11 +1,13 @@ -use std::collections::HashSet; +use std::sync::Arc; +use apollo_compiler::collections::IndexSet; use apollo_compiler::executable::VariableDefinition; use apollo_compiler::Name; use apollo_compiler::Node; +use super::query_planner::SubgraphOperationCompression; use 
crate::error::FederationError; -use crate::operation::RebasedFragments; +use crate::operation::DirectiveList; use crate::operation::SelectionSet; use crate::query_graph::QueryGraph; use crate::query_plan::conditions::Conditions; @@ -44,10 +46,11 @@ const FETCH_COST: QueryPlanCost = 1000.0; const PIPELINING_COST: QueryPlanCost = 100.0; pub(crate) struct FetchDependencyGraphToQueryPlanProcessor { - variable_definitions: Vec>, - fragments: Option, + variable_definitions: Arc>>, + operation_directives: DirectiveList, + operation_compression: SubgraphOperationCompression, operation_name: Option, - assigned_defer_labels: Option>, + assigned_defer_labels: Option>, counter: u32, } @@ -241,14 +244,16 @@ fn sequence_cost(values: impl IntoIterator) -> QueryPlanCo impl FetchDependencyGraphToQueryPlanProcessor { pub(crate) fn new( - variable_definitions: Vec>, - fragments: Option, + variable_definitions: Arc>>, + operation_directives: DirectiveList, + operation_compression: SubgraphOperationCompression, operation_name: Option, - assigned_defer_labels: Option>, + assigned_defer_labels: Option>, ) -> Self { Self { variable_definitions, - fragments, + operation_directives, + operation_compression, operation_name, assigned_defer_labels, counter: 0, @@ -276,7 +281,8 @@ impl FetchDependencyGraphProcessor, DeferredDeferBlock> query_graph, handled_conditions, &self.variable_definitions, - self.fragments.as_mut(), + &self.operation_directives, + &mut self.operation_compression, op_name, ) } diff --git a/apollo-federation/src/query_plan/generate.rs b/apollo-federation/src/query_plan/generate.rs index 0511deb498..4001b70f6f 100644 --- a/apollo-federation/src/query_plan/generate.rs +++ b/apollo-federation/src/query_plan/generate.rs @@ -19,7 +19,7 @@ struct Partial { // that implements all three methods. pub trait PlanBuilder { /// `add_to_plan`: how to obtain a new plan by taking some plan and adding a new element to it. - fn add_to_plan(&mut self, plan: &Plan, elem: Element) -> Plan; + fn add_to_plan(&mut self, plan: &Plan, elem: Element) -> Result; /// `compute_plan_cost`: how to compute the cost of a plan. 
fn compute_plan_cost(&mut self, plan: &mut Plan) -> Result; @@ -158,7 +158,7 @@ where let picked_index = pick_next(index, next_choices); let Extracted { extracted, is_last } = extract(picked_index, next_choices); - let mut new_partial_plan = plan_builder.add_to_plan(&partial_plan, extracted); + let mut new_partial_plan = plan_builder.add_to_plan(&partial_plan, extracted)?; let cost = plan_builder.compute_plan_cost(&mut new_partial_plan)?; if !is_last { @@ -252,7 +252,11 @@ mod tests { } impl<'a> PlanBuilder for TestPlanBuilder<'a> { - fn add_to_plan(&mut self, partial_plan: &Plan, new_element: Element) -> Plan { + fn add_to_plan( + &mut self, + partial_plan: &Plan, + new_element: Element, + ) -> Result { let new_plan: Plan = partial_plan .iter() .cloned() @@ -261,7 +265,7 @@ mod tests { if new_plan.len() == self.target_len { self.generated.push(new_plan.clone()) } - new_plan + Ok(new_plan) } fn compute_plan_cost(&mut self, plan: &mut Plan) -> Result { diff --git a/apollo-federation/src/query_plan/query_planner.rs b/apollo-federation/src/query_plan/query_planner.rs index 012dd65392..5670c685d9 100644 --- a/apollo-federation/src/query_plan/query_planner.rs +++ b/apollo-federation/src/query_plan/query_planner.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use apollo_compiler::collections::IndexMap; use apollo_compiler::collections::IndexSet; +use apollo_compiler::schema::ExtendedType; use apollo_compiler::validation::Valid; use apollo_compiler::ExecutableDocument; use apollo_compiler::Name; @@ -15,7 +16,7 @@ use crate::error::SingleFederationError; use crate::link::federation_spec_definition::FederationSpecDefinition; use crate::operation::normalize_operation; use crate::operation::NamedFragments; -use crate::operation::RebasedFragments; +use crate::operation::Operation; use crate::operation::SelectionSet; use crate::query_graph::build_federated_query_graph; use crate::query_graph::path_tree::OpPathTree; @@ -46,6 +47,10 @@ use crate::utils::logging::snapshot; use crate::ApiSchemaOptions; use crate::Supergraph; +pub(crate) const OVERRIDE_LABEL_ARG_NAME: &str = "overrideLabel"; +pub(crate) const CONTEXT_DIRECTIVE: &str = "context"; +pub(crate) const JOIN_FIELD: &str = "join__field"; + #[derive(Debug, Clone, Hash)] pub struct QueryPlannerConfig { /// Whether the query planner should try to reused the named fragments of the planned query in @@ -208,6 +213,7 @@ impl QueryPlanner { config: QueryPlannerConfig, ) -> Result { config.assert_valid(); + Self::check_unsupported_features(supergraph)?; let supergraph_schema = supergraph.schema.clone(); let api_schema = supergraph.to_api_schema(ApiSchemaOptions { @@ -368,7 +374,6 @@ impl QueryPlanner { } } - let reuse_query_fragments = self.config.reuse_query_fragments; let normalized_operation = normalize_operation( operation, NamedFragments::new(&document.fragments, &self.api_schema), @@ -433,22 +438,25 @@ impl QueryPlanner { ); }; - let rebased_fragments = if reuse_query_fragments { + let operation_compression = if self.config.generate_query_fragments { + SubgraphOperationCompression::GenerateFragments + } else if self.config.reuse_query_fragments { // For all subgraph fetches we query `__typename` on every abstract types (see // `FetchDependencyGraphNode::to_plan_node`) so if we want to have a chance to reuse // fragments, we should make sure those fragments also query `__typename` for every // abstract type. 
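The fragment-reuse path caches one rebased copy of the original fragments per subgraph (see `RebasedFragments::for_subgraph` further down in this diff). A reduced sketch of that caching shape, assuming the `indexmap` crate and with `String` standing in for `NamedFragments`:

```rust
use std::sync::Arc;

use indexmap::IndexMap;

struct Cache {
    original: String,
    rebased: IndexMap<Arc<str>, String>,
}

impl Cache {
    fn for_subgraph(&mut self, subgraph: impl Into<Arc<str>>) -> &String {
        // Rebase at most once per subgraph; later calls return the cached value.
        self.rebased
            .entry(subgraph.into())
            .or_insert_with(|| format!("{} (rebased)", self.original))
    }
}

fn main() {
    let mut cache = Cache {
        original: "fragments".to_string(),
        rebased: IndexMap::new(),
    };
    println!("{}", cache.for_subgraph("reviews"));
    println!("{}", cache.for_subgraph("reviews")); // second call hits the cache
}
```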
-            Some(RebasedFragments::new(
+            SubgraphOperationCompression::ReuseFragments(RebasedFragments::new(
                 normalized_operation
                     .named_fragments
                     .add_typename_field_for_abstract_types_in_named_fragments()?,
             ))
         } else {
-            None
+            SubgraphOperationCompression::Disabled
         };
         let mut processor = FetchDependencyGraphToQueryPlanProcessor::new(
-            operation.variables.clone(),
-            rebased_fragments,
+            normalized_operation.variables.clone(),
+            normalized_operation.directives.clone(),
+            operation_compression,
             operation.name.clone(),
             assigned_defer_labels,
         );
@@ -531,6 +539,89 @@ impl QueryPlanner {
     pub fn api_schema(&self) -> &ValidFederationSchema {
         &self.api_schema
     }
+
+    fn check_unsupported_features(supergraph: &Supergraph) -> Result<(), FederationError> {
+        // We have a *progressive* override when `join__field` has a
+        // non-null value for the `overrideLabel` field.
+        //
+        // This looks at object types' fields and their directive
+        // applications, looking specifically at the `@join__field`
+        // argument lists.
+        let has_progressive_overrides = supergraph
+            .schema
+            .schema()
+            .types
+            .values()
+            .filter_map(|extended_type| {
+                // The override label args can only be on ObjectTypes
+                if let ExtendedType::Object(object_type) = extended_type {
+                    Some(object_type)
+                } else {
+                    None
+                }
+            })
+            .flat_map(|object_type| &object_type.fields)
+            .flat_map(|(_, field)| {
+                field
+                    .directives
+                    .iter()
+                    .filter(|d| d.name.as_str() == JOIN_FIELD)
+            })
+            .any(|join_directive| {
+                if let Some(override_label_arg) =
+                    join_directive.argument_by_name(OVERRIDE_LABEL_ARG_NAME)
+                {
+                    // Any argument value for `overrideLabel` that's not
+                    // null counts as progressive override usage
+                    if !override_label_arg.is_null() {
+                        return true;
+                    }
+                    return false;
+                }
+                false
+            });
+        if has_progressive_overrides {
+            let message = "\
+                `experimental_query_planner_mode: new` or `both` cannot yet \
+                be used with progressive overrides. \
+                Remove uses of progressive overrides to try the experimental query planner, \
+                otherwise switch back to `legacy` or `both_best_effort`.\
+            ";
+            return Err(SingleFederationError::UnsupportedFeature {
+                message: message.to_owned(),
+                kind: crate::error::UnsupportedFeatureKind::ProgressiveOverrides,
+            }
+            .into());
+        }
+
+        // We will only check for the `@context` directive, since
+        // `@fromContext` can only be used if `@context` is already
+        // applied, and we assume a correctly composed supergraph.
+        //
+        // `@context` can only be applied on Object Types, Interface
+        // Types and Unions. For simplicity of this function, we just
+        // check all `extended_type` directives.
+        let has_set_context = supergraph
+            .schema
+            .schema()
+            .types
+            .values()
+            .any(|extended_type| extended_type.directives().has(CONTEXT_DIRECTIVE));
+        if has_set_context {
+            let message = "\
+                `experimental_query_planner_mode: new` or `both` cannot yet \
+                be used with `@context`.
\ + Remove uses of `@context` to try the experimental query planner, \ + otherwise switch back to `legacy` or `both_best_effort`.\ + "; + return Err(SingleFederationError::UnsupportedFeature { + message: message.to_owned(), + kind: crate::error::UnsupportedFeatureKind::Context, + } + .into()); + } + Ok(()) + } } fn compute_root_serial_dependency_graph( @@ -765,12 +856,74 @@ fn compute_plan_for_defer_conditionals( _parameters: &mut QueryPlanningParameters, _defer_conditions: IndexMap>, ) -> Result, FederationError> { - Err(SingleFederationError::Internal { + Err(SingleFederationError::UnsupportedFeature { message: String::from("@defer is currently not supported"), + kind: crate::error::UnsupportedFeatureKind::Defer, } .into()) } +/// Tracks fragments from the original operation, along with versions rebased on other subgraphs. +pub(crate) struct RebasedFragments { + original_fragments: NamedFragments, + /// Map key: subgraph name + rebased_fragments: IndexMap, NamedFragments>, +} + +impl RebasedFragments { + fn new(fragments: NamedFragments) -> Self { + Self { + original_fragments: fragments, + rebased_fragments: Default::default(), + } + } + + fn for_subgraph( + &mut self, + subgraph_name: impl Into>, + subgraph_schema: &ValidFederationSchema, + ) -> &NamedFragments { + self.rebased_fragments + .entry(subgraph_name.into()) + .or_insert_with(|| { + self.original_fragments + .rebase_on(subgraph_schema) + .unwrap_or_default() + }) + } +} + +pub(crate) enum SubgraphOperationCompression { + ReuseFragments(RebasedFragments), + GenerateFragments, + Disabled, +} + +impl SubgraphOperationCompression { + /// Compress a subgraph operation. + pub(crate) fn compress( + &mut self, + subgraph_name: &Arc, + subgraph_schema: &ValidFederationSchema, + operation: Operation, + ) -> Result { + match self { + Self::ReuseFragments(fragments) => { + let rebased = fragments.for_subgraph(Arc::clone(subgraph_name), subgraph_schema); + let mut operation = operation; + operation.reuse_fragments(rebased)?; + Ok(operation) + } + Self::GenerateFragments => { + let mut operation = operation; + operation.generate_fragments()?; + Ok(operation) + } + Self::Disabled => Ok(operation), + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/apollo-federation/src/query_plan/query_planning_traversal.rs b/apollo-federation/src/query_plan/query_planning_traversal.rs index edfdca8fa8..2dab41debf 100644 --- a/apollo-federation/src/query_plan/query_planning_traversal.rs +++ b/apollo-federation/src/query_plan/query_planning_traversal.rs @@ -1063,21 +1063,17 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { } impl<'a: 'b, 'b> PlanBuilder> for QueryPlanningTraversal<'a, 'b> { - fn add_to_plan(&mut self, plan_info: &PlanInfo, tree: Arc) -> PlanInfo { + fn add_to_plan( + &mut self, + plan_info: &PlanInfo, + tree: Arc, + ) -> Result { let mut updated_graph = plan_info.fetch_dependency_graph.clone(); - let result = self.updated_dependency_graph(&mut updated_graph, &tree); - if result.is_ok() { - PlanInfo { + self.updated_dependency_graph(&mut updated_graph, &tree) + .map(|_| PlanInfo { fetch_dependency_graph: updated_graph, path_tree: plan_info.path_tree.merge(&tree), - } - } else { - // Failed to update. Return the original plan. 
- PlanInfo { - fetch_dependency_graph: updated_graph, - path_tree: plan_info.path_tree.clone(), - } - } + }) } fn compute_plan_cost( diff --git a/apollo-federation/src/schema/field_set.rs b/apollo-federation/src/schema/field_set.rs index 077f33c7d5..442162efd9 100644 --- a/apollo-federation/src/schema/field_set.rs +++ b/apollo-federation/src/schema/field_set.rs @@ -10,8 +10,8 @@ use crate::error::FederationError; use crate::error::MultipleFederationErrors; use crate::error::SingleFederationError; use crate::operation::NamedFragments; +use crate::operation::Selection; use crate::operation::SelectionSet; -use crate::query_graph::graph_path::OpPathElement; use crate::schema::position::CompositeTypeDefinitionPosition; use crate::schema::position::FieldDefinitionPosition; use crate::schema::position::InterfaceTypeDefinitionPosition; @@ -23,28 +23,40 @@ use crate::schema::ValidFederationSchema; // Federation spec does not allow the alias syntax in field set strings. // However, since `parse_field_set` uses the standard GraphQL parser, which allows aliases, // we need this secondary check to ensure that aliases are not used. -fn check_absence_of_aliases( - selection_set: &SelectionSet, - code_str: &str, -) -> Result<(), FederationError> { - let mut alias_errors = vec![]; - selection_set.for_each_element(&mut |elem| { - let OpPathElement::Field(field) = elem else { - return Ok(()); - }; - let Some(alias) = &field.alias else { - return Ok(()); - }; - alias_errors.push(SingleFederationError::UnsupportedFeature { - // PORT_NOTE: The JS version also quotes the directive name in the error message. - // For example, "aliases are not currently supported in @requires". - message: format!( - r#"Cannot use alias "{}" in "{}": aliases are not currently supported in the used directive"#, - alias, code_str) - }); +fn check_absence_of_aliases(selection_set: &SelectionSet) -> Result<(), FederationError> { + fn visit_selection_set( + errors: &mut MultipleFederationErrors, + selection_set: &SelectionSet, + ) -> Result<(), FederationError> { + for selection in selection_set.iter() { + match selection { + Selection::FragmentSpread(_) => { + return Err(FederationError::internal( + "check_absence_of_aliases(): unexpected fragment spread", + )) + } + Selection::InlineFragment(frag) => check_absence_of_aliases(&frag.selection_set)?, + Selection::Field(field) => { + if let Some(alias) = &field.field.alias { + errors.push(SingleFederationError::UnsupportedFeature { + // PORT_NOTE: The JS version also quotes the directive name in the error message. + // For example, "aliases are not currently supported in @requires". + message: format!(r#"Cannot use alias "{alias}" in "{}": aliases are not currently supported in the used directive"#, field.field), + kind: crate::error::UnsupportedFeatureKind::Alias + }.into()); + } + if let Some(selection_set) = &field.selection_set { + visit_selection_set(errors, selection_set)?; + } + } + } + } Ok(()) - })?; - MultipleFederationErrors::from_iter(alias_errors).into_result() + } + + let mut errors = MultipleFederationErrors { errors: vec![] }; + visit_selection_set(&mut errors, selection_set)?; + errors.into_result() } // TODO: In the JS codebase, this has some error-rewriting to help give the user better hints around @@ -69,7 +81,7 @@ pub(crate) fn parse_field_set( SelectionSet::from_selection_set(&field_set.selection_set, &named_fragments, schema)?; // Validate the field set has no aliases. 
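A reduced sketch of the recursive walk that `check_absence_of_aliases` now performs, with a toy selection model in place of the crate's operation types: visit every field, recurse into subselections, and accumulate one error per alias rather than stopping at the first.

```rust
enum Selection {
    Field { alias: Option<String>, name: String, sub: Vec<Selection> },
    InlineFragment(Vec<Selection>),
}

fn collect_alias_errors(selections: &[Selection], errors: &mut Vec<String>) {
    for selection in selections {
        match selection {
            Selection::InlineFragment(sub) => collect_alias_errors(sub, errors),
            Selection::Field { alias, name, sub } => {
                if let Some(alias) = alias {
                    // Report the single aliased field, not the whole field set.
                    errors.push(format!(r#"Cannot use alias "{alias}" in "{alias}: {name}""#));
                }
                collect_alias_errors(sub, errors);
            }
        }
    }
}

fn main() {
    let set = vec![Selection::Field {
        alias: Some("r1".to_string()),
        name: "r".to_string(),
        sub: vec![],
    }];
    let mut errors = Vec::new();
    collect_alias_errors(&set, &mut errors);
    assert_eq!(errors, [r#"Cannot use alias "r1" in "r1: r""#]);
}
```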
- check_absence_of_aliases(&selection_set, value)?; + check_absence_of_aliases(&selection_set)?; Ok(selection_set) } @@ -238,8 +250,8 @@ mod tests { assert_eq!( err.to_string(), r#"The following errors occurred: - - Cannot use alias "r1" in "r1: r s q1: q": aliases are not currently supported in the used directive - - Cannot use alias "q1" in "r1: r s q1: q": aliases are not currently supported in the used directive"# + - Cannot use alias "r1" in "r1: r": aliases are not currently supported in the used directive + - Cannot use alias "q1" in "q1: q": aliases are not currently supported in the used directive"# ); Ok(()) } diff --git a/apollo-federation/src/snapshots/apollo_federation__merge__tests__steel_thread.snap b/apollo-federation/src/snapshots/apollo_federation__merge__tests__steel_thread.snap index 8ace8b2b86..54ee822eea 100644 --- a/apollo-federation/src/snapshots/apollo_federation__merge__tests__steel_thread.snap +++ b/apollo-federation/src/snapshots/apollo_federation__merge__tests__steel_thread.snap @@ -52,6 +52,6 @@ type User @join__type(graph: CONNECTOR_QUERY_USER_0, key: "id") @join__type(grap type Query @join__type(graph: CONNECTOR_QUERY_USER_0) @join__type(graph: CONNECTOR_QUERY_USERS_0) @join__type(graph: CONNECTOR_USER_D_1) @join__type(graph: GRAPHQL) { user(id: ID!): User @join__field(graph: CONNECTOR_QUERY_USER_0) - users: [User] @join__field(graph: CONNECTOR_QUERY_USERS_0) + users(limit: Int): [User] @join__field(graph: CONNECTOR_QUERY_USERS_0) _: ID @inaccessible @join__field(graph: CONNECTOR_USER_D_1) } diff --git a/apollo-federation/src/sources/connect/expand/merge/connector_Query_users_0.graphql b/apollo-federation/src/sources/connect/expand/merge/connector_Query_users_0.graphql index 13fb5dea4a..414b83db47 100644 --- a/apollo-federation/src/sources/connect/expand/merge/connector_Query_users_0.graphql +++ b/apollo-federation/src/sources/connect/expand/merge/connector_Query_users_0.graphql @@ -73,5 +73,5 @@ type User { } type Query { - users: [User] + users(limit: Int): [User] } diff --git a/apollo-federation/src/sources/connect/url_path_template.rs b/apollo-federation/src/sources/connect/url_path_template.rs index dda02f89cb..83977d3d45 100644 --- a/apollo-federation/src/sources/connect/url_path_template.rs +++ b/apollo-federation/src/sources/connect/url_path_template.rs @@ -1,4 +1,3 @@ -use std::collections::HashSet; use std::fmt::Display; use apollo_compiler::collections::IndexMap; @@ -188,7 +187,7 @@ impl URLPathTemplate { } pub fn required_parameters(&self) -> Vec { - let mut parameters = HashSet::new(); + let mut parameters = IndexSet::default(); for param_value in &self.path { parameters.extend(param_value.required_parameters()); } diff --git a/apollo-federation/src/subgraph/mod.rs b/apollo-federation/src/subgraph/mod.rs index 959d731ec3..4ba138ba8f 100644 --- a/apollo-federation/src/subgraph/mod.rs +++ b/apollo-federation/src/subgraph/mod.rs @@ -28,6 +28,7 @@ use crate::subgraph::spec::FEDERATION_V2_DIRECTIVE_NAMES; use crate::subgraph::spec::KEY_DIRECTIVE_NAME; use crate::subgraph::spec::SERVICE_SDL_QUERY; use crate::subgraph::spec::SERVICE_TYPE; +use crate::ValidFederationSubgraph; mod database; pub mod spec; @@ -327,6 +328,16 @@ impl std::fmt::Debug for ValidSubgraph { } } +impl From for ValidSubgraph { + fn from(value: ValidFederationSubgraph) -> Self { + Self { + name: value.name, + url: value.url, + schema: value.schema.schema().clone(), + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/apollo-federation/src/subgraph/spec.rs 
b/apollo-federation/src/subgraph/spec.rs index 9c3aa1ebd7..e5fd352a8c 100644 --- a/apollo-federation/src/subgraph/spec.rs +++ b/apollo-federation/src/subgraph/spec.rs @@ -48,6 +48,8 @@ pub const PROVIDES_DIRECTIVE_NAME: Name = name!("provides"); pub const REQUIRES_DIRECTIVE_NAME: Name = name!("requires"); pub const SHAREABLE_DIRECTIVE_NAME: Name = name!("shareable"); pub const TAG_DIRECTIVE_NAME: Name = name!("tag"); +pub const CONTEXT_DIRECTIVE_NAME: Name = name!("context"); +pub const FROM_CONTEXT_DIRECTIVE_NAME: Name = name!("fromContext"); pub const FIELDSET_SCALAR_NAME: Name = name!("FieldSet"); // federated types diff --git a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs b/apollo-federation/src/supergraph/mod.rs similarity index 85% rename from apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs rename to apollo-federation/src/supergraph/mod.rs index ef354eaef1..66d6078871 100644 --- a/apollo-federation/src/query_graph/extract_subgraphs_from_supergraph.rs +++ b/apollo-federation/src/supergraph/mod.rs @@ -1,9 +1,12 @@ -use std::collections::BTreeMap; -use std::fmt; +mod schema; +mod subgraph; + use std::fmt::Write; use std::ops::Deref; use std::sync::Arc; +use apollo_compiler::ast::Argument; +use apollo_compiler::ast::Directive; use apollo_compiler::ast::FieldDefinition; use apollo_compiler::collections::IndexMap; use apollo_compiler::collections::IndexSet; @@ -25,18 +28,25 @@ use apollo_compiler::schema::InterfaceType; use apollo_compiler::schema::NamedType; use apollo_compiler::schema::ObjectType; use apollo_compiler::schema::ScalarType; -use apollo_compiler::schema::SchemaBuilder; use apollo_compiler::schema::Type; use apollo_compiler::schema::UnionType; use apollo_compiler::validation::Valid; use apollo_compiler::Name; use apollo_compiler::Node; +use itertools::Itertools; use lazy_static::lazy_static; use time::OffsetDateTime; +use self::schema::get_apollo_directive_names; +pub(crate) use self::schema::new_empty_fed_2_subgraph_schema; +use self::subgraph::FederationSubgraph; +use self::subgraph::FederationSubgraphs; +pub use self::subgraph::ValidFederationSubgraph; +pub use self::subgraph::ValidFederationSubgraphs; use crate::error::FederationError; use crate::error::MultipleFederationErrors; use crate::error::SingleFederationError; +use crate::link::cost_spec_definition::CostSpecDefinition; use crate::link::federation_spec_definition::get_federation_spec_definition_from_subgraph; use crate::link::federation_spec_definition::FederationSpecDefinition; use crate::link::federation_spec_definition::FEDERATION_VERSIONS; @@ -46,6 +56,7 @@ use crate::link::join_spec_definition::TypeDirectiveArguments; use crate::link::spec::Identity; use crate::link::spec::Version; use crate::link::spec_definition::SpecDefinition; +use crate::link::DEFAULT_LINK_NAME; use crate::schema::field_set::parse_field_set_without_normalization; use crate::schema::position::is_graphql_reserved_name; use crate::schema::position::CompositeTypeDefinitionPosition; @@ -69,7 +80,6 @@ use crate::schema::type_and_directive_specification::ScalarTypeSpecification; use crate::schema::type_and_directive_specification::TypeAndDirectiveSpecification; use crate::schema::type_and_directive_specification::UnionTypeSpecification; use crate::schema::FederationSchema; -use crate::schema::ValidFederationSchema; /// Assumes the given schema has been validated. 
/// @@ -223,69 +233,9 @@ fn collect_empty_subgraphs( )) } -/// TODO: Use the JS/programmatic approach instead of hard-coding definitions. -pub(crate) fn new_empty_fed_2_subgraph_schema() -> Result { - let builder = SchemaBuilder::new().adopt_orphan_extensions(); - let builder = builder.parse( - r#" - extend schema - @link(url: "https://specs.apollo.dev/link/v1.0") - @link(url: "https://specs.apollo.dev/federation/v2.5") - - directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA - - scalar link__Import - - enum link__Purpose { - """ - \`SECURITY\` features provide metadata necessary to securely resolve fields. - """ - SECURITY - - """ - \`EXECUTION\` features provide metadata necessary for operation execution. - """ - EXECUTION - } - - directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE - - directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION - - directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION - - directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION - - directive @federation__tag(name: String!) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA - - directive @federation__extends on OBJECT | INTERFACE - - directive @federation__shareable on OBJECT | FIELD_DEFINITION - - directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION - - directive @federation__override(from: String!, label: String) on FIELD_DEFINITION - - directive @federation__composeDirective(name: String) repeatable on SCHEMA - - directive @federation__interfaceObject on OBJECT - - directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM - - directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM - - scalar federation__FieldSet - - scalar federation__Scope - "#, - "subgraph.graphql", - ); - FederationSchema::new(builder.build()?) 
-} - struct TypeInfo { name: NamedType, - // HashMap + // IndexMap subgraph_info: IndexMap, } @@ -305,6 +255,8 @@ fn extract_subgraphs_from_fed_2_supergraph( join_spec_definition: &'static JoinSpecDefinition, filtered_types: &Vec, ) -> Result<(), FederationError> { + let original_directive_names = get_apollo_directive_names(supergraph_schema)?; + let TypeInfos { object_types, interface_types, @@ -318,6 +270,7 @@ fn extract_subgraphs_from_fed_2_supergraph( federation_spec_definitions, join_spec_definition, filtered_types, + &original_directive_names, )?; extract_object_type_content( @@ -327,6 +280,7 @@ fn extract_subgraphs_from_fed_2_supergraph( federation_spec_definitions, join_spec_definition, &object_types, + &original_directive_names, )?; extract_interface_type_content( supergraph_schema, @@ -335,6 +289,7 @@ fn extract_subgraphs_from_fed_2_supergraph( federation_spec_definitions, join_spec_definition, &interface_types, + &original_directive_names, )?; extract_union_type_content( supergraph_schema, @@ -347,15 +302,25 @@ fn extract_subgraphs_from_fed_2_supergraph( supergraph_schema, subgraphs, graph_enum_value_name_to_subgraph_name, + federation_spec_definitions, join_spec_definition, &enum_types, + &original_directive_names, )?; extract_input_object_type_content( supergraph_schema, subgraphs, graph_enum_value_name_to_subgraph_name, + federation_spec_definitions, join_spec_definition, &input_object_types, + &original_directive_names, + )?; + + extract_join_directives( + supergraph_schema, + subgraphs, + graph_enum_value_name_to_subgraph_name, )?; // We add all the "executable" directive definitions from the supergraph to each subgraphs, as @@ -401,7 +366,10 @@ fn extract_subgraphs_from_fed_2_supergraph( }) .collect::>(); for subgraph in subgraphs.subgraphs.values_mut() { - remove_inactive_requires_and_provides_from_subgraph(&mut subgraph.schema)?; + remove_inactive_requires_and_provides_from_subgraph( + supergraph_schema, + &mut subgraph.schema, + )?; remove_unused_types_from_subgraph(&mut subgraph.schema)?; for definition in all_executable_directive_definitions.iter() { let pos = DirectiveDefinitionPosition { @@ -422,6 +390,7 @@ fn add_all_empty_subgraph_types( federation_spec_definitions: &IndexMap, join_spec_definition: &'static JoinSpecDefinition, filtered_types: &Vec, + original_directive_names: &IndexMap, ) -> Result { let type_directive_definition = join_spec_definition.type_directive_definition(supergraph_schema)?; @@ -451,6 +420,13 @@ fn add_all_empty_subgraph_types( graph_enum_value_name_to_subgraph_name, &type_directive_application.graph, )?; + let federation_spec_definition = federation_spec_definitions + .get(&type_directive_application.graph) + .ok_or_else(|| SingleFederationError::InvalidFederationSupergraph { + message: "Subgraph unexpectedly does not use federation spec" + .to_owned(), + })?; + pos.pre_insert(&mut subgraph.schema)?; pos.insert( &mut subgraph.schema, @@ -460,6 +436,17 @@ fn add_all_empty_subgraph_types( directives: Default::default(), }), )?; + + if let Some(cost_spec_definition) = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema) + { + cost_spec_definition.propagate_demand_control_directives_for_scalar( + &mut subgraph.schema, + pos.get(supergraph_schema.schema())?, + pos, + original_directive_names, + )?; + } } None } @@ -705,6 +692,7 @@ fn extract_object_type_content( federation_spec_definitions: &IndexMap, join_spec_definition: &JoinSpecDefinition, info: &[TypeInfo], + original_directive_names: &IndexMap, ) -> Result<(), 
FederationError> { let field_directive_definition = join_spec_definition.field_directive_definition(supergraph_schema)?; @@ -754,6 +742,29 @@ fn extract_object_type_content( )?; } + for graph_enum_value in subgraph_info.keys() { + let subgraph = get_subgraph( + subgraphs, + graph_enum_value_name_to_subgraph_name, + graph_enum_value, + )?; + let federation_spec_definition = federation_spec_definitions + .get(graph_enum_value) + .ok_or_else(|| SingleFederationError::InvalidFederationSupergraph { + message: "Subgraph unexpectedly does not use federation spec".to_owned(), + })?; + if let Some(cost_spec_definition) = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema) + { + cost_spec_definition.propagate_demand_control_directives_for_object( + &mut subgraph.schema, + type_, + &pos, + original_directive_names, + )?; + } + } + for (field_name, field) in type_.fields.iter() { let field_pos = pos.field(field_name.clone()); let mut field_directive_applications = Vec::new(); @@ -777,6 +788,8 @@ fn extract_object_type_content( message: "Subgraph unexpectedly does not use federation spec" .to_owned(), })?; + let cost_spec_definition = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema); add_subgraph_field( field_pos.clone().into(), field, @@ -784,6 +797,8 @@ fn extract_object_type_content( federation_spec_definition, is_shareable, None, + cost_spec_definition, + original_directive_names, )?; } } else { @@ -814,6 +829,8 @@ fn extract_object_type_content( message: "Subgraph unexpectedly does not use federation spec" .to_owned(), })?; + let cost_spec_definition = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema); if !subgraph_info.contains_key(graph_enum_value) { return Err( SingleFederationError::InvalidFederationSupergraph { @@ -833,6 +850,8 @@ fn extract_object_type_content( federation_spec_definition, is_shareable, Some(field_directive_application), + cost_spec_definition, + original_directive_names, )?; } } @@ -849,6 +868,7 @@ fn extract_interface_type_content( federation_spec_definitions: &IndexMap, join_spec_definition: &JoinSpecDefinition, info: &[TypeInfo], + original_directive_names: &IndexMap, ) -> Result<(), FederationError> { let field_directive_definition = join_spec_definition.field_directive_definition(supergraph_schema)?; @@ -971,6 +991,8 @@ fn extract_interface_type_content( message: "Subgraph unexpectedly does not use federation spec" .to_owned(), })?; + let cost_spec_definition = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema); add_subgraph_field( pos.field(field_name.clone()), field, @@ -978,6 +1000,8 @@ fn extract_interface_type_content( federation_spec_definition, false, None, + cost_spec_definition, + original_directive_names, )?; } } else { @@ -1001,6 +1025,8 @@ fn extract_interface_type_content( message: "Subgraph unexpectedly does not use federation spec" .to_owned(), })?; + let cost_spec_definition = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema); if !subgraph_info.contains_key(graph_enum_value) { return Err( SingleFederationError::InvalidFederationSupergraph { @@ -1020,6 +1046,8 @@ fn extract_interface_type_content( federation_spec_definition, false, Some(field_directive_application), + cost_spec_definition, + original_directive_names, )?; } } @@ -1125,8 +1153,10 @@ fn extract_enum_type_content( supergraph_schema: &FederationSchema, subgraphs: &mut FederationSubgraphs, graph_enum_value_name_to_subgraph_name: &IndexMap>, + federation_spec_definitions: &IndexMap, 
join_spec_definition: &JoinSpecDefinition, info: &[TypeInfo], + original_directive_names: &IndexMap, ) -> Result<(), FederationError> { // This was added in join 0.3, so it can genuinely be None. let enum_value_directive_definition = @@ -1142,6 +1172,29 @@ fn extract_enum_type_content( }; let type_ = pos.get(supergraph_schema.schema())?; + for graph_enum_value in subgraph_info.keys() { + let subgraph = get_subgraph( + subgraphs, + graph_enum_value_name_to_subgraph_name, + graph_enum_value, + )?; + let federation_spec_definition = federation_spec_definitions + .get(graph_enum_value) + .ok_or_else(|| SingleFederationError::InvalidFederationSupergraph { + message: "Subgraph unexpectedly does not use federation spec".to_owned(), + })?; + if let Some(cost_spec_definition) = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema) + { + cost_spec_definition.propagate_demand_control_directives_for_enum( + &mut subgraph.schema, + type_, + &pos, + original_directive_names, + )?; + } + } + for (value_name, value) in type_.values.iter() { let value_pos = pos.value(value_name.clone()); let mut enum_value_directive_applications = Vec::new(); @@ -1209,8 +1262,10 @@ fn extract_input_object_type_content( supergraph_schema: &FederationSchema, subgraphs: &mut FederationSubgraphs, graph_enum_value_name_to_subgraph_name: &IndexMap>, + federation_spec_definitions: &IndexMap, join_spec_definition: &JoinSpecDefinition, info: &[TypeInfo], + original_directive_names: &IndexMap, ) -> Result<(), FederationError> { let field_directive_definition = join_spec_definition.field_directive_definition(supergraph_schema)?; @@ -1242,7 +1297,22 @@ fn extract_input_object_type_content( graph_enum_value_name_to_subgraph_name, graph_enum_value, )?; - add_subgraph_input_field(input_field_pos.clone(), input_field, subgraph, None)?; + let federation_spec_definition = federation_spec_definitions + .get(graph_enum_value) + .ok_or_else(|| SingleFederationError::InvalidFederationSupergraph { + message: "Subgraph unexpectedly does not use federation spec" + .to_owned(), + })?; + let cost_spec_definition = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema); + add_subgraph_input_field( + input_field_pos.clone(), + input_field, + subgraph, + None, + cost_spec_definition, + original_directive_names, + )?; } } else { for field_directive_application in &field_directive_applications { @@ -1257,6 +1327,14 @@ fn extract_input_object_type_content( graph_enum_value_name_to_subgraph_name, graph_enum_value, )?; + let federation_spec_definition = federation_spec_definitions + .get(graph_enum_value) + .ok_or_else(|| SingleFederationError::InvalidFederationSupergraph { + message: "Subgraph unexpectedly does not use federation spec" + .to_owned(), + })?; + let cost_spec_definition = + federation_spec_definition.get_cost_spec_definition(&subgraph.schema); if !subgraph_info.contains_key(graph_enum_value) { return Err( SingleFederationError::InvalidFederationSupergraph { @@ -1274,6 +1352,8 @@ fn extract_input_object_type_content( input_field, subgraph, Some(field_directive_application), + cost_spec_definition, + original_directive_names, )?; } } @@ -1283,6 +1363,7 @@ fn extract_input_object_type_content( Ok(()) } +#[allow(clippy::too_many_arguments)] fn add_subgraph_field( object_or_interface_field_definition_position: ObjectOrInterfaceFieldDefinitionPosition, field: &FieldDefinition, @@ -1290,6 +1371,8 @@ fn add_subgraph_field( federation_spec_definition: &'static FederationSpecDefinition, is_shareable: bool, 
field_directive_application: Option<&FieldDirectiveArguments>, + cost_spec_definition: Option<&'static CostSpecDefinition>, + original_directive_names: &IndexMap, ) -> Result<(), FederationError> { let field_directive_application = field_directive_application.unwrap_or_else(|| &FieldDirectiveArguments { @@ -1317,15 +1400,25 @@ fn add_subgraph_field( }; for argument in &field.arguments { + let mut destination_argument = InputValueDefinition { + description: None, + name: argument.name.clone(), + ty: argument.ty.clone(), + default_value: argument.default_value.clone(), + directives: Default::default(), + }; + if let Some(cost_spec_definition) = cost_spec_definition { + cost_spec_definition.propagate_demand_control_directives( + &subgraph.schema, + &argument.directives, + &mut destination_argument.directives, + original_directive_names, + )?; + } + subgraph_field .arguments - .push(Node::new(InputValueDefinition { - description: None, - name: argument.name.clone(), - ty: argument.ty.clone(), - default_value: argument.default_value.clone(), - directives: Default::default(), - })) + .push(Node::new(destination_argument)) } if let Some(requires) = &field_directive_application.requires { subgraph_field.directives.push(Node::new( @@ -1367,6 +1460,15 @@ fn add_subgraph_field( )); } + if let Some(cost_spec_definition) = cost_spec_definition { + cost_spec_definition.propagate_demand_control_directives( + &subgraph.schema, + &field.directives, + &mut subgraph_field.directives, + original_directive_names, + )?; + } + match object_or_interface_field_definition_position { ObjectOrInterfaceFieldDefinitionPosition::Object(pos) => { pos.insert(&mut subgraph.schema, Component::from(subgraph_field))?; @@ -1384,6 +1486,8 @@ fn add_subgraph_input_field( input_field: &InputValueDefinition, subgraph: &mut FederationSubgraph, field_directive_application: Option<&FieldDirectiveArguments>, + cost_spec_definition: Option<&'static CostSpecDefinition>, + original_directive_names: &IndexMap, ) -> Result<(), FederationError> { let field_directive_application = field_directive_application.unwrap_or_else(|| &FieldDirectiveArguments { @@ -1400,7 +1504,7 @@ fn add_subgraph_input_field( Some(t) => Node::new(decode_type(t)?), None => input_field.ty.clone(), }; - let subgraph_input_field = InputValueDefinition { + let mut subgraph_input_field = InputValueDefinition { description: None, name: input_object_field_definition_position.field_name.clone(), ty: subgraph_input_field_type, @@ -1408,6 +1512,15 @@ fn add_subgraph_input_field( directives: Default::default(), }; + if let Some(cost_spec_definition) = cost_spec_definition { + cost_spec_definition.propagate_demand_control_directives( + &subgraph.schema, + &input_field.directives, + &mut subgraph_input_field.directives, + original_directive_names, + )?; + } + input_object_field_definition_position .insert(&mut subgraph.schema, Component::from(subgraph_input_field))?; @@ -1443,105 +1556,6 @@ fn get_subgraph<'subgraph>( }) } -struct FederationSubgraph { - name: String, - url: String, - schema: FederationSchema, -} - -struct FederationSubgraphs { - subgraphs: BTreeMap, -} - -impl FederationSubgraphs { - fn new() -> Self { - FederationSubgraphs { - subgraphs: BTreeMap::new(), - } - } - - fn add(&mut self, subgraph: FederationSubgraph) -> Result<(), FederationError> { - if self.subgraphs.contains_key(&subgraph.name) { - return Err(SingleFederationError::InvalidFederationSupergraph { - message: format!("A subgraph named \"{}\" already exists", subgraph.name), - } - .into()); - } - 
self.subgraphs.insert(subgraph.name.clone(), subgraph); - Ok(()) - } - - fn get(&self, name: &str) -> Option<&FederationSubgraph> { - self.subgraphs.get(name) - } - - fn get_mut(&mut self, name: &str) -> Option<&mut FederationSubgraph> { - self.subgraphs.get_mut(name) - } -} - -impl IntoIterator for FederationSubgraphs { - type Item = as IntoIterator>::Item; - type IntoIter = as IntoIterator>::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.subgraphs.into_iter() - } -} - -// TODO(@goto-bus-stop): consider an appropriate name for this in the public API -// TODO(@goto-bus-stop): should this exist separately from the `crate::subgraph::Subgraph` type? -#[derive(Debug)] -pub struct ValidFederationSubgraph { - pub name: String, - pub url: String, - pub schema: ValidFederationSchema, -} - -pub struct ValidFederationSubgraphs { - subgraphs: BTreeMap, ValidFederationSubgraph>, -} - -impl fmt::Debug for ValidFederationSubgraphs { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("ValidFederationSubgraphs ")?; - f.debug_map().entries(self.subgraphs.iter()).finish() - } -} - -impl ValidFederationSubgraphs { - pub(crate) fn new() -> Self { - ValidFederationSubgraphs { - subgraphs: BTreeMap::new(), - } - } - - pub(crate) fn add(&mut self, subgraph: ValidFederationSubgraph) -> Result<(), FederationError> { - if self.subgraphs.contains_key(subgraph.name.as_str()) { - return Err(SingleFederationError::InvalidFederationSupergraph { - message: format!("A subgraph named \"{}\" already exists", subgraph.name), - } - .into()); - } - self.subgraphs - .insert(subgraph.name.as_str().into(), subgraph); - Ok(()) - } - - pub fn get(&self, name: &str) -> Option<&ValidFederationSubgraph> { - self.subgraphs.get(name) - } -} - -impl IntoIterator for ValidFederationSubgraphs { - type Item = , ValidFederationSubgraph> as IntoIterator>::Item; - type IntoIter = , ValidFederationSubgraph> as IntoIterator>::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.subgraphs.into_iter() - } -} - lazy_static! { static ref EXECUTABLE_DIRECTIVE_LOCATIONS: IndexSet = { [ @@ -1787,6 +1801,7 @@ fn add_federation_operations( /// unnecessarily. Besides, if a usage adds something useless, there is a chance it hasn't fully /// understood something, and warning about that fact through an error is more helpful. 
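The subgraph registries relocated to the new `supergraph::subgraph` module keep the duplicate-name guard visible in the removed code. A toy version of that shape, keeping only names and URLs:

```rust
use std::collections::BTreeMap;

struct Subgraphs {
    // BTreeMap keeps subgraphs sorted by name, so iteration order is stable.
    subgraphs: BTreeMap<String, String>, // name -> url
}

impl Subgraphs {
    fn add(&mut self, name: &str, url: &str) -> Result<(), String> {
        if self.subgraphs.contains_key(name) {
            return Err(format!("A subgraph named \"{name}\" already exists"));
        }
        self.subgraphs.insert(name.to_string(), url.to_string());
        Ok(())
    }
}

fn main() {
    let mut subgraphs = Subgraphs { subgraphs: BTreeMap::new() };
    subgraphs.add("reviews", "http://localhost:4001").unwrap();
    assert!(subgraphs.add("reviews", "http://localhost:4002").is_err());
}
```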
fn remove_inactive_requires_and_provides_from_subgraph( + supergraph_schema: &FederationSchema, schema: &mut FederationSchema, ) -> Result<(), FederationError> { let federation_spec_definition = get_federation_spec_definition_from_subgraph(schema)?; @@ -1838,6 +1853,7 @@ fn remove_inactive_requires_and_provides_from_subgraph( for pos in object_or_interface_field_definition_positions { remove_inactive_applications( + supergraph_schema, schema, federation_spec_definition, FieldSetDirectiveKind::Requires, @@ -1845,6 +1861,7 @@ fn remove_inactive_requires_and_provides_from_subgraph( pos.clone(), )?; remove_inactive_applications( + supergraph_schema, schema, federation_spec_definition, FieldSetDirectiveKind::Provides, @@ -1862,6 +1879,7 @@ enum FieldSetDirectiveKind { } fn remove_inactive_applications( + supergraph_schema: &FederationSchema, schema: &mut FederationSchema, federation_spec_definition: &'static FederationSpecDefinition, directive_kind: FieldSetDirectiveKind, @@ -1871,7 +1889,7 @@ fn remove_inactive_applications( let mut replacement_directives = Vec::new(); let field = object_or_interface_field_definition_position.get(schema.schema())?; for directive in field.directives.get_all(name_in_schema) { - let (fields, parent_type_pos) = match directive_kind { + let (fields, parent_type_pos, target_schema) = match directive_kind { FieldSetDirectiveKind::Provides => { let fields = federation_spec_definition .provides_directive_arguments(directive)? @@ -1879,7 +1897,7 @@ fn remove_inactive_applications( let parent_type_pos: CompositeTypeDefinitionPosition = schema .get_type(field.ty.inner_named_type().clone())? .try_into()?; - (fields, parent_type_pos) + (fields, parent_type_pos, schema.schema()) } FieldSetDirectiveKind::Requires => { let fields = federation_spec_definition @@ -1890,7 +1908,8 @@ fn remove_inactive_applications( .parent() .clone() .into(); - (fields, parent_type_pos) + // @requires needs to be validated against the supergraph schema + (fields, parent_type_pos, supergraph_schema.schema()) } }; // TODO: The assume_valid_ref() here is non-ideal, in the sense that the error messages we @@ -1900,7 +1919,7 @@ fn remove_inactive_applications( // At best, we could try to shift this computation to after the subgraph schema validation // step, but its unclear at this time whether performing this shift affects correctness (and // it takes time to determine that). So for now, we keep this here. - let valid_schema = Valid::assume_valid_ref(schema.schema()); + let valid_schema = Valid::assume_valid_ref(target_schema); // TODO: In the JS codebase, this function ends up getting additionally used in the schema // upgrader, where parsing the field set may error. In such cases, we end up skipping those // directives instead of returning error here, as it pollutes the list of error messages @@ -2094,10 +2113,181 @@ fn maybe_dump_subgraph_schema(subgraph: FederationSubgraph, message: &mut String }; } +//////////////////////////////////////////////////////////////////////////////// +/// @join__directive extraction + +static JOIN_DIRECTIVE: &str = "join__directive"; + +/// Converts `@join__directive(graphs: [A], name: "foo")` to `@foo` in the A subgraph. +/// If the directive is a link directive on the schema definition, we also need +/// to update the metadata and add the imported definitions. 
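As a worked example of the conversion described above, with toy types standing in for `apollo_compiler`'s AST: `@join__directive(graphs: [SUBGRAPH], name: "hello", args: {to: "world"})` on the supergraph becomes `@hello(to: "world")` applied in the `SUBGRAPH` subgraph.

```rust
#[derive(Debug)]
struct Directive {
    name: String,
    arguments: Vec<(String, String)>,
}

// Mirrors the shape of `join_directive_to_real_directive` below: split the
// `graphs` argument off as the list of target subgraphs, and rebuild the named
// directive from `name` and `args`.
fn to_real_directive(
    graphs: Vec<String>,
    name: &str,
    args: Vec<(String, String)>,
) -> (Directive, Vec<String>) {
    (Directive { name: name.to_string(), arguments: args }, graphs)
}

fn main() {
    let (directive, graphs) = to_real_directive(
        vec!["SUBGRAPH".to_string()],
        "hello",
        vec![("to".to_string(), "world".to_string())],
    );
    println!("@{}{:?} applies in {:?}", directive.name, directive.arguments, graphs);
}
```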
+fn extract_join_directives( + supergraph_schema: &FederationSchema, + subgraphs: &mut FederationSubgraphs, + graph_enum_value_name_to_subgraph_name: &IndexMap>, +) -> Result<(), FederationError> { + let join_directives = match supergraph_schema + .referencers() + .get_directive(JOIN_DIRECTIVE) + { + Ok(directives) => directives, + Err(_) => { + // No join directives found, nothing to do. + return Ok(()); + } + }; + + if let Some(schema_def_pos) = &join_directives.schema { + let schema_def = schema_def_pos.get(supergraph_schema.schema()); + let directives = schema_def + .directives + .iter() + .filter_map(|d| { + if d.name == JOIN_DIRECTIVE { + Some(join_directive_to_real_directive(d)) + } else { + None + } + }) + .collect_vec(); + + // TODO: Do we need to handle the link directive being renamed? + let (links, others) = directives + .into_iter() + .partition::, _>(|(d, _)| d.name == DEFAULT_LINK_NAME); + + // After adding links, we'll check the link against a safelist of + // specs and check_or_add the spec definitions if necessary. + for (link_directive, subgraph_enum_values) in links { + for subgraph_enum_value in subgraph_enum_values { + let subgraph = get_subgraph( + subgraphs, + graph_enum_value_name_to_subgraph_name, + &subgraph_enum_value, + )?; + + schema_def_pos.insert_directive( + &mut subgraph.schema, + Component::new(link_directive.clone()), + )?; + + // TODO: add imported definitions from relevant specs + } + } + + // Other directives are added normally. + for (directive, subgraph_enum_values) in others { + for subgraph_enum_value in subgraph_enum_values { + let subgraph = get_subgraph( + subgraphs, + graph_enum_value_name_to_subgraph_name, + &subgraph_enum_value, + )?; + + schema_def_pos + .insert_directive(&mut subgraph.schema, Component::new(directive.clone()))?; + } + } + } + + for object_field_pos in &join_directives.object_fields { + let object_field = object_field_pos.get(supergraph_schema.schema())?; + let directives = object_field + .directives + .iter() + .filter_map(|d| { + if d.name == JOIN_DIRECTIVE { + Some(join_directive_to_real_directive(d)) + } else { + None + } + }) + .collect_vec(); + + for (directive, subgraph_enum_values) in directives { + for subgraph_enum_value in subgraph_enum_values { + let subgraph = get_subgraph( + subgraphs, + graph_enum_value_name_to_subgraph_name, + &subgraph_enum_value, + )?; + + object_field_pos + .insert_directive(&mut subgraph.schema, Node::new(directive.clone()))?; + } + } + } + + // TODO + // - join_directives.directive_arguments + // - join_directives.enum_types + // - join_directives.enum_values + // - join_directives.input_object_fields + // - join_directives.input_object_types + // - join_directives.interface_field_arguments + // - join_directives.interface_fields + // - join_directives.interface_types + // - join_directives.object_field_arguments + // - join_directives.object_types + // - join_directives.scalar_types + // - join_directives.union_types + + Ok(()) +} + +fn join_directive_to_real_directive(directive: &Node) -> (Directive, Vec) { + let subgraph_enum_values = directive + .argument_by_name("graphs") + .and_then(|arg| arg.as_list()) + .map(|list| { + list.iter() + .map(|node| { + Name::new( + node.as_enum() + .expect("join__directive(graphs:) value is an enum") + .as_str(), + ) + .expect("join__directive(graphs:) value is a valid name") + }) + .collect() + }) + .expect("join__directive(graphs:) missing"); + + let name = directive + .argument_by_name("name") + .expect("join__directive(name:) is present") + 
.as_str() + .expect("join__directive(name:) is a string"); + + let arguments = directive + .argument_by_name("args") + .and_then(|a| a.as_object()) + .map(|args| { + args.iter() + .map(|(k, v)| { + Argument { + name: k.clone(), + value: v.clone(), + } + .into() + }) + .collect() + }) + .unwrap_or_default(); + + let directive = Directive { + name: Name::new(name).expect("join__directive(name:) is a valid name"), + arguments, + }; + + (directive, subgraph_enum_values) +} + #[cfg(test)] mod tests { use apollo_compiler::name; use apollo_compiler::Schema; + use insta::assert_snapshot; use crate::schema::FederationSchema; use crate::ValidFederationSubgraphs; @@ -2709,4 +2899,79 @@ mod tests { let user_type = subgraph.schema.schema().get_object("User"); assert!(user_type.is_none()); } + + #[test] + fn test_join_directives() { + let supergraph = r###"schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @join__directive(graphs: [SUBGRAPH], name: "link", args: {url: "https://specs.apollo.dev/hello/v0.1", import: ["@hello"]}) + { + query: Query + } + + directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + + directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + + directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + + directive @join__graph(name: String!, url: String!) on ENUM_VALUE + + directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + + directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + + directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! + } + + scalar join__DirectiveArguments + + scalar join__FieldSet + + scalar join__FieldValue + + enum join__Graph { + SUBGRAPH @join__graph(name: "subgraph", url: "none") + } + + scalar link__Import + + enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. 
+ """ + EXECUTION + } + + type Query + @join__type(graph: SUBGRAPH) + { + f: String + } + "###; + + let schema = Schema::parse(supergraph, "supergraph.graphql").unwrap(); + let ValidFederationSubgraphs { subgraphs } = super::extract_subgraphs_from_supergraph( + &FederationSchema::new(schema).unwrap(), + Some(true), + ) + .unwrap(); + + let subgraph = subgraphs.get("subgraph").unwrap(); + assert_snapshot!(subgraph.schema.schema().schema_definition.directives, @r###" @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") @link(url: "https://specs.apollo.dev/hello/v0.1", import: ["@hello"])"###); + } } diff --git a/apollo-federation/src/supergraph/schema.rs b/apollo-federation/src/supergraph/schema.rs new file mode 100644 index 0000000000..589131f633 --- /dev/null +++ b/apollo-federation/src/supergraph/schema.rs @@ -0,0 +1,109 @@ +use apollo_compiler::collections::IndexMap; +use apollo_compiler::schema::SchemaBuilder; +use apollo_compiler::Name; + +use crate::error::FederationError; +use crate::link::spec::APOLLO_SPEC_DOMAIN; +use crate::link::Link; +use crate::schema::FederationSchema; + +/// Builds a map of original name to new name for Apollo feature directives. This is +/// used to handle cases where a directive is renamed via an import statement. For +/// example, importing a directive with a custom name like +/// ```graphql +/// @link(url: "https://specs.apollo.dev/cost/v0.1", import: [{ name: "@cost", as: "@renamedCost" }]) +/// ``` +/// results in a map entry of `cost -> renamedCost` with the `@` prefix removed. +/// +/// If the directive is imported under its default name, that also results in an entry. So, +/// ```graphql +/// @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@cost"]) +/// ``` +/// results in a map entry of `cost -> cost`. This duals as a way to check if a directive +/// is included in the supergraph schema. +/// +/// **Important:** This map does _not_ include directives imported from identities other +/// than `specs.apollo.dev`. This helps us avoid extracting directives to subgraphs +/// when a custom directive's name conflicts with that of a default one. +pub(super) fn get_apollo_directive_names( + supergraph_schema: &FederationSchema, +) -> Result, FederationError> { + let mut hm: IndexMap = IndexMap::default(); + for directive in &supergraph_schema.schema().schema_definition.directives { + if directive.name.as_str() == "link" { + if let Ok(link) = Link::from_directive_application(directive) { + if link.url.identity.domain != APOLLO_SPEC_DOMAIN { + continue; + } + for import in link.imports { + hm.insert(import.element.clone(), import.imported_name().clone()); + } + } + } + } + Ok(hm) +} + +/// TODO: Use the JS/programmatic approach instead of hard-coding definitions. +pub(crate) fn new_empty_fed_2_subgraph_schema() -> Result { + let builder = SchemaBuilder::new().adopt_orphan_extensions(); + let builder = builder.parse( + r#" + extend schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/federation/v2.9") + + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + scalar link__Import + + enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + \`EXECUTION\` features provide metadata necessary for operation execution. 
+ """ + EXECUTION + } + + directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + + directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + + directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + + directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + + directive @federation__tag(name: String!) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + + directive @federation__extends on OBJECT | INTERFACE + + directive @federation__shareable on OBJECT | FIELD_DEFINITION + + directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + + directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + + directive @federation__composeDirective(name: String) repeatable on SCHEMA + + directive @federation__interfaceObject on OBJECT + + directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + + directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + + directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + + directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + + scalar federation__FieldSet + + scalar federation__Scope + "#, + "subgraph.graphql", + ); + FederationSchema::new(builder.build()?) 
+}
diff --git a/apollo-federation/src/supergraph/subgraph.rs b/apollo-federation/src/supergraph/subgraph.rs
new file mode 100644
index 0000000000..7697d3b569
--- /dev/null
+++ b/apollo-federation/src/supergraph/subgraph.rs
@@ -0,0 +1,107 @@
+use std::collections::BTreeMap;
+use std::fmt;
+use std::sync::Arc;
+
+use crate::error::FederationError;
+use crate::error::SingleFederationError;
+use crate::schema::FederationSchema;
+use crate::schema::ValidFederationSchema;
+
+pub(super) struct FederationSubgraph {
+    pub(super) name: String,
+    pub(super) url: String,
+    pub(super) schema: FederationSchema,
+}
+
+pub(super) struct FederationSubgraphs {
+    pub(super) subgraphs: BTreeMap<String, FederationSubgraph>,
+}
+
+impl FederationSubgraphs {
+    pub(super) fn new() -> Self {
+        FederationSubgraphs {
+            subgraphs: BTreeMap::new(),
+        }
+    }
+
+    pub(super) fn add(&mut self, subgraph: FederationSubgraph) -> Result<(), FederationError> {
+        if self.subgraphs.contains_key(&subgraph.name) {
+            return Err(SingleFederationError::InvalidFederationSupergraph {
+                message: format!("A subgraph named \"{}\" already exists", subgraph.name),
+            }
+            .into());
+        }
+        self.subgraphs.insert(subgraph.name.clone(), subgraph);
+        Ok(())
+    }
+
+    fn get(&self, name: &str) -> Option<&FederationSubgraph> {
+        self.subgraphs.get(name)
+    }
+
+    pub(super) fn get_mut(&mut self, name: &str) -> Option<&mut FederationSubgraph> {
+        self.subgraphs.get_mut(name)
+    }
+}
+
+impl IntoIterator for FederationSubgraphs {
+    type Item = <BTreeMap<String, FederationSubgraph> as IntoIterator>::Item;
+    type IntoIter = <BTreeMap<String, FederationSubgraph> as IntoIterator>::IntoIter;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.subgraphs.into_iter()
+    }
+}
+
+// TODO(@goto-bus-stop): consider an appropriate name for this in the public API
+// TODO(@goto-bus-stop): should this exist separately from the `crate::subgraph::Subgraph` type?
+#[derive(Debug, Clone)]
+pub struct ValidFederationSubgraph {
+    pub name: String,
+    pub url: String,
+    pub schema: ValidFederationSchema,
+}
+
+pub struct ValidFederationSubgraphs {
+    pub(super) subgraphs: BTreeMap<Arc<str>, ValidFederationSubgraph>,
+}
+
+impl fmt::Debug for ValidFederationSubgraphs {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str("ValidFederationSubgraphs ")?;
+        f.debug_map().entries(self.subgraphs.iter()).finish()
+    }
+}
+
+impl ValidFederationSubgraphs {
+    pub(crate) fn new() -> Self {
+        ValidFederationSubgraphs {
+            subgraphs: BTreeMap::new(),
+        }
+    }
+
+    pub(crate) fn add(&mut self, subgraph: ValidFederationSubgraph) -> Result<(), FederationError> {
+        if self.subgraphs.contains_key(subgraph.name.as_str()) {
+            return Err(SingleFederationError::InvalidFederationSupergraph {
+                message: format!("A subgraph named \"{}\" already exists", subgraph.name),
+            }
+            .into());
+        }
+        self.subgraphs
+            .insert(subgraph.name.as_str().into(), subgraph);
+        Ok(())
+    }
+
+    pub fn get(&self, name: &str) -> Option<&ValidFederationSubgraph> {
+        self.subgraphs.get(name)
+    }
+}
+
+impl IntoIterator for ValidFederationSubgraphs {
+    type Item = <BTreeMap<Arc<str>, ValidFederationSubgraph> as IntoIterator>::Item;
+    type IntoIter = <BTreeMap<Arc<str>, ValidFederationSubgraph> as IntoIterator>::IntoIter;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.subgraphs.into_iter()
+    }
+}
diff --git a/apollo-federation/src/utils/logging.rs b/apollo-federation/src/utils/logging.rs
index fd7bb4b3d2..c7a07c2ef2 100644
--- a/apollo-federation/src/utils/logging.rs
+++ b/apollo-federation/src/utils/logging.rs
@@ -25,7 +25,7 @@ macro_rules!
snapshot { #[cfg(feature = "snapshot_tracing")] tracing::trace!( snapshot = std::any::type_name_of_val(&$value), - data = serde_json::to_string(&$value).expect(concat!( + data = ron::ser::to_string(&$value).expect(concat!( "Could not serialize value for a snapshot with message: ", $msg )), @@ -36,7 +36,7 @@ macro_rules! snapshot { #[cfg(feature = "snapshot_tracing")] tracing::trace!( snapshot = std::any::type_name_of_val(&$value), - data = serde_json::to_string(&$value).expect(concat!( + data = ron::ser::to_string(&$value).expect(concat!( "Could not serialize value for a snapshot with message: ", $msg )), diff --git a/apollo-federation/tests/extract_subgraphs.rs b/apollo-federation/tests/extract_subgraphs.rs index 51a505345f..2148185184 100644 --- a/apollo-federation/tests/extract_subgraphs.rs +++ b/apollo-federation/tests/extract_subgraphs.rs @@ -255,3 +255,445 @@ fn erase_empty_types_due_to_overridden_fields() { .schema(); assert!(!b.types.contains_key("User")); } + +#[test] +fn extracts_demand_control_directives() { + let subgraphs = Supergraph::new(r#" + schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@cost", "@listSize"]) + { + query: Query + } + + directive @cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + + directive @cost__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + + directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + + directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + + directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + + directive @join__graph(name: String!, url: String!) on ENUM_VALUE + + directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + + directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + + directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + directive @listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + + enum AorB + @join__type(graph: SUBGRAPHWITHCOST) + @cost(weight: 15) + { + A @join__enumValue(graph: SUBGRAPHWITHCOST) + B @join__enumValue(graph: SUBGRAPHWITHCOST) + } + + scalar ExpensiveInt + @join__type(graph: SUBGRAPHWITHCOST) + @cost(weight: 30) + + type ExpensiveObject + @join__type(graph: SUBGRAPHWITHCOST) + @cost(weight: 40) + { + id: ID + } + + type HasInts + @join__type(graph: SUBGRAPHWITHLISTSIZE) + { + ints: [Int!] + } + + input InputTypeWithCost + @join__type(graph: SUBGRAPHWITHCOST) + { + somethingWithCost: Int @cost(weight: 20) + } + + input join__ContextArgument { + name: String! + type: String! + context: String! 
+ selection: join__FieldValue! + } + + scalar join__DirectiveArguments + + scalar join__FieldSet + + scalar join__FieldValue + + enum join__Graph { + SUBGRAPHWITHCOST @join__graph(name: "subgraphWithCost", url: "") + SUBGRAPHWITHLISTSIZE @join__graph(name: "subgraphWithListSize", url: "") + } + + scalar link__Import + + enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION + } + + type Query + @join__type(graph: SUBGRAPHWITHCOST) + @join__type(graph: SUBGRAPHWITHLISTSIZE) + { + fieldWithCost: Int @join__field(graph: SUBGRAPHWITHCOST) @cost(weight: 5) + argWithCost(arg: Int @cost(weight: 10)): Int @join__field(graph: SUBGRAPHWITHCOST) + enumWithCost: AorB @join__field(graph: SUBGRAPHWITHCOST) + inputWithCost(someInput: InputTypeWithCost): Int @join__field(graph: SUBGRAPHWITHCOST) + scalarWithCost: ExpensiveInt @join__field(graph: SUBGRAPHWITHCOST) + objectWithCost: ExpensiveObject @join__field(graph: SUBGRAPHWITHCOST) + fieldWithListSize: [String!] @join__field(graph: SUBGRAPHWITHLISTSIZE) @listSize(assumedSize: 2000, requireOneSlicingArgument: false) + fieldWithDynamicListSize(first: Int!): HasInts @join__field(graph: SUBGRAPHWITHLISTSIZE) @listSize(slicingArguments: ["first"], sizedFields: ["ints"], requireOneSlicingArgument: true) + } + "#) + .expect("is supergraph") + .extract_subgraphs() + .expect("extracts subgraphs"); + + let mut snapshot = String::new(); + for (_name, subgraph) in subgraphs { + use std::fmt::Write; + + _ = writeln!( + &mut snapshot, + "{}\n---\n{}", + subgraph.name, + subgraph.schema.schema() + ); + } + insta::assert_snapshot!(snapshot); +} + +#[test] +fn extracts_renamed_demand_control_directives() { + let subgraphs = Supergraph::new(r#" + schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link(url: "https://specs.apollo.dev/cost/v0.1", import: [{name: "@cost", as: "@renamedCost"}, {name: "@listSize", as: "@renamedListSize"}]) + { + query: Query + } + + directive @cost__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + + directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + + directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + + directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + + directive @join__graph(name: String!, url: String!) on ENUM_VALUE + + directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + + directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + + directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + directive @renamedCost(weight: Int!) 
on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + + directive @renamedListSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + + enum AorB + @join__type(graph: SUBGRAPHWITHCOST) + @renamedCost(weight: 15) + { + A @join__enumValue(graph: SUBGRAPHWITHCOST) + B @join__enumValue(graph: SUBGRAPHWITHCOST) + } + + scalar ExpensiveInt + @join__type(graph: SUBGRAPHWITHCOST) + @renamedCost(weight: 30) + + type ExpensiveObject + @join__type(graph: SUBGRAPHWITHCOST) + @renamedCost(weight: 40) + { + id: ID + } + + type HasInts + @join__type(graph: SUBGRAPHWITHLISTSIZE) + { + ints: [Int!] + } + + input InputTypeWithCost + @join__type(graph: SUBGRAPHWITHCOST) + { + somethingWithCost: Int @renamedCost(weight: 20) + } + + input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! + } + + scalar join__DirectiveArguments + + scalar join__FieldSet + + scalar join__FieldValue + + enum join__Graph { + SUBGRAPHWITHCOST @join__graph(name: "subgraphWithCost", url: "") + SUBGRAPHWITHLISTSIZE @join__graph(name: "subgraphWithListSize", url: "") + } + + scalar link__Import + + enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION + } + + type Query + @join__type(graph: SUBGRAPHWITHCOST) + @join__type(graph: SUBGRAPHWITHLISTSIZE) + { + fieldWithCost: Int @join__field(graph: SUBGRAPHWITHCOST) @renamedCost(weight: 5) + argWithCost(arg: Int @renamedCost(weight: 10)): Int @join__field(graph: SUBGRAPHWITHCOST) + enumWithCost: AorB @join__field(graph: SUBGRAPHWITHCOST) + inputWithCost(someInput: InputTypeWithCost): Int @join__field(graph: SUBGRAPHWITHCOST) + scalarWithCost: ExpensiveInt @join__field(graph: SUBGRAPHWITHCOST) + objectWithCost: ExpensiveObject @join__field(graph: SUBGRAPHWITHCOST) + fieldWithListSize: [String!] @join__field(graph: SUBGRAPHWITHLISTSIZE) @renamedListSize(assumedSize: 2000, requireOneSlicingArgument: false) + fieldWithDynamicListSize(first: Int!): HasInts @join__field(graph: SUBGRAPHWITHLISTSIZE) @renamedListSize(slicingArguments: ["first"], sizedFields: ["ints"], requireOneSlicingArgument: true) + } + "#) + .expect("parses") + .extract_subgraphs() + .expect("extracts"); + + let mut snapshot = String::new(); + for (_name, subgraph) in subgraphs { + use std::fmt::Write; + + _ = writeln!( + &mut snapshot, + "{}\n---\n{}", + subgraph.name, + subgraph.schema.schema() + ); + } + insta::assert_snapshot!(snapshot); +} + +#[test] +fn does_not_extract_demand_control_directive_name_conflicts() { + let subgraphs = Supergraph::new(r#" + schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link(url: "https://example.com/myCustomDirective/v1.0", import: ["@cost"]) + @link(url: "https://example.com/myOtherCustomDirective/v1.0", import: ["@listSize"]) + { + query: Query + } + + directive @cost(name: String!) on FIELD_DEFINITION | SCALAR + + directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + + directive @join__enumValue(graph: join__Graph!) 
repeatable on ENUM_VALUE + + directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + + directive @join__graph(name: String!, url: String!) on ENUM_VALUE + + directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + + directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + + directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + directive @listSize(name: String!) on FIELD_DEFINITION + + input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! + } + + scalar join__DirectiveArguments + + scalar join__FieldSet + + scalar join__FieldValue + + enum join__Graph { + SUBGRAPH_A @join__graph(name: "subgraph-a", url: "") + SUBGRAPH_B @join__graph(name: "subgraph-b", url: "") + } + + scalar link__Import + + enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION + } + + scalar ExpensiveInt @cost(name: "expensiveInt") + @join__type(graph: SUBGRAPH_A) + + type Query + @join__type(graph: SUBGRAPH_A) + @join__type(graph: SUBGRAPH_B) + { + a: ExpensiveInt @join__field(graph: SUBGRAPH_A) @cost(name: "cost") + b: [Int] @join__field(graph: SUBGRAPH_B) @listSize(name: "listSize") + } + "#) + .expect("parses") + .extract_subgraphs() + .expect("extracts"); + + let mut snapshot = String::new(); + for (_name, subgraph) in subgraphs { + use std::fmt::Write; + + _ = writeln!( + &mut snapshot, + "{}\n---\n{}", + subgraph.name, + subgraph.schema.schema() + ); + } + insta::assert_snapshot!(snapshot); +} + +#[test] +fn does_not_extract_renamed_demand_control_directive_name_conflicts() { + let subgraphs = Supergraph::new(r#" + schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link(url: "https://example.com/myCustomDirective/v1.0", import: [{name: "@cost", as: "@renamedCost"}]) + @link(url: "https://example.com/myOtherCustomDirective/v1.0", import: [{name: "@listSize", as: "@renamedListSize"}]) + { + query: Query + } + + directive @renamedCost(name: String!) on FIELD_DEFINITION | SCALAR + + directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + + directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + + directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + + directive @join__graph(name: String!, url: String!) on ENUM_VALUE + + directive @join__implements(graph: join__Graph!, interface: String!) 
 repeatable on OBJECT | INTERFACE
+
+      directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR
+
+      directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION
+
+      directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA
+
+      directive @renamedListSize(name: String!) on FIELD_DEFINITION
+
+      input join__ContextArgument {
+        name: String!
+        type: String!
+        context: String!
+        selection: join__FieldValue!
+      }
+
+      scalar join__DirectiveArguments
+
+      scalar join__FieldSet
+
+      scalar join__FieldValue
+
+      enum join__Graph {
+        SUBGRAPH_A @join__graph(name: "subgraph-a", url: "")
+        SUBGRAPH_B @join__graph(name: "subgraph-b", url: "")
+      }
+
+      scalar link__Import
+
+      enum link__Purpose {
+        """
+        `SECURITY` features provide metadata necessary to securely resolve fields.
+        """
+        SECURITY
+
+        """
+        `EXECUTION` features provide metadata necessary for operation execution.
+        """
+        EXECUTION
+      }
+
+      scalar ExpensiveInt @renamedCost(name: "expensiveInt")
+        @join__type(graph: SUBGRAPH_A)
+
+      type Query
+        @join__type(graph: SUBGRAPH_A)
+        @join__type(graph: SUBGRAPH_B)
+      {
+        a: ExpensiveInt @join__field(graph: SUBGRAPH_A) @renamedCost(name: "cost")
+        b: [Int] @join__field(graph: SUBGRAPH_B) @renamedListSize(name: "listSize")
+      }
+    "#)
+    .expect("parses")
+    .extract_subgraphs()
+    .expect("extracts");
+
+    let mut snapshot = String::new();
+    for (_name, subgraph) in subgraphs {
+        use std::fmt::Write;
+
+        _ = writeln!(
+            &mut snapshot,
+            "{}\n---\n{}",
+            subgraph.name,
+            subgraph.schema.schema()
+        );
+    }
+    insta::assert_snapshot!(snapshot);
+}
diff --git a/apollo-federation/tests/query_plan/build_query_plan_support.rs b/apollo-federation/tests/query_plan/build_query_plan_support.rs
index 8f594ef271..ed70798f2f 100644
--- a/apollo-federation/tests/query_plan/build_query_plan_support.rs
+++ b/apollo-federation/tests/query_plan/build_query_plan_support.rs
@@ -1,8 +1,8 @@
-use std::collections::HashSet;
 use std::io::Read;
 use std::sync::Mutex;
 use std::sync::OnceLock;
 
+use apollo_compiler::collections::IndexSet;
 use apollo_federation::query_plan::query_planner::QueryPlanner;
 use apollo_federation::query_plan::query_planner::QueryPlannerConfig;
 use apollo_federation::query_plan::FetchNode;
@@ -96,7 +96,7 @@ pub(crate) fn compose(
     function_path: &'static str,
     subgraph_names_and_schemas: &[(&str, &str)],
 ) -> String {
-    let unique_names: std::collections::HashSet<_> = subgraph_names_and_schemas
+    let unique_names: IndexSet<_> = subgraph_names_and_schemas
         .iter()
         .map(|(name, _)| name)
         .collect();
@@ -127,7 +127,7 @@ pub(crate) fn compose(
     let prefix = "# Composed from subgraphs with hash: ";
     let test_name = function_path.rsplit("::").next().unwrap();
 
-    static SEEN_TEST_NAMES: OnceLock<Mutex<HashSet<String>>> = OnceLock::new();
+    static SEEN_TEST_NAMES: OnceLock<Mutex<IndexSet<String>>> = OnceLock::new();
     let new = SEEN_TEST_NAMES
         .get_or_init(Default::default)
         .lock()
diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests.rs b/apollo-federation/tests/query_plan/build_query_plan_tests.rs
index e849bf74c2..a5a8769c2a 100644
--- a/apollo-federation/tests/query_plan/build_query_plan_tests.rs
+++ b/apollo-federation/tests/query_plan/build_query_plan_tests.rs
@@ -720,3 +720,197 @@ fn defer_gets_stripped_out() {
     );
     assert_eq!(plan_one, plan_two)
 }
+
+#[test]
+fn
test_merging_fetches_do_not_create_cycle_in_fetch_dependency_graph() { + // This is a test for ROUTER-546 (the second part). + let planner = planner!( + S: r#" + type Query { + start: T! + } + + type T @key(fields: "id") { + id: String! + } + "#, + A: r#" + type T @key(fields: "id") { + id: String! @shareable + u: U! @shareable + } + + type U @key(fields: "id") { + id: ID! + a: String! @shareable + b: String @shareable + } + "#, + B: r#" + type T @key(fields: "id") { + id: String! @external + u: U! @shareable + } + + type U @key(fields: "id") { + id: ID! + a: String! @shareable + # Note: b is not here. + } + + # This definition is necessary. + extend type W @key(fields: "id") { + id: ID @external + } + "#, + C: r#" + extend type U @key(fields: "id") { + id: ID! @external + a: String! @external + b: String @external + w: W @requires(fields: "a b") + } + + type W @key(fields: "id") { + id: ID + y: Y + w1: Int + w2: Int + w3: Int + w4: Int + w5: Int + } + + type Y { + y1: Int + y2: Int + y3: Int + } + "#, + ); + assert_plan!( + &planner, + r#" + { + start { + u { + w { + id + w1 + w2 + w3 + w4 + w5 + y { + y1 + y2 + y3 + } + } + } + } + } + "#, + @r###" + QueryPlan { + Sequence { + Fetch(service: "S") { + { + start { + __typename + id + } + } + }, + Parallel { + Sequence { + Flatten(path: "start") { + Fetch(service: "B") { + { + ... on T { + __typename + id + } + } => + { + ... on T { + u { + __typename + id + } + } + } + }, + }, + Flatten(path: "start.u") { + Fetch(service: "A") { + { + ... on U { + __typename + id + } + } => + { + ... on U { + b + a + } + } + }, + }, + }, + Flatten(path: "start") { + Fetch(service: "A") { + { + ... on T { + __typename + id + } + } => + { + ... on T { + u { + __typename + id + b + a + } + } + } + }, + }, + }, + Flatten(path: "start.u") { + Fetch(service: "C") { + { + ... on U { + __typename + a + b + id + } + } => + { + ... on U { + w { + y { + y1 + y2 + y3 + } + id + w1 + w2 + w3 + w4 + w5 + } + } + } + }, + }, + }, + } + "### + ); +} diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/debug_max_evaluated_plans_configuration.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/debug_max_evaluated_plans_configuration.rs index 690ad722a7..3147c44fa7 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/debug_max_evaluated_plans_configuration.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/debug_max_evaluated_plans_configuration.rs @@ -15,7 +15,7 @@ const SUBGRAPH: &str = r#" type Query { t: T @shareable } - + type T @key(fields: "id") @shareable { id: ID! v1: Int @@ -200,7 +200,7 @@ fn correctly_generate_plan_built_from_some_non_individually_optimal_branch_optio type Query { t: T @shareable } - + type T { x: Int @shareable } @@ -209,7 +209,7 @@ fn correctly_generate_plan_built_from_some_non_individually_optimal_branch_optio type Query { t: T @shareable } - + type T @key(fields: "id") { id: ID! } @@ -276,7 +276,7 @@ fn does_not_error_on_some_complex_fetch_group_dependencies() { type Query { me: User @shareable } - + type User { id: ID! @shareable } @@ -285,12 +285,12 @@ fn does_not_error_on_some_complex_fetch_group_dependencies() { type Query { me: User @shareable } - + type User @key(fields: "id") { id: ID! p: Props } - + type Props { id: ID! @shareable } @@ -299,29 +299,29 @@ fn does_not_error_on_some_complex_fetch_group_dependencies() { type Query { me: User @shareable } - + type User { id: ID! @shareable } - + type Props @key(fields: "id") { id: ID! v0: Int t: T } - + type T { id: ID! 
v1: V v2: V - + # Note: this field is not queried, but matters to the reproduction this test exists # for because it prevents some optimizations that would happen without it (namely, # without it, the planner would notice that everything after type T is guaranteed # to be local to the subgraph). user: User } - + type V { x: Int } @@ -396,7 +396,7 @@ fn does_not_evaluate_plans_relying_on_a_key_field_to_fetch_that_same_field() { type Query { t: T } - + type T @key(fields: "otherId") { otherId: ID! } @@ -468,8 +468,6 @@ fn does_not_evaluate_plans_relying_on_a_key_field_to_fetch_that_same_field() { } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: investigate this failure fn avoid_considering_indirect_paths_from_the_root_when_a_more_direct_one_exists() { // Each of id/v0 can have 2 options each, so that's 4 combinations. If we were to consider 2 options for each // v1 value however, that would multiple it by 2 each times, so it would 32 possibilities. We limit the number of @@ -487,7 +485,7 @@ fn avoid_considering_indirect_paths_from_the_root_when_a_more_direct_one_exists( type Query { t: T @shareable } - + type T @key(fields: "id") { id: ID! v0: Int @shareable @@ -497,7 +495,7 @@ fn avoid_considering_indirect_paths_from_the_root_when_a_more_direct_one_exists( type Query { t: T @shareable } - + type T @key(fields: "id") { id: ID! v0: Int @shareable diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs index edfdf0d972..876334fa5d 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs @@ -1,6 +1,8 @@ use apollo_federation::query_plan::query_planner::QueryPlannerConfig; const SUBGRAPH: &str = r#" + directive @custom on INLINE_FRAGMENT | FRAGMENT_SPREAD + type Query { t: T t2: T @@ -21,11 +23,9 @@ const SUBGRAPH: &str = r#" "#; #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: generate_query_fragments (https://apollographql.atlassian.net/browse/FED-76) fn it_respects_generate_query_fragments_option() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, ..Default::default() }, + config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, Subgraph1: SUBGRAPH, ); assert_plan!( @@ -48,34 +48,32 @@ fn it_respects_generate_query_fragments_option() { // Note: `... on B {}` won't be replaced, since it has only one field. @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - __typename - ..._generated_onA2_0 - ... on B { - z - } + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + __typename + ..._generated_onA2_0 + ... on B { + z } } - - fragment _generated_onA2_0 on A { - x - y - } - }, - } + } + + fragment _generated_onA2_0 on A { + x + y + } + }, + } "### ); } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: generate_query_fragments (https://apollographql.atlassian.net/browse/FED-76) fn it_handles_nested_fragment_generation() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, ..Default::default() }, + config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, Subgraph1: SUBGRAPH, ); assert_plan!( @@ -102,43 +100,41 @@ fn it_handles_nested_fragment_generation() { // Note: `... 
on B {}` won't be replaced, since it has only one field. @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - __typename - ..._generated_onA3_0 - } + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + __typename + ..._generated_onA3_0 } + } - fragment _generated_onA2_0 on A { - x - y - } + fragment _generated_onA2_0 on A { + x + y + } - fragment _generated_onA3_0 on A { - x - y - t { - __typename - ..._generated_onA2_0 - ... on B { - z - } + fragment _generated_onA3_0 on A { + x + y + t { + __typename + ..._generated_onA2_0 + ... on B { + z } } - }, - } - "### + } + }, + } + "### ); } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: generate_query_fragments (https://apollographql.atlassian.net/browse/FED-76) fn it_handles_fragments_with_one_non_leaf_field() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, ..Default::default() }, + config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, Subgraph1: SUBGRAPH, ); @@ -158,35 +154,103 @@ fn it_handles_fragments_with_one_non_leaf_field() { } "#, @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - __typename - ..._generated_onA1_0 + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + __typename + ..._generated_onA1_0 + } + } + + fragment _generated_onA1_0 on A { + t { + __typename + ... on B { + z } } + } + }, + } + "### + ); +} - fragment _generated_onA1_0 on A { - t { - __typename - ... on B { - z +#[test] +fn it_migrates_skip_include() { + let planner = planner!( + config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, + Subgraph1: SUBGRAPH, + ); + assert_plan!( + &planner, + r#" + query ($var: Boolean!) { + t { + ... on A { + x + y + t { + ... on A @include(if: $var) { + x + y + } + ... on A @skip(if: $var) { + x + y + } + ... on A @custom { + x + y + } } } } - }, - } - "### + } + "#, + + // Note: `... on A @custom {}` won't be replaced, since it has a custom directive. Even + // though it also supports being used on a named fragment spread, we cannot assume that + // the behaviour is exactly the same. + @r###" + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + __typename + ..._generated_onA3_0 + } + } + + fragment _generated_onA3_0 on A { + x + y + t { + __typename + ... on A @include(if: $var) { + x + y + } + ... on A @skip(if: $var) { + x + y + } + ... 
on A @custom { + x + y + } + } + } + }, + } + "### ); } - #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: generate_query_fragments (https://apollographql.atlassian.net/browse/FED-76) fn it_identifies_and_reuses_equivalent_fragments_that_arent_identical() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, ..Default::default() }, + config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, Subgraph1: SUBGRAPH, ); assert_plan!( @@ -208,35 +272,33 @@ fn it_identifies_and_reuses_equivalent_fragments_that_arent_identical() { } "#, @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - __typename - ..._generated_onA2_0 - } - t2 { - __typename - ..._generated_onA2_0 - } + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + __typename + ..._generated_onA2_0 } - - fragment _generated_onA2_0 on A { - x - y + t2 { + __typename + ..._generated_onA2_0 } - }, - } + } + + fragment _generated_onA2_0 on A { + x + y + } + }, + } "### ); } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: generate_query_fragments (https://apollographql.atlassian.net/browse/FED-76) fn fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment_definitions() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, ..Default::default() }, + config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, Subgraph1: SUBGRAPH, ); assert_plan!( @@ -258,30 +320,30 @@ fn fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment } "#, @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - __typename - ..._generated_onA2_0 - } - t2 { - __typename - ..._generated_onA2_1 - } + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + __typename + ..._generated_onA2_0 } - - fragment _generated_onA2_0 on A { - x - y + t2 { + __typename + ..._generated_onA2_1 } + } - fragment _generated_onA2_1 on A { - y - z - } - }, - } + fragment _generated_onA2_0 on A { + x + y + } + + fragment _generated_onA2_1 on A { + y + z + } + }, + } "### ); } diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs index 986c43a0d2..23508b7d02 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/handles_operations_with_directives.rs @@ -39,8 +39,6 @@ const SUBGRAPH_B: &str = r#" "#; #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: investigate this failure (missing directives on fetch operation) fn test_if_directives_at_the_operation_level_are_passed_down_to_subgraph_queries() { let planner = planner!( subgraphA: SUBGRAPH_A, @@ -134,8 +132,8 @@ fn test_if_directives_at_the_operation_level_are_passed_down_to_subgraph_queries insta::assert_snapshot!(b_fetch_nodes[0].operation_document, @r#" query Operation__subgraphB__1($representations: [_Any!]!) @operation { _entities(representations: $representations) { - ... on Foo { - baz @field + ... on T { + f1 @field } } } @@ -144,17 +142,18 @@ fn test_if_directives_at_the_operation_level_are_passed_down_to_subgraph_queries insta::assert_snapshot!(b_fetch_nodes[1].operation_document, @r#" query Operation__subgraphB__2($representations: [_Any!]!) 
@operation { _entities(representations: $representations) { - ... on T { - f1 @field + ... on Foo { + baz @field } } } "#); + // This checks a regression where the `variable_usages` included the `representations` variable. + assert_eq!(b_fetch_nodes[0].variable_usages.len(), 0); + assert_eq!(b_fetch_nodes[1].variable_usages.len(), 0); } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: investigate this failure (missing `mutation` keyword and operation name) fn test_if_directives_on_mutations_are_passed_down_to_subgraph_queries() { let planner = planner!( subgraphA: SUBGRAPH_A, @@ -173,7 +172,7 @@ fn test_if_directives_on_mutations_are_passed_down_to_subgraph_queries() { @r###" QueryPlan { Fetch(service: "subgraphA") { - mutation TestMutation__subgraphA__0 { + { updateFoo(bar: "something") @field { id @field bar @field @@ -198,8 +197,6 @@ fn test_if_directives_on_mutations_are_passed_down_to_subgraph_queries() { } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: investigate this failure (missing directives on fetch query) fn test_if_directives_with_arguments_applied_on_queries_are_ok() { let planner = planner!( Subgraph1: r#" @@ -244,8 +241,6 @@ fn test_if_directives_with_arguments_applied_on_queries_are_ok() { } #[test] -#[should_panic(expected = r#"unused variable: `$some_var`"#)] -// TODO: investigate this failure fn subgraph_query_retains_the_query_variables_used_in_the_directives_applied_to_the_query() { let planner = planner!( Subgraph1: r#" @@ -267,7 +262,15 @@ fn subgraph_query_retains_the_query_variables_used_in_the_directives_applied_to_ test } "#, - @r#""# + @r###" + QueryPlan { + Fetch(service: "Subgraph1") { + { + test + } + }, + } + "### ); let fetch_nodes = find_fetch_nodes_for_subgraph("Subgraph1", &plan); diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/merged_abstract_types_handling.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/merged_abstract_types_handling.rs index 31b5503f42..82eade721c 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/merged_abstract_types_handling.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/merged_abstract_types_handling.rs @@ -632,8 +632,6 @@ fn handles_spread_unions_correctly() { } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: investigate this failure (reverse order of parallel fetches) fn handles_case_of_key_chains_in_parallel_requires() { let planner = planner!( Subgraph1: r#" @@ -706,6 +704,22 @@ fn handles_case_of_key_chains_in_parallel_requires() { } }, Parallel { + Flatten(path: "t") { + Fetch(service: "Subgraph3") { + { + ... on T2 { + __typename + id + y + } + } => + { + ... on T2 { + z + } + } + }, + }, Sequence { Flatten(path: "t") { Fetch(service: "Subgraph2") { @@ -738,22 +752,6 @@ fn handles_case_of_key_chains_in_parallel_requires() { }, }, }, - Flatten(path: "t") { - Fetch(service: "Subgraph3") { - { - ... on T2 { - __typename - id - y - } - } => - { - ... 
on T2 { - z - } - } - }, - }, }, }, } diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs index 2b270770e1..bcaeac067d 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/requires.rs @@ -877,8 +877,6 @@ fn it_handles_longer_require_chain() { } #[test] -#[should_panic(expected = "snapshot assertion")] -// TODO: investigate this failure fn it_handles_complex_require_chain() { // Another "require chain" test but with more complexity as we have a require on multiple fields, some of which being // nested, and having requirements of their own. @@ -994,40 +992,6 @@ fn it_handles_complex_require_chain() { } }, Parallel { - Sequence { - Flatten(path: "t") { - Fetch(service: "Subgraph2") { - { - ... on T { - __typename - id - } - } => - { - ... on T { - inner2_required - inner1 - } - } - }, - }, - Flatten(path: "t") { - Fetch(service: "Subgraph3") { - { - ... on T { - __typename - inner2_required - id - } - } => - { - ... on T { - inner2 - } - } - }, - }, - }, Flatten(path: "t") { Fetch(service: "Subgraph7") { { @@ -1129,6 +1093,40 @@ fn it_handles_complex_require_chain() { }, }, }, + Sequence { + Flatten(path: "t") { + Fetch(service: "Subgraph2") { + { + ... on T { + __typename + id + } + } => + { + ... on T { + inner2_required + inner1 + } + } + }, + }, + Flatten(path: "t") { + Fetch(service: "Subgraph3") { + { + ... on T { + __typename + inner2_required + id + } + } => + { + ... on T { + inner2 + } + } + }, + }, + }, }, Flatten(path: "t") { Fetch(service: "Subgraph5") { @@ -1453,3 +1451,391 @@ fn it_require_of_multiple_field_when_one_is_also_a_key_to_reach_another() { "### ); } + +#[test] +fn it_handles_multiple_requires_with_multiple_fetches() { + let planner = planner!( + s1: r#" + type Query { + t: T + } + + interface I { + id: ID! + name: String! + } + + type T implements I @key(fields: "id") { + id: ID! + name: String! @shareable + x: X @shareable + v: V @shareable + } + + type U implements I @key(fields: "id") { + id: ID! + name: String! @external + } + + type V @key(fields: "id") @key(fields: "internalID") { + id: ID! + internalID: ID! + } + + type X @key(fields: "t { id }") { + t: T! + isX: Boolean! + } + "#, + s2: r#" + type V @key(fields: "id") { + id: ID! + internalID: ID! @shareable + y: Y! @shareable + zz: [Z!] @external + } + + type Z { + u: U! @external + } + + type Y @key(fields: "id") { + id: ID! + isY: Boolean! @external + } + + interface I { + id: ID! + name: String! + } + + type T implements I @key(fields: "id") { + id: ID! + name: String! @external + x: X @external + v: V @external + foo: [String!]! @requires(fields: "x { isX }\nv { y { isY } }") + bar: [I!]! @requires(fields: "x { isX }\nv { y { isY } zz { u { id } } }") + } + + type X { + isX: Boolean! @external + } + + type U implements I @key(fields: "id") { + id: ID! + name: String! @external + } + "#, + s3: r#" + type V @key(fields: "internalID") { + internalID: ID! + y: Y! @shareable + } + + type Y @key(fields: "id") { + id: ID! + isY: Boolean! + } + "#, + s4: r#" + type V @key(fields: "id") @key(fields: "internalID") { + id: ID! + internalID: ID! + zz: [Z!] @override(from: "s1") + } + + type Z { + free: Boolean + u: U! + v: V! + } + + interface I { + id: ID! + name: String! + } + + type T implements I @key(fields: "id") { + id: ID! + name: String! 
@shareable + x: X @shareable + v: V @shareable + } + + type X @key(fields: "t { id }", resolvable: false) { + t: T! @external + } + + type U implements I @key(fields: "id") { + id: ID! + name: String! @override(from: "s1") + } + "#, + ); + assert_plan!( + &planner, + r#" + { + t { + foo + bar { + name + } + } + } + "#, + + @r###" + QueryPlan { + Sequence { + Fetch(service: "s1") { + { + t { + __typename + id + x { + isX + } + v { + __typename + internalID + } + } + } + }, + Flatten(path: "t") { + Fetch(service: "s4") { + { + ... on T { + __typename + id + } + } => + { + ... on T { + v { + __typename + internalID + zz { + u { + id + } + } + } + } + } + }, + }, + Flatten(path: "t.v") { + Fetch(service: "s3") { + { + ... on V { + __typename + internalID + } + } => + { + ... on V { + y { + isY + } + } + } + }, + }, + Parallel { + Flatten(path: "t") { + Fetch(service: "s2") { + { + ... on T { + __typename + x { + isX + } + v { + y { + isY + } + } + id + } + } => + { + ... on T { + foo + } + } + }, + }, + Sequence { + Flatten(path: "t") { + Fetch(service: "s2") { + { + ... on T { + __typename + x { + isX + } + v { + y { + isY + } + zz { + u { + id + } + } + } + id + } + } => + { + ... on T { + bar { + __typename + ... on T { + __typename + id + } + ... on U { + __typename + id + } + } + } + } + }, + }, + Flatten(path: "t.bar.@") { + Fetch(service: "s4") { + { + ... on T { + __typename + id + } + ... on U { + __typename + id + } + } => + { + ... on T { + name + } + ... on U { + name + } + } + }, + }, + }, + }, + }, + } + "### + ); +} + +#[test] +fn handles_requires_from_supergraph() { + // This test verifies that @requires field selection set does not have to be locally satisfiable + // and is valid as long as it is satisfiable in the supergraph. + // In the test below, type U implements interface I only in the Subgraph1, but we can still use + // that type information in the @requires selection set in Subgraph2. + // + // NOTE: While GraphQL does not allow you to return raw interface data, it is still a valid schema. + // Since our interface field is marked as @external, its value should always be provided from + // other subgraph and should not be resolved locally (as that would lead to a runtime exception + // as we don't have any concrete type to return there). + let planner = planner!( + Subgraph1: r#" + type Query { + t: T + } + + type T @key(fields: "id") { + id: ID! + i: I + } + + interface I { + name: String + } + + type U implements I { + name: String @shareable + value: String + } + "#, + Subgraph2: r#" + interface I { + name: String + } + + type U { + name: String @shareable + value: String @external + } + + type T @key(fields: "id") { + id: ID! + i: I @external + r: Int @requires(fields: "i { name ... on U { value } }") + } + "#, + ); + assert_plan!( + &planner, + r#" + { + t { + r + } + } + "#, + + @r###" + QueryPlan { + Sequence { + Fetch(service: "Subgraph1") { + { + t { + __typename + id + i { + __typename + name + ... on U { + value + } + } + } + } + }, + Flatten(path: "t") { + Fetch(service: "Subgraph2") { + { + ... on T { + __typename + id + i { + name + ... on U { + value + } + } + } + } => + { + ... 
on T { + r + } + } + }, + }, + }, + } + "### + ); +} diff --git a/apollo-federation/tests/query_plan/supergraphs/allows_setting_down_to_1.graphql b/apollo-federation/tests/query_plan/supergraphs/allows_setting_down_to_1.graphql index 7abb1b3fa0..0b6baf2e68 100644 --- a/apollo-federation/tests/query_plan/supergraphs/allows_setting_down_to_1.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/allows_setting_down_to_1.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: fd2cfde36cc3d0a981e6c3636aaeea3a6aad4424 +# Composed from subgraphs with hash: bf831e2e6890f60e5c8e93bc52ce549323cb23e8 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/avoid_considering_indirect_paths_from_the_root_when_a_more_direct_one_exists.graphql b/apollo-federation/tests/query_plan/supergraphs/avoid_considering_indirect_paths_from_the_root_when_a_more_direct_one_exists.graphql index 8c7da0d906..2a7e9c07f9 100644 --- a/apollo-federation/tests/query_plan/supergraphs/avoid_considering_indirect_paths_from_the_root_when_a_more_direct_one_exists.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/avoid_considering_indirect_paths_from_the_root_when_a_more_direct_one_exists.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: b4f21df2efd31ed379be10cafbb341c080179593 +# Composed from subgraphs with hash: 995342f0aeb7c35ebe233102083b817ae5d9b0a8 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/can_be_set_to_an_arbitrary_number.graphql b/apollo-federation/tests/query_plan/supergraphs/can_be_set_to_an_arbitrary_number.graphql index 7abb1b3fa0..0b6baf2e68 100644 --- a/apollo-federation/tests/query_plan/supergraphs/can_be_set_to_an_arbitrary_number.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/can_be_set_to_an_arbitrary_number.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: fd2cfde36cc3d0a981e6c3636aaeea3a6aad4424 +# Composed from subgraphs with hash: bf831e2e6890f60e5c8e93bc52ce549323cb23e8 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/correctly_generate_plan_built_from_some_non_individually_optimal_branch_options.graphql b/apollo-federation/tests/query_plan/supergraphs/correctly_generate_plan_built_from_some_non_individually_optimal_branch_options.graphql index 0a4b8a4af1..2766e3b307 100644 --- a/apollo-federation/tests/query_plan/supergraphs/correctly_generate_plan_built_from_some_non_individually_optimal_branch_options.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/correctly_generate_plan_built_from_some_non_individually_optimal_branch_options.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: 86f4ab2b6c51a81a9ecc193fdf764487cb8c7ac8 +# Composed from subgraphs with hash: 38b15e780cba3d9d7cb6288e027386e3d612102a schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/does_not_error_on_some_complex_fetch_group_dependencies.graphql b/apollo-federation/tests/query_plan/supergraphs/does_not_error_on_some_complex_fetch_group_dependencies.graphql index 9cef2b06fc..dbc5271859 100644 --- 
a/apollo-federation/tests/query_plan/supergraphs/does_not_error_on_some_complex_fetch_group_dependencies.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/does_not_error_on_some_complex_fetch_group_dependencies.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: fb4a75fb881e3766651b083f97e1ec452f842582 +# Composed from subgraphs with hash: f4f751d2b348c0947b2f1dbca4cea1c987ff7d02 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/does_not_evaluate_plans_relying_on_a_key_field_to_fetch_that_same_field.graphql b/apollo-federation/tests/query_plan/supergraphs/does_not_evaluate_plans_relying_on_a_key_field_to_fetch_that_same_field.graphql index 5aa9f72b5d..a55159cae8 100644 --- a/apollo-federation/tests/query_plan/supergraphs/does_not_evaluate_plans_relying_on_a_key_field_to_fetch_that_same_field.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/does_not_evaluate_plans_relying_on_a_key_field_to_fetch_that_same_field.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: 1d139caaa150bb8da0e7fe34dba387685d526c41 +# Composed from subgraphs with hash: 15c059c34b90d54a9c27d2ad67c89307a1280a1f schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment_definitions.graphql b/apollo-federation/tests/query_plan/supergraphs/fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment_definitions.graphql index c887dbab71..eaf9c3e94d 100644 --- a/apollo-federation/tests/query_plan/supergraphs/fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment_definitions.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment_definitions.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: c53638411cdc10eaf883495993b90fdcc03b8d0e +# Composed from subgraphs with hash: 9d07e44c7cffd48b0677fce186f5ba41a864bc13 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @@ -6,6 +6,8 @@ schema query: Query } +directive @custom on FRAGMENT_SPREAD | INLINE_FRAGMENT + directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION diff --git a/apollo-federation/tests/query_plan/supergraphs/handles_requires_from_supergraph.graphql b/apollo-federation/tests/query_plan/supergraphs/handles_requires_from_supergraph.graphql new file mode 100644 index 0000000000..a3f47c437f --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/handles_requires_from_supergraph.graphql @@ -0,0 +1,74 @@ +# Composed from subgraphs with hash: 46a2d6c6cf9956c08daa5b3faa018245cb5f9cfe +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query +} + +directive @join__enumValue(graph: join__Graph!) 
repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +interface I + @join__type(graph: SUBGRAPH1) + @join__type(graph: SUBGRAPH2) +{ + name: String +} + +scalar join__FieldSet + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "Subgraph1", url: "none") + SUBGRAPH2 @join__graph(name: "Subgraph2", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: SUBGRAPH1) + @join__type(graph: SUBGRAPH2) +{ + t: T @join__field(graph: SUBGRAPH1) +} + +type T + @join__type(graph: SUBGRAPH1, key: "id") + @join__type(graph: SUBGRAPH2, key: "id") +{ + id: ID! + i: I @join__field(graph: SUBGRAPH1) @join__field(graph: SUBGRAPH2, external: true) + r: Int @join__field(graph: SUBGRAPH2, requires: "i { name ... on U { value } }") +} + +type U implements I + @join__implements(graph: SUBGRAPH1, interface: "I") + @join__type(graph: SUBGRAPH1) + @join__type(graph: SUBGRAPH2) +{ + name: String + value: String @join__field(graph: SUBGRAPH1) @join__field(graph: SUBGRAPH2, external: true) +} diff --git a/apollo-federation/tests/query_plan/supergraphs/it_handles_fragments_with_one_non_leaf_field.graphql b/apollo-federation/tests/query_plan/supergraphs/it_handles_fragments_with_one_non_leaf_field.graphql index c887dbab71..eaf9c3e94d 100644 --- a/apollo-federation/tests/query_plan/supergraphs/it_handles_fragments_with_one_non_leaf_field.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/it_handles_fragments_with_one_non_leaf_field.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: c53638411cdc10eaf883495993b90fdcc03b8d0e +# Composed from subgraphs with hash: 9d07e44c7cffd48b0677fce186f5ba41a864bc13 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @@ -6,6 +6,8 @@ schema query: Query } +directive @custom on FRAGMENT_SPREAD | INLINE_FRAGMENT + directive @join__enumValue(graph: join__Graph!) 
repeatable on ENUM_VALUE directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION diff --git a/apollo-federation/tests/query_plan/supergraphs/it_handles_multiple_requires_with_multiple_fetches.graphql b/apollo-federation/tests/query_plan/supergraphs/it_handles_multiple_requires_with_multiple_fetches.graphql new file mode 100644 index 0000000000..8e4d26a88e --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/it_handles_multiple_requires_with_multiple_fetches.graphql @@ -0,0 +1,130 @@ +# Composed from subgraphs with hash: 461e4a611a1faf2558d6ee6e3de4af24a043fc16 +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query +} + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +interface I + @join__type(graph: S1) + @join__type(graph: S2) + @join__type(graph: S4) +{ + id: ID! + name: String! +} + +scalar join__FieldSet + +enum join__Graph { + S1 @join__graph(name: "s1", url: "none") + S2 @join__graph(name: "s2", url: "none") + S3 @join__graph(name: "s3", url: "none") + S4 @join__graph(name: "s4", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: S1) + @join__type(graph: S2) + @join__type(graph: S3) + @join__type(graph: S4) +{ + t: T @join__field(graph: S1) +} + +type T implements I + @join__implements(graph: S1, interface: "I") + @join__implements(graph: S2, interface: "I") + @join__implements(graph: S4, interface: "I") + @join__type(graph: S1, key: "id") + @join__type(graph: S2, key: "id") + @join__type(graph: S4, key: "id") +{ + id: ID! + name: String! @join__field(graph: S1) @join__field(graph: S2, external: true) @join__field(graph: S4) + x: X @join__field(graph: S1) @join__field(graph: S2, external: true) @join__field(graph: S4) + v: V @join__field(graph: S1) @join__field(graph: S2, external: true) @join__field(graph: S4) + foo: [String!]! @join__field(graph: S2, requires: "x { isX }\nv { y { isY } }") + bar: [I!]! 
@join__field(graph: S2, requires: "x { isX }\nv { y { isY } zz { u { id } } }") +} + +type U implements I + @join__implements(graph: S1, interface: "I") + @join__implements(graph: S2, interface: "I") + @join__implements(graph: S4, interface: "I") + @join__type(graph: S1, key: "id") + @join__type(graph: S2, key: "id") + @join__type(graph: S4, key: "id") +{ + id: ID! + name: String! @join__field(graph: S1, external: true) @join__field(graph: S2, external: true) @join__field(graph: S4, override: "s1") +} + +type V + @join__type(graph: S1, key: "id") + @join__type(graph: S1, key: "internalID") + @join__type(graph: S2, key: "id") + @join__type(graph: S3, key: "internalID") + @join__type(graph: S4, key: "id") + @join__type(graph: S4, key: "internalID") +{ + id: ID! @join__field(graph: S1) @join__field(graph: S2) @join__field(graph: S4) + internalID: ID! + y: Y! @join__field(graph: S2) @join__field(graph: S3) + zz: [Z!] @join__field(graph: S2, external: true) @join__field(graph: S4, override: "s1") +} + +type X + @join__type(graph: S1, key: "t { id }") + @join__type(graph: S2) + @join__type(graph: S4, key: "t { id }", resolvable: false) +{ + t: T! @join__field(graph: S1) @join__field(graph: S4, external: true) + isX: Boolean! @join__field(graph: S1) @join__field(graph: S2, external: true) +} + +type Y + @join__type(graph: S2, key: "id") + @join__type(graph: S3, key: "id") +{ + id: ID! + isY: Boolean! @join__field(graph: S2, external: true) @join__field(graph: S3) +} + +type Z + @join__type(graph: S2) + @join__type(graph: S4) +{ + u: U! @join__field(graph: S2, external: true) @join__field(graph: S4) + free: Boolean @join__field(graph: S4) + v: V! @join__field(graph: S4) +} diff --git a/apollo-federation/tests/query_plan/supergraphs/it_handles_nested_fragment_generation.graphql b/apollo-federation/tests/query_plan/supergraphs/it_handles_nested_fragment_generation.graphql index c887dbab71..eaf9c3e94d 100644 --- a/apollo-federation/tests/query_plan/supergraphs/it_handles_nested_fragment_generation.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/it_handles_nested_fragment_generation.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: c53638411cdc10eaf883495993b90fdcc03b8d0e +# Composed from subgraphs with hash: 9d07e44c7cffd48b0677fce186f5ba41a864bc13 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @@ -6,6 +6,8 @@ schema query: Query } +directive @custom on FRAGMENT_SPREAD | INLINE_FRAGMENT + directive @join__enumValue(graph: join__Graph!) 
repeatable on ENUM_VALUE directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION diff --git a/apollo-federation/tests/query_plan/supergraphs/it_identifies_and_reuses_equivalent_fragments_that_arent_identical.graphql b/apollo-federation/tests/query_plan/supergraphs/it_identifies_and_reuses_equivalent_fragments_that_arent_identical.graphql index c887dbab71..eaf9c3e94d 100644 --- a/apollo-federation/tests/query_plan/supergraphs/it_identifies_and_reuses_equivalent_fragments_that_arent_identical.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/it_identifies_and_reuses_equivalent_fragments_that_arent_identical.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: c53638411cdc10eaf883495993b90fdcc03b8d0e +# Composed from subgraphs with hash: 9d07e44c7cffd48b0677fce186f5ba41a864bc13 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @@ -6,6 +6,8 @@ schema query: Query } +directive @custom on FRAGMENT_SPREAD | INLINE_FRAGMENT + directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION diff --git a/apollo-federation/tests/query_plan/supergraphs/it_migrates_skip_include.graphql b/apollo-federation/tests/query_plan/supergraphs/it_migrates_skip_include.graphql new file mode 100644 index 0000000000..eaf9c3e94d --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/it_migrates_skip_include.graphql @@ -0,0 +1,71 @@ +# Composed from subgraphs with hash: 9d07e44c7cffd48b0677fce186f5ba41a864bc13 +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query +} + +directive @custom on FRAGMENT_SPREAD | INLINE_FRAGMENT + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +type A + @join__type(graph: SUBGRAPH1) +{ + x: Int + y: Int + z: Int + t: T +} + +type B + @join__type(graph: SUBGRAPH1) +{ + z: Int +} + +scalar join__FieldSet + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "Subgraph1", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. 
+ """ + EXECUTION +} + +type Query + @join__type(graph: SUBGRAPH1) +{ + t: T + t2: T +} + +union T + @join__type(graph: SUBGRAPH1) + @join__unionMember(graph: SUBGRAPH1, member: "A") + @join__unionMember(graph: SUBGRAPH1, member: "B") + = A | B diff --git a/apollo-federation/tests/query_plan/supergraphs/it_respects_generate_query_fragments_option.graphql b/apollo-federation/tests/query_plan/supergraphs/it_respects_generate_query_fragments_option.graphql index c887dbab71..eaf9c3e94d 100644 --- a/apollo-federation/tests/query_plan/supergraphs/it_respects_generate_query_fragments_option.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/it_respects_generate_query_fragments_option.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: c53638411cdc10eaf883495993b90fdcc03b8d0e +# Composed from subgraphs with hash: 9d07e44c7cffd48b0677fce186f5ba41a864bc13 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @@ -6,6 +6,8 @@ schema query: Query } +directive @custom on FRAGMENT_SPREAD | INLINE_FRAGMENT + directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION diff --git a/apollo-federation/tests/query_plan/supergraphs/multiplication_overflow_in_reduce_options_if_needed.graphql b/apollo-federation/tests/query_plan/supergraphs/multiplication_overflow_in_reduce_options_if_needed.graphql index 7abb1b3fa0..0b6baf2e68 100644 --- a/apollo-federation/tests/query_plan/supergraphs/multiplication_overflow_in_reduce_options_if_needed.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/multiplication_overflow_in_reduce_options_if_needed.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: fd2cfde36cc3d0a981e6c3636aaeea3a6aad4424 +# Composed from subgraphs with hash: bf831e2e6890f60e5c8e93bc52ce549323cb23e8 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/query_plan/supergraphs/test_merging_fetches_do_not_create_cycle_in_fetch_dependency_graph.graphql b/apollo-federation/tests/query_plan/supergraphs/test_merging_fetches_do_not_create_cycle_in_fetch_dependency_graph.graphql new file mode 100644 index 0000000000..aea8d395a8 --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/test_merging_fetches_do_not_create_cycle_in_fetch_dependency_graph.graphql @@ -0,0 +1,94 @@ +# Composed from subgraphs with hash: 58cfa42df5c5f20fb0fbe43d4a506b3654439de1 +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) +{ + query: Query +} + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! 
= false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +scalar join__FieldSet + +enum join__Graph { + A @join__graph(name: "A", url: "none") + B @join__graph(name: "B", url: "none") + C @join__graph(name: "C", url: "none") + S @join__graph(name: "S", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: A) + @join__type(graph: B) + @join__type(graph: C) + @join__type(graph: S) +{ + start: T! @join__field(graph: S) +} + +type T + @join__type(graph: A, key: "id") + @join__type(graph: B, key: "id") + @join__type(graph: S, key: "id") +{ + id: String! @join__field(graph: A) @join__field(graph: B, external: true) @join__field(graph: S) + u: U! @join__field(graph: A) @join__field(graph: B) +} + +type U + @join__type(graph: A, key: "id") + @join__type(graph: B, key: "id") + @join__type(graph: C, key: "id", extension: true) +{ + id: ID! + a: String! @join__field(graph: A) @join__field(graph: B) @join__field(graph: C, external: true) + b: String @join__field(graph: A) @join__field(graph: C, external: true) + w: W @join__field(graph: C, requires: "a b") +} + +type W + @join__type(graph: B, key: "id", extension: true) + @join__type(graph: C, key: "id") +{ + id: ID + y: Y @join__field(graph: C) + w1: Int @join__field(graph: C) + w2: Int @join__field(graph: C) + w3: Int @join__field(graph: C) + w4: Int @join__field(graph: C) + w5: Int @join__field(graph: C) +} + +type Y + @join__type(graph: C) +{ + y1: Int + y2: Int + y3: Int +} diff --git a/apollo-federation/tests/query_plan/supergraphs/works_when_unset.graphql b/apollo-federation/tests/query_plan/supergraphs/works_when_unset.graphql index 7abb1b3fa0..0b6baf2e68 100644 --- a/apollo-federation/tests/query_plan/supergraphs/works_when_unset.graphql +++ b/apollo-federation/tests/query_plan/supergraphs/works_when_unset.graphql @@ -1,4 +1,4 @@ -# Composed from subgraphs with hash: fd2cfde36cc3d0a981e6c3636aaeea3a6aad4424 +# Composed from subgraphs with hash: bf831e2e6890f60e5c8e93bc52ce549323cb23e8 schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) diff --git a/apollo-federation/tests/snapshots/main__extract_subgraphs__can_extract_subgraph.snap b/apollo-federation/tests/snapshots/main__extract_subgraphs__can_extract_subgraph.snap index f036c14999..324709bd56 100644 --- a/apollo-federation/tests/snapshots/main__extract_subgraphs__can_extract_subgraph.snap +++ b/apollo-federation/tests/snapshots/main__extract_subgraphs__can_extract_subgraph.snap @@ -8,7 +8,7 @@ schema { query: Query } -extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.5") +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA @@ -38,6 +38,10 @@ directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) 
on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + scalar link__Import enum link__Purpose { @@ -85,7 +89,7 @@ schema { query: Query } -extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.5") +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA @@ -115,6 +119,10 @@ directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + scalar link__Import enum link__Purpose { diff --git a/apollo-federation/tests/snapshots/main__extract_subgraphs__does_not_extract_demand_control_directive_name_conflicts.snap b/apollo-federation/tests/snapshots/main__extract_subgraphs__does_not_extract_demand_control_directive_name_conflicts.snap new file mode 100644 index 0000000000..f86e759fca --- /dev/null +++ b/apollo-federation/tests/snapshots/main__extract_subgraphs__does_not_extract_demand_control_directive_name_conflicts.snap @@ -0,0 +1,141 @@ +--- +source: apollo-federation/tests/extract_subgraphs.rs +expression: snapshot +--- +subgraph-a +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) 
on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +scalar ExpensiveInt + +type Query { + a: ExpensiveInt + _service: _Service! +} + +scalar _Any + +type _Service { + sdl: String +} + +subgraph-b +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +type Query { + b: [Int] + _service: _Service! 
+} + +scalar _Any + +type _Service { + sdl: String +} diff --git a/apollo-federation/tests/snapshots/main__extract_subgraphs__does_not_extract_renamed_demand_control_directive_name_conflicts.snap b/apollo-federation/tests/snapshots/main__extract_subgraphs__does_not_extract_renamed_demand_control_directive_name_conflicts.snap new file mode 100644 index 0000000000..f86e759fca --- /dev/null +++ b/apollo-federation/tests/snapshots/main__extract_subgraphs__does_not_extract_renamed_demand_control_directive_name_conflicts.snap @@ -0,0 +1,141 @@ +--- +source: apollo-federation/tests/extract_subgraphs.rs +expression: snapshot +--- +subgraph-a +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +scalar ExpensiveInt + +type Query { + a: ExpensiveInt + _service: _Service! +} + +scalar _Any + +type _Service { + sdl: String +} + +subgraph-b +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) 
on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +type Query { + b: [Int] + _service: _Service! +} + +scalar _Any + +type _Service { + sdl: String +} diff --git a/apollo-federation/tests/snapshots/main__extract_subgraphs__extracts_demand_control_directives.snap b/apollo-federation/tests/snapshots/main__extract_subgraphs__extracts_demand_control_directives.snap new file mode 100644 index 0000000000..319b91d908 --- /dev/null +++ b/apollo-federation/tests/snapshots/main__extract_subgraphs__extracts_demand_control_directives.snap @@ -0,0 +1,166 @@ +--- +source: apollo-federation/tests/extract_subgraphs.rs +expression: snapshot +--- +subgraphWithCost +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) 
repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +enum AorB @federation__cost(weight: 15) { + A + B +} + +scalar ExpensiveInt @federation__cost(weight: 30) + +type ExpensiveObject @federation__cost(weight: 40) { + id: ID +} + +input InputTypeWithCost { + somethingWithCost: Int @federation__cost(weight: 20) +} + +type Query { + fieldWithCost: Int @federation__cost(weight: 5) + argWithCost( + arg: Int @federation__cost(weight: 10), + ): Int + enumWithCost: AorB + inputWithCost(someInput: InputTypeWithCost): Int + scalarWithCost: ExpensiveInt + objectWithCost: ExpensiveObject + _service: _Service! +} + +scalar _Any + +type _Service { + sdl: String +} + +subgraphWithListSize +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) 
repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +type HasInts { + ints: [Int!] +} + +type Query { + fieldWithListSize: [String!] @federation__listSize(assumedSize: 2000, requireOneSlicingArgument: false) + fieldWithDynamicListSize(first: Int!): HasInts @federation__listSize(slicingArguments: ["first"], sizedFields: ["ints"], requireOneSlicingArgument: true) + _service: _Service! +} + +scalar _Any + +type _Service { + sdl: String +} diff --git a/apollo-federation/tests/snapshots/main__extract_subgraphs__extracts_renamed_demand_control_directives.snap b/apollo-federation/tests/snapshots/main__extract_subgraphs__extracts_renamed_demand_control_directives.snap new file mode 100644 index 0000000000..319b91d908 --- /dev/null +++ b/apollo-federation/tests/snapshots/main__extract_subgraphs__extracts_renamed_demand_control_directives.snap @@ -0,0 +1,166 @@ +--- +source: apollo-federation/tests/extract_subgraphs.rs +expression: snapshot +--- +subgraphWithCost +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) 
repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +enum AorB @federation__cost(weight: 15) { + A + B +} + +scalar ExpensiveInt @federation__cost(weight: 30) + +type ExpensiveObject @federation__cost(weight: 40) { + id: ID +} + +input InputTypeWithCost { + somethingWithCost: Int @federation__cost(weight: 20) +} + +type Query { + fieldWithCost: Int @federation__cost(weight: 5) + argWithCost( + arg: Int @federation__cost(weight: 10), + ): Int + enumWithCost: AorB + inputWithCost(someInput: InputTypeWithCost): Int + scalarWithCost: ExpensiveInt + objectWithCost: ExpensiveObject + _service: _Service! +} + +scalar _Any + +type _Service { + sdl: String +} + +subgraphWithListSize +--- +schema { + query: Query +} + +extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.9") + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +directive @federation__key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + +directive @federation__requires(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__provides(fields: federation__FieldSet!) on FIELD_DEFINITION + +directive @federation__external(reason: String) on OBJECT | FIELD_DEFINITION + +directive @federation__tag(name: String!) 
repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @federation__extends on OBJECT | INTERFACE + +directive @federation__shareable on OBJECT | FIELD_DEFINITION + +directive @federation__inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @federation__override(from: String!, label: String) on FIELD_DEFINITION + +directive @federation__composeDirective(name: String) repeatable on SCHEMA + +directive @federation__interfaceObject on OBJECT + +directive @federation__authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__requiresScopes(scopes: [[federation__Scope!]!]!) on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + +directive @federation__cost(weight: Int!) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @federation__listSize(assumedSize: Int, slicingArguments: [String!], sizedFields: [String!], requireOneSlicingArgument: Boolean = true) on FIELD_DEFINITION + +scalar link__Import + +enum link__Purpose { + """ + \`SECURITY\` features provide metadata necessary to securely resolve fields. + """ + SECURITY + """ + \`EXECUTION\` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar federation__FieldSet + +scalar federation__Scope + +type HasInts { + ints: [Int!] +} + +type Query { + fieldWithListSize: [String!] @federation__listSize(assumedSize: 2000, requireOneSlicingArgument: false) + fieldWithDynamicListSize(first: Int!): HasInts @federation__listSize(slicingArguments: ["first"], sizedFields: ["ints"], requireOneSlicingArgument: true) + _service: _Service! +} + +scalar _Any + +type _Service { + sdl: String +} diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index 67369c060a..c051b44df7 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.52.1" +version = "1.53.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index 0aea0a37a9..5cf55bfeda 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.52.1" +version = "1.53.0" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.template.toml b/apollo-router-scaffold/templates/base/Cargo.template.toml index f135df3eb3..9953fb981c 100644 --- a/apollo-router-scaffold/templates/base/Cargo.template.toml +++ b/apollo-router-scaffold/templates/base/Cargo.template.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.52.1" +apollo-router = "1.53.0" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml index 6d9c8e98f6..482be08889 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.52.1" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.53.0" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index c3844426cb..19950eb520 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.52.1" +version = "1.53.0" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" @@ -68,7 +68,7 @@ askama = "0.12.1" access-json = "0.1.0" anyhow = "1.0.86" apollo-compiler.workspace = true -apollo-federation = { path = "../apollo-federation", version = "=1.52.1" } +apollo-federation = { path = "../apollo-federation", version = "=1.53.0" } arc-swap = "1.6.0" async-channel = "1.9.0" async-compression = { version = "0.4.6", features = [ @@ -197,7 +197,7 @@ regex = "1.10.5" reqwest.workspace = true # note: this dependency should _always_ be pinned, prefix the version with an `=` -router-bridge = "=0.5.31+v2.8.5" +router-bridge = "=0.6.0+v2.9.0" rust-embed = { version = "8.4.0", features = ["include-exclude"] } rustls = "0.21.12" diff --git a/apollo-router/src/apollo_studio_interop/mod.rs b/apollo-router/src/apollo_studio_interop/mod.rs index e1571fa672..16b1a2f9ee 100644 --- a/apollo-router/src/apollo_studio_interop/mod.rs +++ b/apollo-router/src/apollo_studio_interop/mod.rs @@ -285,18 +285,17 @@ pub(crate) fn extract_enums_from_response( operation_name: Option<&str>, schema: &Valid, response_body: &Object, -) -> ReferencedEnums { - let mut result = ReferencedEnums::new(); + existing_refs: &mut ReferencedEnums, +) { if let Some(operation) = query.operation(operation_name) { extract_enums_from_selection_set( &operation.selection_set, &query.fragments, schema, response_body, - &mut result, + existing_refs, ); } - result } fn add_enum_value_to_map( diff --git a/apollo-router/src/apollo_studio_interop/tests.rs b/apollo-router/src/apollo_studio_interop/tests.rs index b6c1e03d22..34f457a196 100644 --- a/apollo-router/src/apollo_studio_interop/tests.rs +++ b/apollo-router/src/apollo_studio_interop/tests.rs @@ -130,12 +130,15 @@ fn enums_from_response( let query = 
diff --git a/apollo-router/src/apollo_studio_interop/tests.rs b/apollo-router/src/apollo_studio_interop/tests.rs
index b6c1e03d22..34f457a196 100644
--- a/apollo-router/src/apollo_studio_interop/tests.rs
+++ b/apollo-router/src/apollo_studio_interop/tests.rs
@@ -130,12 +130,15 @@ fn enums_from_response(
     let query = Query::parse(query_str, operation_name, &schema, &config).unwrap();
     let response_body: Object = serde_json::from_str(response_body_str).unwrap();
 
+    let mut result = ReferencedEnums::new();
     extract_enums_from_response(
         Arc::new(query),
         operation_name,
         schema.supergraph_schema(),
         &response_body,
-    )
+        &mut result,
+    );
+    result
 }
 
 #[test(tokio::test)]
diff --git a/apollo-router/src/axum_factory/axum_http_server_factory.rs b/apollo-router/src/axum_factory/axum_http_server_factory.rs
index f687440f4c..08df933dc6 100644
--- a/apollo-router/src/axum_factory/axum_http_server_factory.rs
+++ b/apollo-router/src/axum_factory/axum_http_server_factory.rs
@@ -303,6 +303,7 @@ impl HttpServerFactory for AxumHttpServerFactory {
             main_listener,
             actual_main_listen_address.clone(),
             all_routers.main.1,
+            true,
             all_connections_stopped_sender.clone(),
         );
 
@@ -341,6 +342,7 @@ impl HttpServerFactory for AxumHttpServerFactory {
                     listener,
                     listen_addr.clone(),
                     router,
+                    false,
                     all_connections_stopped_sender.clone(),
                 );
                 (
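
The `true`/`false` arguments threaded into `serve_router_on_listen_addr` above distinguish the main GraphQL listener from extra listeners such as health-check or metrics endpoints, so only real client connections move the `apollo_router_session_count_total` gauge. A self-contained sketch of that gating (the function names here are illustrative):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// Process-wide session gauge, mirroring the idea behind SESSION_COUNT.
static SESSION_COUNT: AtomicU64 = AtomicU64::new(0);

// Hypothetical accept-loop hook: only connections on the main GraphQL
// listener are counted, so extra listeners do not inflate the gauge.
fn on_connection_accepted(main_graphql_port: bool) {
    if main_graphql_port {
        let sessions = SESSION_COUNT.fetch_add(1, Ordering::Acquire) + 1;
        println!("apollo_router_session_count_total = {sessions}");
    }
}

fn on_connection_closed(main_graphql_port: bool) {
    if main_graphql_port {
        let sessions = SESSION_COUNT.fetch_sub(1, Ordering::Acquire) - 1;
        println!("apollo_router_session_count_total = {sessions}");
    }
}

fn main() {
    on_connection_accepted(true); // counted: main GraphQL port
    on_connection_accepted(false); // ignored: e.g. a health-check listener
    on_connection_closed(true);
}
```
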
diff --git a/apollo-router/src/axum_factory/listeners.rs b/apollo-router/src/axum_factory/listeners.rs
index 6be06acdbd..dad439317c 100644
--- a/apollo-router/src/axum_factory/listeners.rs
+++ b/apollo-router/src/axum_factory/listeners.rs
@@ -7,7 +7,6 @@ use std::sync::atomic::AtomicU64;
 use std::sync::atomic::Ordering;
 use std::sync::Arc;
 use std::time::Duration;
-use std::time::Instant;
 
 use axum::response::*;
 use axum::Router;
@@ -31,7 +30,8 @@ use crate::router::ApolloRouterError;
 use crate::router_factory::Endpoint;
 use crate::ListenAddr;
 
-pub(crate) static SESSION_COUNT: AtomicU64 = AtomicU64::new(0);
+static SESSION_COUNT: AtomicU64 = AtomicU64::new(0);
+static MAX_FILE_HANDLES_WARN: AtomicBool = AtomicBool::new(false);
 
 #[derive(Clone, Debug)]
 pub(crate) struct ListenAddrAndRouter(pub(crate) ListenAddr, pub(crate) Router);
@@ -201,6 +201,7 @@ pub(super) fn serve_router_on_listen_addr(
     mut listener: Listener,
     address: ListenAddr,
     router: axum::Router,
+    main_graphql_port: bool,
     all_connections_stopped_sender: mpsc::Sender<()>,
 ) -> (impl Future<Output = ()>, oneshot::Sender<()>) {
     let (shutdown_sender, shutdown_receiver) = oneshot::channel::<()>();
@@ -213,7 +214,6 @@
         tokio::pin!(shutdown_receiver);
 
         let connection_shutdown = Arc::new(Notify::new());
-        let mut max_open_file_warning = None;
 
         let address = address.to_string();
 
@@ -229,16 +229,18 @@
                 match res {
                     Ok(res) => {
-                        if max_open_file_warning.is_some(){
+                        if MAX_FILE_HANDLES_WARN.load(Ordering::SeqCst) {
                             tracing::info!("can accept connections again");
-                            max_open_file_warning = None;
+                            MAX_FILE_HANDLES_WARN.store(false, Ordering::SeqCst);
+                        }
+                        // We only want to recognise sessions if we are the main graphql port.
+                        if main_graphql_port {
+                            let session_count = SESSION_COUNT.fetch_add(1, Ordering::Acquire)+1;
+                            tracing::info!(
+                                value.apollo_router_session_count_total = session_count,
+                                listener = &address
+                            );
                         }
-
-                        let session_count = SESSION_COUNT.fetch_add(1, Ordering::Acquire)+1;
-                        tracing::info!(
-                            value.apollo_router_session_count_total = session_count,
-                            listener = &address
-                        );
 
                         let address = address.clone();
                         tokio::task::spawn(async move {
@@ -356,12 +358,14 @@
                                 }
                             }
 
-                            let session_count = SESSION_COUNT.fetch_sub(1, Ordering::Acquire)-1;
-                            tracing::info!(
-                                value.apollo_router_session_count_total = session_count,
-                                listener = &address
-                            );
-
+                            // We only want to recognise sessions if we are the main graphql port.
+                            if main_graphql_port {
+                                let session_count = SESSION_COUNT.fetch_sub(1, Ordering::Acquire)-1;
+                                tracing::info!(
+                                    value.apollo_router_session_count_total = session_count,
+                                    listener = &address
+                                );
+                            }
                         });
                     }
@@ -419,16 +423,10 @@
                     _ => {
                         match e.raw_os_error() {
                             Some(libc::EMFILE) | Some(libc::ENFILE) => {
-                                match max_open_file_warning {
-                                    None => {
-                                        tracing::error!("reached the max open file limit, cannot accept any new connection");
-                                        max_open_file_warning = Some(Instant::now());
-                                    }
-                                    Some(last) => if Instant::now() - last > Duration::from_secs(60) {
-                                        tracing::error!("still at the max open file limit, cannot accept any new connection");
-                                        max_open_file_warning = Some(Instant::now());
-                                    }
-                                }
+                                tracing::error!(
+                                    "reached the max open file limit, cannot accept any new connection"
+                                );
+                                MAX_FILE_HANDLES_WARN.store(true, Ordering::SeqCst);
                                 tokio::time::sleep(Duration::from_millis(1)).await;
                             }
                             _ => {}
diff --git a/apollo-router/src/axum_factory/snapshots/apollo_router__axum_factory__tests__defer_is_not_buffered.snap b/apollo-router/src/axum_factory/snapshots/apollo_router__axum_factory__tests__defer_is_not_buffered.snap
index e60d87a783..6d6e785101 100644
--- a/apollo-router/src/axum_factory/snapshots/apollo_router__axum_factory__tests__defer_is_not_buffered.snap
+++ b/apollo-router/src/axum_factory/snapshots/apollo_router__axum_factory__tests__defer_is_not_buffered.snap
@@ -21,6 +21,10 @@ expression: parts
       "errors": [
         {
           "message": "couldn't find mock for query {\"query\":\"query TopProducts__reviews__1($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{__typename id product{__typename upc}}}}}\",\"operationName\":\"TopProducts__reviews__1\",\"variables\":{\"representations\":[{\"__typename\":\"Product\",\"upc\":\"1\"},{\"__typename\":\"Product\",\"upc\":\"2\"}]}}",
+          "path": [
+            "topProducts",
+            "@"
+          ],
           "extensions": {
             "code": "FETCH_ERROR"
           }
diff --git a/apollo-router/src/axum_factory/tests.rs b/apollo-router/src/axum_factory/tests.rs
index 3d8f39b5d5..703193683a 100644
--- a/apollo-router/src/axum_factory/tests.rs
+++ b/apollo-router/src/axum_factory/tests.rs
@@ -83,6 +83,7 @@ use crate::services::RouterResponse;
 use crate::services::SupergraphResponse;
 use crate::services::MULTIPART_DEFER_ACCEPT;
 use crate::services::MULTIPART_DEFER_CONTENT_TYPE;
+use crate::spec::Schema;
 use crate::test_harness::http_client;
 use crate::test_harness::http_client::MaybeMultipart;
 use crate::uplink::license_enforcement::LicenseState;
@@ -2265,14 +2266,11 @@ async fn test_supergraph_timeout() {
     let conf: Arc<Configuration> = Arc::new(serde_json::from_value(config).unwrap());
 
     let schema = include_str!("..//testdata/minimal_supergraph.graphql");
-    let planner = BridgeQueryPlannerPool::new(
-        schema.to_string(),
-        conf.clone(),
-        NonZeroUsize::new(1).unwrap(),
-    )
-    .await
-    .unwrap();
-    let schema = planner.schema();
+    let schema = Arc::new(Schema::parse(schema, &conf).unwrap());
+    let planner =
+        BridgeQueryPlannerPool::new(schema.clone(), conf.clone(), NonZeroUsize::new(1).unwrap())
+            .await
+            .unwrap();
 
     // we do the entire supergraph rebuilding instead of using `from_supergraph_mock_callback_and_configuration`
     // because we need the plugins to apply on the supergraph
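
The test change above reflects the new `BridgeQueryPlannerPool::new` signature, which takes an already-parsed schema instead of an SDL string, so the supergraph is parsed once and shared across the pool. A rough sketch of that parse-once-then-share pattern, with hypothetical stand-in types:

```rust
use std::sync::Arc;

// Hypothetical stand-ins for the router's Schema and planner pool, only to
// illustrate the pattern; they are not the crate's actual types.
struct Schema {
    sdl: String,
}

impl Schema {
    fn parse(sdl: &str) -> Result<Self, String> {
        // Toy validation in place of real schema parsing.
        if sdl.contains("schema") {
            Ok(Self { sdl: sdl.to_string() })
        } else {
            Err("invalid SDL".into())
        }
    }
}

struct PlannerPool {
    schema: Arc<Schema>,
    size: usize,
}

impl PlannerPool {
    // The pool receives an already-parsed schema, so parsing happens
    // exactly once regardless of the pool size.
    fn new(schema: Arc<Schema>, size: usize) -> Self {
        Self { schema, size }
    }
}

fn main() {
    let schema = Arc::new(Schema::parse("schema { query: Query }").expect("parses"));
    let pool = PlannerPool::new(Arc::clone(&schema), 4);
    assert_eq!(pool.size, 4);
    assert!(pool.schema.sdl.contains("query"));
}
```
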
diff --git a/apollo-router/src/cache/mod.rs b/apollo-router/src/cache/mod.rs
index 80daa1d8a0..6e1ef01cb8 100644
--- a/apollo-router/src/cache/mod.rs
+++ b/apollo-router/src/cache/mod.rs
@@ -14,7 +14,9 @@ use self::storage::ValueType;
 use crate::configuration::RedisCache;
 
 pub(crate) mod redis;
+mod size_estimation;
 pub(crate) mod storage;
+pub(crate) use size_estimation::estimate_size;
 
 type WaitMap<K, V> = Arc<Mutex<HashMap<K, broadcast::Sender<V>>>>;
 
 pub(crate) const DEFAULT_CACHE_CAPACITY: NonZeroUsize = match NonZeroUsize::new(512) {
@@ -37,7 +39,7 @@ where
     pub(crate) async fn with_capacity(
         capacity: NonZeroUsize,
         redis: Option<RedisCache>,
-        caller: &str,
+        caller: &'static str,
     ) -> Result<Self, BoxError> {
         Ok(Self {
             wait_map: Arc::new(Mutex::new(HashMap::new())),
@@ -47,7 +49,7 @@ where
     pub(crate) async fn from_configuration(
         config: &crate::configuration::Cache,
-        caller: &str,
+        caller: &'static str,
     ) -> Result<Self, BoxError> {
         Self::with_capacity(config.in_memory.limit, config.redis.clone(), caller).await
     }
diff --git a/apollo-router/src/cache/redis.rs b/apollo-router/src/cache/redis.rs
index f0973551f4..f16130c116 100644
--- a/apollo-router/src/cache/redis.rs
+++ b/apollo-router/src/cache/redis.rs
@@ -171,7 +171,7 @@ impl RedisCacheStorage {
         let client = RedisClient::new(
             client_config,
             Some(PerformanceConfig {
-                default_command_timeout: config.timeout.unwrap_or(Duration::from_millis(2)),
+                default_command_timeout: config.timeout.unwrap_or(Duration::from_millis(500)),
                 ..Default::default()
             }),
             None,
@@ -562,16 +562,29 @@
     }
 
     pub(crate) async fn delete(&self, keys: Vec<RedisKey>) -> Option<u32> {
-        self.inner
-            .del(keys)
-            .await
-            .map_err(|e| {
-                if !e.is_not_found() {
+        let mut h: HashMap<u16, Vec<RedisKey>> = HashMap::new();
+        for key in keys.into_iter() {
+            let key = self.make_key(key);
+            let hash = ClusterRouting::hash_key(key.as_bytes());
+            let entry = h.entry(hash).or_default();
+            entry.push(key);
+        }
+
+        // then we query all the key groups at the same time
+        let results: Vec<Result<u32, RedisError>> =
+            futures::future::join_all(h.into_values().map(|keys| self.inner.del(keys))).await;
+        let mut total = 0u32;
+
+        for res in results {
+            match res {
+                Ok(res) => total += res,
+                Err(e) => {
                     tracing::error!(error = %e, "redis del error");
                 }
-                e
-            })
-            .ok()
+            }
+        }
+
+        Some(total)
     }
 
     pub(crate) fn scan(
@@ -593,12 +606,19 @@ mod test {
 
     use url::Url;
 
+    use crate::cache::storage::ValueType;
+
     #[test]
     fn ensure_invalid_payload_serialization_doesnt_fail() {
         #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
         struct Stuff {
             time: SystemTime,
         }
+        impl ValueType for Stuff {
+            fn estimated_size(&self) -> Option<usize> {
+                None
+            }
+        }
 
         let invalid_json_payload = super::RedisValue(Stuff {
             // this systemtime is invalid, serialization will fail
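
The rewritten `delete` above groups keys by their Redis Cluster hash slot before issuing `DEL`s, because a multi-key command in cluster mode may only touch keys that map to one slot; running one command per slot group concurrently avoids cross-slot errors while still batching the work. A toy illustration of the grouping step (the real slot function is CRC16-based, as in fred's `ClusterRouting::hash_key`; the byte-sum below is only a stand-in):

```rust
use std::collections::HashMap;

// Stand-in for a Redis Cluster slot function. The real mapping is
// CRC16(key) % 16384 (with hash-tag handling); this toy version just
// sums the key's bytes.
fn hash_slot(key: &str) -> u16 {
    key.bytes().map(u16::from).sum::<u16>() % 16384
}

fn main() {
    let keys = ["plan:1", "plan:2", "apq:abc", "entity:42"];

    // Group keys by slot so each DEL only touches keys that live on the
    // same cluster node; the groups can then be deleted concurrently
    // (the router uses futures::future::join_all for this).
    let mut groups: HashMap<u16, Vec<&str>> = HashMap::new();
    for key in keys {
        groups.entry(hash_slot(key)).or_default().push(key);
    }

    for (slot, group) in &groups {
        println!("DEL for slot {slot}: {group:?}");
    }
}
```
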
+        unreachable!()
+    }
+}
+
+/// This is a special serializer that doesn't store the serialized data; instead, it counts the bytes.
+/// Yes, it's inaccurate, but we're looking for something that is relatively cheap to compute.
+/// It doesn't take into account shared data structures occurring multiple times and will give the
+/// full estimated serialized cost.
+#[derive(Default, Debug)]
+struct CountingSerializer {
+    count: usize,
+}
+
+impl ser::Serializer for CountingSerializer {
+    type Ok = Self;
+    type Error = Error;
+    type SerializeSeq = Self;
+    type SerializeTuple = Self;
+    type SerializeTupleStruct = Self;
+    type SerializeTupleVariant = Self;
+    type SerializeMap = Self;
+    type SerializeStruct = Self;
+    type SerializeStructVariant = Self;
+
+    fn serialize_bool(mut self, _v: bool) -> Result<Self::Ok, Self::Error> {
+        self.count += std::mem::size_of::<bool>();
+        Ok(self)
+    }
+
+    fn serialize_i8(mut self, _v: i8) -> Result<Self::Ok, Self::Error> {
+        self.count += std::mem::size_of::<i8>();
+        Ok(self)
+    }
+
+    fn serialize_i16(mut self, _v: i16) -> Result<Self::Ok, Self::Error> {
+        self.count += std::mem::size_of::<i16>();
+        Ok(self)
+    }
+
+    fn serialize_i32(mut self, _v: i32) -> Result<Self::Ok, Self::Error> {
+        self.count += std::mem::size_of::<i32>();
+        Ok(self)
+    }
+
+    fn serialize_i64(mut self, _v: i64) -> Result<Self::Ok, Self::Error> {
+        self.count += std::mem::size_of::<i64>();
+        Ok(self)
+    }
+
+    fn serialize_u8(mut self, _v: u8) -> Result<Self::Ok, Self::Error> {
+        self.count += std::mem::size_of::<u8>();
+        Ok(self)
+    }
+
+    fn serialize_u16(mut self, _v: u16) -> Result<Self::Ok, Self::Error> {
+        self.count += std::mem::size_of::<u16>();
+        Ok(self)
+    }
+
+    fn serialize_u32(mut self, _v: u32) -> Result<Self::Ok, Self::Error> {
+        self.count += std::mem::size_of::<u32>();
+        Ok(self)
+    }
+
+    fn serialize_u64(mut self, _v: u64) -> Result<Self::Ok, Self::Error> {
+        self.count += std::mem::size_of::<u64>();
+        Ok(self)
+    }
+
+    fn serialize_f32(mut self, _v: f32) -> Result<Self::Ok, Self::Error> {
+        self.count += std::mem::size_of::<f32>();
+        Ok(self)
+    }
+
+    fn serialize_f64(mut self, _v: f64) -> Result<Self::Ok, Self::Error> {
+        self.count += std::mem::size_of::<f64>();
+        Ok(self)
+    }
+
+    fn serialize_char(mut self, _v: char) -> Result<Self::Ok, Self::Error> {
+        self.count += std::mem::size_of::<char>();
+        Ok(self)
+    }
+
+    fn serialize_str(mut self, v: &str) -> Result<Self::Ok, Self::Error> {
+        // ptr + 8 bytes length + 8 bytes capacity
+        self.count += 24 + v.len();
+        Ok(self)
+    }
+
+    fn serialize_bytes(mut self, v: &[u8]) -> Result<Self::Ok, Self::Error> {
+        self.count += v.len();
+        Ok(self)
+    }
+
+    fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
+        Ok(self)
+    }
+
+    fn serialize_some<T>(self, value: &T) -> Result<Self::Ok, Self::Error>
+    where
+        T: ?Sized + Serialize,
+    {
+        Ok(value.serialize(self).expect("failed to serialize"))
+    }
+
+    fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
+        Ok(self)
+    }
+
+    fn serialize_unit_struct(self, _name: &'static str) -> Result<Self::Ok, Self::Error> {
+        Ok(self)
+    }
+
+    fn serialize_unit_variant(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        _variant: &'static str,
+    ) -> Result<Self::Ok, Self::Error> {
+        Ok(self)
+    }
+
+    fn serialize_newtype_struct<T>(
+        self,
+        _name: &'static str,
+        value: &T,
+    ) -> Result<Self::Ok, Self::Error>
+    where
+        T: ?Sized + Serialize,
+    {
+        Ok(value.serialize(self).expect("failed to serialize"))
+    }
+
+    fn serialize_newtype_variant<T>(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        _variant: &'static str,
+        value: &T,
+    ) -> Result<Self::Ok, Self::Error>
+    where
+        T: ?Sized + Serialize,
+    {
+        Ok(value.serialize(self).expect("failed to serialize"))
+    }
+
+    fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
+        Ok(self)
+    }
+
+    fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Self::Error> {
+        Ok(self)
+    }
+
+    fn serialize_tuple_struct(
+        self,
+        _name: &'static str,
+        _len: usize,
+    ) -> Result<Self::SerializeTupleStruct, Self::Error> {
+        Ok(self)
+    }
+
+    fn serialize_tuple_variant(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        _variant: &'static str,
+        _len: usize,
+    ) -> Result<Self::SerializeTupleVariant, Self::Error> {
+        Ok(self)
+    }
+
+    fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
+        Ok(self)
+    }
+
+    fn serialize_struct(
+        self,
+        _name: &'static str,
+        _len: usize,
+    ) -> Result<Self::SerializeStruct, Self::Error> {
+        Ok(self)
+    }
+
+    fn serialize_struct_variant(
+        self,
+        _name: &'static str,
+        _variant_index: u32,
+        _variant: &'static str,
+        _len: usize,
+    ) -> Result<Self::SerializeStructVariant, Self::Error> {
+        Ok(self)
+    }
+}
+impl SerializeStructVariant for CountingSerializer {
+    type Ok = Self;
+    type Error = Error;
+
+    fn serialize_field<T>(&mut self, _key: &'static str, value: &T) -> Result<(), Self::Error>
+    where
+        T: ?Sized + Serialize,
+    {
+        let ser = value
+            .serialize(CountingSerializer::default())
+            .expect("must be able to serialize");
+        self.count += ser.count;
+        Ok(())
+    }
+
+    fn end(self) -> Result<Self::Ok, Self::Error> {
+        Ok(self)
+    }
+}
+impl SerializeSeq for CountingSerializer {
+    type Ok = Self;
+    type Error = Error;
+
+    fn serialize_element<T>(&mut self, value: &T) -> Result<(), Self::Error>
+    where
+        T: ?Sized + Serialize,
+    {
+        let ser = value
+            .serialize(CountingSerializer::default())
+            .expect("must be able to serialize");
+        self.count += ser.count;
+        Ok(())
+    }
+
+    fn end(self) -> Result<Self::Ok, Self::Error> {
+        Ok(self)
+    }
+}
+impl SerializeTuple for CountingSerializer {
+    type Ok = Self;
+    type Error = Error;
+
+    fn serialize_element<T>(&mut self, value: &T) -> Result<(), Self::Error>
+    where
+        T: ?Sized + Serialize,
+    {
+        let ser = value
+            .serialize(CountingSerializer::default())
+            .expect("must be able to serialize");
+        self.count += ser.count;
+        Ok(())
+    }
+
+    fn end(self) -> Result<Self::Ok, Self::Error> {
+        Ok(self)
+    }
+}
+
+impl SerializeStruct for CountingSerializer {
+    type Ok = Self;
+    type Error = Error;
+
+    fn serialize_field<T>(&mut self, _key: &'static str, value: &T) -> Result<(), Self::Error>
+    where
+        T: ?Sized + Serialize,
+    {
+        let ser = value
+            .serialize(CountingSerializer::default())
+            .expect("must be able to serialize");
+        self.count += ser.count;
+        Ok(())
+    }
+
+    fn end(self) -> Result<Self::Ok, Self::Error> {
+        Ok(self)
+    }
+}
+
+impl SerializeMap for CountingSerializer {
+    type Ok = Self;
+    type Error = Error;
+
+    fn serialize_key<T>(&mut self, key: &T) -> Result<(), Self::Error>
+    where
+        T: ?Sized + Serialize,
+    {
+        let ser = key
+            .serialize(CountingSerializer::default())
+            .expect("must be able to serialize");
+        self.count += ser.count;
+        Ok(())
+    }
+
+    fn serialize_value<T>(&mut self, value: &T) -> Result<(), Self::Error>
+    where
+        T: ?Sized + Serialize,
+    {
+        let ser = value
+            .serialize(CountingSerializer::default())
+            .expect("must be able to serialize");
+        self.count += ser.count;
+        Ok(())
+    }
+
+    fn end(self) -> Result<Self::Ok, Self::Error> {
+        Ok(self)
+    }
+}
+
+impl SerializeTupleVariant for CountingSerializer {
+    type Ok = Self;
+    type Error = Error;
+
+    fn serialize_field<T>(&mut self, value: &T) -> Result<(), Self::Error>
+    where
+        T: ?Sized + Serialize,
+    {
+        let ser = value
+            .serialize(CountingSerializer::default())
+            .expect("must be able to serialize");
+        self.count += ser.count;
+        Ok(())
+    }
+
+    fn end(self) -> Result<Self::Ok, Self::Error> {
+        Ok(self)
+    }
+}
+
+impl SerializeTupleStruct for CountingSerializer {
+    type Ok = Self;
+    type Error = Error;
+
+    fn serialize_field<T>(&mut self, value: &T) -> Result<(), Self::Error>
+    where
+        T: ?Sized + Serialize,
+    {
+        let ser = value
+            .serialize(CountingSerializer::default())
+            .expect("must be able to serialize");
+        self.count += ser.count;
+        Ok(())
+    }
+
+    fn end(self) -> Result<Self::Ok, Self::Error> {
+        Ok(self)
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use serde::Serialize;
+
+    use crate::cache::estimate_size;
+
+    #[test]
+    fn test_estimate_size() {
+        #[derive(Serialize)]
+        struct Test {
+            string: String,
+            u8: u8,
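+            // the nested struct below is walked recursively; its estimated
+            // size is summed into the parent's total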
+            embedded: TestEmbedded,
+        }
+
+        #[derive(Serialize)]
+        struct TestEmbedded {
+            string: String,
+            u8: u8,
+        }
+
+        // Baseline
+        let s = estimate_size(&Test {
+            string: "".to_string(),
+            u8: 0,
+            embedded: TestEmbedded {
+                string: "".to_string(),
+                u8: 0,
+            },
+        });
+        assert_eq!(s, 50);
+
+        // Test modifying the root struct
+        let s = estimate_size(&Test {
+            string: "test".to_string(),
+            u8: 0,
+            embedded: TestEmbedded {
+                string: "".to_string(),
+                u8: 0,
+            },
+        });
+        assert_eq!(s, 54);
+
+        // Test modifying the embedded struct
+        let s = estimate_size(&Test {
+            string: "".to_string(),
+            u8: 0,
+            embedded: TestEmbedded {
+                string: "test".to_string(),
+                u8: 0,
+            },
+        });
+        assert_eq!(s, 54);
+    }
+}
diff --git a/apollo-router/src/cache/storage.rs b/apollo-router/src/cache/storage.rs
index b72ad9d378..7cfa37a0ad 100644
--- a/apollo-router/src/cache/storage.rs
+++ b/apollo-router/src/cache/storage.rs
@@ -2,9 +2,16 @@ use std::fmt::Display;
 use std::fmt::{self};
 use std::hash::Hash;
 use std::num::NonZeroUsize;
+use std::sync::atomic::AtomicI64;
+use std::sync::atomic::Ordering;
 use std::sync::Arc;

 use lru::LruCache;
+use opentelemetry::metrics::MeterProvider;
+use opentelemetry_api::metrics::Meter;
+use opentelemetry_api::metrics::ObservableGauge;
+use opentelemetry_api::metrics::Unit;
+use opentelemetry_api::KeyValue;
 use serde::de::DeserializeOwned;
 use serde::Serialize;
 use tokio::sync::Mutex;
@@ -13,6 +20,8 @@ use tower::BoxError;
 use super::redis::*;
 use crate::configuration::RedisCache;
+use crate::metrics;
+use crate::plugins::telemetry::config_new::instruments::METER_NAME;

 pub(crate) trait KeyType:
     Clone + fmt::Debug + fmt::Display + Hash + Eq + Send + Sync
@@ -21,6 +30,10 @@ where
 pub(crate) trait ValueType:
     Clone + fmt::Debug + Send + Sync + Serialize + DeserializeOwned
 {
+    /// Returns an estimated size of the cache entry in bytes.
+    fn estimated_size(&self) -> Option<usize> {
+        None
+    }
 }

 // Blanket implementation which satisfies the compiler
@@ -32,15 +45,6 @@ where
     // It has the functions it needs already
 }

-// Blanket implementation which satisfies the compiler
-impl ValueType for V
-where
-    V: Clone + fmt::Debug + Send + Sync + Serialize + DeserializeOwned,
-{
-    // Nothing to implement, since V already supports the other traits.
-    // It has the functions it needs already
-}
-
 pub(crate) type InMemoryCache<K, V> = Arc<Mutex<LruCache<K, V>>>;

 // placeholder storage module
@@ -52,6 +56,10 @@
 pub(crate) struct CacheStorage<K: KeyType, V: ValueType> {
     caller: String,
     inner: Arc<Mutex<LruCache<K, V>>>,
     redis: Option<RedisCacheStorage>,
+    cache_size: Arc<AtomicI64>,
+    cache_estimated_storage: Arc<AtomicI64>,
+    _cache_size_gauge: ObservableGauge<i64>,
+    _cache_estimated_storage_gauge: ObservableGauge<i64>,
 }

 impl<K, V> CacheStorage<K, V>
 where
@@ -62,9 +70,19 @@ where
     pub(crate) async fn new(
         max_capacity: NonZeroUsize,
         config: Option<RedisCache>,
-        caller: &str,
+        caller: &'static str,
     ) -> Result<Self, BoxError> {
+        // Because calculating the cache size is expensive, we do this as we go rather than
+        // iterating. This means storing the values for the gauges.
+        let meter: opentelemetry::metrics::Meter = metrics::meter_provider().meter(METER_NAME);
+        let (cache_size, cache_size_gauge) = Self::create_cache_size_gauge(&meter, caller);
+        let (cache_estimated_storage, cache_estimated_storage_gauge) =
+            Self::create_cache_estimated_storage_size_gauge(&meter, caller);
+
         Ok(Self {
+            _cache_size_gauge: cache_size_gauge,
+            _cache_estimated_storage_gauge: cache_estimated_storage_gauge,
+            cache_size,
+            cache_estimated_storage,
             caller: caller.to_string(),
             inner: Arc::new(Mutex::new(LruCache::new(max_capacity))),
             redis: if let Some(config) = config {
@@ -89,6 +107,56 @@ where
         })
     }

+    fn create_cache_size_gauge(
+        meter: &Meter,
+        caller: &'static str,
+    ) -> (Arc<AtomicI64>, ObservableGauge<i64>) {
+        let current_cache_size = Arc::new(AtomicI64::new(0));
+        let current_cache_size_for_gauge = current_cache_size.clone();
+        let cache_size_gauge = meter
+            // TODO move to dot naming convention
+            .i64_observable_gauge("apollo_router_cache_size")
+            .with_description("Cache size")
+            .with_callback(move |i| {
+                i.observe(
+                    current_cache_size_for_gauge.load(Ordering::SeqCst),
+                    &[
+                        KeyValue::new("kind", caller),
+                        KeyValue::new("type", "memory"),
+                    ],
+                )
+            })
+            .init();
+        (current_cache_size, cache_size_gauge)
+    }
+
+    fn create_cache_estimated_storage_size_gauge(
+        meter: &Meter,
+        caller: &'static str,
+    ) -> (Arc<AtomicI64>, ObservableGauge<i64>) {
+        let cache_estimated_storage = Arc::new(AtomicI64::new(0));
+        let cache_estimated_storage_for_gauge = cache_estimated_storage.clone();
+        let cache_estimated_storage_gauge = meter
+            .i64_observable_gauge("apollo.router.cache.storage.estimated_size")
+            .with_description("Estimated cache storage")
+            .with_unit(Unit::new("bytes"))
+            .with_callback(move |i| {
+                // If there's no estimated storage, don't bother updating the gauge
+                let value = cache_estimated_storage_for_gauge.load(Ordering::SeqCst);
+                if value > 0 {
+                    i.observe(
+                        cache_estimated_storage_for_gauge.load(Ordering::SeqCst),
+                        &[
+                            KeyValue::new("kind", caller),
+                            KeyValue::new("type", "memory"),
+                        ],
+                    )
+                }
+            })
+            .init();
+        (cache_estimated_storage, cache_estimated_storage_gauge)
+    }
+
     /// `init_from_redis` is called with values newly deserialized from the Redis cache;
     /// if an error is returned, the value is ignored and considered a cache miss.
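     /// (Values restored from Redis are re-inserted through `insert_in_memory`, so the
     /// in-memory size and estimated-storage gauges stay consistent with what is held.)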
     pub(crate) async fn get(
@@ -143,7 +211,7 @@ where
             });
             match redis_value {
                 Some(v) => {
-                    self.inner.lock().await.put(key.clone(), v.0.clone());
+                    self.insert_in_memory(key.clone(), v.0.clone()).await;

                     tracing::info!(
                         monotonic_counter.apollo_router_cache_hit_count = 1u64,
@@ -187,25 +255,33 @@ where
                 .await;
         }

-        let mut in_memory = self.inner.lock().await;
-        in_memory.put(key, value);
-        let size = in_memory.len() as u64;
-        tracing::info!(
-            value.apollo_router_cache_size = size,
-            kind = %self.caller,
-            storage = &tracing::field::display(CacheStorageName::Memory),
-        );
+        self.insert_in_memory(key, value).await;
     }

-    pub(crate) async fn insert_in_memory(&self, key: K, value: V) {
-        let mut in_memory = self.inner.lock().await;
-        in_memory.put(key, value);
-        let size = in_memory.len() as u64;
-        tracing::info!(
-            value.apollo_router_cache_size = size,
-            kind = %self.caller,
-            storage = &tracing::field::display(CacheStorageName::Memory),
-        );
+    pub(crate) async fn insert_in_memory(&self, key: K, value: V)
+    where
+        V: ValueType,
+    {
+        // Update the cache size and estimated storage size.
+        // This is cheaper than trying to estimate the cache storage size by iterating over the cache.
+        let new_value_size = value.estimated_size().unwrap_or(0) as i64;
+
+        let (old_value, length) = {
+            let mut in_memory = self.inner.lock().await;
+            (in_memory.push(key, value), in_memory.len())
+        };
+
+        let size_delta = match old_value {
+            Some((_, old_value)) => {
+                let old_value_size = old_value.estimated_size().unwrap_or(0) as i64;
+                new_value_size - old_value_size
+            }
+            None => new_value_size,
+        };
+        self.cache_estimated_storage
+            .fetch_add(size_delta, Ordering::SeqCst);
+
+        self.cache_size.store(length as i64, Ordering::SeqCst);
     }

     pub(crate) fn in_memory_cache(&self) -> InMemoryCache<K, V> {
@@ -231,3 +307,184 @@ impl Display for CacheStorageName {
         }
     }
 }
+
+impl ValueType for String {
+    fn estimated_size(&self) -> Option<usize> {
+        Some(self.len())
+    }
+}
+
+impl ValueType for crate::graphql::Response {
+    fn estimated_size(&self) -> Option<usize> {
+        None
+    }
+}
+
+impl ValueType for usize {
+    fn estimated_size(&self) -> Option<usize> {
+        Some(std::mem::size_of::<usize>())
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::num::NonZeroUsize;
+
+    use crate::cache::estimate_size;
+    use crate::cache::storage::CacheStorage;
+    use crate::cache::storage::ValueType;
+    use crate::metrics::FutureMetricsExt;
+
+    #[tokio::test]
+    async fn test_metrics() {
+        #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
+        struct Stuff {}
+        impl ValueType for Stuff {
+            fn estimated_size(&self) -> Option<usize> {
+                Some(1)
+            }
+        }
+
+        async {
+            let cache: CacheStorage<String, Stuff> =
+                CacheStorage::new(NonZeroUsize::new(10).unwrap(), None, "test")
+                    .await
+                    .unwrap();
+
+            cache.insert("test".to_string(), Stuff {}).await;
+            assert_gauge!(
+                "apollo.router.cache.storage.estimated_size",
+                1,
+                "kind" = "test",
+                "type" = "memory"
+            );
+            assert_gauge!(
+                "apollo_router_cache_size",
+                1,
+                "kind" = "test",
+                "type" = "memory"
+            );
+        }
+        .with_metrics()
+        .await;
+    }
+
+    #[tokio::test]
+    #[should_panic]
+    async fn test_metrics_not_emitted_where_no_estimated_size() {
+        #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
+        struct Stuff {}
+        impl ValueType for Stuff {
+            fn estimated_size(&self) -> Option<usize> {
+                None
+            }
+        }
+
+        async {
+            let cache: CacheStorage<String, Stuff> =
+                CacheStorage::new(NonZeroUsize::new(10).unwrap(), None, "test")
+                    .await
+                    .unwrap();
+
+            cache.insert("test".to_string(), Stuff {}).await;
+            // This metric won't exist
+            assert_gauge!(
+                "apollo_router_cache_size",
+                0,
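+                // (this assertion is expected to fail, which is what satisfies
+                // the #[should_panic] attribute above)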
"kind" = "test", + "type" = "memory" + ); + } + .with_metrics() + .await; + } + + #[tokio::test] + async fn test_metrics_eviction() { + #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] + struct Stuff { + test: String, + } + impl ValueType for Stuff { + fn estimated_size(&self) -> Option { + Some(estimate_size(self)) + } + } + + async { + // note that the cache size is 1 + // so the second insert will always evict + let cache: CacheStorage = + CacheStorage::new(NonZeroUsize::new(1).unwrap(), None, "test") + .await + .unwrap(); + + cache + .insert( + "test".to_string(), + Stuff { + test: "test".to_string(), + }, + ) + .await; + assert_gauge!( + "apollo.router.cache.storage.estimated_size", + 28, + "kind" = "test", + "type" = "memory" + ); + assert_gauge!( + "apollo_router_cache_size", + 1, + "kind" = "test", + "type" = "memory" + ); + + // Insert something slightly larger + cache + .insert( + "test".to_string(), + Stuff { + test: "test_extended".to_string(), + }, + ) + .await; + assert_gauge!( + "apollo.router.cache.storage.estimated_size", + 37, + "kind" = "test", + "type" = "memory" + ); + assert_gauge!( + "apollo_router_cache_size", + 1, + "kind" = "test", + "type" = "memory" + ); + + // Even though this is a new cache entry, we should get back to where we initially were + cache + .insert( + "test2".to_string(), + Stuff { + test: "test".to_string(), + }, + ) + .await; + assert_gauge!( + "apollo.router.cache.storage.estimated_size", + 28, + "kind" = "test", + "type" = "memory" + ); + assert_gauge!( + "apollo_router_cache_size", + 1, + "kind" = "test", + "type" = "memory" + ); + } + .with_metrics() + .await; + } +} diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs index 8cd6b56381..7e18720703 100644 --- a/apollo-router/src/configuration/metrics.rs +++ b/apollo-router/src/configuration/metrics.rs @@ -378,7 +378,7 @@ impl InstrumentData { populate_config_instrument!( apollo.router.config.demand_control, - "$.preview_demand_control[?(@.enabled == true)]", + "$.demand_control[?(@.enabled == true)]", opt.mode, "$.mode" ); @@ -400,7 +400,7 @@ impl InstrumentData { Self::get_first_key_from_path( demand_control_attributes, "opt.strategy", - "$.preview_demand_control[?(@.enabled == true)].strategy", + "$.demand_control[?(@.enabled == true)].strategy", yaml, ); } diff --git a/apollo-router/src/configuration/migrations/0026-entity_cache_invalidation_preview.yaml b/apollo-router/src/configuration/migrations/0026-entity_cache_invalidation_preview.yaml new file mode 100644 index 0000000000..60a9a83604 --- /dev/null +++ b/apollo-router/src/configuration/migrations/0026-entity_cache_invalidation_preview.yaml @@ -0,0 +1,6 @@ +description: Entity cache preview configuration format is changing +actions: + - type: move + from: preview_entity_cache.redis + to: preview_entity_cache.subgraph.all.redis + diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index cebc0add67..b839eb4f90 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -219,16 +219,32 @@ pub(crate) enum ApolloMetricsGenerationMode { /// Query planner modes. #[derive(Clone, PartialEq, Eq, Default, Derivative, Serialize, Deserialize, JsonSchema)] #[derivative(Debug)] -#[serde(rename_all = "lowercase")] +#[serde(rename_all = "snake_case")] pub(crate) enum QueryPlannerMode { /// Use the new Rust-based implementation. 
+    ///
+    /// Raises an error at Router startup if the new planner does not support the schema
+    /// (such as using legacy Apollo Federation 1)
     New,
     /// Use the old JavaScript-based implementation.
-    #[default]
     Legacy,
-    /// Use Rust-based and Javascript-based implementations side by side, logging warnings if the
-    /// implementations disagree.
+    /// Use primarily the JavaScript-based implementation,
+    /// but also schedule background jobs to run the Rust implementation and compare results,
+    /// logging warnings if the implementations disagree.
+    ///
+    /// Raises an error at Router startup if the new planner does not support the schema
+    /// (such as using legacy Apollo Federation 1)
     Both,
+    /// Use primarily the JavaScript-based implementation,
+    /// but also schedule on a best-effort basis background jobs
+    /// to run the Rust implementation and compare results,
+    /// logging warnings if the implementations disagree.
+    ///
+    /// Falls back to `legacy` with a warning
+    /// if the new planner does not support the schema
+    /// (such as using legacy Apollo Federation 1)
+    #[default]
+    BothBestEffort,
 }

 impl<'de> serde::Deserialize<'de> for Configuration {
diff --git a/apollo-router/src/configuration/schema.rs b/apollo-router/src/configuration/schema.rs
index a78015ab63..4d05b786ef 100644
--- a/apollo-router/src/configuration/schema.rs
+++ b/apollo-router/src/configuration/schema.rs
@@ -161,8 +161,12 @@ pub(crate) fn validate_yaml_configuration(
     let offset = start_marker
         .line()
         .saturating_sub(NUMBER_OF_PREVIOUS_LINES_TO_DISPLAY);
-
-    let lines = yaml_split_by_lines[offset..end_marker.line()]
+    let end = if end_marker.line() > yaml_split_by_lines.len() {
+        yaml_split_by_lines.len()
+    } else {
+        end_marker.line()
+    };
+    let lines = yaml_split_by_lines[offset..end]
         .iter()
         .map(|line| format!(" {line}"))
         .join("\n");
diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap
index c47abd0744..e30ffefbf6 100644
--- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap
+++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap
@@ -1708,21 +1708,21 @@ expression: "&schema"
         "description": "Enable or disable the entity caching feature",
         "type": "boolean"
       },
+      "invalidation": {
+        "$ref": "#/definitions/InvalidationEndpointConfig",
+        "description": "#/definitions/InvalidationEndpointConfig",
+        "nullable": true
+      },
       "metrics": {
         "$ref": "#/definitions/Metrics",
         "description": "#/definitions/Metrics"
       },
-      "redis": {
-        "$ref": "#/definitions/RedisCache",
-        "description": "#/definitions/RedisCache"
-      },
       "subgraph": {
         "$ref": "#/definitions/SubgraphConfiguration_for_Subgraph",
        "description": "#/definitions/SubgraphConfiguration_for_Subgraph"
       }
     },
     "required": [
-      "redis",
       "subgraph"
     ],
     "type": "object"
@@ -2179,6 +2179,17 @@
       ],
       "type": "string"
     },
+    "DisplayTraceIdFormat": {
+      "anyOf": [
+        {
+          "$ref": "#/definitions/TraceIdFormat",
+          "description": "#/definitions/TraceIdFormat"
+        },
+        {
+          "type": "boolean"
+        }
+      ]
+    },
     "Enabled": {
       "enum": [
         "enabled"
@@ -2592,8 +2603,8 @@
         "type": "boolean"
       },
       "format": {
-        "$ref": "#/definitions/TraceIdFormat2",
-        "description": "#/definitions/TraceIdFormat2"
+        "$ref": "#/definitions/TraceIdFormat",
+        "description": "#/definitions/TraceIdFormat"
       },
      "header_name": {
        "description": "Choose the header name to expose trace_id (default: apollo-trace-id)",
        "type": "string"
      }
@@ -3583,6 +3594,24 @@
      },
      "type": "object"
    },
+    "InvalidationEndpointConfig": {
+      "additionalProperties": false,
+      "properties": {
+        "listen": {
+          "$ref": "#/definitions/ListenAddr",
+          "description": "#/definitions/ListenAddr"
+        },
+        "path": {
+          "description": "Specify the path on which to listen for invalidation requests.",
+          "type": "string"
+        }
+      },
+      "required": [
+        "listen",
+        "path"
+      ],
+      "type": "object"
+    },
    "JWTConf": {
      "additionalProperties": false,
      "properties": {
@@ -4357,7 +4386,7 @@
      "description": "Query planner modes.",
      "oneOf": [
        {
-          "description": "Use the new Rust-based implementation.",
+          "description": "Use the new Rust-based implementation.\n\nRaises an error at Router startup if the new planner does not support the schema (such as using legacy Apollo Federation 1)",
          "enum": [
            "new"
          ],
@@ -4371,11 +4400,18 @@
          "type": "string"
        },
        {
-          "description": "Use Rust-based and Javascript-based implementations side by side, logging warnings if the implementations disagree.",
+          "description": "Use primarily the JavaScript-based implementation, but also schedule background jobs to run the Rust implementation and compare results, logging warnings if the implementations disagree.\n\nRaises an error at Router startup if the new planner does not support the schema (such as using legacy Apollo Federation 1)",
          "enum": [
            "both"
          ],
          "type": "string"
+        },
+        {
+          "description": "Use primarily the JavaScript-based implementation, but also schedule on a best-effort basis background jobs to run the Rust implementation and compare results, logging warnings if the implementations disagree.\n\nFalls back to `legacy` with a warning if the new planner does not support the schema (such as using legacy Apollo Federation 1)",
+          "enum": [
+            "both_best_effort"
+          ],
+          "type": "string"
        }
      ]
    },
@@ -4589,6 +4625,10 @@
    "RequestPropagation": {
      "additionalProperties": false,
      "properties": {
+        "format": {
+          "$ref": "#/definitions/TraceIdFormat",
+          "description": "#/definitions/TraceIdFormat"
+        },
        "header_name": {
          "description": "Choose the header name to expose trace_id (default: apollo-trace-id)",
          "type": "string"
@@ -5571,15 +5611,26 @@
      "description": "Per subgraph configuration for entity caching",
      "properties": {
        "enabled": {
+          "default": true,
          "description": "activates caching for this subgraph, overrides the global configuration",
-          "nullable": true,
          "type": "boolean"
        },
+        "invalidation": {
+          "$ref": "#/definitions/SubgraphInvalidationConfig",
+          "description": "#/definitions/SubgraphInvalidationConfig",
+          "nullable": true
+        },
        "private_id": {
+          "default": null,
          "description": "Context key used to separate cache sections per user",
          "nullable": true,
          "type": "string"
        },
+        "redis": {
+          "$ref": "#/definitions/RedisCache",
+          "description": "#/definitions/RedisCache",
+          "nullable": true
+        },
        "ttl": {
          "$ref": "#/definitions/Ttl",
          "description": "#/definitions/Ttl",
@@ -5779,6 +5830,22 @@
      },
      "type": "object"
    },
+    "SubgraphInvalidationConfig": {
+      "additionalProperties": false,
+      "properties": {
+        "enabled": {
+          "default": false,
+          "description": "Enable the invalidation",
+          "type": "boolean"
+        },
+        "shared_key": {
+          "default": "",
+          "description": "Shared key needed to request the invalidation endpoint",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
    "SubgraphPassthroughMode": 
{ "additionalProperties": false, "properties": { @@ -7101,27 +7168,16 @@ expression: "&schema" "TraceIdFormat": { "oneOf": [ { - "description": "Open Telemetry trace ID, a hex string.", + "description": "Format the Trace ID as a hexadecimal number\n\n(e.g. Trace ID 16 -> 00000000000000000000000000000010)", "enum": [ - "open_telemetry" + "hexadecimal" ], "type": "string" }, - { - "description": "Datadog trace ID, a u64.", - "enum": [ - "datadog" - ], - "type": "string" - } - ] - }, - "TraceIdFormat2": { - "oneOf": [ { "description": "Format the Trace ID as a hexadecimal number\n\n(e.g. Trace ID 16 -> 00000000000000000000000000000010)", "enum": [ - "hexadecimal" + "open_telemetry" ], "type": "string" }, @@ -7138,6 +7194,13 @@ expression: "&schema" "datadog" ], "type": "string" + }, + { + "description": "UUID format with dashes (eg. 67e55044-10b1-426f-9247-bb680e5fe0c8)", + "enum": [ + "uuid" + ], + "type": "string" } ] }, @@ -8086,9 +8149,8 @@ expression: "&schema" "type": "boolean" }, "display_trace_id": { - "default": true, - "description": "Include the trace id (if any) with the log event. (default: true)", - "type": "boolean" + "$ref": "#/definitions/DisplayTraceIdFormat", + "description": "#/definitions/DisplayTraceIdFormat" } }, "type": "object" @@ -8184,9 +8246,8 @@ expression: "&schema" "type": "boolean" }, "display_trace_id": { - "default": false, - "description": "Include the trace id (if any) with the log event. (default: false)", - "type": "boolean" + "$ref": "#/definitions/DisplayTraceIdFormat", + "description": "#/definitions/DisplayTraceIdFormat" } }, "type": "object" @@ -8237,6 +8298,10 @@ expression: "&schema" "$ref": "#/definitions/CSRFConfig", "description": "#/definitions/CSRFConfig" }, + "demand_control": { + "$ref": "#/definitions/DemandControlConfig", + "description": "#/definitions/DemandControlConfig" + }, "experimental_apollo_metrics_generation_mode": { "$ref": "#/definitions/ApolloMetricsGenerationMode", "description": "#/definitions/ApolloMetricsGenerationMode" @@ -8290,10 +8355,6 @@ expression: "&schema" "$ref": "#/definitions/Plugins", "description": "#/definitions/Plugins" }, - "preview_demand_control": { - "$ref": "#/definitions/DemandControlConfig", - "description": "#/definitions/DemandControlConfig" - }, "preview_entity_cache": { "$ref": "#/definitions/Config7", "description": "#/definitions/Config7" diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview.yaml.snap index 08bc3e55b9..5fda93d394 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview.yaml.snap @@ -4,15 +4,19 @@ expression: new_config --- --- preview_entity_cache: - redis: - urls: - - "redis://localhost:6379" - timeout: 5ms - ttl: 60s enabled: true + invalidation: + listen: "127.0.0.1:4000" + path: /invalidation subgraph: subgraphs: accounts: enabled: false products: ttl: 120s + all: + redis: + urls: + - "redis://localhost:6379" + timeout: 5ms + ttl: 60s diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview2.yaml.snap 
b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview2.yaml.snap new file mode 100644 index 0000000000..3fbf236aaa --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@entity_cache_preview2.yaml.snap @@ -0,0 +1,19 @@ +--- +source: apollo-router/src/configuration/tests.rs +expression: new_config +--- +--- +preview_entity_cache: + enabled: true + subgraph: + subgraphs: + accounts: + enabled: false + products: + ttl: 120s + all: + redis: + urls: + - "redis://localhost:6379" + timeout: 5ms + ttl: 60s diff --git a/apollo-router/src/configuration/testdata/metrics/demand_control.router.yaml b/apollo-router/src/configuration/testdata/metrics/demand_control.router.yaml index a78a0870ce..c83294d0d0 100644 --- a/apollo-router/src/configuration/testdata/metrics/demand_control.router.yaml +++ b/apollo-router/src/configuration/testdata/metrics/demand_control.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: measure strategy: diff --git a/apollo-router/src/configuration/testdata/metrics/entities.router.yaml b/apollo-router/src/configuration/testdata/metrics/entities.router.yaml index 8c810effa7..e2cbd0ee04 100644 --- a/apollo-router/src/configuration/testdata/metrics/entities.router.yaml +++ b/apollo-router/src/configuration/testdata/metrics/entities.router.yaml @@ -1,11 +1,15 @@ preview_entity_cache: enabled: false - redis: - urls: [ "redis://localhost:6379" ] - timeout: 5ms - ttl: 60s + invalidation: + listen: 127.0.0.1:4000 + path: /invalidation subgraph: all: + redis: + urls: [ "redis://localhost:6379" ] + timeout: 5ms + ttl: 60s + enabled: true subgraphs: accounts: diff --git a/apollo-router/src/configuration/testdata/migrations/entity_cache_preview.yaml b/apollo-router/src/configuration/testdata/migrations/entity_cache_preview.yaml index 2539a571ce..c210551098 100644 --- a/apollo-router/src/configuration/testdata/migrations/entity_cache_preview.yaml +++ b/apollo-router/src/configuration/testdata/migrations/entity_cache_preview.yaml @@ -4,6 +4,9 @@ preview_entity_cache: timeout: 5ms ttl: 60s enabled: true + invalidation: + listen: 127.0.0.1:4000 + path: /invalidation subgraphs: accounts: enabled: false diff --git a/apollo-router/src/configuration/testdata/migrations/entity_cache_preview2.yaml b/apollo-router/src/configuration/testdata/migrations/entity_cache_preview2.yaml new file mode 100644 index 0000000000..2539a571ce --- /dev/null +++ b/apollo-router/src/configuration/testdata/migrations/entity_cache_preview2.yaml @@ -0,0 +1,11 @@ +preview_entity_cache: + redis: + urls: [ "redis://localhost:6379" ] + timeout: 5ms + ttl: 60s + enabled: true + subgraphs: + accounts: + enabled: false + products: + ttl: 120s \ No newline at end of file diff --git a/apollo-router/src/error.rs b/apollo-router/src/error.rs index 2ebf66bd4d..80b075a915 100644 --- a/apollo-router/src/error.rs +++ b/apollo-router/src/error.rs @@ -233,8 +233,8 @@ pub(crate) enum ServiceBuildError { /// couldn't build Query Planner Service: {0} QueryPlannerError(QueryPlannerError), - /// The supergraph schema failed to produce a valid API schema: {0} - ApiSchemaError(FederationError), + /// failed to initialize the query planner: {0} + QpInitError(FederationError), /// schema error: {0} Schema(SchemaError), @@ -249,12 +249,6 @@ impl From for ServiceBuildError { } } -impl From for ServiceBuildError { - fn from(err: FederationError) -> Self { - 
ServiceBuildError::ApiSchemaError(err) - } -} - impl From> for ServiceBuildError { fn from(errors: Vec) -> Self { ServiceBuildError::QueryPlannerError(errors.into()) diff --git a/apollo-router/src/lib.rs b/apollo-router/src/lib.rs index 656f33c976..9ac39c9e23 100644 --- a/apollo-router/src/lib.rs +++ b/apollo-router/src/lib.rs @@ -114,7 +114,9 @@ pub mod _private { pub use crate::plugin::PluginFactory; pub use crate::plugin::PLUGINS; // For comparison/fuzzing + pub use crate::query_planner::bridge_query_planner::render_diff; pub use crate::query_planner::bridge_query_planner::QueryPlanResult; + pub use crate::query_planner::dual_query_planner::diff_plan; pub use crate::query_planner::dual_query_planner::plan_matches; // For tests pub use crate::router_factory::create_test_service_factory_from_yaml; diff --git a/apollo-router/src/metrics/mod.rs b/apollo-router/src/metrics/mod.rs index 2e0fbf2ca5..e24317cd06 100644 --- a/apollo-router/src/metrics/mod.rs +++ b/apollo-router/src/metrics/mod.rs @@ -329,12 +329,12 @@ pub(crate) mod test_utils { } } - #[derive(Serialize, Eq, PartialEq, Default)] + #[derive(Clone, Serialize, Eq, PartialEq, Default)] pub(crate) struct SerdeMetricData { pub(crate) datapoints: Vec, } - #[derive(Serialize, Eq, PartialEq)] + #[derive(Clone, Serialize, Eq, PartialEq)] pub(crate) struct SerdeMetricDataPoint { #[serde(skip_serializing_if = "Option::is_none")] pub(crate) value: Option, @@ -421,14 +421,14 @@ pub(crate) mod test_utils { attributes: value .attributes .iter() - .map(|(k, v)| (k.as_str().to_string(), Self::to_value(v))) + .map(|(k, v)| (k.as_str().to_string(), Self::convert(v))) .collect(), } } } impl SerdeMetricDataPoint { - pub(crate) fn to_value(v: &Value) -> serde_json::Value { + pub(crate) fn convert(v: &Value) -> serde_json::Value { match v.clone() { Value::Bool(v) => v.into(), Value::I64(v) => v.into(), @@ -455,7 +455,7 @@ pub(crate) mod test_utils { attributes: value .attributes .iter() - .map(|(k, v)| (k.as_str().to_string(), Self::to_value(v))) + .map(|(k, v)| (k.as_str().to_string(), Self::convert(v))) .collect(), } } @@ -509,23 +509,23 @@ pub(crate) fn meter_provider() -> AggregateMeterProvider { #[allow(unused_macros)] macro_rules! 
u64_counter { ($($name:ident).+, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(u64, counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(u64, counter, add, stringify!($($name).+), $description, $value, attributes); }; ($($name:ident).+, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(u64, counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(u64, counter, add, stringify!($($name).+), $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(u64, counter, add, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(u64, counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(u64, counter, add, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(u64, counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $attrs: expr) => { @@ -550,23 +550,23 @@ macro_rules! u64_counter { #[allow(unused_macros)] macro_rules! 
f64_counter { ($($name:ident).+, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(f64, counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(f64, counter, add, stringify!($($name).+), $description, $value, attributes); }; ($($name:ident).+, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(f64, counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(f64, counter, add, stringify!($($name).+), $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(f64, counter, add, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(f64, counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(f64, counter, add, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(f64, counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $attrs: expr) => { metric!(f64, counter, add, $name, $description, $value, $attrs); @@ -591,23 +591,23 @@ macro_rules! f64_counter { #[allow(unused_macros)] macro_rules! 
i64_up_down_counter { ($($name:ident).+, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(i64, up_down_counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(i64, up_down_counter, add, stringify!($($name).+), $description, $value, attributes); }; ($($name:ident).+, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(i64, up_down_counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(i64, up_down_counter, add, stringify!($($name).+), $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(i64, up_down_counter, add, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(i64, up_down_counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(i64, up_down_counter, add, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(i64, up_down_counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $attrs: expr) => { @@ -632,23 +632,23 @@ macro_rules! i64_up_down_counter { #[allow(unused_macros)] macro_rules! 
f64_up_down_counter { ($($name:ident).+, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(f64, up_down_counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(f64, up_down_counter, add, stringify!($($name).+), $description, $value, attributes); }; ($($name:ident).+, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(f64, up_down_counter, add, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(f64, up_down_counter, add, stringify!($($name).+), $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(f64, up_down_counter, add, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(f64, up_down_counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(f64, up_down_counter, add, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(f64, up_down_counter, add, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $attrs: expr) => { @@ -673,23 +673,23 @@ macro_rules! f64_up_down_counter { #[allow(unused_macros)] macro_rules! 
f64_histogram { ($($name:ident).+, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(f64, histogram, record, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(f64, histogram, record, stringify!($($name).+), $description, $value, attributes); }; ($($name:ident).+, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(f64, histogram, record, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(f64, histogram, record, stringify!($($name).+), $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(f64, histogram, record, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(f64, histogram, record, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(f64, histogram, record, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(f64, histogram, record, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $attrs: expr) => { @@ -714,23 +714,23 @@ macro_rules! f64_histogram { #[allow(unused_macros)] macro_rules! 
u64_histogram { ($($name:ident).+, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(u64, histogram, record, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(u64, histogram, record, stringify!($($name).+), $description, $value, attributes); }; ($($name:ident).+, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(u64, histogram, record, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(u64, histogram, record, stringify!($($name).+), $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(u64, histogram, record, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(u64, histogram, record, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(u64, histogram, record, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(u64, histogram, record, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $attrs: expr) => { @@ -755,23 +755,23 @@ macro_rules! u64_histogram { #[allow(unused_macros)] macro_rules! 
i64_histogram { ($($name:ident).+, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(i64, histogram, record, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(i64, histogram, record, stringify!($($name).+), $description, $value, attributes); }; ($($name:ident).+, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(i64, histogram, record, stringify!($($name).+), $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(i64, histogram, record, stringify!($($name).+), $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - metric!(i64, histogram, record, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + metric!(i64, histogram, record, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - metric!(i64, histogram, record, $name, $description, $value, &attributes); + let attributes = [$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + metric!(i64, histogram, record, $name, $description, $value, attributes); }; ($name:literal, $description:literal, $value: expr, $attrs: expr) => { @@ -855,7 +855,7 @@ macro_rules! assert_metric { description: "".to_string(), unit: "".to_string(), data: crate::metrics::test_utils::SerdeMetricData { - datapoints: vec![crate::metrics::test_utils::SerdeMetricDataPoint { + datapoints: [crate::metrics::test_utils::SerdeMetricDataPoint { value: $value, sum: $sum, attributes: $attrs @@ -863,13 +863,14 @@ macro_rules! assert_metric { .map(|kv: &opentelemetry::KeyValue| { ( kv.key.to_string(), - crate::metrics::test_utils::SerdeMetricDataPoint::to_value( + crate::metrics::test_utils::SerdeMetricDataPoint::convert( &kv.value, ), ) }) - .collect(), - }], + .collect::>(), + }] + .to_vec(), }, }; panic!( @@ -885,28 +886,28 @@ macro_rules! assert_metric { macro_rules! 
assert_counter { ($($name:ident).+, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { let name = stringify!($($name).+); - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(name, crate::metrics::test_utils::MetricType::Counter, $value, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert(name, crate::metrics::test_utils::MetricType::Counter, $value, attributes); assert_metric!(result, name, Some($value.into()), None, &attributes); }; ($($name:ident).+, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { let name = stringify!($($name).+); - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(name, crate::metrics::test_utils::MetricType::Counter, $value, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert(name, crate::metrics::test_utils::MetricType::Counter, $value, attributes); assert_metric!(result, name, Some($value.into()), None, &attributes); }; ($name:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Counter, $value, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Counter, $value, attributes); assert_metric!(result, $name, Some($value.into()), None, &attributes); }; ($name:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Counter, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Counter, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, &attributes); }; ($name:literal, $value: expr) => { @@ -919,27 +920,27 @@ macro_rules! assert_counter { macro_rules! 
assert_up_down_counter { ($($name:ident).+, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::UpDownCounter, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::UpDownCounter, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($($name:ident).+, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::UpDownCounter, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::UpDownCounter, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($name:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::UpDownCounter, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::UpDownCounter, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($name:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::UpDownCounter, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::UpDownCounter, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($name:literal, $value: expr) => { @@ -952,27 +953,27 @@ macro_rules! assert_up_down_counter { macro_rules! 
assert_gauge { ($($name:ident).+, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Gauge, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Gauge, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($($name:ident).+, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Gauge, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Gauge, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($name:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Gauge, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Gauge, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($name:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Gauge, $value, &attributes); - assert_metric!(result, $name, Some($value.into()), None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Gauge, $value, attributes); + assert_metric!(result, $name, Some($value.into()), None, attributes); }; ($name:literal, $value: expr) => { @@ -985,27 +986,27 @@ macro_rules! assert_gauge { macro_rules! 
assert_histogram_sum { ($($name:ident).+, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, $value, &attributes); - assert_metric!(result, $name, None, Some($value.into()), &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, $value, attributes); + assert_metric!(result, $name, None, Some($value.into()), attributes); }; ($($name:ident).+, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, $value, &attributes); - assert_metric!(result, $name, None, Some($value.into()), &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, $value, attributes); + assert_metric!(result, $name, None, Some($value.into()), attributes); }; ($name:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Histogram, $value, &attributes); - assert_metric!(result, $name, None, Some($value.into()), &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Histogram, $value, attributes); + assert_metric!(result, $name, None, Some($value.into()), attributes); }; ($name:literal, $value: expr, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Histogram, $value, &attributes); - assert_metric!(result, $name, None, Some($value.into()), &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Histogram, $value, attributes); + assert_metric!(result, $name, None, Some($value.into()), attributes); }; ($name:literal, $value: expr) => { @@ -1018,27 +1019,27 @@ macro_rules! assert_histogram_sum { macro_rules! 
assert_histogram_exists { ($($name:ident).+, $value: ty, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, &attributes); - assert_metric!(result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(result, $name, None, None, attributes); }; ($($name:ident).+, $value: ty, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, &attributes); - assert_metric!(result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(result, $name, None, None, attributes); }; ($name:literal, $value: ty, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, crate::metrics::test_utils::MetricType::Histogram, &attributes); - assert_metric!(result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(result, $name, None, None, attributes); }; ($name:literal, $value: ty, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, crate::metrics::test_utils::MetricType::Histogram, &attributes); - assert_metric!(result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(result, $name, None, None, attributes); }; ($name:literal, $value: ty) => { @@ -1051,27 +1052,27 @@ macro_rules! assert_histogram_exists { macro_rules! 
assert_histogram_not_exists { ($($name:ident).+, $value: ty, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, &attributes); - assert_metric!(!result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(!result, $name, None, None, attributes); }; ($($name:ident).+, $value: ty, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, &attributes); - assert_metric!(!result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>(stringify!($($name).+), crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(!result, $name, None, None, attributes); }; ($name:literal, $value: ty, $($attr_key:literal = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, crate::metrics::test_utils::MetricType::Histogram, &attributes); - assert_metric!(!result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new($attr_key, $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(!result, $name, None, None, attributes); }; ($name:literal, $value: ty, $($($attr_key:ident).+ = $attr_value:expr),+) => { - let attributes = vec![$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; - let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, crate::metrics::test_utils::MetricType::Histogram, &attributes); - assert_metric!(!result, $name, None, None, &attributes); + let attributes = &[$(opentelemetry::KeyValue::new(stringify!($($attr_key).+), $attr_value)),+]; + let result = crate::metrics::collect_metrics().metric_exists::<$value>($name, crate::metrics::test_utils::MetricType::Histogram, attributes); + assert_metric!(!result, $name, None, None, attributes); }; ($name:literal, $value: ty) => { diff --git a/apollo-router/src/notification.rs b/apollo-router/src/notification.rs index 77aff5db43..7cfba87e7a 100644 --- a/apollo-router/src/notification.rs +++ b/apollo-router/src/notification.rs @@ -807,6 +807,7 @@ where } #[allow(clippy::collapsible_if)] if topic_to_delete { + tracing::trace!("deleting subscription from unsubscribe"); if self.subscriptions.remove(&topic).is_some() { i64_up_down_counter!( "apollo_router_opened_subscriptions", @@ -880,6 +881,7 @@ where // Send error message to all killed connections for (_subscriber_id, subscription) in closed_subs { + tracing::trace!("deleting subscription from kill_dead_topics"); i64_up_down_counter!( "apollo_router_opened_subscriptions", "Number of opened subscriptions", @@ 
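These assertion macros now collect attributes into a `&[KeyValue]` slice instead of allocating a `Vec` per assertion. A minimal usage sketch, grounded in the macro arms above but with illustrative metric names and attribute values; the metrics test context the crate's tests run in is omitted here:

```rust
// Illustrative only: these are crate-internal test macros. Dotted
// identifiers match the `$($name:ident).+` arms; string literals match
// the `$name:literal` arms. Attribute pairs expand to a `&[KeyValue]`.
#[test]
fn records_expected_metrics() {
    // ... exercise the code under test here ...
    assert_up_down_counter!(
        apollo_router_opened_subscriptions,
        1,
        "origin" = "endpoint"
    );
    assert_histogram_exists!(
        "apollo.router.cache.invalidation.duration",
        f64,
        "origin" = "endpoint"
    );
}
```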
-907,7 +909,7 @@ where } fn force_delete(&mut self, topic: K) { - tracing::trace!("deleting subscription"); + tracing::trace!("deleting subscription from force_delete"); let sub = self.subscriptions.remove(&topic); if let Some(sub) = sub { i64_up_down_counter!( diff --git a/apollo-router/src/orbiter/mod.rs b/apollo-router/src/orbiter/mod.rs index ebd3383752..a326e15d48 100644 --- a/apollo-router/src/orbiter/mod.rs +++ b/apollo-router/src/orbiter/mod.rs @@ -97,7 +97,7 @@ impl RouterSuperServiceFactory for OrbiterRouterSuperServiceFactory { &'a mut self, is_telemetry_disabled: bool, configuration: Arc, - schema: String, + schema: Arc, previous_router: Option<&'a Self::RouterFactory>, extra_plugins: Option)>>, ) -> Result { diff --git a/apollo-router/src/plugins/cache/entity.rs b/apollo-router/src/plugins/cache/entity.rs index 2375d4fde4..3992dbd670 100644 --- a/apollo-router/src/plugins/cache/entity.rs +++ b/apollo-router/src/plugins/cache/entity.rs @@ -7,6 +7,7 @@ use std::time::Duration; use http::header; use http::header::CACHE_CONTROL; +use multimap::MultiMap; use schemars::JsonSchema; use serde::Deserialize; use serde::Serialize; @@ -26,12 +27,16 @@ use tracing::Level; use super::cache_control::CacheControl; use super::invalidation::Invalidation; use super::invalidation::InvalidationOrigin; +use super::invalidation_endpoint::InvalidationEndpointConfig; +use super::invalidation_endpoint::InvalidationService; +use super::invalidation_endpoint::SubgraphInvalidationConfig; use super::metrics::CacheMetricContextKey; use super::metrics::CacheMetricsService; use crate::batching::BatchQuery; use crate::cache::redis::RedisCacheStorage; use crate::cache::redis::RedisKey; use crate::cache::redis::RedisValue; +use crate::cache::storage::ValueType; use crate::configuration::subgraph::SubgraphConfiguration; use crate::configuration::RedisCache; use crate::error::FetchError; @@ -49,6 +54,8 @@ use crate::services::subgraph; use crate::services::supergraph; use crate::spec::TYPENAME; use crate::Context; +use crate::Endpoint; +use crate::ListenAddr; /// Change this key if you introduce a breaking change in entity caching algorithm to make sure it won't take the previous entries pub(crate) const ENTITY_CACHE_VERSION: &str = "1.0"; @@ -60,7 +67,8 @@ register_plugin!("apollo", "preview_entity_cache", EntityCache); #[derive(Clone)] pub(crate) struct EntityCache { - storage: Option, + storage: Arc, + endpoint_config: Option>, subgraphs: Arc>, entity_type: Option, enabled: bool, @@ -69,34 +77,66 @@ pub(crate) struct EntityCache { pub(crate) invalidation: Invalidation, } +pub(crate) struct Storage { + all: Option, + subgraphs: HashMap, +} + +impl Storage { + pub(crate) fn get(&self, subgraph: &str) -> Option<&RedisCacheStorage> { + self.subgraphs.get(subgraph).or(self.all.as_ref()) + } +} + /// Configuration for entity caching #[derive(Clone, Debug, JsonSchema, Deserialize)] #[serde(rename_all = "snake_case", deny_unknown_fields)] pub(crate) struct Config { - redis: RedisCache, /// Enable or disable the entity caching feature #[serde(default)] enabled: bool, + /// Configure invalidation per subgraph subgraph: SubgraphConfiguration, + /// Global invalidation configuration + invalidation: Option, + /// Entity caching evaluation metrics #[serde(default)] metrics: Metrics, } /// Per subgraph configuration for entity caching -#[derive(Clone, Debug, Default, JsonSchema, Deserialize, Serialize)] -#[serde(rename_all = "snake_case", deny_unknown_fields)] +#[derive(Clone, Debug, JsonSchema, Deserialize, Serialize)] 
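The new `Storage` type resolves a subgraph's cache by preferring a per-subgraph Redis connection and falling back to the global `all` connection. A self-contained sketch of that lookup pattern (the type and names below are illustrative, not the plugin's API):

```rust
use std::collections::HashMap;

// Per-subgraph value with a global fallback, mirroring Storage::get above.
struct Tiered<T> {
    all: Option<T>,
    per_subgraph: HashMap<String, T>,
}

impl<T> Tiered<T> {
    fn get(&self, subgraph: &str) -> Option<&T> {
        // A subgraph-specific entry wins; otherwise use the global one.
        self.per_subgraph.get(subgraph).or(self.all.as_ref())
    }
}

fn main() {
    let t = Tiered {
        all: Some("global-redis"),
        per_subgraph: HashMap::from([("orga".to_string(), "orga-redis")]),
    };
    assert_eq!(t.get("orga"), Some(&"orga-redis"));
    assert_eq!(t.get("user"), Some(&"global-redis"));
}
```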
+#[serde(rename_all = "snake_case", deny_unknown_fields, default)] pub(crate) struct Subgraph { + /// Redis configuration + pub(crate) redis: Option, + /// expiration for all keys for this subgraph, unless overriden by the `Cache-Control` header in subgraph responses pub(crate) ttl: Option, /// activates caching for this subgraph, overrides the global configuration - pub(crate) enabled: Option, + pub(crate) enabled: bool, /// Context key used to separate cache sections per user pub(crate) private_id: Option, + + /// Invalidation configuration + pub(crate) invalidation: Option, +} + +impl Default for Subgraph { + fn default() -> Self { + Self { + redis: None, + enabled: true, + ttl: Default::default(), + private_id: Default::default(), + invalidation: Default::default(), + } + } } /// Per subgraph configuration for entity caching @@ -147,26 +187,64 @@ impl Plugin for EntityCache { .query .as_ref() .map(|q| q.name.to_string()); - let required_to_start = init.config.redis.required_to_start; - // we need to explicitely disable TTL reset because it is managed directly by this plugin - let mut redis_config = init.config.redis.clone(); - redis_config.reset_ttl = false; - let storage = match RedisCacheStorage::new(redis_config).await { - Ok(storage) => Some(storage), - Err(e) => { - tracing::error!( - cache = "entity", - e, - "could not open connection to Redis for caching", - ); - if required_to_start { - return Err(e); + + let mut all = None; + + if let Some(redis) = &init.config.subgraph.all.redis { + let mut redis_config = redis.clone(); + let required_to_start = redis_config.required_to_start; + // we need to explicitely disable TTL reset because it is managed directly by this plugin + redis_config.reset_ttl = false; + all = match RedisCacheStorage::new(redis_config).await { + Ok(storage) => Some(storage), + Err(e) => { + tracing::error!( + cache = "entity", + e, + "could not open connection to Redis for caching", + ); + if required_to_start { + return Err(e); + } + None + } + }; + } + let mut subgraph_storages = HashMap::new(); + for (subgraph, config) in &init.config.subgraph.subgraphs { + if let Some(redis) = &config.redis { + let required_to_start = redis.required_to_start; + // we need to explicitely disable TTL reset because it is managed directly by this plugin + let mut redis_config = redis.clone(); + redis_config.reset_ttl = false; + let storage = match RedisCacheStorage::new(redis_config).await { + Ok(storage) => Some(storage), + Err(e) => { + tracing::error!( + cache = "entity", + e, + "could not open connection to Redis for caching", + ); + if required_to_start { + return Err(e); + } + None + } + }; + if let Some(storage) = storage { + subgraph_storages.insert(subgraph.clone(), storage); } - None } - }; + } - if init.config.redis.ttl.is_none() + if init + .config + .subgraph + .all + .redis + .as_ref() + .map(|r| r.ttl.is_none()) + .unwrap_or(false) && init .config .subgraph @@ -179,12 +257,34 @@ impl Plugin for EntityCache { .into()); } + if init + .config + .subgraph + .all + .invalidation + .as_ref() + .map(|i| i.shared_key.is_empty()) + .unwrap_or_default() + { + return Err( + "you must set a default shared_key invalidation for all subgraphs" + .to_string() + .into(), + ); + } + + let storage = Arc::new(Storage { + all, + subgraphs: subgraph_storages, + }); + let invalidation = Invalidation::new(storage.clone()).await?; Ok(Self { storage, entity_type, enabled: init.config.enabled, + endpoint_config: init.config.invalidation.clone().map(Arc::new), subgraphs: 
Arc::new(init.config.subgraph), metrics: init.config.metrics, private_queries: Arc::new(RwLock::new(HashSet::new())), @@ -214,8 +314,8 @@ impl Plugin for EntityCache { name: &str, mut service: subgraph::BoxService, ) -> subgraph::BoxService { - let storage = match self.storage.clone() { - Some(storage) => storage, + let storage = match self.storage.get(name) { + Some(storage) => storage.clone(), None => { return ServiceBuilder::new() .map_response(move |response: subgraph::Response| { @@ -240,13 +340,8 @@ impl Plugin for EntityCache { .clone() .map(|t| t.0) .or_else(|| storage.ttl()); - let subgraph_enabled = self.enabled - && self - .subgraphs - .get(name) - .enabled - // if the top level `enabled` is true but there is no other configuration, caching is enabled for this plugin - .unwrap_or(true); + let subgraph_enabled = + self.enabled && (self.subgraphs.all.enabled || self.subgraphs.get(name).enabled); let private_id = self.subgraphs.get(name).private_id.clone(); let name = name.to_string(); @@ -300,6 +395,40 @@ impl Plugin for EntityCache { .boxed() } } + + fn web_endpoints(&self) -> MultiMap { + let mut map = MultiMap::new(); + if self.enabled + && self + .subgraphs + .all + .invalidation + .as_ref() + .map(|i| i.enabled) + .unwrap_or_default() + { + match &self.endpoint_config { + Some(endpoint_config) => { + let endpoint = Endpoint::from_router_service( + endpoint_config.path.clone(), + InvalidationService::new(self.subgraphs.clone(), self.invalidation.clone()) + .boxed(), + ); + tracing::info!( + "Entity caching invalidation endpoint listening on: {}{}", + endpoint_config.listen, + endpoint_config.path + ); + map.insert(endpoint_config.listen.clone(), endpoint); + } + None => { + tracing::warn!("Cannot start entity caching invalidation endpoint because the listen address and endpoint is not configured"); + } + } + } + + map + } } impl EntityCache { @@ -311,9 +440,18 @@ impl EntityCache { where Self: Sized, { - let invalidation = Invalidation::new(Some(storage.clone())).await?; + use std::net::IpAddr; + use std::net::Ipv4Addr; + use std::net::SocketAddr; + + let storage = Arc::new(Storage { + all: Some(storage), + subgraphs: HashMap::new(), + }); + let invalidation = Invalidation::new(storage.clone()).await?; + Ok(Self { - storage: Some(storage), + storage, entity_type: None, enabled: true, subgraphs: Arc::new(SubgraphConfiguration { @@ -322,6 +460,13 @@ impl EntityCache { }), metrics: Metrics::default(), private_queries: Default::default(), + endpoint_config: Some(Arc::new(InvalidationEndpointConfig { + path: String::from("/invalidation"), + listen: ListenAddr::SocketAddr(SocketAddr::new( + IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + 4000, + )), + })), invalidation, }) } @@ -446,11 +591,13 @@ impl InnerCacheService { // we did not know in advance that this was a query with a private scope, so we update the cache key if !is_known_private { self.private_queries.write().await.insert(query.to_string()); + + if let Some(s) = private_id.as_ref() { + root_cache_key = format!("{root_cache_key}:{s}"); + } } - if let Some(s) = private_id.as_ref() { - root_cache_key = format!("{root_cache_key}:{s}"); - } else { + if private_id.is_none() { // the response has a private scope but we don't have a way to differentiate users, so we do not store the response in cache return Ok(response); } @@ -512,8 +659,48 @@ impl InnerCacheService { .await? 
{ ControlFlow::Break(response) => Ok(response), - ControlFlow::Continue((request, cache_result)) => { - let mut response = self.service.call(request).await?; + ControlFlow::Continue((request, mut cache_result)) => { + let context = request.context.clone(); + let mut response = match self.service.call(request).await { + Ok(response) => response, + Err(e) => { + let e = match e.downcast::() { + Ok(inner) => match *inner { + FetchError::SubrequestHttpError { .. } => *inner, + _ => FetchError::SubrequestHttpError { + status_code: None, + service: self.name.to_string(), + reason: inner.to_string(), + }, + }, + Err(e) => FetchError::SubrequestHttpError { + status_code: None, + service: self.name.to_string(), + reason: e.to_string(), + }, + }; + + let graphql_error = e.to_graphql_error(None); + + let (new_entities, new_errors) = assemble_response_from_errors( + &[graphql_error], + &mut cache_result.0, + ); + + let mut data = Object::default(); + data.insert(ENTITIES, new_entities.into()); + + let mut response = subgraph::Response::builder() + .context(context) + .data(Value::Object(data)) + .errors(new_errors) + .extensions(Object::new()) + .build(); + CacheControl::no_store().to_headers(response.response.headers_mut())?; + + return Ok(response); + } + }; let mut cache_control = if response.response.headers().contains_key(CACHE_CONTROL) { @@ -740,6 +927,12 @@ struct CacheEntry { data: Value, } +impl ValueType for CacheEntry { + fn estimated_size(&self) -> Option { + None + } +} + async fn cache_store_root_from_response( cache: RedisCacheStorage, subgraph_ttl: Option, @@ -821,6 +1014,15 @@ async fn cache_store_entities_from_response( .map(|o| o.insert(ENTITIES, new_entities.into())); response.response.body_mut().data = data; response.response.body_mut().errors = new_errors; + } else { + let (new_entities, new_errors) = + assemble_response_from_errors(&response.response.body().errors, &mut result_from_cache); + + let mut data = Object::default(); + data.insert(ENTITIES, new_entities.into()); + + response.response.body_mut().data = Some(Value::Object(data)); + response.response.body_mut().errors = new_errors; } Ok(()) @@ -1174,3 +1376,31 @@ async fn insert_entities_in_result( Ok((new_entities, new_errors)) } + +fn assemble_response_from_errors( + graphql_errors: &[Error], + result: &mut Vec, +) -> (Vec, Vec) { + let mut new_entities = Vec::new(); + let mut new_errors = Vec::new(); + + for (new_entity_idx, IntermediateResult { cache_entry, .. 
}) in result.drain(..).enumerate() { + match cache_entry { + Some(v) => { + new_entities.push(v.data); + } + None => { + new_entities.push(Value::Null); + + for mut error in graphql_errors.iter().cloned() { + error.path = Some(Path(vec![ + PathElement::Key(ENTITIES.to_string(), None), + PathElement::Index(new_entity_idx), + ])); + new_errors.push(error); + } + } + } + } + (new_entities, new_errors) +} diff --git a/apollo-router/src/plugins/cache/invalidation.rs b/apollo-router/src/plugins/cache/invalidation.rs index 96c863e437..77736f6598 100644 --- a/apollo-router/src/plugins/cache/invalidation.rs +++ b/apollo-router/src/plugins/cache/invalidation.rs @@ -1,14 +1,20 @@ +use std::sync::Arc; use std::time::Instant; +use fred::error::RedisError; use fred::types::Scanner; use futures::SinkExt; use futures::StreamExt; +use itertools::Itertools; use serde::Deserialize; use serde::Serialize; use serde_json_bytes::Value; +use thiserror::Error; +use tokio::sync::broadcast; use tower::BoxError; use tracing::Instrument; +use super::entity::Storage as EntityStorage; use crate::cache::redis::RedisCacheStorage; use crate::cache::redis::RedisKey; use crate::notification::Handle; @@ -19,52 +25,105 @@ use crate::Notify; #[derive(Clone)] pub(crate) struct Invalidation { - enabled: bool, - handle: Handle)>, + #[allow(clippy::type_complexity)] + pub(super) handle: Handle< + InvalidationTopic, + ( + Vec, + InvalidationOrigin, + broadcast::Sender>, + ), + >, } +#[derive(Error, Debug, Clone)] +pub(crate) enum InvalidationError { + #[error("redis error")] + RedisError(#[from] RedisError), + #[error("several errors")] + Errors(#[from] InvalidationErrors), + #[cfg(test)] + #[error("custom error: {0}")] + Custom(String), +} + +#[derive(Debug, Clone)] +pub(crate) struct InvalidationErrors(Vec); + +impl std::fmt::Display for InvalidationErrors { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "invalidation errors: [{}]", + self.0.iter().map(|e| e.to_string()).join("; ") + ) + } +} + +impl std::error::Error for InvalidationErrors {} + #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub(crate) struct InvalidationTopic; -#[derive(Clone, Debug)] -#[allow(dead_code)] +#[derive(Clone, Debug, PartialEq)] pub(crate) enum InvalidationOrigin { Endpoint, Extensions, } impl Invalidation { - pub(crate) async fn new(storage: Option) -> Result { + pub(crate) async fn new(storage: Arc) -> Result { let mut notify = Notify::new(None, None, None); let (handle, _b) = notify.create_or_subscribe(InvalidationTopic, false).await?; - let enabled = storage.is_some(); - if let Some(storage) = storage { - let h = handle.clone(); - tokio::task::spawn(async move { start(storage, h.into_stream()).await }); - } - Ok(Self { enabled, handle }) + let h = handle.clone(); + + tokio::task::spawn(async move { + start(storage, h.into_stream()).await; + }); + Ok(Self { handle }) } pub(crate) async fn invalidate( &mut self, origin: InvalidationOrigin, requests: Vec, - ) -> Result<(), BoxError> { - if self.enabled { - let mut sink = self.handle.clone().into_sink(); - sink.send((origin, requests)).await.map_err(|e| e.message)?; - } + ) -> Result { + let mut sink = self.handle.clone().into_sink(); + let (response_tx, mut response_rx) = broadcast::channel(2); + sink.send((requests, origin, response_tx.clone())) + .await + .map_err(|e| format!("cannot send invalidation request: {}", e.message))?; - Ok(()) + let result = response_rx + .recv() + .await + .map_err(|err| { + format!( + "cannot receive response for 
invalidation request: {:?}", + err + ) + })? + .map_err(|err| format!("received an invalidation error: {:?}", err))?; + + Ok(result) } } +// TODO refactor +#[allow(clippy::type_complexity)] async fn start( - storage: RedisCacheStorage, - mut handle: HandleStream)>, + storage: Arc, + mut handle: HandleStream< + InvalidationTopic, + ( + Vec, + InvalidationOrigin, + broadcast::Sender>, + ), + >, ) { - while let Some((origin, requests)) = handle.next().await { + while let Some((requests, origin, response_tx)) = handle.next().await { let origin = match origin { InvalidationOrigin::Endpoint => "endpoint", InvalidationOrigin::Extensions => "extensions", @@ -75,30 +134,17 @@ async fn start( 1u64, "origin" = origin ); - handle_request_batch(&storage, origin, requests) - .instrument(tracing::info_span!( - "cache.invalidation.batch", - "origin" = origin - )) - .await - } -} -async fn handle_request_batch( - storage: &RedisCacheStorage, - origin: &'static str, - requests: Vec, -) { - for request in requests { - let start = Instant::now(); - handle_request(storage, origin, &request) - .instrument(tracing::info_span!("cache.invalidation.request")) - .await; - f64_histogram!( - "apollo.router.cache.invalidation.duration", - "Duration of the invalidation event execution.", - start.elapsed().as_secs_f64() - ); + if let Err(err) = response_tx.send( + handle_request_batch(&storage, origin, requests) + .instrument(tracing::info_span!( + "cache.invalidation.batch", + "origin" = origin + )) + .await, + ) { + ::tracing::error!("cannot send answer to invalidation request in the channel: {err}"); + } } } @@ -106,17 +152,18 @@ async fn handle_request( storage: &RedisCacheStorage, origin: &'static str, request: &InvalidationRequest, -) { +) -> Result { let key_prefix = request.key_prefix(); - let subgraph = request.subgraph(); + let subgraph = request.subgraph_name(); tracing::debug!( "got invalidation request: {request:?}, will scan for: {}", key_prefix ); // FIXME: configurable batch size - let mut stream = storage.scan(key_prefix.clone(), Some(10)); + let mut stream = storage.scan(key_prefix.clone(), Some(100)); let mut count = 0u64; + let mut error = None; while let Some(res) = stream.next().await { match res { @@ -126,6 +173,7 @@ async fn handle_request( error = %e, message = "error scanning for key", ); + error = Some(e); break; } Ok(scan_res) => { @@ -136,7 +184,6 @@ async fn handle_request( .map(|k| RedisKey(k.to_string())) .collect::>(); if !keys.is_empty() { - tracing::debug!("deleting keys: {keys:?}"); count += keys.len() as u64; storage.delete(keys).await; @@ -158,9 +205,50 @@ async fn handle_request( "Number of invalidated keys.", count ); + + match error { + Some(err) => Err(err.into()), + None => Ok(count), + } } -#[derive(Clone, Debug, Serialize, Deserialize)] +async fn handle_request_batch( + storage: &EntityStorage, + origin: &'static str, + requests: Vec, +) -> Result { + let mut count = 0; + let mut errors = Vec::new(); + for request in requests { + let start = Instant::now(); + let redis_storage = match storage.get(request.subgraph_name()) { + Some(s) => s, + None => continue, + }; + match handle_request(redis_storage, origin, &request) + .instrument(tracing::info_span!("cache.invalidation.request")) + .await + { + Ok(c) => count += c, + Err(err) => { + errors.push(err); + } + } + f64_histogram!( + "apollo.router.cache.invalidation.duration", + "Duration of the invalidation event execution.", + start.elapsed().as_secs_f64() + ); + } + + if !errors.is_empty() { + 
Err(InvalidationErrors(errors).into()) + } else { + Ok(count) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] #[serde(tag = "kind", rename_all = "lowercase")] pub(crate) enum InvalidationRequest { Subgraph { @@ -181,10 +269,10 @@ impl InvalidationRequest { fn key_prefix(&self) -> String { match self { InvalidationRequest::Subgraph { subgraph } => { - format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}*",) + format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}:*",) } InvalidationRequest::Type { subgraph, r#type } => { - format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}:type:{type}*",) + format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}:type:{type}:*",) } InvalidationRequest::Entity { subgraph, @@ -192,17 +280,16 @@ impl InvalidationRequest { key, } => { let entity_key = hash_entity_key(key); - format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}:type:{type}:entity:{entity_key}*") + format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}:type:{type}:entity:{entity_key}:*") } } } - fn subgraph(&self) -> String { + pub(super) fn subgraph_name(&self) -> &String { match self { - InvalidationRequest::Subgraph { subgraph } => subgraph.clone(), - _ => { - todo!() - } + InvalidationRequest::Subgraph { subgraph } + | InvalidationRequest::Type { subgraph, .. } + | InvalidationRequest::Entity { subgraph, .. } => subgraph, } } } diff --git a/apollo-router/src/plugins/cache/invalidation_endpoint.rs b/apollo-router/src/plugins/cache/invalidation_endpoint.rs new file mode 100644 index 0000000000..561309c8fb --- /dev/null +++ b/apollo-router/src/plugins/cache/invalidation_endpoint.rs @@ -0,0 +1,565 @@ +use std::sync::Arc; +use std::task::Poll; + +use bytes::Buf; +use futures::future::BoxFuture; +use http::header::AUTHORIZATION; +use http::Method; +use http::StatusCode; +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use serde_json_bytes::json; +use tower::BoxError; +use tower::Service; +use tracing_futures::Instrument; + +use super::entity::Subgraph; +use super::invalidation::Invalidation; +use super::invalidation::InvalidationOrigin; +use crate::configuration::subgraph::SubgraphConfiguration; +use crate::plugins::cache::invalidation::InvalidationRequest; +use crate::services::router; +use crate::services::router::body::RouterBody; +use crate::ListenAddr; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, JsonSchema, Default)] +#[serde(rename_all = "snake_case", deny_unknown_fields, default)] +pub(crate) struct SubgraphInvalidationConfig { + /// Enable the invalidation + pub(crate) enabled: bool, + /// Shared key needed to request the invalidation endpoint + pub(crate) shared_key: String, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "snake_case", deny_unknown_fields)] +pub(crate) struct InvalidationEndpointConfig { + /// Specify on which path you want to listen for invalidation endpoint. + pub(crate) path: String, + /// Listen address on which the invalidation endpoint must listen. 
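Note the `:` now inserted before each trailing `*` in `key_prefix`: without it, invalidating subgraph `user` would also scan and delete keys belonging to a subgraph named `user2`. A standalone sketch of the subgraph-level prefix under that reading (`hash_entity_key` and the entity variant are elided):

```rust
const ENTITY_CACHE_VERSION: &str = "1.0";

// Illustrative reconstruction of the subgraph-level scan prefix.
fn subgraph_prefix(subgraph: &str) -> String {
    format!("version:{ENTITY_CACHE_VERSION}:subgraph:{subgraph}:*")
}

fn main() {
    let prefix = subgraph_prefix("user");
    assert_eq!(prefix, "version:1.0:subgraph:user:*");
    // The literal part of the glob no longer matches "user2" keys:
    let literal = prefix.trim_end_matches('*');
    assert!(!"version:1.0:subgraph:user2:type:T:entity:abc".starts_with(literal));
}
```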
+ pub(crate) listen: ListenAddr, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub(crate) enum InvalidationType { + EntityType, +} + +#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize, JsonSchema)] +#[serde(rename_all = "camelCase")] +pub(crate) struct InvalidationKey { + pub(crate) id: String, + pub(crate) field: String, +} + +#[derive(Clone)] +pub(crate) struct InvalidationService { + config: Arc>, + invalidation: Invalidation, +} + +impl InvalidationService { + pub(crate) fn new( + config: Arc>, + invalidation: Invalidation, + ) -> Self { + Self { + config, + invalidation, + } + } +} + +impl Service for InvalidationService { + type Response = router::Response; + type Error = BoxError; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> Poll> { + Ok(()).into() + } + + fn call(&mut self, req: router::Request) -> Self::Future { + let mut invalidation = self.invalidation.clone(); + let config = self.config.clone(); + Box::pin( + async move { + let (parts, body) = req.router_request.into_parts(); + if !parts.headers.contains_key(AUTHORIZATION) { + return Ok(router::Response { + response: http::Response::builder() + .status(StatusCode::UNAUTHORIZED) + .body("Missing authorization header".into()) + .map_err(BoxError::from)?, + context: req.context, + }); + } + match parts.method { + Method::POST => { + let body = Into::::into(body) + .to_bytes() + .await + .map_err(|e| format!("failed to get the request body: {e}")) + .and_then(|bytes| { + serde_json::from_reader::<_, Vec>( + bytes.reader(), + ) + .map_err(|err| { + format!( + "failed to deserialize the request body into JSON: {err}" + ) + }) + }); + let shared_key = parts + .headers + .get(AUTHORIZATION) + .ok_or("cannot find authorization header")? + .to_str()?; + match body { + Ok(body) => { + let valid_shared_key = + body.iter().map(|b| b.subgraph_name()).any(|subgraph_name| { + valid_shared_key(&config, shared_key, subgraph_name) + }); + if !valid_shared_key { + return Ok(router::Response { + response: http::Response::builder() + .status(StatusCode::UNAUTHORIZED) + .body("Invalid authorization header".into()) + .map_err(BoxError::from)?, + context: req.context, + }); + } + match invalidation + .invalidate(InvalidationOrigin::Endpoint, body) + .await + { + Ok(count) => Ok(router::Response { + response: http::Response::builder() + .status(StatusCode::ACCEPTED) + .body( + serde_json::to_string(&json!({ + "count": count + }))? 
+ .into(), + ) + .map_err(BoxError::from)?, + context: req.context, + }), + Err(err) => Ok(router::Response { + response: http::Response::builder() + .status(StatusCode::BAD_REQUEST) + .body(err.to_string().into()) + .map_err(BoxError::from)?, + context: req.context, + }), + } + } + Err(err) => Ok(router::Response { + response: http::Response::builder() + .status(StatusCode::BAD_REQUEST) + .body(err.into()) + .map_err(BoxError::from)?, + context: req.context, + }), + } + } + _ => Ok(router::Response { + response: http::Response::builder() + .status(StatusCode::METHOD_NOT_ALLOWED) + .body("".into()) + .map_err(BoxError::from)?, + context: req.context, + }), + } + } + .instrument(tracing::info_span!("invalidation_endpoint")), + ) + } +} + +fn valid_shared_key( + config: &SubgraphConfiguration, + shared_key: &str, + subgraph_name: &str, +) -> bool { + config + .all + .invalidation + .as_ref() + .map(|i| i.shared_key == shared_key) + .unwrap_or_default() + || config + .subgraphs + .get(subgraph_name) + .and_then(|s| s.invalidation.as_ref()) + .map(|i| i.shared_key == shared_key) + .unwrap_or_default() +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use tokio::sync::broadcast::Sender; + use tokio_stream::StreamExt; + use tower::ServiceExt; + + use super::*; + use crate::plugins::cache::invalidation::InvalidationError; + use crate::plugins::cache::invalidation::InvalidationTopic; + use crate::Notify; + + #[tokio::test] + async fn test_invalidation_service_bad_shared_key() { + #[allow(clippy::type_complexity)] + let mut notify: Notify< + InvalidationTopic, + ( + Vec, + InvalidationOrigin, + Sender>, + ), + > = Notify::new(None, None, None); + let (handle, _b) = notify + .create_or_subscribe(InvalidationTopic, false) + .await + .unwrap(); + let invalidation = Invalidation { handle }; + let config = Arc::new(SubgraphConfiguration { + all: Subgraph { + ttl: None, + enabled: true, + redis: None, + private_id: None, + invalidation: Some(SubgraphInvalidationConfig { + enabled: true, + shared_key: String::from("test"), + }), + }, + subgraphs: HashMap::new(), + }); + let service = InvalidationService::new(config, invalidation); + let req = router::Request::fake_builder() + .method(http::Method::POST) + .header(AUTHORIZATION, "testttt") + .body( + serde_json::to_vec(&[ + InvalidationRequest::Subgraph { + subgraph: String::from("test"), + }, + InvalidationRequest::Type { + subgraph: String::from("test"), + r#type: String::from("Test"), + }, + ]) + .unwrap(), + ) + .build() + .unwrap(); + let res = service.oneshot(req).await.unwrap(); + assert_eq!(res.response.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn test_invalidation_service_good_sub_shared_key() { + #[allow(clippy::type_complexity)] + let mut notify: Notify< + InvalidationTopic, + ( + Vec, + InvalidationOrigin, + Sender>, + ), + > = Notify::new(None, None, None); + let (handle, _b) = notify + .create_or_subscribe(InvalidationTopic, false) + .await + .unwrap(); + let h = handle.clone(); + + tokio::task::spawn(async move { + let mut handle = h.into_stream(); + let mut called = false; + while let Some((requests, origin, response_tx)) = handle.next().await { + called = true; + if requests + != [ + InvalidationRequest::Subgraph { + subgraph: String::from("test"), + }, + InvalidationRequest::Type { + subgraph: String::from("test"), + r#type: String::from("Test"), + }, + ] + { + response_tx + .send(Err(InvalidationError::Custom(format!( + "it's not the right invalidation requests : {requests:?}" + )))) + 
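Putting the service together: the endpoint accepts a `POST` whose body is a JSON array of invalidation requests, authorized by an `Authorization` header matching a configured `shared_key`, and replies `202 Accepted` with the number of deleted keys. A hypothetical client sketch, assuming `reqwest` (with its `json` feature) and `tokio`, neither of which this PR prescribes; the body shape follows `InvalidationRequest`'s serde attributes (`tag = "kind"`, lowercase variant names):

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical payload: invalidate a whole subgraph, then one type.
    let body = json!([
        { "kind": "subgraph", "subgraph": "orga" },
        { "kind": "type", "subgraph": "orga", "type": "Organization" }
    ]);
    let res = reqwest::Client::new()
        .post("http://127.0.0.1:4000/invalidation") // listen + path from the config
        .header("authorization", "secret-shared-key") // must match a shared_key
        .json(&body)
        .send()
        .await?;
    // Expected on success: 202 Accepted with a body like {"count": 42}.
    println!("{}: {}", res.status(), res.text().await?);
    Ok(())
}
```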
.unwrap(); + return; + } + if origin != InvalidationOrigin::Endpoint { + response_tx + .send(Err(InvalidationError::Custom(format!( + "it's not the right invalidation origin : {origin:?}" + )))) + .unwrap(); + return; + } + response_tx.send(Ok(0)).unwrap(); + } + assert!(called); + }); + + let invalidation = Invalidation { + handle: handle.clone(), + }; + let config = Arc::new(SubgraphConfiguration { + all: Subgraph { + ttl: None, + enabled: true, + redis: None, + private_id: None, + invalidation: Some(SubgraphInvalidationConfig { + enabled: true, + shared_key: String::from("test"), + }), + }, + subgraphs: [( + String::from("test"), + Subgraph { + ttl: None, + redis: None, + enabled: true, + private_id: None, + invalidation: Some(SubgraphInvalidationConfig { + enabled: true, + shared_key: String::from("test_test"), + }), + }, + )] + .into_iter() + .collect(), + }); + let service = InvalidationService::new(config, invalidation); + let req = router::Request::fake_builder() + .method(http::Method::POST) + .header(AUTHORIZATION, "test_test") + .body( + serde_json::to_vec(&[ + InvalidationRequest::Subgraph { + subgraph: String::from("test"), + }, + InvalidationRequest::Type { + subgraph: String::from("test"), + r#type: String::from("Test"), + }, + ]) + .unwrap(), + ) + .build() + .unwrap(); + let res = service.oneshot(req).await.unwrap(); + assert_eq!(res.response.status(), StatusCode::ACCEPTED); + let h = handle.clone(); + + tokio::task::spawn(async move { + let mut handle = h.into_stream(); + let mut called = false; + while let Some((requests, origin, response_tx)) = handle.next().await { + called = true; + if requests + != [ + InvalidationRequest::Subgraph { + subgraph: String::from("test"), + }, + InvalidationRequest::Type { + subgraph: String::from("test"), + r#type: String::from("Test"), + }, + ] + { + response_tx + .send(Err(InvalidationError::Custom(format!( + "it's not the right invalidation requests : {requests:?}" + )))) + .unwrap(); + return; + } + if origin != InvalidationOrigin::Endpoint { + response_tx + .send(Err(InvalidationError::Custom(format!( + "it's not the right invalidation origin : {origin:?}" + )))) + .unwrap(); + return; + } + response_tx.send(Ok(0)).unwrap(); + } + assert!(called); + }); + } + + #[tokio::test] + async fn test_invalidation_service_bad_shared_key_subgraph() { + #[allow(clippy::type_complexity)] + let mut notify: Notify< + InvalidationTopic, + ( + Vec, + InvalidationOrigin, + Sender>, + ), + > = Notify::new(None, None, None); + let (handle, _b) = notify + .create_or_subscribe(InvalidationTopic, false) + .await + .unwrap(); + let invalidation = Invalidation { handle }; + let config = Arc::new(SubgraphConfiguration { + all: Subgraph { + ttl: None, + enabled: true, + redis: None, + private_id: None, + invalidation: Some(SubgraphInvalidationConfig { + enabled: true, + shared_key: String::from("test"), + }), + }, + subgraphs: [( + String::from("test"), + Subgraph { + ttl: None, + enabled: true, + redis: None, + private_id: None, + invalidation: Some(SubgraphInvalidationConfig { + enabled: true, + shared_key: String::from("test_test"), + }), + }, + )] + .into_iter() + .collect(), + }); + // Trying to invalidation with shared_key on subgraph test for a subgraph foo + let service = InvalidationService::new(config, invalidation); + let req = router::Request::fake_builder() + .method(http::Method::POST) + .header(AUTHORIZATION, "test_test") + .body( + serde_json::to_vec(&[InvalidationRequest::Subgraph { + subgraph: String::from("foo"), + }]) + .unwrap(), + ) + 
.build() + .unwrap(); + let res = service.oneshot(req).await.unwrap(); + assert_eq!(res.response.status(), StatusCode::UNAUTHORIZED); + } + + #[tokio::test] + async fn test_invalidation_service() { + #[allow(clippy::type_complexity)] + let mut notify: Notify< + InvalidationTopic, + ( + Vec, + InvalidationOrigin, + Sender>, + ), + > = Notify::new(None, None, None); + let (handle, _b) = notify + .create_or_subscribe(InvalidationTopic, false) + .await + .unwrap(); + let h = handle.clone(); + + tokio::task::spawn(async move { + let mut handle = h.into_stream(); + let mut called = false; + while let Some((requests, origin, response_tx)) = handle.next().await { + called = true; + if requests + != [ + InvalidationRequest::Subgraph { + subgraph: String::from("test"), + }, + InvalidationRequest::Type { + subgraph: String::from("test"), + r#type: String::from("Test"), + }, + ] + { + response_tx + .send(Err(InvalidationError::Custom(format!( + "it's not the right invalidation requests : {requests:?}" + )))) + .unwrap(); + return; + } + if origin != InvalidationOrigin::Endpoint { + response_tx + .send(Err(InvalidationError::Custom(format!( + "it's not the right invalidation origin : {origin:?}" + )))) + .unwrap(); + return; + } + response_tx.send(Ok(2)).unwrap(); + } + assert!(called); + }); + + let invalidation = Invalidation { handle }; + let config = Arc::new(SubgraphConfiguration { + all: Subgraph { + ttl: None, + enabled: true, + private_id: None, + redis: None, + invalidation: Some(SubgraphInvalidationConfig { + enabled: true, + shared_key: String::from("test"), + }), + }, + subgraphs: HashMap::new(), + }); + let service = InvalidationService::new(config, invalidation); + let req = router::Request::fake_builder() + .method(http::Method::POST) + .header(AUTHORIZATION, "test") + .body( + serde_json::to_vec(&[ + InvalidationRequest::Subgraph { + subgraph: String::from("test"), + }, + InvalidationRequest::Type { + subgraph: String::from("test"), + r#type: String::from("Test"), + }, + ]) + .unwrap(), + ) + .build() + .unwrap(); + let res = service.oneshot(req).await.unwrap(); + assert_eq!(res.response.status(), StatusCode::ACCEPTED); + assert_eq!( + serde_json::from_slice::( + &hyper::body::to_bytes(res.response.into_body()) + .await + .unwrap() + ) + .unwrap(), + serde_json::json!({"count": 2}) + ); + } +} diff --git a/apollo-router/src/plugins/cache/mod.rs b/apollo-router/src/plugins/cache/mod.rs index dded2f9586..c45265a3d3 100644 --- a/apollo-router/src/plugins/cache/mod.rs +++ b/apollo-router/src/plugins/cache/mod.rs @@ -1,6 +1,7 @@ pub(crate) mod cache_control; pub(crate) mod entity; pub(crate) mod invalidation; +pub(crate) mod invalidation_endpoint; pub(crate) mod metrics; #[cfg(test)] pub(crate) mod tests; diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__missing_entities-2.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__missing_entities-2.snap new file mode 100644 index 0000000000..9798af179e --- /dev/null +++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__missing_entities-2.snap @@ -0,0 +1,34 @@ +--- +source: apollo-router/src/plugins/cache/tests.rs +expression: response +--- +{ + "data": { + "currentUser": { + "allOrganizations": [ + { + "id": "1", + "name": "Organization 1" + }, + { + "id": "2", + "name": "Organization 2" + }, + { + "id": "3", + "name": null + } + ] + } + }, + "errors": [ + { + "message": "Organization not found", + "path": [ + "currentUser", + 
"allOrganizations", + 2 + ] + } + ] +} diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__missing_entities.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__missing_entities.snap new file mode 100644 index 0000000000..6ea1f9fedd --- /dev/null +++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__missing_entities.snap @@ -0,0 +1,20 @@ +--- +source: apollo-router/src/plugins/cache/tests.rs +expression: response +--- +{ + "data": { + "currentUser": { + "allOrganizations": [ + { + "id": "1", + "name": "Organization 1" + }, + { + "id": "2", + "name": "Organization 2" + } + ] + } + } +} diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data-2.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data-2.snap new file mode 100644 index 0000000000..6e58a2d437 --- /dev/null +++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data-2.snap @@ -0,0 +1,39 @@ +--- +source: apollo-router/src/plugins/cache/tests.rs +expression: response +--- +{ + "data": { + "currentUser": { + "allOrganizations": [ + { + "id": "1", + "name": "Organization 1" + }, + { + "id": "2", + "name": null + }, + { + "id": "3", + "name": "Organization 3" + } + ] + } + }, + "errors": [ + { + "message": "HTTP fetch failed from 'orga': orga not found", + "path": [ + "currentUser", + "allOrganizations", + 1 + ], + "extensions": { + "code": "SUBREQUEST_HTTP_ERROR", + "service": "orga", + "reason": "orga not found" + } + } + ] +} diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data.snap new file mode 100644 index 0000000000..b9832aaeaa --- /dev/null +++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data.snap @@ -0,0 +1,20 @@ +--- +source: apollo-router/src/plugins/cache/tests.rs +expression: response +--- +{ + "data": { + "currentUser": { + "allOrganizations": [ + { + "id": "1", + "name": "Organization 1" + }, + { + "id": "3", + "name": "Organization 3" + } + ] + } + } +} diff --git a/apollo-router/src/plugins/cache/tests.rs b/apollo-router/src/plugins/cache/tests.rs index 3d0bb21169..36628acf5e 100644 --- a/apollo-router/src/plugins/cache/tests.rs +++ b/apollo-router/src/plugins/cache/tests.rs @@ -15,7 +15,9 @@ use tower::ServiceExt; use super::entity::EntityCache; use crate::cache::redis::RedisCacheStorage; use crate::plugin::test::MockSubgraph; +use crate::plugin::test::MockSubgraphService; use crate::plugins::cache::entity::Subgraph; +use crate::services::subgraph; use crate::services::supergraph; use crate::Context; use crate::MockedSubgraphs; @@ -55,6 +57,7 @@ const SCHEMA: &str = r#"schema id: ID! 
name: String activeOrganization: Organization + allOrganizations: [Organization] } type Organization @join__owner(graph: ORGA) @@ -93,6 +96,19 @@ impl Mocks for MockStore { } } } + "MGET" => { + let mut result: Vec = Vec::new(); + + let mut args_it = command.args.iter(); + while let Some(RedisValue::Bytes(key)) = args_it.next() { + if let Some(bytes) = self.map.lock().get(key) { + result.push(RedisValue::Bytes(bytes.clone())); + } else { + result.push(RedisValue::Null); + } + } + return Ok(RedisValue::Array(result)); + } "SET" => { if let (Some(RedisValue::Bytes(key)), Some(RedisValue::Bytes(value))) = (command.args.first(), command.args.get(1)) @@ -169,7 +185,7 @@ impl Mocks for MockStore { } }*/ _ => { - panic!() + panic!("unrecoginzed command: {command:?}") } } Err(RedisError::new(RedisErrorKind::NotFound, "mock not found")) @@ -213,7 +229,31 @@ async fn insert() { let redis_cache = RedisCacheStorage::from_mocks(Arc::new(MockStore::new())) .await .unwrap(); - let entity_cache = EntityCache::with_mocks(redis_cache.clone(), HashMap::new()) + let map = [ + ( + "user".to_string(), + Subgraph { + redis: None, + private_id: Some("sub".to_string()), + enabled: true, + ttl: None, + ..Default::default() + }, + ), + ( + "orga".to_string(), + Subgraph { + redis: None, + private_id: Some("sub".to_string()), + enabled: true, + ttl: None, + ..Default::default() + }, + ), + ] + .into_iter() + .collect(); + let entity_cache = EntityCache::with_mocks(redis_cache.clone(), map) .await .unwrap(); @@ -398,17 +438,21 @@ async fn private() { ( "user".to_string(), Subgraph { + redis: None, private_id: Some("sub".to_string()), - enabled: Some(true), + enabled: true, ttl: None, + ..Default::default() }, ), ( "orga".to_string(), Subgraph { + redis: None, private_id: Some("sub".to_string()), - enabled: Some(true), + enabled: true, ttl: None, + ..Default::default() }, ), ] @@ -497,6 +541,328 @@ async fn private() { insta::assert_json_snapshot!(response); } +#[tokio::test] +async fn no_data() { + let query = "query { currentUser { allOrganizations { id name } } }"; + + let subgraphs = MockedSubgraphs([ + ("user", MockSubgraph::builder().with_json( + serde_json::json!{{"query":"{currentUser{allOrganizations{__typename id}}}"}}, + serde_json::json!{{"data": {"currentUser": { "allOrganizations": [ + { + "__typename": "Organization", + "id": "1" + }, + { + "__typename": "Organization", + "id": "3" + } + ] }}}} + ).with_header(CACHE_CONTROL, HeaderValue::from_static("no-store")).build()), + ("orga", MockSubgraph::builder().with_json( + serde_json::json!{{ + "query": "query($representations:[_Any!]!){_entities(representations:$representations){...on Organization{name}}}", + "variables": { + "representations": [ + { + "id": "1", + "__typename": "Organization", + }, + { + "id": "3", + "__typename": "Organization", + } + ] + }}}, + serde_json::json!{{ + "data": { + "_entities": [{ + "name": "Organization 1", + }, + { + "name": "Organization 3" + }] + } + }} + ).with_header(CACHE_CONTROL, HeaderValue::from_static("public, max-age=3600")).build()) + ].into_iter().collect()); + + let redis_cache = RedisCacheStorage::from_mocks(Arc::new(MockStore::new())) + .await + .unwrap(); + let map = [ + ( + "user".to_string(), + Subgraph { + redis: None, + private_id: Some("sub".to_string()), + enabled: true, + ttl: None, + ..Default::default() + }, + ), + ( + "orga".to_string(), + Subgraph { + redis: None, + private_id: Some("sub".to_string()), + enabled: true, + ttl: None, + ..Default::default() + }, + ), + ] + .into_iter() + .collect(); 
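The `MGET` branch added to `MockStore` above pushes `RedisValue::Null` for each miss rather than skipping it, preserving Redis's positional contract so cache hits stay aligned with the representations that requested them. A standalone sketch of that contract:

```rust
use std::collections::HashMap;

// Positional multi-get: one slot per requested key, None for a miss,
// as Redis MGET does (and as the MockStore branch above emulates).
fn mget<'a>(store: &'a HashMap<String, String>, keys: &[&str]) -> Vec<Option<&'a String>> {
    keys.iter().map(|k| store.get(*k)).collect()
}

fn main() {
    let store = HashMap::from([("a".to_string(), "1".to_string())]);
    assert_eq!(mget(&store, &["a", "missing"]), vec![Some(&"1".to_string()), None]);
}
```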
+ let entity_cache = EntityCache::with_mocks(redis_cache.clone(), map) + .await + .unwrap(); + + let service = TestHarness::builder() + .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } })) + .unwrap() + .schema(SCHEMA) + .extra_plugin(entity_cache) + .extra_plugin(subgraphs) + .build_supergraph() + .await + .unwrap(); + + let request = supergraph::Request::fake_builder() + .query(query) + .context(Context::new()) + .build() + .unwrap(); + let mut response = service.oneshot(request).await.unwrap(); + + let response = response.next_response().await.unwrap(); + insta::assert_json_snapshot!(response); + + let entity_cache = EntityCache::with_mocks(redis_cache.clone(), HashMap::new()) + .await + .unwrap(); + + let subgraphs = MockedSubgraphs( + [( + "user", + MockSubgraph::builder() + .with_json( + serde_json::json! {{"query":"{currentUser{allOrganizations{__typename id}}}"}}, + serde_json::json! {{"data": {"currentUser": { "allOrganizations": [ + { + "__typename": "Organization", + "id": "1" + }, + { + "__typename": "Organization", + "id": "2" + }, + { + "__typename": "Organization", + "id": "3" + } + ] }}}}, + ) + .with_header(CACHE_CONTROL, HeaderValue::from_static("no-store")) + .build(), + )] + .into_iter() + .collect(), + ); + + let service = TestHarness::builder() + .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } })) + .unwrap() + .schema(SCHEMA) + .extra_plugin(entity_cache) + .subgraph_hook(|name, service| { + if name == "orga" { + let mut subgraph = MockSubgraphService::new(); + subgraph + .expect_call() + .times(1) + .returning(move |_req: subgraph::Request| Err("orga not found".into())); + subgraph.boxed() + } else { + service + } + }) + .extra_plugin(subgraphs) + .build_supergraph() + .await + .unwrap(); + + let request = supergraph::Request::fake_builder() + .query(query) + .context(Context::new()) + .build() + .unwrap(); + let mut response = service.oneshot(request).await.unwrap(); + let response = response.next_response().await.unwrap(); + + insta::assert_json_snapshot!(response); +} + +#[tokio::test] +async fn missing_entities() { + let query = "query { currentUser { allOrganizations { id name } } }"; + + let subgraphs = MockedSubgraphs([ + ("user", MockSubgraph::builder().with_json( + serde_json::json!{{"query":"{currentUser{allOrganizations{__typename id}}}"}}, + serde_json::json!{{"data": {"currentUser": { "allOrganizations": [ + { + "__typename": "Organization", + "id": "1" + }, + { + "__typename": "Organization", + "id": "2" + } + ] }}}} + ).with_header(CACHE_CONTROL, HeaderValue::from_static("no-store")).build()), + ("orga", MockSubgraph::builder().with_json( + serde_json::json!{{ + "query": "query($representations:[_Any!]!){_entities(representations:$representations){...on Organization{name}}}", + "variables": { + "representations": [ + { + "id": "1", + "__typename": "Organization", + }, + { + "id": "2", + "__typename": "Organization", + } + ] + }}}, + serde_json::json!{{ + "data": { + "_entities": [ + { + "name": "Organization 1", + }, + { + "name": "Organization 2" + } + ] + } + }} + ).with_header(CACHE_CONTROL, HeaderValue::from_static("public, max-age=3600")).build()) + ].into_iter().collect()); + + let redis_cache = RedisCacheStorage::from_mocks(Arc::new(MockStore::new())) + .await + .unwrap(); + let map = [ + ( + "user".to_string(), + Subgraph { + redis: None, + private_id: Some("sub".to_string()), + enabled: true, + ttl: None, + ..Default::default() + }, + ), + ( + "orga".to_string(), + Subgraph { 
+ redis: None, + private_id: Some("sub".to_string()), + enabled: true, + ttl: None, + ..Default::default() + }, + ), + ] + .into_iter() + .collect(); + let entity_cache = EntityCache::with_mocks(redis_cache.clone(), map) + .await + .unwrap(); + + let service = TestHarness::builder() + .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } })) + .unwrap() + .schema(SCHEMA) + .extra_plugin(entity_cache) + .extra_plugin(subgraphs) + .build_supergraph() + .await + .unwrap(); + + let request = supergraph::Request::fake_builder() + .query(query) + .context(Context::new()) + .build() + .unwrap(); + let mut response = service.oneshot(request).await.unwrap(); + let response = response.next_response().await.unwrap(); + insta::assert_json_snapshot!(response); + + let entity_cache = EntityCache::with_mocks(redis_cache.clone(), HashMap::new()) + .await + .unwrap(); + + let subgraphs = MockedSubgraphs([ + ("user", MockSubgraph::builder().with_json( + serde_json::json!{{"query":"{currentUser{allOrganizations{__typename id}}}"}}, + serde_json::json!{{"data": {"currentUser": { "allOrganizations": [ + { + "__typename": "Organization", + "id": "1" + }, + { + "__typename": "Organization", + "id": "2" + }, + { + "__typename": "Organization", + "id": "3" + } + ] }}}} + ).with_header(CACHE_CONTROL, HeaderValue::from_static("no-store")).build()), + ("orga", MockSubgraph::builder().with_json( + serde_json::json!{{ + "query": "query($representations:[_Any!]!){_entities(representations:$representations){...on Organization{name}}}", + "variables": { + "representations": [ + { + "id": "3", + "__typename": "Organization", + } + ] + }}}, + serde_json::json!{{ + "data": null, + "errors": [{ + "message": "Organization not found", + }] + }} + ).with_header(CACHE_CONTROL, HeaderValue::from_static("public, max-age=3600")).build()) + ].into_iter().collect()); + + let service = TestHarness::builder() + .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } })) + .unwrap() + .schema(SCHEMA) + .extra_plugin(entity_cache) + .extra_plugin(subgraphs) + .build_supergraph() + .await + .unwrap(); + + let request = supergraph::Request::fake_builder() + .query(query) + .context(Context::new()) + .build() + .unwrap(); + let mut response = service.oneshot(request).await.unwrap(); + let response = response.next_response().await.unwrap(); + insta::assert_json_snapshot!(response); +} + /*FIXME: reactivate test if we manage to make fred return the response to SCAN in mocks #[tokio::test(flavor = "multi_thread")] async fn invalidate() { diff --git a/apollo-router/src/plugins/csrf.rs b/apollo-router/src/plugins/csrf.rs index 6e0f08e118..5c76c57116 100644 --- a/apollo-router/src/plugins/csrf.rs +++ b/apollo-router/src/plugins/csrf.rs @@ -1,5 +1,6 @@ //! Cross Site Request Forgery (CSRF) plugin. use std::ops::ControlFlow; +use std::sync::Arc; use http::header; use http::HeaderMap; @@ -35,14 +36,14 @@ pub(crate) struct CSRFConfig { /// - did not set any `allow_headers` list (so it defaults to `mirror_request`) /// - added your required headers to the allow_headers list, as shown in the /// `examples/cors-and-csrf/custom-headers.router.yaml` files. 
-    required_headers: Vec<String>,
+    required_headers: Arc<Vec<String>>,
 }
 
-fn apollo_custom_preflight_headers() -> Vec<String> {
-    vec![
+fn apollo_custom_preflight_headers() -> Arc<Vec<String>> {
+    Arc::new(vec![
         "x-apollo-operation-name".to_string(),
         "apollo-require-preflight".to_string(),
-    ]
+    ])
 }
 
 impl Default for CSRFConfig {
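
The directives module in the next file resolves `@cost` and `@listSize` under whatever names a schema's `@link` imports give them. Below is a minimal, self-contained sketch of that name resolution; it is illustrative only (the real `get_apollo_directive_names` keys by `apollo_compiler::Name` and parses `@link` applications with apollo-federation's `Link` type, and `effective_name` here is a hypothetical helper, not the plugin's API):

```rust
use std::collections::HashMap;

// Try the imported (possibly renamed) name first, then fall back to the
// fully qualified `federation__`-prefixed default, mirroring the lookup
// order in `CostDirective::from_directives`.
fn effective_name(
    name_map: &HashMap<&'static str, &'static str>,
    canonical: &str,
    default: &'static str,
) -> &'static str {
    name_map.get(canonical).copied().unwrap_or(default)
}

fn main() {
    // Built from: @link(url: "https://specs.apollo.dev/cost/v0.1",
    //                   import: [{ name: "@cost", as: "@renamedCost" }])
    let mut name_map = HashMap::new();
    name_map.insert("cost", "renamedCost");

    assert_eq!(effective_name(&name_map, "cost", "federation__cost"), "renamedCost");
    // `@listSize` was not imported, so only the prefixed default can match.
    assert_eq!(
        effective_name(&name_map, "listSize", "federation__listSize"),
        "federation__listSize"
    );
}
```
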
diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs b/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs
index b3f3afe372..c4dcc36b00 100644
--- a/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs
+++ b/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs
@@ -1,13 +1,112 @@
+use ahash::HashMap;
+use ahash::HashMapExt;
+use ahash::HashSet;
+use apollo_compiler::ast::DirectiveList;
+use apollo_compiler::ast::FieldDefinition;
+use apollo_compiler::ast::InputValueDefinition;
 use apollo_compiler::ast::NamedType;
 use apollo_compiler::executable::Field;
 use apollo_compiler::executable::SelectionSet;
+use apollo_compiler::name;
 use apollo_compiler::parser::Parser;
+use apollo_compiler::schema::ExtendedType;
 use apollo_compiler::validation::Valid;
+use apollo_compiler::Name;
 use apollo_compiler::Schema;
+use apollo_federation::link::spec::APOLLO_SPEC_DOMAIN;
+use apollo_federation::link::Link;
 use tower::BoxError;
 
 use super::DemandControlError;
 
+const COST_DIRECTIVE_NAME: Name = name!("cost");
+const COST_DIRECTIVE_DEFAULT_NAME: Name = name!("federation__cost");
+const COST_DIRECTIVE_WEIGHT_ARGUMENT_NAME: Name = name!("weight");
+
+const LIST_SIZE_DIRECTIVE_NAME: Name = name!("listSize");
+const LIST_SIZE_DIRECTIVE_DEFAULT_NAME: Name = name!("federation__listSize");
+const LIST_SIZE_DIRECTIVE_ASSUMED_SIZE_ARGUMENT_NAME: Name = name!("assumedSize");
+const LIST_SIZE_DIRECTIVE_SLICING_ARGUMENTS_ARGUMENT_NAME: Name = name!("slicingArguments");
+const LIST_SIZE_DIRECTIVE_SIZED_FIELDS_ARGUMENT_NAME: Name = name!("sizedFields");
+const LIST_SIZE_DIRECTIVE_REQUIRE_ONE_SLICING_ARGUMENT_ARGUMENT_NAME: Name =
+    name!("requireOneSlicingArgument");
+
+pub(in crate::plugins::demand_control) fn get_apollo_directive_names(
+    schema: &Schema,
+) -> HashMap<Name, Name> {
+    let mut hm: HashMap<Name, Name> = HashMap::new();
+    for directive in &schema.schema_definition.directives {
+        if directive.name.as_str() == "link" {
+            if let Ok(link) = Link::from_directive_application(directive) {
+                if link.url.identity.domain != APOLLO_SPEC_DOMAIN {
+                    continue;
+                }
+                for import in link.imports {
+                    hm.insert(import.element.clone(), import.imported_name().clone());
+                }
+            }
+        }
+    }
+    hm
+}
+
+pub(in crate::plugins::demand_control) struct CostDirective {
+    weight: i32,
+}
+
+impl CostDirective {
+    pub(in crate::plugins::demand_control) fn weight(&self) -> f64 {
+        self.weight as f64
+    }
+
+    pub(in crate::plugins::demand_control) fn from_argument(
+        directive_name_map: &HashMap<Name, Name>,
+        argument: &InputValueDefinition,
+    ) -> Option<Self> {
+        Self::from_directives(directive_name_map, &argument.directives)
+    }
+
+    pub(in crate::plugins::demand_control) fn from_field(
+        directive_name_map: &HashMap<Name, Name>,
+        field: &FieldDefinition,
+    ) -> Option<Self> {
+        Self::from_directives(directive_name_map, &field.directives)
+    }
+
+    pub(in crate::plugins::demand_control) fn from_type(
+        directive_name_map: &HashMap<Name, Name>,
+        ty: &ExtendedType,
+    ) -> Option<Self> {
+        Self::from_schema_directives(directive_name_map, ty.directives())
+    }
+
+    fn from_directives(
+        directive_name_map: &HashMap<Name, Name>,
+        directives: &DirectiveList,
+    ) -> Option<Self> {
+        directive_name_map
+            .get(&COST_DIRECTIVE_NAME)
+            .and_then(|name| directives.get(name))
+            .or(directives.get(&COST_DIRECTIVE_DEFAULT_NAME))
+            .and_then(|cost| cost.argument_by_name(&COST_DIRECTIVE_WEIGHT_ARGUMENT_NAME))
+            .and_then(|weight| weight.to_i32())
+            .map(|weight| Self { weight })
+    }
+
+    pub(in crate::plugins::demand_control) fn from_schema_directives(
+        directive_name_map: &HashMap<Name, Name>,
+        directives: &apollo_compiler::schema::DirectiveList,
+    ) -> Option<Self> {
+        directive_name_map
+            .get(&COST_DIRECTIVE_NAME)
+            .and_then(|name| directives.get(name))
+            .or(directives.get(&COST_DIRECTIVE_DEFAULT_NAME))
+            .and_then(|cost| cost.argument_by_name(&COST_DIRECTIVE_WEIGHT_ARGUMENT_NAME))
+            .and_then(|weight| weight.to_i32())
+            .map(|weight| Self { weight })
+    }
+}
+
 pub(in crate::plugins::demand_control) struct IncludeDirective {
     pub(in crate::plugins::demand_control) is_included: bool,
 }
@@ -27,31 +126,142 @@ impl IncludeDirective {
     }
 }
 
+pub(in crate::plugins::demand_control) struct ListSizeDirective<'schema> {
+    pub(in crate::plugins::demand_control) expected_size: Option<i32>,
+    pub(in crate::plugins::demand_control) sized_fields: Option<HashSet<&'schema str>>,
+}
+
+impl<'schema> ListSizeDirective<'schema> {
+    pub(in crate::plugins::demand_control) fn size_of(&self, field: &Field) -> Option<i32> {
+        if self
+            .sized_fields
+            .as_ref()
+            .is_some_and(|sf| sf.contains(field.name.as_str()))
+        {
+            self.expected_size
+        } else {
+            None
+        }
+    }
+}
+
+/// The `@listSize` directive from a field definition, which can be converted to
+/// `ListSizeDirective` with a concrete field from a request.
+pub(in crate::plugins::demand_control) struct DefinitionListSizeDirective {
+    assumed_size: Option<i32>,
+    slicing_argument_names: Option<HashSet<String>>,
+    sized_fields: Option<HashSet<String>>,
+    require_one_slicing_argument: bool,
+}
+
+impl DefinitionListSizeDirective {
+    pub(in crate::plugins::demand_control) fn from_field_definition(
+        directive_name_map: &HashMap<Name, Name>,
+        definition: &FieldDefinition,
+    ) -> Result<Option<Self>, DemandControlError> {
+        let directive = directive_name_map
+            .get(&LIST_SIZE_DIRECTIVE_NAME)
+            .and_then(|name| definition.directives.get(name))
+            .or(definition.directives.get(&LIST_SIZE_DIRECTIVE_DEFAULT_NAME));
+        if let Some(directive) = directive {
+            let assumed_size = directive
+                .argument_by_name(&LIST_SIZE_DIRECTIVE_ASSUMED_SIZE_ARGUMENT_NAME)
+                .and_then(|arg| arg.to_i32());
+            let slicing_argument_names = directive
+                .argument_by_name(&LIST_SIZE_DIRECTIVE_SLICING_ARGUMENTS_ARGUMENT_NAME)
+                .and_then(|arg| arg.as_list())
+                .map(|arg_list| {
+                    arg_list
+                        .iter()
+                        .flat_map(|arg| arg.as_str())
+                        .map(String::from)
+                        .collect()
+                });
+            let sized_fields = directive
+                .argument_by_name(&LIST_SIZE_DIRECTIVE_SIZED_FIELDS_ARGUMENT_NAME)
+                .and_then(|arg| arg.as_list())
+                .map(|arg_list| {
+                    arg_list
+                        .iter()
+                        .flat_map(|arg| arg.as_str())
+                        .map(String::from)
+                        .collect()
+                });
+            let require_one_slicing_argument = directive
+                .argument_by_name(&LIST_SIZE_DIRECTIVE_REQUIRE_ONE_SLICING_ARGUMENT_ARGUMENT_NAME)
+                .and_then(|arg| arg.to_bool())
+                .unwrap_or(true);
+
+            Ok(Some(Self {
+                assumed_size,
+                slicing_argument_names,
+                sized_fields,
+                require_one_slicing_argument,
+            }))
+        } else {
+            Ok(None)
+        }
+    }
+
+    pub(in crate::plugins::demand_control) fn with_field(
+        &self,
+        field: &Field,
+    ) -> Result<ListSizeDirective, DemandControlError> {
+        let mut slicing_arguments: HashMap<&str, i32> = HashMap::new();
+        if let Some(slicing_argument_names) = self.slicing_argument_names.as_ref() {
+            // First, collect the default values for each argument
+            for argument in &field.definition.arguments {
+                if slicing_argument_names.contains(argument.name.as_str()) {
+                    if let Some(numeric_value) =
+                        argument.default_value.as_ref().and_then(|v| v.to_i32())
+                    {
+                        slicing_arguments.insert(&argument.name, numeric_value);
+                    }
+                }
+            }
+            // Then, overwrite any default values with the actual values passed in the query
+            for argument in &field.arguments {
+                if slicing_argument_names.contains(argument.name.as_str()) {
+                    if let Some(numeric_value) = argument.value.to_i32() {
+                        slicing_arguments.insert(&argument.name, numeric_value);
+                    }
+                }
+            }
+
+            if self.require_one_slicing_argument && slicing_arguments.len() != 1 {
+                return Err(DemandControlError::QueryParseFailure(format!(
+                    "Exactly one slicing argument is required, but found {}",
+                    slicing_arguments.len()
+                )));
+            }
+        }
+
+        let expected_size = slicing_arguments
+            .values()
+            .max()
+            .cloned()
+            .or(self.assumed_size);
+
+        Ok(ListSizeDirective {
+            expected_size,
+            sized_fields: self
+                .sized_fields
+                .as_ref()
+                .map(|set| set.iter().map(|s| s.as_str()).collect()),
+        })
+    }
+}
+
 pub(in crate::plugins::demand_control) struct RequiresDirective {
     pub(in crate::plugins::demand_control) fields: SelectionSet,
 }
 
 impl RequiresDirective {
-    pub(in crate::plugins::demand_control) fn from_field(
-        field: &Field,
+    pub(in crate::plugins::demand_control) fn from_field_definition(
+        definition: &FieldDefinition,
         parent_type_name: &NamedType,
         schema: &Valid<Schema>,
     ) -> Result<Option<Self>, DemandControlError> {
-        // When a user marks a subgraph schema field with `@requires`, the composition process
-        // replaces `@requires(fields: "<selection>")` with `@join__field(requires: "<selection>")`.
-        //
-        // Note we cannot use `field.definition` in this case: The operation executes against the
-        // API schema, so its definition pointers point into the API schema. To find the
-        // `@join__field()` directive, we must instead look up the field on the type with the same
-        // name in the supergraph.
-        let definition = schema
-            .type_field(parent_type_name, &field.name)
-            .map_err(|_err| {
-                DemandControlError::QueryParseFailure(format!(
-                    "Could not find the API schema type {}.{} in the supergraph. 
This looks like a bug", - parent_type_name, &field.name - )) - })?; let requires_arg = definition .directives .get("join__field") diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query.graphql new file mode 100644 index 0000000000..c8494f9697 --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query.graphql @@ -0,0 +1,5 @@ +query BasicInputObjectQuery { + getScalarByObject( + args: { inner: { id: 1 }, listOfInner: [{ id: 2 }, { id: 3 }] } + ) +} diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query_2.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query_2.graphql new file mode 100644 index 0000000000..26a1a06623 --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_query_2.graphql @@ -0,0 +1,8 @@ +query BasicInputObjectQuery2 { + getObjectsByObject( + args: { inner: { id: 1 }, listOfInner: [{ id: 2 }, { id: 3 }] } + ) { + field1 + field2 + } +} diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_response.json b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_response.json new file mode 100644 index 0000000000..092377bf7f --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_input_object_response.json @@ -0,0 +1,9 @@ +{ + "data": { + "getObjectsByObject": [ + { "field1": 1, "field2": "one" }, + { "field1": 2, "field2": "two" }, + { "field1": 3, "field2": "three" } + ] + } +} \ No newline at end of file diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_schema.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_schema.graphql index 716b0b3a3d..17f3046414 100644 --- a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_schema.graphql +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/basic_schema.graphql @@ -1,29 +1,41 @@ type Query { - getScalar(id: ID): String - anotherScalar: Int - object1: FirstObjectType - interfaceInstance1: MyInterface - someUnion: UnionOfObjectTypes - someObjects: [FirstObjectType] - intList: [Int] + getScalar(id: ID): String + getScalarByObject(args: OuterInput): String + anotherScalar: Int + object1: FirstObjectType + interfaceInstance1: MyInterface + someUnion: UnionOfObjectTypes + someObjects: [FirstObjectType] + intList: [Int] + getObjectsByObject(args: OuterInput): [SecondObjectType] } type Mutation { - doSomething: Int + doSomething: Int } type FirstObjectType { - field1: Int - innerList: [SecondObjectType] + field1: Int + innerList: [SecondObjectType] } interface MyInterface { - field2: String + field2: String } type SecondObjectType implements MyInterface { - field1: Int - field2: String + field1: Int + field2: String } -union UnionOfObjectTypes = FirstObjectType | SecondObjectType \ No newline at end of file +union UnionOfObjectTypes = FirstObjectType | SecondObjectType + +input InnerInput { + id: ID +} + +input OuterInput { + inner: InnerInput + inner2: InnerInput + listOfInner: [InnerInput!] 
+} diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_query.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_query.graphql new file mode 100644 index 0000000000..751c8a005e --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_query.graphql @@ -0,0 +1,20 @@ +fragment Items on SizedField { + items { + id + } +} + +{ + fieldWithCost + argWithCost(arg: 3) + enumWithCost + inputWithCost(someInput: { somethingWithCost: 10 }) + scalarWithCost + objectWithCost { + id + } + fieldWithListSize + fieldWithDynamicListSize(first: 5) { + ...Items + } +} diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_query_with_default_slicing_argument.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_query_with_default_slicing_argument.graphql new file mode 100644 index 0000000000..fb50e08fef --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_query_with_default_slicing_argument.graphql @@ -0,0 +1,20 @@ +fragment Items on SizedField { + items { + id + } +} + +{ + fieldWithCost + argWithCost(arg: 3) + enumWithCost + inputWithCost(someInput: { somethingWithCost: 10 }) + scalarWithCost + objectWithCost { + id + } + fieldWithListSize + fieldWithDynamicListSize { + ...Items + } +} diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_response.json b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_response.json new file mode 100644 index 0000000000..664a2684e6 --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_response.json @@ -0,0 +1,24 @@ +{ + "data": { + "fieldWithCost": 1, + "argWithCost": 2, + "enumWithCost": "A", + "inputWithCost": 3, + "scalarWithCost": 4, + "objectWithCost": { + "id": 5 + }, + "fieldWithListSize": [ + "first", + "second", + "third" + ], + "fieldWithDynamicListSize": { + "items": [ + { "id": 6 }, + { "id": 7 }, + { "id": 8 } + ] + } + } +} \ No newline at end of file diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema.graphql new file mode 100644 index 0000000000..d966512be1 --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema.graphql @@ -0,0 +1,154 @@ +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link( + url: "https://specs.apollo.dev/cost/v0.1" + import: ["@cost", "@listSize"] + ) { + query: Query +} + +directive @cost( + weight: Int! +) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @cost__listSize( + assumedSize: Int + slicingArguments: [String!] + sizedFields: [String!] + requireOneSlicingArgument: Boolean = true +) on FIELD_DEFINITION + +directive @join__directive( + graphs: [join__Graph!] + name: String! + args: join__DirectiveArguments +) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + +directive @join__enumValue(graph: join__Graph!) 
repeatable on ENUM_VALUE + +directive @join__field( + graph: join__Graph + requires: join__FieldSet + provides: join__FieldSet + type: String + external: Boolean + override: String + usedOverridden: Boolean + overrideLabel: String + contextArguments: [join__ContextArgument!] +) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements( + graph: join__Graph! + interface: String! +) repeatable on OBJECT | INTERFACE + +directive @join__type( + graph: join__Graph! + key: join__FieldSet + extension: Boolean! = false + resolvable: Boolean! = true + isInterfaceObject: Boolean! = false +) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember( + graph: join__Graph! + member: String! +) repeatable on UNION + +directive @link( + url: String + as: String + for: link__Purpose + import: [link__Import] +) repeatable on SCHEMA + +directive @listSize( + assumedSize: Int + slicingArguments: [String!] + sizedFields: [String!] + requireOneSlicingArgument: Boolean = true +) on FIELD_DEFINITION + +type A @join__type(graph: SUBGRAPHWITHLISTSIZE) { + id: ID +} + +enum AorB @join__type(graph: SUBGRAPHWITHCOST) @cost(weight: 15) { + A @join__enumValue(graph: SUBGRAPHWITHCOST) + B @join__enumValue(graph: SUBGRAPHWITHCOST) +} + +scalar ExpensiveInt @join__type(graph: SUBGRAPHWITHCOST) @cost(weight: 30) + +type ExpensiveObject @join__type(graph: SUBGRAPHWITHCOST) @cost(weight: 40) { + id: ID +} + +input InputTypeWithCost @join__type(graph: SUBGRAPHWITHCOST) { + somethingWithCost: Int @cost(weight: 20) +} + +input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! +} + +scalar join__DirectiveArguments + +scalar join__FieldSet + +scalar join__FieldValue + +enum join__Graph { + SUBGRAPHWITHCOST + @join__graph(name: "subgraphWithCost", url: "http://localhost:4001") + SUBGRAPHWITHLISTSIZE + @join__graph(name: "subgraphWithListSize", url: "http://localhost:4002") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: SUBGRAPHWITHCOST) + @join__type(graph: SUBGRAPHWITHLISTSIZE) { + fieldWithCost: Int @join__field(graph: SUBGRAPHWITHCOST) @cost(weight: 5) + argWithCost(arg: Int @cost(weight: 10)): Int + @join__field(graph: SUBGRAPHWITHCOST) + enumWithCost: AorB @join__field(graph: SUBGRAPHWITHCOST) + inputWithCost(someInput: InputTypeWithCost): Int + @join__field(graph: SUBGRAPHWITHCOST) + scalarWithCost: ExpensiveInt @join__field(graph: SUBGRAPHWITHCOST) + objectWithCost: ExpensiveObject @join__field(graph: SUBGRAPHWITHCOST) + fieldWithListSize: [String!] 
+ @join__field(graph: SUBGRAPHWITHLISTSIZE) + @listSize(assumedSize: 2000, requireOneSlicingArgument: false) + fieldWithDynamicListSize(first: Int = 10): SizedField + @join__field(graph: SUBGRAPHWITHLISTSIZE) + @listSize( + slicingArguments: ["first"] + sizedFields: ["items"] + requireOneSlicingArgument: true + ) +} + +type SizedField @join__type(graph: SUBGRAPHWITHLISTSIZE) { + items: [A] +} diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema_with_renamed_directives.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema_with_renamed_directives.graphql new file mode 100644 index 0000000000..1d1f17263d --- /dev/null +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema_with_renamed_directives.graphql @@ -0,0 +1,163 @@ +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link( + url: "https://specs.apollo.dev/cost/v0.1" + import: [ + { name: "@cost", as: "@renamedCost" } + { name: "@listSize", as: "@renamedListSize" } + ] + ) { + query: Query +} + +directive @cost__listSize( + assumedSize: Int + slicingArguments: [String!] + sizedFields: [String!] + requireOneSlicingArgument: Boolean = true +) on FIELD_DEFINITION + +directive @join__directive( + graphs: [join__Graph!] + name: String! + args: join__DirectiveArguments +) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field( + graph: join__Graph + requires: join__FieldSet + provides: join__FieldSet + type: String + external: Boolean + override: String + usedOverridden: Boolean + overrideLabel: String + contextArguments: [join__ContextArgument!] +) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements( + graph: join__Graph! + interface: String! +) repeatable on OBJECT | INTERFACE + +directive @join__type( + graph: join__Graph! + key: join__FieldSet + extension: Boolean! = false + resolvable: Boolean! = true + isInterfaceObject: Boolean! = false +) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember( + graph: join__Graph! + member: String! +) repeatable on UNION + +directive @link( + url: String + as: String + for: link__Purpose + import: [link__Import] +) repeatable on SCHEMA + +directive @renamedCost( + weight: Int! +) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR + +directive @renamedListSize( + assumedSize: Int + slicingArguments: [String!] + sizedFields: [String!] + requireOneSlicingArgument: Boolean = true +) on FIELD_DEFINITION + +type A @join__type(graph: SUBGRAPHWITHLISTSIZE) { + id: ID +} + +enum AorB @join__type(graph: SUBGRAPHWITHCOST) @renamedCost(weight: 15) { + A @join__enumValue(graph: SUBGRAPHWITHCOST) + B @join__enumValue(graph: SUBGRAPHWITHCOST) +} + +scalar ExpensiveInt + @join__type(graph: SUBGRAPHWITHCOST) + @renamedCost(weight: 30) + +type ExpensiveObject + @join__type(graph: SUBGRAPHWITHCOST) + @renamedCost(weight: 40) { + id: ID +} + +input InputTypeWithCost @join__type(graph: SUBGRAPHWITHCOST) { + somethingWithCost: Int @renamedCost(weight: 20) +} + +input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! 
+}
+
+scalar join__DirectiveArguments
+
+scalar join__FieldSet
+
+scalar join__FieldValue
+
+enum join__Graph {
+  SUBGRAPHWITHCOST
+  @join__graph(name: "subgraphWithCost", url: "http://localhost:4001")
+  SUBGRAPHWITHLISTSIZE
+  @join__graph(name: "subgraphWithListSize", url: "http://localhost:4002")
+}
+
+scalar link__Import
+
+enum link__Purpose {
+  """
+  `SECURITY` features provide metadata necessary to securely resolve fields.
+  """
+  SECURITY
+
+  """
+  `EXECUTION` features provide metadata necessary for operation execution.
+  """
+  EXECUTION
+}
+
+type Query
+  @join__type(graph: SUBGRAPHWITHCOST)
+  @join__type(graph: SUBGRAPHWITHLISTSIZE) {
+  fieldWithCost: Int
+    @join__field(graph: SUBGRAPHWITHCOST)
+    @renamedCost(weight: 5)
+  argWithCost(arg: Int @renamedCost(weight: 10)): Int
+    @join__field(graph: SUBGRAPHWITHCOST)
+  enumWithCost: AorB @join__field(graph: SUBGRAPHWITHCOST)
+  inputWithCost(someInput: InputTypeWithCost): Int
+    @join__field(graph: SUBGRAPHWITHCOST)
+  scalarWithCost: ExpensiveInt @join__field(graph: SUBGRAPHWITHCOST)
+  objectWithCost: ExpensiveObject @join__field(graph: SUBGRAPHWITHCOST)
+  fieldWithListSize: [String!]
+    @join__field(graph: SUBGRAPHWITHLISTSIZE)
+    @renamedListSize(assumedSize: 2000, requireOneSlicingArgument: false)
+  fieldWithDynamicListSize(first: Int = 10): SizedField
+    @join__field(graph: SUBGRAPHWITHLISTSIZE)
+    @renamedListSize(
+      slicingArguments: ["first"]
+      sizedFields: ["items"]
+      requireOneSlicingArgument: true
+    )
+}
+
+type SizedField @join__type(graph: SUBGRAPHWITHLISTSIZE) {
+  items: [A]
+}
diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/mod.rs b/apollo-router/src/plugins/demand_control/cost_calculator/mod.rs
index 290ce4dbe4..a534f91a94 100644
--- a/apollo-router/src/plugins/demand_control/cost_calculator/mod.rs
+++ b/apollo-router/src/plugins/demand_control/cost_calculator/mod.rs
@@ -1,4 +1,5 @@
 mod directives;
+pub(in crate::plugins::demand_control) mod schema;
 pub(crate) mod static_cost;
 
 use crate::plugins::demand_control::DemandControlError;
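
The new `schema.rs` below walks every object and interface type once, at plugin startup, and caches the parsed `@cost`, `@listSize`, and `@requires` data per `(type, field)` pair, so per-operation scoring becomes a pair of hash lookups instead of repeated directive parsing. A toy illustration of that precompute-then-look-up shape, under simplified assumptions (the real tables key by `apollo_compiler::Name` and store the directive structs from `directives.rs`; `CachedCosts` here is hypothetical):

```rust
use std::collections::HashMap;

// Hypothetical simplification: one weight per (type name, field name).
struct CachedCosts {
    type_field_cost: HashMap<(String, String), f64>,
}

impl CachedCosts {
    // Built once, at plugin initialization.
    fn new() -> Self {
        let mut type_field_cost = HashMap::new();
        // e.g. a field carrying `@cost(weight: 5)`:
        type_field_cost.insert(("Query".into(), "fieldWithCost".into()), 5.0);
        Self { type_field_cost }
    }

    // Called on the per-request hot path.
    fn weight(&self, ty: &str, field: &str) -> Option<f64> {
        self.type_field_cost
            .get(&(ty.to_string(), field.to_string()))
            .copied()
    }
}

fn main() {
    let cache = CachedCosts::new();
    assert_eq!(cache.weight("Query", "fieldWithCost"), Some(5.0));
    // No entry means the scorer falls back to its kind-based defaults.
    assert_eq!(cache.weight("Query", "anotherScalar"), None);
}
```
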
diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/schema.rs b/apollo-router/src/plugins/demand_control/cost_calculator/schema.rs
new file mode 100644
index 0000000000..6a46ee9fe9
--- /dev/null
+++ b/apollo-router/src/plugins/demand_control/cost_calculator/schema.rs
@@ -0,0 +1,180 @@
+use std::ops::Deref;
+use std::sync::Arc;
+
+use ahash::HashMap;
+use ahash::HashMapExt;
+use apollo_compiler::schema::ExtendedType;
+use apollo_compiler::validation::Valid;
+use apollo_compiler::Name;
+use apollo_compiler::Schema;
+
+use super::directives::get_apollo_directive_names;
+use super::directives::CostDirective;
+use super::directives::DefinitionListSizeDirective as ListSizeDirective;
+use super::directives::RequiresDirective;
+use crate::plugins::demand_control::DemandControlError;
+
+pub(crate) struct DemandControlledSchema {
+    directive_name_map: HashMap<Name, Name>,
+    inner: Arc<Valid<Schema>>,
+    type_field_cost_directives: HashMap<Name, HashMap<Name, CostDirective>>,
+    type_field_list_size_directives: HashMap<Name, HashMap<Name, ListSizeDirective>>,
+    type_field_requires_directives: HashMap<Name, HashMap<Name, RequiresDirective>>,
+}
+
+impl DemandControlledSchema {
+    pub(crate) fn new(schema: Arc<Valid<Schema>>) -> Result<Self, DemandControlError> {
+        let directive_name_map = get_apollo_directive_names(&schema);
+
+        let mut type_field_cost_directives: HashMap<Name, HashMap<Name, CostDirective>> =
+            HashMap::new();
+        let mut type_field_list_size_directives: HashMap<Name, HashMap<Name, ListSizeDirective>> =
+            HashMap::new();
+        let mut type_field_requires_directives: HashMap<Name, HashMap<Name, RequiresDirective>> =
+            HashMap::new();
+
+        for (type_name, type_) in &schema.types {
+            let field_cost_directives = type_field_cost_directives
+                .entry(type_name.clone())
+                .or_default();
+            let field_list_size_directives = type_field_list_size_directives
+                .entry(type_name.clone())
+                .or_default();
+            let field_requires_directives = type_field_requires_directives
+                .entry(type_name.clone())
+                .or_default();
+
+            match type_ {
+                ExtendedType::Interface(ty) => {
+                    for field_name in ty.fields.keys() {
+                        let field_definition = schema.type_field(type_name, field_name)?;
+                        let field_type = schema.types.get(field_definition.ty.inner_named_type()).ok_or_else(|| {
+                            DemandControlError::QueryParseFailure(format!(
+                                "Field {} was found in query, but its type is missing from the schema.",
+                                field_name
+                            ))
+                        })?;
+
+                        if let Some(cost_directive) =
+                            CostDirective::from_field(&directive_name_map, field_definition)
+                                .or(CostDirective::from_type(&directive_name_map, field_type))
+                        {
+                            field_cost_directives.insert(field_name.clone(), cost_directive);
+                        }
+
+                        if let Some(list_size_directive) = ListSizeDirective::from_field_definition(
+                            &directive_name_map,
+                            field_definition,
+                        )? {
+                            field_list_size_directives
+                                .insert(field_name.clone(), list_size_directive);
+                        }
+
+                        if let Some(requires_directive) = RequiresDirective::from_field_definition(
+                            field_definition,
+                            type_name,
+                            &schema,
+                        )? {
+                            field_requires_directives
+                                .insert(field_name.clone(), requires_directive);
+                        }
+                    }
+                }
+                ExtendedType::Object(ty) => {
+                    for field_name in ty.fields.keys() {
+                        let field_definition = schema.type_field(type_name, field_name)?;
+                        let field_type = schema.types.get(field_definition.ty.inner_named_type()).ok_or_else(|| {
+                            DemandControlError::QueryParseFailure(format!(
+                                "Field {} was found in query, but its type is missing from the schema.",
+                                field_name
+                            ))
+                        })?;
+
+                        if let Some(cost_directive) =
+                            CostDirective::from_field(&directive_name_map, field_definition)
+                                .or(CostDirective::from_type(&directive_name_map, field_type))
+                        {
+                            field_cost_directives.insert(field_name.clone(), cost_directive);
+                        }
+
+                        if let Some(list_size_directive) = ListSizeDirective::from_field_definition(
+                            &directive_name_map,
+                            field_definition,
+                        )? {
+                            field_list_size_directives
+                                .insert(field_name.clone(), list_size_directive);
+                        }
+
+                        if let Some(requires_directive) = RequiresDirective::from_field_definition(
+                            field_definition,
+                            type_name,
+                            &schema,
+                        )? {
+                            field_requires_directives
+                                .insert(field_name.clone(), requires_directive);
+                        }
+                    }
+                }
+                _ => {
+                    // Other types don't have fields
+                }
+            }
+        }
+
+        Ok(Self {
+            directive_name_map,
+            inner: schema,
+            type_field_cost_directives,
+            type_field_list_size_directives,
+            type_field_requires_directives,
+        })
+    }
+
+    pub(in crate::plugins::demand_control) fn directive_name_map(&self) -> &HashMap<Name, Name> {
+        &self.directive_name_map
+    }
+
+    pub(in crate::plugins::demand_control) fn type_field_cost_directive(
+        &self,
+        type_name: &str,
+        field_name: &str,
+    ) -> Option<&CostDirective> {
+        self.type_field_cost_directives
+            .get(type_name)?
+            .get(field_name)
+    }
+
+    pub(in crate::plugins::demand_control) fn type_field_list_size_directive(
+        &self,
+        type_name: &str,
+        field_name: &str,
+    ) -> Option<&ListSizeDirective> {
+        self.type_field_list_size_directives
+            .get(type_name)?
+            .get(field_name)
+    }
+
+    pub(in crate::plugins::demand_control) fn type_field_requires_directive(
+        &self,
+        type_name: &str,
+        field_name: &str,
+    ) -> Option<&RequiresDirective> {
+        self.type_field_requires_directives
+            .get(type_name)?
+            .get(field_name)
+    }
+}
+
+impl AsRef<Valid<Schema>> for DemandControlledSchema {
+    fn as_ref(&self) -> &Valid<Schema> {
+        &self.inner
+    }
+}
+
+impl Deref for DemandControlledSchema {
+    type Target = Schema;
+
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
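
The next file's `score_argument` recurses through argument values: by default an input object contributes 1.0, lists and scalars contribute 0.0, and a `@cost(weight:)` on the definition overrides the default at any level. A self-contained toy version of just those default weights, reproducing the arithmetic behind the `input_object_cost` test's expected total of 4.0 further down (the `Arg` enum is illustrative, not the plugin's types, and the `@cost` override path is omitted):

```rust
// Toy model of the default weights in `score_argument`.
enum Arg {
    Scalar,
    List(Vec<Arg>),
    Object(Vec<Arg>),
}

fn score(arg: &Arg) -> f64 {
    match arg {
        Arg::Scalar => 0.0,
        Arg::List(items) => items.iter().map(score).sum(),
        Arg::Object(fields) => 1.0 + fields.iter().map(score).sum::<f64>(),
    }
}

fn main() {
    // getScalarByObject(args: { inner: { id: 1 }, listOfInner: [{ id: 2 }, { id: 3 }] })
    let args = Arg::Object(vec![
        Arg::Object(vec![Arg::Scalar]), // inner
        Arg::List(vec![
            Arg::Object(vec![Arg::Scalar]), // { id: 2 }
            Arg::Object(vec![Arg::Scalar]), // { id: 3 }
        ]), // listOfInner
    ]);
    // 1 (args) + 1 (inner) + 1 + 1 (list items) = 4.0; the returned scalar
    // field adds nothing, so this is the whole estimated cost of that query.
    assert_eq!(score(&args), 4.0);
}
```
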
diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs
index f84a4fcd0a..439d09558f 100644
--- a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs
+++ b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs
@@ -1,5 +1,8 @@
 use std::sync::Arc;
 
+use ahash::HashMap;
+use apollo_compiler::ast;
+use apollo_compiler::ast::InputValueDefinition;
 use apollo_compiler::ast::NamedType;
 use apollo_compiler::executable::ExecutableDocument;
 use apollo_compiler::executable::Field;
@@ -8,18 +11,19 @@ use apollo_compiler::executable::InlineFragment;
 use apollo_compiler::executable::Operation;
 use apollo_compiler::executable::Selection;
 use apollo_compiler::executable::SelectionSet;
-use apollo_compiler::validation::Valid;
-use apollo_compiler::Schema;
+use apollo_compiler::schema::ExtendedType;
+use apollo_compiler::Node;
 use serde_json_bytes::Value;
 
 use super::directives::IncludeDirective;
-use super::directives::RequiresDirective;
 use super::directives::SkipDirective;
+use super::schema::DemandControlledSchema;
 use super::DemandControlError;
 use crate::graphql::Response;
 use crate::graphql::ResponseVisitor;
+use crate::plugins::demand_control::cost_calculator::directives::CostDirective;
+use crate::plugins::demand_control::cost_calculator::directives::ListSizeDirective;
 use crate::query_planner::fetch::SubgraphOperation;
-use crate::query_planner::fetch::SubgraphSchemas;
 use crate::query_planner::DeferredNode;
 use crate::query_planner::PlanNode;
 use crate::query_planner::Primary;
@@ -27,13 +31,74 @@ use crate::query_planner::QueryPlan;
 
 pub(crate) struct StaticCostCalculator {
     list_size: u32,
-    subgraph_schemas: Arc<SubgraphSchemas>,
+    supergraph_schema: Arc<DemandControlledSchema>,
+    subgraph_schemas: Arc<HashMap<String, DemandControlledSchema>>,
+}
+
+fn score_argument(
+    argument: &apollo_compiler::ast::Value,
+    argument_definition: &Node<InputValueDefinition>,
+    schema: &DemandControlledSchema,
+) -> Result<f64, DemandControlError> {
+    let cost_directive =
+        CostDirective::from_argument(schema.directive_name_map(), argument_definition);
+    let ty = schema
+        .types
+        .get(argument_definition.ty.inner_named_type())
+        .ok_or_else(|| {
+            DemandControlError::QueryParseFailure(format!(
+                "Argument {} was found in query, but its type ({}) was not found in the schema",
+                argument_definition.name,
+                argument_definition.ty.inner_named_type()
+            ))
+        })?;
+
+    match (argument, ty) {
+        (_, ExtendedType::Interface(_))
+        | (_, ExtendedType::Object(_))
+        | (_, ExtendedType::Union(_)) => Err(DemandControlError::QueryParseFailure(
+            format!(
+                "Argument {} has type {}, but objects, interfaces, and unions are disallowed in this position",
+                argument_definition.name,
+                argument_definition.ty.inner_named_type()
+            )
+        )),
+
+        (ast::Value::Object(inner_args), ExtendedType::InputObject(inner_arg_defs)) => {
+            let mut cost = cost_directive.map_or(1.0, |cost| cost.weight());
+            for (arg_name, arg_val) in inner_args {
+                let arg_def = inner_arg_defs.fields.get(arg_name).ok_or_else(|| {
+                    DemandControlError::QueryParseFailure(format!(
+                        "Argument {} was found in query, but its type ({}) was not found in the schema",
+                        argument_definition.name,
+                        argument_definition.ty.inner_named_type()
+                    ))
+                })?;
+                cost += score_argument(arg_val, arg_def, schema)?;
+            }
+            Ok(cost)
+        }
+
+        (ast::Value::List(inner_args), _) => {
+            let mut cost = cost_directive.map_or(0.0, |cost| cost.weight());
+            for arg_val in inner_args {
+                cost += score_argument(arg_val, argument_definition, schema)?;
+            }
+            Ok(cost)
+        }
+        (ast::Value::Null, _) => Ok(0.0),
+        _ => Ok(cost_directive.map_or(0.0, |cost| cost.weight()))
+    }
 }
 
 impl StaticCostCalculator {
-    pub(crate) fn new(subgraph_schemas: Arc<SubgraphSchemas>, list_size: u32) -> Self {
+    pub(crate) fn new(
+        supergraph_schema: Arc<DemandControlledSchema>,
+        subgraph_schemas: Arc<HashMap<String, DemandControlledSchema>>,
+        list_size: u32,
+    ) -> Self {
         Self {
             list_size,
+            supergraph_schema,
             subgraph_schemas,
         }
     }
@@ -60,14 +125,18 @@ impl StaticCostCalculator {
         &self,
         field: &Field,
         parent_type: &NamedType,
-        schema: &Valid<Schema>,
+        schema: &DemandControlledSchema,
         executable: &ExecutableDocument,
         should_estimate_requires: bool,
+        list_size_from_upstream: Option<i32>,
     ) -> Result<f64, DemandControlError> {
         if StaticCostCalculator::skipped_by_directives(field) {
             return Ok(0.0);
         }
 
+        // We need to look up the `FieldDefinition` from the supergraph schema instead of using `field.definition`
+        // because `field.definition` was generated from the API schema, which strips off the directives we need.
+        let definition = schema.type_field(parent_type, &field.name)?;
         let ty = field.inner_type_def(schema).ok_or_else(|| {
             DemandControlError::QueryParseFailure(format!(
                 "Field {} was found in query, but its type is missing from the schema.",
@@ -75,17 +144,32 @@ impl StaticCostCalculator {
             ))
         })?;
 
-        // Determine how many instances we're scoring. If there's no user-provided
-        // information, assume lists have 100 items.
-        let instance_count = if field.ty().is_list() {
-            self.list_size as f64
+        let list_size_directive =
+            match schema.type_field_list_size_directive(parent_type, &field.name) {
+                Some(dir) => dir.with_field(field).map(Some),
+                None => Ok(None),
+            }?;
+        let instance_count = if !field.ty().is_list() {
+            1
+        } else if let Some(value) = list_size_from_upstream {
+            // This is a sized field whose length is defined by the `@listSize` directive on the parent field
+            value
+        } else if let Some(expected_size) = list_size_directive
+            .as_ref()
+            .and_then(|dir| dir.expected_size)
+        {
+            expected_size
         } else {
-            1.0
+            self.list_size as i32
         };
 
         // Determine the cost for this particular field. Scalars are free, non-scalars are not.
         // For fields with selections, add in the cost of the selections as well.
-        let mut type_cost = if ty.is_interface() || ty.is_object() || ty.is_union() {
+        let mut type_cost = if let Some(cost_directive) =
+            schema.type_field_cost_directive(parent_type, &field.name)
+        {
+            cost_directive.weight()
+        } else if ty.is_interface() || ty.is_object() || ty.is_union() {
             1.0
         } else {
             0.0
@@ -96,32 +180,48 @@ impl StaticCostCalculator {
             schema,
             executable,
             should_estimate_requires,
+            list_size_directive.as_ref(),
         )?;
 
+        let mut arguments_cost = 0.0;
+        for argument in &field.arguments {
+            let argument_definition =
+                definition.argument_by_name(&argument.name).ok_or_else(|| {
+                    DemandControlError::QueryParseFailure(format!(
+                        "Argument {} of field {} is missing a definition in the schema",
+                        argument.name, field.name
+                    ))
+                })?;
+            arguments_cost += score_argument(&argument.value, argument_definition, schema)?;
+        }
+
         let mut requirements_cost = 0.0;
         if should_estimate_requires {
             // If the field is marked with `@requires`, the required selection may not be included
             // in the query's selection. Adding that requirement's cost to the field ensures it's
            // accounted for.
-            let requirements =
-                RequiresDirective::from_field(field, parent_type, schema)?.map(|d| d.fields);
+            let requirements = schema
+                .type_field_requires_directive(parent_type, &field.name)
+                .map(|d| &d.fields);
 
             if let Some(selection_set) = requirements {
                 requirements_cost = self.score_selection_set(
-                    &selection_set,
+                    selection_set,
                     parent_type,
                     schema,
                     executable,
                     should_estimate_requires,
+                    list_size_directive.as_ref(),
                 )?;
             }
         }
 
-        let cost = instance_count * type_cost + requirements_cost;
+        let cost = (instance_count as f64) * type_cost + arguments_cost + requirements_cost;
         tracing::debug!(
-            "Field {} cost breakdown: (count) {} * (type cost) {} + (requirements) {} = {}",
+            "Field {} cost breakdown: (count) {} * (type cost) {} + (arguments) {} + (requirements) {} = {}",
             field.name,
             instance_count,
             type_cost,
+            arguments_cost,
             requirements_cost,
             cost
         );
@@ -133,9 +233,10 @@ impl StaticCostCalculator {
         &self,
         fragment_spread: &FragmentSpread,
         parent_type: &NamedType,
-        schema: &Valid<Schema>,
+        schema: &DemandControlledSchema,
         executable: &ExecutableDocument,
         should_estimate_requires: bool,
+        list_size_directive: Option<&ListSizeDirective>,
     ) -> Result<f64, DemandControlError> {
         let fragment = fragment_spread.fragment_def(executable).ok_or_else(|| {
             DemandControlError::QueryParseFailure(format!(
@@ -149,6 +250,7 @@ impl StaticCostCalculator {
             schema,
             executable,
             should_estimate_requires,
+            list_size_directive,
         )
     }
 
@@ -156,9 +258,10 @@ impl StaticCostCalculator {
         &self,
         inline_fragment: &InlineFragment,
         parent_type: &NamedType,
-        schema: &Valid<Schema>,
+        schema: &DemandControlledSchema,
         executable: &ExecutableDocument,
         should_estimate_requires: bool,
+        list_size_directive: Option<&ListSizeDirective>,
     ) -> Result<f64, DemandControlError> {
         self.score_selection_set(
             &inline_fragment.selection_set,
@@ -166,13 +269,14 @@ impl StaticCostCalculator {
             schema,
             executable,
             should_estimate_requires,
+            list_size_directive,
         )
     }
 
     fn score_operation(
         &self,
         operation: &Operation,
-        schema: &Valid<Schema>,
+        schema: &DemandControlledSchema,
         executable: &ExecutableDocument,
         should_estimate_requires: bool,
     ) -> Result<f64, DemandControlError> {
@@ -191,6 +295,7 @@ impl StaticCostCalculator {
             schema,
             executable,
             should_estimate_requires,
+            None,
         )?;
 
         Ok(cost)
@@ -200,20 +305,27 @@ impl StaticCostCalculator {
         &self,
         selection: &Selection,
         parent_type: &NamedType,
-        schema: &Valid<Schema>,
+        schema: &DemandControlledSchema,
         executable: &ExecutableDocument,
         should_estimate_requires: bool,
+        list_size_directive: Option<&ListSizeDirective>,
     ) -> Result<f64, DemandControlError> {
         match selection {
-            Selection::Field(f) => {
-                self.score_field(f, parent_type, schema, executable, should_estimate_requires)
-            }
+            Selection::Field(f) => self.score_field(
+                f,
+                parent_type,
+                schema,
+                executable,
+                should_estimate_requires,
+                list_size_directive.and_then(|dir| dir.size_of(f)),
+            ),
             Selection::FragmentSpread(s) => self.score_fragment_spread(
                 s,
                 parent_type,
                 schema,
                 executable,
                 should_estimate_requires,
+                list_size_directive,
             ),
             Selection::InlineFragment(i) => self.score_inline_fragment(
                 i,
@@ -221,6 +333,7 @@ impl StaticCostCalculator {
                 schema,
                 executable,
                 should_estimate_requires,
+                list_size_directive,
             ),
         }
     }
@@ -229,9 +342,10 @@ impl StaticCostCalculator {
         &self,
         selection_set: &SelectionSet,
         parent_type_name: &NamedType,
-        schema: &Valid<Schema>,
+        schema: &DemandControlledSchema,
         executable: &ExecutableDocument,
         should_estimate_requires: bool,
+        list_size_directive: Option<&ListSizeDirective>,
     ) -> Result<f64, DemandControlError> {
         let mut cost = 0.0;
         for selection in selection_set.selections.iter() {
@@ -241,6 +355,7 @@ impl StaticCostCalculator {
                 schema,
                 executable,
                 should_estimate_requires,
+                list_size_directive,
             )?;
         }
         Ok(cost)
@@ -347,7 +462,7 @@ impl StaticCostCalculator {
     pub(crate) fn estimated(
         &self,
         query: &ExecutableDocument,
-        schema: &Valid<Schema>,
+        schema: &DemandControlledSchema,
         should_estimate_requires: bool,
     ) -> Result<f64, DemandControlError> {
         let mut cost = 0.0;
@@ -369,39 +484,75 @@ impl StaticCostCalculator {
         request: &ExecutableDocument,
         response: &Response,
     ) -> Result<f64, DemandControlError> {
-        let mut visitor = ResponseCostCalculator::new();
+        let mut visitor = ResponseCostCalculator::new(&self.supergraph_schema);
         visitor.visit(request, response);
         Ok(visitor.cost)
     }
 }
 
-pub(crate) struct ResponseCostCalculator {
+pub(crate) struct ResponseCostCalculator<'a> {
     pub(crate) cost: f64,
+    schema: &'a DemandControlledSchema,
 }
 
-impl ResponseCostCalculator {
-    pub(crate) fn new() -> Self {
-        Self { cost: 0.0 }
+impl<'schema> ResponseCostCalculator<'schema> {
    pub(crate) fn new(schema: &'schema DemandControlledSchema) -> Self {
+        Self { cost: 0.0, schema }
     }
 }
 
-impl ResponseVisitor for ResponseCostCalculator {
+impl<'schema> ResponseVisitor for ResponseCostCalculator<'schema> {
     fn visit_field(
         &mut self,
         request: &ExecutableDocument,
-        _ty: &NamedType,
+        parent_ty: &NamedType,
         field: &Field,
         value: &Value,
     ) {
+        self.visit_list_item(request, parent_ty, field, value);
+
+        let definition = self.schema.type_field(parent_ty, &field.name);
+        for argument in &field.arguments {
+            if let Ok(Some(argument_definition)) = definition
+                .as_ref()
+                .map(|def| def.argument_by_name(&argument.name))
+            {
+                if let Ok(score) = score_argument(&argument.value, argument_definition, self.schema)
+                {
+                    self.cost += score;
+                }
+            } else {
+                tracing::warn!(
+                    "Failed to get schema definition for argument {} of field {}. The resulting actual cost will be a partial result.",
+                    argument.name,
+                    field.name
+                )
+            }
+        }
+    }
+
+    fn visit_list_item(
+        &mut self,
+        request: &apollo_compiler::ExecutableDocument,
+        parent_ty: &apollo_compiler::executable::NamedType,
+        field: &apollo_compiler::executable::Field,
+        value: &Value,
+    ) {
+        let cost_directive = self
+            .schema
+            .type_field_cost_directive(parent_ty, &field.name);
+
         match value {
-            Value::Null | Value::Bool(_) | Value::Number(_) | Value::String(_) => {}
+            Value::Null | Value::Bool(_) | Value::Number(_) | Value::String(_) => {
+                self.cost += cost_directive.map_or(0.0, |cost| cost.weight());
+            }
             Value::Array(items) => {
                 for item in items {
-                    self.visit_field(request, field.ty().inner_named_type(), field, item);
+                    self.visit_list_item(request, parent_ty, field, item);
                }
             }
             Value::Object(children) => {
-                self.cost += 1.0;
+                self.cost += cost_directive.map_or(1.0, |cost| cost.weight());
                 self.visit_selections(request, &field.selection_set, children);
             }
         }
@@ -412,6 +563,8 @@ impl ResponseVisitor for ResponseCostCalculator {
 mod tests {
     use std::sync::Arc;
 
+    use ahash::HashMapExt;
+    use apollo_federation::query_plan::query_planner::QueryPlanner;
     use bytes::Bytes;
     use test_log::test;
     use tower::Service;
@@ -426,6 +579,16 @@ mod tests {
     use crate::Configuration;
     use crate::Context;
 
+    impl StaticCostCalculator {
+        fn rust_planned(
+            &self,
+            query_plan: &apollo_federation::query_plan::QueryPlan,
+        ) -> Result<f64, DemandControlError> {
+            let js_planner_node: PlanNode = query_plan.node.as_ref().unwrap().into();
+            self.score_plan_node(&js_planner_node)
+        }
+    }
+
     fn parse_schema_and_operation(
         schema_str: &str,
         query_str: &str,
            &Default::default());
-        StaticCostCalculator::new(Default::default(), 100)
-            .estimated(&query.executable, schema.supergraph_schema(), true)
+        let schema =
+            DemandControlledSchema::new(Arc::new(schema.supergraph_schema().clone())).unwrap();
+        let calculator = StaticCostCalculator::new(Arc::new(schema), Default::default(), 100);
+
+        calculator
+            .estimated(&query.executable, &calculator.supergraph_schema, true)
             .unwrap()
     }
 
@@ -455,16 +622,20 @@ mod tests {
             "query.graphql",
         )
         .unwrap();
-        StaticCostCalculator::new(Default::default(), 100)
-            .estimated(&query, &schema, true)
+        let schema = DemandControlledSchema::new(Arc::new(schema)).unwrap();
+        let calculator = StaticCostCalculator::new(Arc::new(schema), Default::default(), 100);
+
+        calculator
+            .estimated(&query, &calculator.supergraph_schema, true)
             .unwrap()
     }
 
-    async fn planned_cost(schema_str: &str, query_str: &str) -> f64 {
+    async fn planned_cost_js(schema_str: &str, query_str: &str) -> f64 {
         let config: Arc<Configuration> = Arc::new(Default::default());
-        let (_schema, query) = parse_schema_and_operation(schema_str, query_str, &config);
+        let (schema, query) = parse_schema_and_operation(schema_str, query_str, &config);
+        let supergraph_schema = schema.supergraph_schema().clone();
 
-        let mut planner = BridgeQueryPlanner::new(schema_str.to_string(), config.clone(), None)
+        let mut planner = BridgeQueryPlanner::new(schema.into(), config.clone(), None, None)
             .await
             .unwrap();
 
@@ -481,23 +652,81 @@ mod tests {
             _ => panic!("Query planner returned unexpected non-plan content"),
         };
 
-        let calculator = StaticCostCalculator {
-            subgraph_schemas: planner.subgraph_schemas(),
-            list_size: 100,
-        };
+        let schema = DemandControlledSchema::new(Arc::new(supergraph_schema)).unwrap();
+        let mut demand_controlled_subgraph_schemas = HashMap::new();
+        for (subgraph_name, subgraph_schema) in planner.subgraph_schemas().iter() {
+            let demand_controlled_subgraph_schema =
+                DemandControlledSchema::new(subgraph_schema.clone()).unwrap();
+            demand_controlled_subgraph_schemas
+                .insert(subgraph_name.to_string(), demand_controlled_subgraph_schema);
+        }
+
+        let calculator = StaticCostCalculator::new(
+            Arc::new(schema),
+            Arc::new(demand_controlled_subgraph_schemas),
+            100,
+        );
 
         calculator.planned(&query_plan).unwrap()
     }
 
+    fn planned_cost_rust(schema_str: &str, query_str: &str) -> f64 {
+        let config: Arc<Configuration> = Arc::new(Default::default());
+        let (schema, query) = parse_schema_and_operation(schema_str, query_str, &config);
+
+        let planner =
+            QueryPlanner::new(schema.federation_supergraph(), Default::default()).unwrap();
+
+        let query_plan = planner.build_query_plan(&query.executable, None).unwrap();
+
+        let schema =
+            DemandControlledSchema::new(Arc::new(schema.supergraph_schema().clone())).unwrap();
+        let mut demand_controlled_subgraph_schemas = HashMap::new();
+        for (subgraph_name, subgraph_schema) in planner.subgraph_schemas().iter() {
+            let demand_controlled_subgraph_schema =
+                DemandControlledSchema::new(Arc::new(subgraph_schema.schema().clone())).unwrap();
+            demand_controlled_subgraph_schemas
+                .insert(subgraph_name.to_string(), demand_controlled_subgraph_schema);
+        }
+
+        let calculator = StaticCostCalculator::new(
+            Arc::new(schema),
+            Arc::new(demand_controlled_subgraph_schemas),
+            100,
+        );
+
+        calculator.rust_planned(&query_plan).unwrap()
+    }
+
     fn actual_cost(schema_str: &str, query_str: &str, response_bytes: &'static [u8]) -> f64 {
-        let (_schema, query) =
+        let (schema, query) =
             parse_schema_and_operation(schema_str, query_str, &Default::default());
        let response =
Response::from_bytes("test", Bytes::from(response_bytes)).unwrap(); - StaticCostCalculator::new(Default::default(), 100) + let schema = + DemandControlledSchema::new(Arc::new(schema.supergraph_schema().clone())).unwrap(); + StaticCostCalculator::new(Arc::new(schema), Default::default(), 100) .actual(&query.executable, &response) .unwrap() } + /// Actual cost of an operation on a plain, non-federated schema. + fn basic_actual_cost(schema_str: &str, query_str: &str, response_bytes: &'static [u8]) -> f64 { + let schema = + apollo_compiler::Schema::parse_and_validate(schema_str, "schema.graphqls").unwrap(); + let query = apollo_compiler::ExecutableDocument::parse_and_validate( + &schema, + query_str, + "query.graphql", + ) + .unwrap(); + let response = Response::from_bytes("test", Bytes::from(response_bytes)).unwrap(); + + let schema = DemandControlledSchema::new(Arc::new(schema)).unwrap(); + StaticCostCalculator::new(Arc::new(schema), Default::default(), 100) + .actual(&query, &response) + .unwrap() + } + #[test] fn query_cost() { let schema = include_str!("./fixtures/basic_schema.graphql"); @@ -562,6 +791,25 @@ mod tests { assert_eq!(basic_estimated_cost(schema, query), 10100.0) } + #[test] + fn input_object_cost() { + let schema = include_str!("./fixtures/basic_schema.graphql"); + let query = include_str!("./fixtures/basic_input_object_query.graphql"); + + assert_eq!(basic_estimated_cost(schema, query), 4.0) + } + + #[test] + fn input_object_cost_with_returned_objects() { + let schema = include_str!("./fixtures/basic_schema.graphql"); + let query = include_str!("./fixtures/basic_input_object_query_2.graphql"); + let response = include_bytes!("./fixtures/basic_input_object_response.json"); + + assert_eq!(basic_estimated_cost(schema, query), 104.0); + // The cost of the arguments from the query should be included when scoring the response + assert_eq!(basic_actual_cost(schema, query, response), 7.0); + } + #[test] fn skip_directive_excludes_cost() { let schema = include_str!("./fixtures/basic_schema.graphql"); @@ -595,7 +843,8 @@ mod tests { let response = include_bytes!("./fixtures/federated_ships_required_response.json"); assert_eq!(estimated_cost(schema, query), 10200.0); - assert_eq!(planned_cost(schema, query).await, 10400.0); + assert_eq!(planned_cost_js(schema, query).await, 10400.0); + assert_eq!(planned_cost_rust(schema, query), 10400.0); assert_eq!(actual_cost(schema, query, response), 2.0); } @@ -606,7 +855,8 @@ mod tests { let response = include_bytes!("./fixtures/federated_ships_fragment_response.json"); assert_eq!(estimated_cost(schema, query), 300.0); - assert_eq!(planned_cost(schema, query).await, 400.0); + assert_eq!(planned_cost_js(schema, query).await, 400.0); + assert_eq!(planned_cost_rust(schema, query), 400.0); assert_eq!(actual_cost(schema, query, response), 6.0); } @@ -617,7 +867,8 @@ mod tests { let response = include_bytes!("./fixtures/federated_ships_fragment_response.json"); assert_eq!(estimated_cost(schema, query), 300.0); - assert_eq!(planned_cost(schema, query).await, 400.0); + assert_eq!(planned_cost_js(schema, query).await, 400.0); + assert_eq!(planned_cost_rust(schema, query), 400.0); assert_eq!(actual_cost(schema, query, response), 6.0); } @@ -628,7 +879,8 @@ mod tests { let response = include_bytes!("./fixtures/federated_ships_deferred_response.json"); assert_eq!(estimated_cost(schema, query), 10200.0); - assert_eq!(planned_cost(schema, query).await, 10400.0); + assert_eq!(planned_cost_js(schema, query).await, 10400.0); + 
assert_eq!(planned_cost_rust(schema, query), 10400.0); assert_eq!(actual_cost(schema, query, response), 2.0); } @@ -637,15 +889,58 @@ mod tests { let schema = include_str!("./fixtures/federated_ships_schema.graphql"); let query = include_str!("./fixtures/federated_ships_deferred_query.graphql"); let (schema, query) = parse_schema_and_operation(schema, query, &Default::default()); + let schema = Arc::new( + DemandControlledSchema::new(Arc::new(schema.supergraph_schema().clone())).unwrap(), + ); - let conservative_estimate = StaticCostCalculator::new(Default::default(), 100) - .estimated(&query.executable, schema.supergraph_schema(), true) + let calculator = StaticCostCalculator::new(schema.clone(), Default::default(), 100); + let conservative_estimate = calculator + .estimated(&query.executable, &calculator.supergraph_schema, true) .unwrap(); - let narrow_estimate = StaticCostCalculator::new(Default::default(), 5) - .estimated(&query.executable, schema.supergraph_schema(), true) + + let calculator = StaticCostCalculator::new(schema.clone(), Default::default(), 5); + let narrow_estimate = calculator + .estimated(&query.executable, &calculator.supergraph_schema, true) .unwrap(); assert_eq!(conservative_estimate, 10200.0); assert_eq!(narrow_estimate, 35.0); } + + #[test(tokio::test)] + async fn custom_cost_query() { + let schema = include_str!("./fixtures/custom_cost_schema.graphql"); + let query = include_str!("./fixtures/custom_cost_query.graphql"); + let response = include_bytes!("./fixtures/custom_cost_response.json"); + + assert_eq!(estimated_cost(schema, query), 127.0); + assert_eq!(planned_cost_js(schema, query).await, 127.0); + assert_eq!(planned_cost_rust(schema, query), 127.0); + assert_eq!(actual_cost(schema, query, response), 125.0); + } + + #[test(tokio::test)] + async fn custom_cost_query_with_renamed_directives() { + let schema = include_str!("./fixtures/custom_cost_schema_with_renamed_directives.graphql"); + let query = include_str!("./fixtures/custom_cost_query.graphql"); + let response = include_bytes!("./fixtures/custom_cost_response.json"); + + assert_eq!(estimated_cost(schema, query), 127.0); + assert_eq!(planned_cost_js(schema, query).await, 127.0); + assert_eq!(planned_cost_rust(schema, query), 127.0); + assert_eq!(actual_cost(schema, query, response), 125.0); + } + + #[test(tokio::test)] + async fn custom_cost_query_with_default_slicing_argument() { + let schema = include_str!("./fixtures/custom_cost_schema.graphql"); + let query = + include_str!("./fixtures/custom_cost_query_with_default_slicing_argument.graphql"); + let response = include_bytes!("./fixtures/custom_cost_response.json"); + + assert_eq!(estimated_cost(schema, query), 132.0); + assert_eq!(planned_cost_js(schema, query).await, 132.0); + assert_eq!(planned_cost_rust(schema, query), 132.0); + assert_eq!(actual_cost(schema, query, response), 125.0); + } } diff --git a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_request.router.yaml b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_request.router.yaml index 43b492c8ad..131a3cc470 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_request.router.yaml +++ b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_request.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: enforce strategy: diff --git a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_response.router.yaml 
b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_response.router.yaml index deb3908da5..d3bcba889f 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_response.router.yaml +++ b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_execution_response.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: enforce strategy: diff --git a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_request.router.yaml b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_request.router.yaml index dc83e08c34..bb77fa7031 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_request.router.yaml +++ b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_request.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: enforce strategy: diff --git a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_response.router.yaml b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_response.router.yaml index 56fd39e585..8d1a364728 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_response.router.yaml +++ b/apollo-router/src/plugins/demand_control/fixtures/enforce_on_subgraph_response.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: enforce strategy: diff --git a/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_request.router.yaml b/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_request.router.yaml index c96a6908bc..4e2a2f5463 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_request.router.yaml +++ b/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_request.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: measure strategy: diff --git a/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_response.router.yaml b/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_response.router.yaml index a7422da35b..6256ca53b4 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_response.router.yaml +++ b/apollo-router/src/plugins/demand_control/fixtures/measure_on_execution_response.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: measure strategy: diff --git a/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_request.router.yaml b/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_request.router.yaml index c96a6908bc..4e2a2f5463 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_request.router.yaml +++ b/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_request.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: measure strategy: diff --git a/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_response.router.yaml b/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_response.router.yaml index a7422da35b..6256ca53b4 100644 --- a/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_response.router.yaml +++ b/apollo-router/src/plugins/demand_control/fixtures/measure_on_subgraph_response.router.yaml @@ -1,4 +1,4 @@ -preview_demand_control: +demand_control: enabled: true mode: measure 
strategy: diff --git a/apollo-router/src/plugins/demand_control/mod.rs b/apollo-router/src/plugins/demand_control/mod.rs index 476deeb737..b3faaef747 100644 --- a/apollo-router/src/plugins/demand_control/mod.rs +++ b/apollo-router/src/plugins/demand_control/mod.rs @@ -5,6 +5,9 @@ use std::future; use std::ops::ControlFlow; use std::sync::Arc; +use ahash::HashMap; +use ahash::HashMapExt; +use apollo_compiler::schema::FieldLookupError; use apollo_compiler::validation::Valid; use apollo_compiler::validation::WithErrors; use apollo_compiler::ExecutableDocument; @@ -27,6 +30,7 @@ use crate::json_ext::Object; use crate::layers::ServiceBuilderExt; use crate::plugin::Plugin; use crate::plugin::PluginInit; +use crate::plugins::demand_control::cost_calculator::schema::DemandControlledSchema; use crate::plugins::demand_control::strategy::Strategy; use crate::plugins::demand_control::strategy::StrategyFactory; use crate::register_plugin; @@ -199,6 +203,22 @@ impl From> for DemandControlError { } } +impl<'a> From> for DemandControlError { + fn from(value: FieldLookupError) -> Self { + match value { + FieldLookupError::NoSuchType => DemandControlError::QueryParseFailure( + "Attempted to look up a type which does not exist in the schema".to_string(), + ), + FieldLookupError::NoSuchField(type_name, _) => { + DemandControlError::QueryParseFailure(format!( + "Attempted to look up a field on type {}, but the field does not exist", + type_name + )) + } + } + } +} + pub(crate) struct DemandControl { config: DemandControlConfig, strategy_factory: StrategyFactory, @@ -223,11 +243,21 @@ impl Plugin for DemandControl { type Config = DemandControlConfig; async fn new(init: PluginInit) -> Result { + let demand_controlled_supergraph_schema = + DemandControlledSchema::new(init.supergraph_schema.clone())?; + let mut demand_controlled_subgraph_schemas = HashMap::new(); + for (subgraph_name, subgraph_schema) in init.subgraph_schemas.iter() { + let demand_controlled_subgraph_schema = + DemandControlledSchema::new(subgraph_schema.clone())?; + demand_controlled_subgraph_schemas + .insert(subgraph_name.clone(), demand_controlled_subgraph_schema); + } + Ok(DemandControl { strategy_factory: StrategyFactory::new( init.config.clone(), - init.supergraph_schema.clone(), - init.subgraph_schemas.clone(), + Arc::new(demand_controlled_supergraph_schema), + Arc::new(demand_controlled_subgraph_schemas), ), config: init.config, }) @@ -381,7 +411,7 @@ impl Plugin for DemandControl { } } -register_plugin!("apollo", "preview_demand_control", DemandControl); +register_plugin!("apollo", "demand_control", DemandControl); #[cfg(test)] mod test { diff --git a/apollo-router/src/plugins/demand_control/strategy/mod.rs b/apollo-router/src/plugins/demand_control/strategy/mod.rs index 5defca64d5..6bae126694 100644 --- a/apollo-router/src/plugins/demand_control/strategy/mod.rs +++ b/apollo-router/src/plugins/demand_control/strategy/mod.rs @@ -1,11 +1,10 @@ -use std::collections::HashMap; use std::sync::Arc; -use apollo_compiler::validation::Valid; +use ahash::HashMap; use apollo_compiler::ExecutableDocument; -use apollo_compiler::Schema; use crate::graphql; +use crate::plugins::demand_control::cost_calculator::schema::DemandControlledSchema; use crate::plugins::demand_control::cost_calculator::static_cost::StaticCostCalculator; use crate::plugins::demand_control::strategy::static_estimated::StaticEstimated; use crate::plugins::demand_control::DemandControlConfig; @@ -75,15 +74,15 @@ impl Strategy { pub(crate) struct StrategyFactory { config: 
DemandControlConfig, #[allow(dead_code)] - supergraph_schema: Arc>, - subgraph_schemas: Arc>>>, + supergraph_schema: Arc, + subgraph_schemas: Arc>, } impl StrategyFactory { pub(crate) fn new( config: DemandControlConfig, - supergraph_schema: Arc>, - subgraph_schemas: Arc>>>, + supergraph_schema: Arc, + subgraph_schemas: Arc>, ) -> Self { Self { config, @@ -97,6 +96,7 @@ impl StrategyFactory { StrategyConfig::StaticEstimated { list_size, max } => Arc::new(StaticEstimated { max: *max, cost_calculator: StaticCostCalculator::new( + self.supergraph_schema.clone(), self.subgraph_schemas.clone(), *list_size, ), diff --git a/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs b/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs index c7bfdc1ec4..22bcf3fdb6 100644 --- a/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs +++ b/apollo-router/src/plugins/file_uploads/rearrange_query_plan.rs @@ -45,6 +45,7 @@ pub(super) fn rearrange_query_plan( formatted_query_plan: query_plan.formatted_query_plan.clone(), query: query_plan.query.clone(), query_metrics: query_plan.query_metrics, + estimated_size: Default::default(), }) } diff --git a/apollo-router/src/plugins/headers.rs b/apollo-router/src/plugins/headers.rs index 2f19a965ff..1e52cd444c 100644 --- a/apollo-router/src/plugins/headers.rs +++ b/apollo-router/src/plugins/headers.rs @@ -193,6 +193,7 @@ struct Config { struct Headers { all_operations: Arc>, subgraph_operations: HashMap>>, + reserved_headers: Arc>, } #[async_trait::async_trait] @@ -220,6 +221,7 @@ impl Plugin for Headers { Ok(Headers { all_operations: Arc::new(operations), subgraph_operations, + reserved_headers: Arc::new(RESERVED_HEADERS.iter().collect()), }) } @@ -230,6 +232,7 @@ impl Plugin for Headers { .get(name) .cloned() .unwrap_or_else(|| self.all_operations.clone()), + self.reserved_headers.clone(), )) .service(service) .boxed() @@ -242,10 +245,13 @@ struct HeadersLayer { } impl HeadersLayer { - fn new(operations: Arc>) -> Self { + fn new( + operations: Arc>, + reserved_headers: Arc>, + ) -> Self { Self { operations, - reserved_headers: Arc::new(RESERVED_HEADERS.iter().collect()), + reserved_headers, } } } @@ -583,12 +589,13 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(Arc::new(vec![Operation::Insert(Insert::Static( - InsertStatic { + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Insert(Insert::Static(InsertStatic { name: "c".try_into()?, value: "d".try_into()?, - }, - ))])) + }))]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) .layer(mock); service.ready().await?.call(example_request()).await?; @@ -610,12 +617,15 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(Arc::new(vec![Operation::Insert( - Insert::FromContext(InsertFromContext { - name: "header_from_context".try_into()?, - from_context: "my_key".to_string(), - }), - )])) + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Insert(Insert::FromContext( + InsertFromContext { + name: "header_from_context".try_into()?, + from_context: "my_key".to_string(), + }, + ))]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) .layer(mock); service.ready().await?.call(example_request()).await?; @@ -637,13 +647,14 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(Arc::new(vec![Operation::Insert(Insert::FromBody( - InsertFromBody { + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Insert(Insert::FromBody(InsertFromBody { name: 
"header_from_request".try_into()?, path: JSONQuery::parse(".operationName")?, default: None, - }, - ))])) + }))]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) .layer(mock); service.ready().await?.call(example_request()).await?; @@ -658,9 +669,10 @@ mod test { .withf(|request| request.assert_headers(vec![("ac", "vac"), ("ab", "vab")])) .returning(example_response); - let mut service = HeadersLayer::new(Arc::new(vec![Operation::Remove(Remove::Named( - "aa".try_into()?, - ))])) + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Remove(Remove::Named("aa".try_into()?))]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) .layer(mock); service.ready().await?.call(example_request()).await?; @@ -675,9 +687,12 @@ mod test { .withf(|request| request.assert_headers(vec![("ac", "vac")])) .returning(example_response); - let mut service = HeadersLayer::new(Arc::new(vec![Operation::Remove(Remove::Matching( - Regex::from_str("a[ab]")?, - ))])) + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Remove(Remove::Matching(Regex::from_str( + "a[ab]", + )?))]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) .layer(mock); service.ready().await?.call(example_request()).await?; @@ -701,11 +716,13 @@ mod test { }) .returning(example_response); - let mut service = - HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Matching { + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Propagate(Propagate::Matching { matching: Regex::from_str("d[ab]")?, - })])) - .layer(mock); + })]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -726,13 +743,15 @@ mod test { }) .returning(example_response); - let mut service = - HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Named { + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Propagate(Propagate::Named { named: "da".try_into()?, rename: None, default: None, - })])) - .layer(mock); + })]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -753,13 +772,15 @@ mod test { }) .returning(example_response); - let mut service = - HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Named { + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Propagate(Propagate::Named { named: "da".try_into()?, rename: Some("ea".try_into()?), default: None, - })])) - .layer(mock); + })]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -780,13 +801,15 @@ mod test { }) .returning(example_response); - let mut service = - HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Named { + let mut service = HeadersLayer::new( + Arc::new(vec![Operation::Propagate(Propagate::Named { named: "ea".try_into()?, rename: None, default: Some("defaulted".try_into()?), - })])) - .layer(mock); + })]), + Arc::new(RESERVED_HEADERS.iter().collect()), + ) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) diff --git a/apollo-router/src/plugins/include_subgraph_errors.rs b/apollo-router/src/plugins/include_subgraph_errors.rs index 66ffa53917..4f558ced82 100644 --- a/apollo-router/src/plugins/include_subgraph_errors.rs +++ b/apollo-router/src/plugins/include_subgraph_errors.rs @@ -100,22 +100,24 @@ mod test { use crate::services::HasSchema; use crate::services::PluggableSupergraphServiceBuilder; use 
crate::services::SupergraphRequest; + use crate::spec::Schema; use crate::Configuration; static UNREDACTED_PRODUCT_RESPONSE: Lazy = Lazy::new(|| { - Bytes::from_static(r#"{"data":{"topProducts":null},"errors":[{"message":"couldn't find mock for query {\"query\":\"query ErrorTopProducts__products__0($first:Int){topProducts(first:$first){__typename upc name}}\",\"operationName\":\"ErrorTopProducts__products__0\",\"variables\":{\"first\":2}}","extensions":{"test":"value","code":"FETCH_ERROR"}}]}"#.as_bytes()) + Bytes::from_static(r#"{"data":{"topProducts":null},"errors":[{"message":"couldn't find mock for query {\"query\":\"query ErrorTopProducts__products__0($first:Int){topProducts(first:$first){__typename upc name}}\",\"operationName\":\"ErrorTopProducts__products__0\",\"variables\":{\"first\":2}}","path":[],"extensions":{"test":"value","code":"FETCH_ERROR"}}]}"#.as_bytes()) }); static REDACTED_PRODUCT_RESPONSE: Lazy = Lazy::new(|| { Bytes::from_static( - r#"{"data":{"topProducts":null},"errors":[{"message":"Subgraph errors redacted"}]}"# + r#"{"data":{"topProducts":null},"errors":[{"message":"Subgraph errors redacted","path":[]}]}"# .as_bytes(), ) }); static REDACTED_ACCOUNT_RESPONSE: Lazy = Lazy::new(|| { Bytes::from_static( - r#"{"data":null,"errors":[{"message":"Subgraph errors redacted"}]}"#.as_bytes(), + r#"{"data":null,"errors":[{"message":"Subgraph errors redacted","path":[]}]}"# + .as_bytes(), ) }); @@ -191,8 +193,9 @@ mod test { let schema = include_str!("../../../apollo-router-benchmarks/benches/fixtures/supergraph.graphql"); + let schema = Schema::parse(schema, &Default::default()).unwrap(); let planner = BridgeQueryPlannerPool::new( - schema.to_string(), + schema.into(), Default::default(), NonZeroUsize::new(1).unwrap(), ) diff --git a/apollo-router/src/plugins/progressive_override/mod.rs b/apollo-router/src/plugins/progressive_override/mod.rs index bcbf462afd..542b0d0722 100644 --- a/apollo-router/src/plugins/progressive_override/mod.rs +++ b/apollo-router/src/plugins/progressive_override/mod.rs @@ -29,7 +29,7 @@ pub(crate) const LABELS_TO_OVERRIDE_KEY: &str = "apollo_override::labels_to_over pub(crate) const JOIN_FIELD_DIRECTIVE_NAME: &str = "join__field"; pub(crate) const JOIN_SPEC_BASE_URL: &str = "https://specs.apollo.dev/join"; -pub(crate) const JOIN_SPEC_VERSION_RANGE: &str = ">=0.4.0, <=0.4.0"; +pub(crate) const JOIN_SPEC_VERSION_RANGE: &str = ">=0.4"; pub(crate) const OVERRIDE_LABEL_ARG_NAME: &str = "overrideLabel"; /// Configuration for the progressive override plugin diff --git a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap index cb657dcdce..01cca77a5b 100644 --- a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap +++ b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap @@ -19,7 +19,7 @@ expression: query_plan "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "12dda6193654ae4fe6e38bc09d4f81cc73d0c9e098692096f72d2158eef4776f", + "schemaAwareHash": "23605b350473485e40bc8b1245f0c5c226a2997a96291bf3ad3412570a5172bb", 
"authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap index d18a3e2b11..455898049f 100644 --- a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap +++ b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap @@ -24,7 +24,7 @@ expression: query_plan "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "00ad582ea45fc1bce436b36b21512f3d2c47b74fdbdc61e4b349289722c9ecf2", + "schemaAwareHash": "d14f50b039a3b961385f4d2a878c5800dd01141cddd3f8f1874a5499bbe397a9", "authorization": { "is_authenticated": false, "scopes": [], @@ -63,7 +63,7 @@ expression: query_plan "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "a8ebdc2151a2e5207882e43c6906c0c64167fd9a8e0c7c4becc47736a5105096", + "schemaAwareHash": "caa182daf66e4ffe9b1af8c386092ba830887bbae0d58395066fa480525080ec", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/src/plugins/progressive_override/tests.rs b/apollo-router/src/plugins/progressive_override/tests.rs index d8b3cb31af..0cead42fd9 100644 --- a/apollo-router/src/plugins/progressive_override/tests.rs +++ b/apollo-router/src/plugins/progressive_override/tests.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use apollo_compiler::Schema; use tower::ServiceExt; use crate::metrics::FutureMetricsExt; @@ -9,6 +10,9 @@ use crate::plugin::Plugin; use crate::plugin::PluginInit; use crate::plugins::progressive_override::Config; use crate::plugins::progressive_override::ProgressiveOverridePlugin; +use crate::plugins::progressive_override::JOIN_FIELD_DIRECTIVE_NAME; +use crate::plugins::progressive_override::JOIN_SPEC_BASE_URL; +use crate::plugins::progressive_override::JOIN_SPEC_VERSION_RANGE; use crate::plugins::progressive_override::LABELS_TO_OVERRIDE_KEY; use crate::plugins::progressive_override::UNRESOLVED_LABELS_KEY; use crate::services::layers::query_analysis::ParsedDocument; @@ -22,6 +26,49 @@ use crate::TestHarness; const SCHEMA: &str = include_str!("testdata/supergraph.graphql"); const SCHEMA_NO_USAGES: &str = include_str!("testdata/supergraph_no_usages.graphql"); +#[test] +fn test_progressive_overrides_are_recognised_vor_join_v0_4_and_above() { + let schema_for_version = |version| { + format!( + r#"schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/{}", for: EXECUTION) + @link(url: "https://specs.apollo.dev/context/v0.1", for: SECURITY) + + directive @join__field repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION"#, + version + ) + }; + + let join_v3_schema = Schema::parse(schema_for_version("v0.3"), "test").unwrap(); + assert!(crate::spec::Schema::directive_name( + &join_v3_schema, + JOIN_SPEC_BASE_URL, + JOIN_SPEC_VERSION_RANGE, + JOIN_FIELD_DIRECTIVE_NAME, + ) + .is_none()); + + let join_v4_schema = Schema::parse(schema_for_version("v0.4"), "test").unwrap(); + assert!(crate::spec::Schema::directive_name( + &join_v4_schema, + JOIN_SPEC_BASE_URL, + JOIN_SPEC_VERSION_RANGE, + 
JOIN_FIELD_DIRECTIVE_NAME, + ) + .is_some()); + + let join_v5_schema = Schema::parse(schema_for_version("v0.5"), "test").unwrap(); + + assert!(crate::spec::Schema::directive_name( + &join_v5_schema, + JOIN_SPEC_BASE_URL, + JOIN_SPEC_VERSION_RANGE, + JOIN_FIELD_DIRECTIVE_NAME, + ) + .is_some()) +} + #[tokio::test] async fn plugin_disables_itself_with_no_progressive_override_usages() { let plugin = ProgressiveOverridePlugin::new(PluginInit::fake_new( diff --git a/apollo-router/src/plugins/record_replay/record.rs b/apollo-router/src/plugins/record_replay/record.rs index e821b016b2..f9dc97b52a 100644 --- a/apollo-router/src/plugins/record_replay/record.rs +++ b/apollo-router/src/plugins/record_replay/record.rs @@ -67,7 +67,7 @@ impl Plugin for Record { enabled: init.config.enabled, supergraph_sdl: init.supergraph_sdl.clone(), storage_path: storage_path.clone().into(), - schema: Arc::new(Schema::parse(&init.supergraph_sdl, &Default::default())?), + schema: Arc::new(Schema::parse_arc(init.supergraph_sdl, &Default::default())?), }; if init.config.enabled { diff --git a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap index 0d6ab611f6..e914049664 100644 --- a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap +++ b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap @@ -69,7 +69,7 @@ expression: "serde_json::to_value(response).unwrap()" "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "7245d488e97c3b2ac9f5fa4dd4660940b94ad81af070013305b2c0f76337b2f9", + "schemaAwareHash": "39cac6386a951cd4dbdfc9c91d7d24cc1061481ab03b72c483422446e09cba32", "authorization": { "is_authenticated": false, "scopes": [], @@ -109,7 +109,7 @@ expression: "serde_json::to_value(response).unwrap()" "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "6e0b4156706ea0cf924500cfdc99dd44b9f0ed07e2d3f888d4aff156e6a33238", + "schemaAwareHash": "ee6ac550117eed7d8fcaf66c83fd5177bf03a9d5761f484e2664ea4e66149127", "authorization": { "is_authenticated": false, "scopes": [], @@ -156,7 +156,7 @@ expression: "serde_json::to_value(response).unwrap()" "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "ff649f3d70241d5a8cd5f5d03ff4c41ecff72b0e4129a480207b05ac92318042", + "schemaAwareHash": "76d400fc6a494cbe05a44751923e570ee31928f0fb035ea36c14d4d6f4545482", "authorization": { "is_authenticated": false, "scopes": [], @@ -200,7 +200,7 @@ expression: "serde_json::to_value(response).unwrap()" "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "bf9f3beda78a7a565e47c862157bad4ec871d724d752218da1168455dddca074", + "schemaAwareHash": "66c61f60e730b77cd0a58908fee01dc7a0742c47e9f847037e01297d37918821", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap index 0d6ab611f6..e914049664 100644 --- a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap +++ 
b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap @@ -69,7 +69,7 @@ expression: "serde_json::to_value(response).unwrap()" "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "7245d488e97c3b2ac9f5fa4dd4660940b94ad81af070013305b2c0f76337b2f9", + "schemaAwareHash": "39cac6386a951cd4dbdfc9c91d7d24cc1061481ab03b72c483422446e09cba32", "authorization": { "is_authenticated": false, "scopes": [], @@ -109,7 +109,7 @@ expression: "serde_json::to_value(response).unwrap()" "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "6e0b4156706ea0cf924500cfdc99dd44b9f0ed07e2d3f888d4aff156e6a33238", + "schemaAwareHash": "ee6ac550117eed7d8fcaf66c83fd5177bf03a9d5761f484e2664ea4e66149127", "authorization": { "is_authenticated": false, "scopes": [], @@ -156,7 +156,7 @@ expression: "serde_json::to_value(response).unwrap()" "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "ff649f3d70241d5a8cd5f5d03ff4c41ecff72b0e4129a480207b05ac92318042", + "schemaAwareHash": "76d400fc6a494cbe05a44751923e570ee31928f0fb035ea36c14d4d6f4545482", "authorization": { "is_authenticated": false, "scopes": [], @@ -200,7 +200,7 @@ expression: "serde_json::to_value(response).unwrap()" "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "bf9f3beda78a7a565e47c862157bad4ec871d724d752218da1168455dddca074", + "schemaAwareHash": "66c61f60e730b77cd0a58908fee01dc7a0742c47e9f847037e01297d37918821", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/src/plugins/subscription.rs b/apollo-router/src/plugins/subscription.rs index 4ca4d56201..50d5e78ead 100644 --- a/apollo-router/src/plugins/subscription.rs +++ b/apollo-router/src/plugins/subscription.rs @@ -229,7 +229,7 @@ fn default_path() -> String { String::from("/callback") } -fn default_listen_addr() -> ListenAddr { +pub(crate) fn default_listen_addr() -> ListenAddr { ListenAddr::SocketAddr("127.0.0.1:4000".parse().expect("valid ListenAddr")) } diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs index 1f36eb4c34..dba6f207f4 100644 --- a/apollo-router/src/plugins/telemetry/config.rs +++ b/apollo-router/src/plugins/telemetry/config.rs @@ -4,6 +4,7 @@ use std::collections::HashSet; use axum::headers::HeaderName; use derivative::Derivative; +use num_traits::ToPrimitive; use opentelemetry::sdk::metrics::new_view; use opentelemetry::sdk::metrics::Aggregation; use opentelemetry::sdk::metrics::Instrument; @@ -93,6 +94,14 @@ pub(crate) struct Instrumentation { pub(crate) instruments: config_new::instruments::InstrumentsConfig, } +impl Instrumentation { + pub(crate) fn validate(&self) -> Result<(), String> { + self.events.validate()?; + self.instruments.validate()?; + self.spans.validate() + } +} + /// Metrics configuration #[derive(Clone, Default, Debug, Deserialize, JsonSchema)] #[serde(deny_unknown_fields, default)] @@ -232,14 +241,18 @@ pub(crate) struct ExposeTraceId { pub(crate) format: TraceIdFormat, } -#[derive(Clone, Default, Debug, Deserialize, JsonSchema)] -#[serde(deny_unknown_fields, rename_all = "lowercase")] +#[derive(Clone, Default, Debug, Deserialize, JsonSchema, PartialEq, Eq)] +#[serde(deny_unknown_fields, rename_all = "snake_case")] pub(crate) enum TraceIdFormat { /// Format the Trace ID as a hexadecimal number /// /// (e.g. 
Trace ID 16 -> 00000000000000000000000000000010) #[default] Hexadecimal, + /// Format the Trace ID as a hexadecimal number + /// + /// (e.g. Trace ID 16 -> 00000000000000000000000000000010) + OpenTelemetry, /// Format the Trace ID as a decimal number /// /// (e.g. Trace ID 16 -> 16) @@ -247,6 +260,23 @@ pub(crate) enum TraceIdFormat { /// Datadog Datadog, + + /// UUID format with dashes + /// (eg. 67e55044-10b1-426f-9247-bb680e5fe0c8) + Uuid, +} + +impl TraceIdFormat { + pub(crate) fn format(&self, trace_id: TraceId) -> String { + match self { + TraceIdFormat::Hexadecimal | TraceIdFormat::OpenTelemetry => { + format!("{:032x}", trace_id) + } + TraceIdFormat::Decimal => format!("{}", u128::from_be_bytes(trace_id.to_bytes())), + TraceIdFormat::Datadog => trace_id.to_datadog(), + TraceIdFormat::Uuid => Uuid::from_bytes(trace_id.to_bytes()).to_string(), + } + } } /// Apollo usage report signature normalization algorithm @@ -301,6 +331,10 @@ pub(crate) struct RequestPropagation { #[schemars(with = "String")] #[serde(deserialize_with = "deserialize_option_header_name")] pub(crate) header_name: Option, + + /// The trace ID format that will be used when propagating to subgraph services. + #[serde(default)] + pub(crate) format: TraceIdFormat, } #[derive(Debug, Clone, Deserialize, JsonSchema)] @@ -409,6 +443,18 @@ pub(crate) enum AttributeValue { Array(AttributeArray), } +impl AttributeValue { + pub(crate) fn as_f64(&self) -> Option { + match self { + AttributeValue::Bool(_) => None, + AttributeValue::I64(v) => Some(*v as f64), + AttributeValue::F64(v) => Some(*v), + AttributeValue::String(v) => v.parse::().ok(), + AttributeValue::Array(_) => None, + } + } +} + impl From for AttributeValue { fn from(value: String) -> Self { AttributeValue::String(value) @@ -458,7 +504,12 @@ impl PartialOrd for AttributeValue { (AttributeValue::F64(f1), AttributeValue::F64(f2)) => f1.partial_cmp(f2), (AttributeValue::I64(i1), AttributeValue::I64(i2)) => i1.partial_cmp(i2), (AttributeValue::String(s1), AttributeValue::String(s2)) => s1.partial_cmp(s2), - // Arrays and mismatched types are incomparable + // Mismatched numerics are comparable + (AttributeValue::F64(f1), AttributeValue::I64(i)) => { + i.to_f64().as_ref().and_then(|f2| f1.partial_cmp(f2)) + } + (AttributeValue::I64(i), AttributeValue::F64(f)) => i.to_f64()?.partial_cmp(f), + // Arrays and other mismatched types are incomparable _ => None, } } diff --git a/apollo-router/src/plugins/telemetry/config_new/cache/mod.rs b/apollo-router/src/plugins/telemetry/config_new/cache/mod.rs index f1d01f2393..adc172911b 100644 --- a/apollo-router/src/plugins/telemetry/config_new/cache/mod.rs +++ b/apollo-router/src/plugins/telemetry/config_new/cache/mod.rs @@ -1,29 +1,19 @@ use std::sync::Arc; use attributes::CacheAttributes; -use opentelemetry::metrics::MeterProvider; -use opentelemetry::metrics::Unit; use opentelemetry::Key; use opentelemetry::KeyValue; -use parking_lot::Mutex; use schemars::JsonSchema; use serde::Deserialize; use tower::BoxError; use super::instruments::CustomCounter; -use super::instruments::CustomCounterInner; -use super::instruments::Increment; -use super::instruments::InstrumentsConfig; -use super::instruments::METER_NAME; -use super::selectors::CacheKind; use super::selectors::SubgraphSelector; -use crate::metrics; use crate::plugins::cache::entity::CacheHitMiss; use crate::plugins::cache::entity::CacheSubgraph; use crate::plugins::cache::metrics::CacheMetricContextKey; use crate::plugins::telemetry::config::AttributeValue; use 
crate::plugins::telemetry::config_new::attributes::DefaultAttributeRequirementLevel; -use crate::plugins::telemetry::config_new::conditions::Condition; use crate::plugins::telemetry::config_new::extendable::Extendable; use crate::plugins::telemetry::config_new::instruments::DefaultedStandardInstrument; use crate::plugins::telemetry::config_new::instruments::Instrumented; @@ -33,7 +23,7 @@ use crate::services::subgraph; pub(crate) mod attributes; -static CACHE_METRIC: &str = "apollo.router.operations.entity.cache"; +pub(crate) const CACHE_METRIC: &str = "apollo.router.operations.entity.cache"; const ENTITY_TYPE: Key = Key::from_static_str("entity.type"); const CACHE_HIT: Key = Key::from_static_str("cache.hit"); @@ -63,48 +53,6 @@ pub(crate) struct CacheInstruments { >, } -impl From<&InstrumentsConfig> for CacheInstruments { - fn from(value: &InstrumentsConfig) -> Self { - let meter = metrics::meter_provider().meter(METER_NAME); - CacheInstruments { - cache_hit: value.cache.attributes.cache.is_enabled().then(|| { - let mut nb_attributes = 0; - let selectors = match &value.cache.attributes.cache { - DefaultedStandardInstrument::Bool(_) | DefaultedStandardInstrument::Unset => { - None - } - DefaultedStandardInstrument::Extendable { attributes } => { - nb_attributes = attributes.custom.len(); - Some(attributes.clone()) - } - }; - CustomCounter { - inner: Mutex::new(CustomCounterInner { - increment: Increment::Custom(None), - condition: Condition::True, - counter: Some( - meter - .f64_counter(CACHE_METRIC) - .with_unit(Unit::new("ops")) - .with_description( - "Entity cache hit/miss operations at the subgraph level", - ) - .init(), - ), - attributes: Vec::with_capacity(nb_attributes), - selector: Some(Arc::new(SubgraphSelector::Cache { - cache: CacheKind::Hit, - entity_type: None, - })), - selectors, - incremented: false, - }), - } - }), - } - } -} - impl Instrumented for CacheInstruments { type Request = subgraph::Request; type Response = subgraph::Response; diff --git a/apollo-router/src/plugins/telemetry/config_new/conditional.rs b/apollo-router/src/plugins/telemetry/config_new/conditional.rs index 94136ee63d..a42f112a8c 100644 --- a/apollo-router/src/plugins/telemetry/config_new/conditional.rs +++ b/apollo-router/src/plugins/telemetry/config_new/conditional.rs @@ -149,6 +149,18 @@ where } } +impl Conditional +where + Att: Selector, +{ + pub(crate) fn validate(&self) -> Result<(), String> { + match &self.condition { + Some(cond) => cond.lock().validate(None), + None => Ok(()), + } + } +} + impl Selector for Conditional where Att: Selector, @@ -334,6 +346,10 @@ where _ => None, } } + + fn is_active(&self, stage: super::Stage) -> bool { + self.selector.is_active(stage) + } } /// Custom Deserializer for attributes that will deserialize into a custom field if possible, but otherwise into one of the pre-defined attributes. 
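Note on the stage-aware validation added above: a condition is only considered valid if every selector it references can actually produce a value at the stage where the condition is evaluated. A minimal sketch of the intended behavior, reusing the `exists` helper and the `Req`/`Resp` test selectors defined in conditions.rs's test module later in this diff (sketch only, not part of the patch):

```rust
// `Resp` is a test selector that only yields a value at response time, so a
// request-stage event guarded by it could never trigger; validate() now
// rejects that configuration at startup instead of failing silently.
assert!(exists(Resp).validate(Some(Stage::Request)).is_err());
// With no stage restriction, the same condition is accepted.
assert!(exists(Resp).validate(None).is_ok());
```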
diff --git a/apollo-router/src/plugins/telemetry/config_new/conditions.rs b/apollo-router/src/plugins/telemetry/config_new/conditions.rs index f4782a3c13..915fad6135 100644 --- a/apollo-router/src/plugins/telemetry/config_new/conditions.rs +++ b/apollo-router/src/plugins/telemetry/config_new/conditions.rs @@ -3,6 +3,7 @@ use schemars::JsonSchema; use serde::Deserialize; use tower::BoxError; +use super::Stage; use crate::plugins::telemetry::config::AttributeValue; use crate::plugins::telemetry::config_new::Selector; use crate::Context; @@ -55,28 +56,98 @@ impl<T> Condition<T> where T: Selector, { - pub(crate) fn evaluate_request(&mut self, request: &T::Request) -> Option<bool> { + /// restricted_stage is Some if this condition only applies at a specific stage, e.g. for events + pub(crate) fn validate(&self, restricted_stage: Option<Stage>) -> Result<(), String> { match self { - Condition::Eq(eq) => match (eq[0].on_request(request), eq[1].on_request(request)) { - (None, None) => None, - (None, Some(right)) => { - eq[1] = SelectorOrValue::Value(right.into()); - None - } - (Some(left), None) => { - eq[0] = SelectorOrValue::Value(left.into()); - None + Condition::Eq(arr) | Condition::Gt(arr) | Condition::Lt(arr) => match (&arr[0], &arr[1]) { + (SelectorOrValue::Value(val1), SelectorOrValue::Value(val2)) => { + Err(format!("trying to compare 2 values ('{val1}' and '{val2}'); this is usually a syntax error because a condition should compare a selector with a value")) } - (Some(left), Some(right)) => { - if left == right { - *self = Condition::True; - Some(true) - } else { - Some(false) + (SelectorOrValue::Value(_), SelectorOrValue::Selector(sel)) | (SelectorOrValue::Selector(sel), SelectorOrValue::Value(_)) => { + // Special condition for events + if let Some(Stage::Request) = &restricted_stage { + if !sel.is_active(Stage::Request) { + return Err(format!("selector {sel:?} is only valid for the request stage, so this log event will never trigger")); + } } + Ok(()) + }, + (SelectorOrValue::Selector(sel1), SelectorOrValue::Selector(sel2)) => { + // Special condition for events + if let Some(Stage::Request) = &restricted_stage { + if !sel1.is_active(Stage::Request) { + return Err(format!("selector {sel1:?} is only valid for the request stage, so this log event will never trigger")); + } + if !sel2.is_active(Stage::Request) { + return Err(format!("selector {sel2:?} is only valid for the request stage, so this log event will never trigger")); + } + } + Ok(()) + }, + }, + Condition::Exists(sel) => { + match restricted_stage { + Some(stage) => { + if sel.is_active(stage) { + Ok(()) + } else { + Err(format!("the 'exists' condition uses a selector applied at the wrong stage; this condition will be evaluated at the {} stage", stage)) + } + }, + None => Ok(()) } }, + Condition::All(all) => { + for cond in all { + cond.validate(restricted_stage)?; + } + + Ok(()) + }, + Condition::Any(any) => { + for cond in any { + cond.validate(restricted_stage)?; + } + + Ok(()) + }, + Condition::Not(cond) => cond.validate(restricted_stage), + Condition::True | Condition::False => Ok(()), + } + } + + pub(crate) fn evaluate_request(&mut self, request: &T::Request) -> Option<bool> { + match self { + Condition::Eq(eq) => { + if !eq[0].is_active(Stage::Request) && !eq[1].is_active(Stage::Request) { + // Nothing to compute here + return None; + } + match (eq[0].on_request(request), eq[1].on_request(request)) { + (None, None) => None, + (None, Some(right)) => { + eq[1] = SelectorOrValue::Value(right.into()); + None + } + (Some(left), None) => { + eq[0] 
= SelectorOrValue::Value(left.into()); + None + } + (Some(left), Some(right)) => { + if left == right { + *self = Condition::True; + Some(true) + } else { + Some(false) + } + } + } + } Condition::Gt(gt) => { + if !gt[0].is_active(Stage::Request) && !gt[1].is_active(Stage::Request) { + // Nothing to compute here + return None; + } let left_att = gt[0].on_request(request).map(AttributeValue::from); let right_att = gt[1].on_request(request).map(AttributeValue::from); match (left_att, right_att) { @@ -89,18 +160,33 @@ where gt[1] = SelectorOrValue::Value(r); None } - (Some(l), Some(r)) => { - if l > r { - *self = Condition::True; - Some(true) - } else { - *self = Condition::False; - Some(false) + (Some(l), Some(r)) => match (l.as_f64(), r.as_f64()) { + (Some(l), Some(r)) => { + if l > r { + *self = Condition::True; + Some(true) + } else { + *self = Condition::False; + Some(false) + } } - } + _ => { + if l > r { + *self = Condition::True; + Some(true) + } else { + *self = Condition::False; + Some(false) + } + } + }, } } Condition::Lt(lt) => { + if !lt[0].is_active(Stage::Request) && !lt[1].is_active(Stage::Request) { + // Nothing to compute here + return None; + } let left_att = lt[0].on_request(request).map(AttributeValue::from); let right_att = lt[1].on_request(request).map(AttributeValue::from); match (left_att, right_att) { @@ -113,21 +199,36 @@ where lt[1] = SelectorOrValue::Value(r); None } - (Some(l), Some(r)) => { - if l < r { - *self = Condition::True; - Some(true) - } else { - *self = Condition::False; - Some(false) + (Some(l), Some(r)) => match (l.as_f64(), r.as_f64()) { + (Some(l), Some(r)) => { + if l < r { + *self = Condition::True; + Some(true) + } else { + *self = Condition::False; + Some(false) + } } - } + _ => { + if l < r { + *self = Condition::True; + Some(true) + } else { + *self = Condition::False; + Some(false) + } + } + }, } } Condition::Exists(exist) => { - if exist.on_request(request).is_some() { - *self = Condition::True; - Some(true) + if exist.is_active(Stage::Request) { + if exist.on_request(request).is_some() { + *self = Condition::True; + Some(true) + } else { + Some(false) + } } else { None } @@ -309,6 +410,7 @@ where Condition::False => false, } } + pub(crate) fn evaluate_drop(&self) -> Option { match self { Condition::Eq(eq) => match (eq[0].on_drop(), eq[1].on_drop()) { @@ -456,6 +558,13 @@ where SelectorOrValue::Selector(selector) => selector.on_drop(), } } + + fn is_active(&self, stage: super::Stage) -> bool { + match self { + SelectorOrValue::Value(_) => true, + SelectorOrValue::Selector(selector) => selector.is_active(stage), + } + } } #[cfg(test)] @@ -472,8 +581,10 @@ mod test { use crate::plugins::telemetry::config_new::test::field; use crate::plugins::telemetry::config_new::test::ty; use crate::plugins::telemetry::config_new::Selector; + use crate::plugins::telemetry::config_new::Stage; use crate::Context; + #[derive(Debug)] enum TestSelector { Req, Resp, @@ -545,11 +656,22 @@ mod test { _ => None, } } + + fn is_active(&self, stage: crate::plugins::telemetry::config_new::Stage) -> bool { + match self { + Req => matches!(stage, Stage::Request), + Resp => matches!( + stage, + Stage::Response | Stage::ResponseEvent | Stage::ResponseField + ), + Static(_) => true, + } + } } #[test] fn test_condition_exist() { - assert_eq!(exists(Req).req(None), None); + assert_eq!(exists(Req).req(None), Some(false)); assert_eq!(exists(Req).req(Some(1i64)), Some(true)); assert!(!exists(Resp).resp(None)); assert!(exists(Resp).resp(Some(1i64))); @@ -577,6 +699,7 @@ mod test { 
#[test] fn test_condition_gt() { + test_gt("2", "1", "1"); test_gt(2, 1, 1); test_gt(2.0, 1.0, 1.0); test_gt("b", "a", "a"); @@ -604,8 +727,10 @@ mod test { #[test] fn test_condition_lt() { + test_lt("1", "2", "2"); test_lt(1, 2, 2); test_lt(1.0, 2.0, 2.0); + test_lt("1.0", "2.0", "2.0"); test_lt("a", "b", "b"); assert_eq!(lt(true, false).req(None), Some(false)); assert_eq!(lt(false, true).req(None), Some(true)); @@ -707,6 +832,8 @@ mod test { assert_eq!(gt(Req, 1).req(Some(2i64)), Some(true)); assert_eq!(gt(Req, 1).req(None), None); + assert_eq!(gt("2", Req).req(Some(1i64)), Some(true)); + assert_eq!(gt("2.1", Req).req(Some(1i64)), Some(true)); assert_eq!(gt(2, Req).req(Some(1i64)), Some(true)); assert_eq!(gt(2, Req).req(None), None); assert_eq!(gt(Req, Req).req(Some(1i64)), Some(false)); @@ -720,7 +847,8 @@ mod test { assert_eq!(lt(Req, Req).req(None), None); assert_eq!(exists(Req).req(Some(1i64)), Some(true)); - assert_eq!(exists(Req).req(None), None); + assert_eq!(exists(Req).req(None), Some(false)); + assert!(!exists(Resp).resp(None)); assert_eq!(all(eq(1, 1), eq(1, Req)).req(Some(1i64)), Some(true)); assert_eq!(all(eq(1, 1), eq(1, Req)).req(None), None); @@ -733,6 +861,22 @@ mod test { assert!(eq(Resp, "error").error(Some("error"))); } + #[test] + fn test_condition_validate() { + assert!(eq(Req, 1).validate(Some(Stage::Request)).is_ok()); + assert!(eq(Req, 1).validate(Some(Stage::Response)).is_ok()); + assert!(eq(1, Req).validate(Some(Stage::Request)).is_ok()); + assert!(eq(1, Req).validate(Some(Stage::Response)).is_ok()); + assert!(eq(Resp, 1).validate(Some(Stage::Request)).is_err()); + assert!(eq(Resp, 1).validate(None).is_ok()); + assert!(eq(1, Resp).validate(None).is_ok()); + assert!(eq(1, Resp).validate(Some(Stage::Request)).is_err()); + assert!(exists(Resp).validate(Some(Stage::Request)).is_err()); + assert!(exists(Req).validate(None).is_ok()); + assert!(exists(Req).validate(Some(Stage::Request)).is_ok()); + assert!(exists(Resp).validate(None).is_ok()); + } + #[test] fn test_evaluate_drop() { assert!(eq(Req, 1).evaluate_drop().is_none()); @@ -745,6 +889,7 @@ mod test { assert_eq!(lt(2, 1).evaluate_drop(), Some(false)); assert_eq!(lt(Static(1), 2).evaluate_drop(), Some(true)); assert_eq!(lt(2, Static(1)).evaluate_drop(), Some(false)); + assert_eq!(gt("2", "1").evaluate_drop(), Some(true)); assert_eq!(gt(2, 1).evaluate_drop(), Some(true)); assert_eq!(gt(1, 2).evaluate_drop(), Some(false)); assert_eq!(gt(Static(2), 1).evaluate_drop(), Some(true)); diff --git a/apollo-router/src/plugins/telemetry/config_new/cost/mod.rs b/apollo-router/src/plugins/telemetry/config_new/cost/mod.rs index df3dcf7b0d..503b191904 100644 --- a/apollo-router/src/plugins/telemetry/config_new/cost/mod.rs +++ b/apollo-router/src/plugins/telemetry/config_new/cost/mod.rs @@ -1,3 +1,4 @@ +use std::collections::HashMap; use std::sync::Arc; use opentelemetry::metrics::MeterProvider; @@ -9,6 +10,7 @@ use serde::Deserialize; use tower::BoxError; use super::instruments::Increment; +use super::instruments::StaticInstrument; use crate::metrics; use crate::plugins::demand_control::CostContext; use crate::plugins::telemetry::config::AttributeValue; @@ -115,7 +117,28 @@ pub(crate) struct CostInstrumentsConfig { } impl CostInstrumentsConfig { - pub(crate) fn to_instruments(&self) -> CostInstruments { + pub(crate) fn new_static_instruments(&self) -> HashMap { + let meter = metrics::meter_provider() + .meter(crate::plugins::telemetry::config_new::instruments::METER_NAME); + + [( + COST_ESTIMATED.to_string(), + 
StaticInstrument::Histogram(meter.f64_histogram(COST_ESTIMATED).with_description("Estimated cost of the operation using the currently configured cost model").init()), + ),( + COST_ACTUAL.to_string(), + StaticInstrument::Histogram(meter.f64_histogram(COST_ACTUAL).with_description("Actual cost of the operation using the currently configured cost model").init()), + ),( + COST_DELTA.to_string(), + StaticInstrument::Histogram(meter.f64_histogram(COST_DELTA).with_description("Delta between the estimated and actual cost of the operation using the currently configured cost model").init()), + )] + .into_iter() + .collect() + } + + pub(crate) fn to_instruments( + &self, + static_instruments: Arc>, + ) -> CostInstruments { let cost_estimated = self.cost_estimated.is_enabled().then(|| { Self::histogram( COST_ESTIMATED, @@ -123,6 +146,7 @@ impl CostInstrumentsConfig { SupergraphSelector::Cost { cost: CostValue::Estimated, }, + &static_instruments, ) }); @@ -133,6 +157,7 @@ impl CostInstrumentsConfig { SupergraphSelector::Cost { cost: CostValue::Actual, }, + &static_instruments, ) }); @@ -143,6 +168,7 @@ impl CostInstrumentsConfig { SupergraphSelector::Cost { cost: CostValue::Delta, }, + &static_instruments, ) }); CostInstruments { @@ -156,9 +182,8 @@ impl CostInstrumentsConfig { name: &'static str, config: &DefaultedStandardInstrument>, selector: SupergraphSelector, + static_instruments: &Arc>, ) -> CustomHistogram { - let meter = metrics::meter_provider() - .meter(crate::plugins::telemetry::config_new::instruments::METER_NAME); let mut nb_attributes = 0; let selectors = match config { DefaultedStandardInstrument::Bool(_) | DefaultedStandardInstrument::Unset => None, @@ -172,7 +197,13 @@ impl CostInstrumentsConfig { inner: Mutex::new(CustomHistogramInner { increment: Increment::EventCustom(None), condition: Condition::True, - histogram: Some(meter.f64_histogram(name).init()), + histogram: Some( + static_instruments + .get(name) + .expect("cannot get static instrument for cost; this should not happen") + .as_histogram() + .expect("cannot convert instrument to histogram for cost; this should not happen").clone(), + ), attributes: Vec::with_capacity(nb_attributes), selector: Some(Arc::new(selector)), selectors, @@ -307,6 +338,8 @@ pub(crate) fn add_cost_attributes(context: &Context, custom_attributes: &mut Vec #[cfg(test)] mod test { + use std::sync::Arc; + use crate::context::OPERATION_NAME; use crate::plugins::demand_control::CostContext; use crate::plugins::telemetry::config_new::cost::CostInstruments; @@ -318,7 +351,7 @@ mod test { #[test] fn test_default_estimated() { let config = config(include_str!("fixtures/cost_estimated.router.yaml")); - let instruments = config.to_instruments(); + let instruments = config.to_instruments(Arc::new(config.new_static_instruments())); make_request(&instruments); assert_histogram_sum!("cost.estimated", 100.0); @@ -330,7 +363,7 @@ mod test { #[test] fn test_default_actual() { let config = config(include_str!("fixtures/cost_actual.router.yaml")); - let instruments = config.to_instruments(); + let instruments = config.to_instruments(Arc::new(config.new_static_instruments())); make_request(&instruments); assert_histogram_sum!("cost.actual", 10.0); @@ -342,7 +375,7 @@ mod test { #[test] fn test_default_delta() { let config = config(include_str!("fixtures/cost_delta.router.yaml")); - let instruments = config.to_instruments(); + let instruments = config.to_instruments(Arc::new(config.new_static_instruments())); make_request(&instruments); assert_histogram_sum!("cost.delta", 
90.0); @@ -356,7 +389,7 @@ mod test { let config = config(include_str!( "fixtures/cost_estimated_with_attributes.router.yaml" )); - let instruments = config.to_instruments(); + let instruments = config.to_instruments(Arc::new(config.new_static_instruments())); make_request(&instruments); assert_histogram_sum!("cost.estimated", 100.0, cost.result = "COST_TOO_EXPENSIVE"); @@ -370,7 +403,7 @@ mod test { let config = config(include_str!( "fixtures/cost_actual_with_attributes.router.yaml" )); - let instruments = config.to_instruments(); + let instruments = config.to_instruments(Arc::new(config.new_static_instruments())); make_request(&instruments); assert_histogram_sum!("cost.actual", 10.0, cost.result = "COST_TOO_EXPENSIVE"); @@ -384,7 +417,7 @@ mod test { let config = config(include_str!( "fixtures/cost_delta_with_attributes.router.yaml" )); - let instruments = config.to_instruments(); + let instruments = config.to_instruments(Arc::new(config.new_static_instruments())); make_request(&instruments); assert_histogram_sum!( diff --git a/apollo-router/src/plugins/telemetry/config_new/events.rs b/apollo-router/src/plugins/telemetry/config_new/events.rs index 7f957ea7e6..e3bbd668f1 100644 --- a/apollo-router/src/plugins/telemetry/config_new/events.rs +++ b/apollo-router/src/plugins/telemetry/config_new/events.rs @@ -14,6 +14,7 @@ use tracing::Span; use super::instruments::Instrumented; use super::Selector; use super::Selectors; +use super::Stage; use crate::plugins::telemetry::config_new::attributes::RouterAttributes; use crate::plugins::telemetry::config_new::attributes::SubgraphAttributes; use crate::plugins::telemetry::config_new::attributes::SupergraphAttributes; @@ -127,6 +128,54 @@ impl Events { custom: custom_events, } } + + pub(crate) fn validate(&self) -> Result<(), String> { + if let StandardEventConfig::Conditional { condition, .. } = &self.router.attributes.request + { + condition.validate(Some(Stage::Request))?; + } + if let StandardEventConfig::Conditional { condition, .. } = &self.router.attributes.response + { + condition.validate(Some(Stage::Response))?; + } + if let StandardEventConfig::Conditional { condition, .. } = + &self.supergraph.attributes.request + { + condition.validate(Some(Stage::Request))?; + } + if let StandardEventConfig::Conditional { condition, .. } = + &self.supergraph.attributes.response + { + condition.validate(Some(Stage::Response))?; + } + if let StandardEventConfig::Conditional { condition, .. } = + &self.subgraph.attributes.request + { + condition.validate(Some(Stage::Request))?; + } + if let StandardEventConfig::Conditional { condition, .. 
} = + &self.subgraph.attributes.response + { + condition.validate(Some(Stage::Response))?; + } + for (name, custom_event) in &self.router.custom { + custom_event.validate().map_err(|err| { + format!("configuration error for router custom event {name:?}: {err}") + })?; + } + for (name, custom_event) in &self.supergraph.custom { + custom_event.validate().map_err(|err| { + format!("configuration error for supergraph custom event {name:?}: {err}") + })?; + } + for (name, custom_event) in &self.subgraph.custom { + custom_event.validate().map_err(|err| { + format!("configuration error for subgraph custom event {name:?}: {err}") + })?; + } + + Ok(()) + } } pub(crate) type RouterEvents = @@ -576,6 +625,21 @@ where condition: Condition, } +impl Event +where + A: Selectors + + Default + + Debug, + E: Selector + Debug, +{ + pub(crate) fn validate(&self) -> Result<(), String> { + let stage = Some(self.on.into()); + self.attributes.validate(stage)?; + self.condition.validate(stage)?; + Ok(()) + } +} + /// When to trigger the event. #[derive(Deserialize, JsonSchema, Clone, Debug, Copy, PartialEq)] #[serde(rename_all = "snake_case")] @@ -736,6 +800,7 @@ mod tests { use super::*; use crate::assert_snapshot_subscriber; use crate::context::CONTAINS_GRAPHQL_ERROR; + use crate::context::OPERATION_NAME; use crate::graphql; use crate::plugins::telemetry::Telemetry; use crate::plugins::test::PluginTestHarness; @@ -877,6 +942,54 @@ mod tests { .await } + #[tokio::test(flavor = "multi_thread")] + async fn test_supergraph_events_with_exists_condition() { + let test_harness: PluginTestHarness = PluginTestHarness::builder() + .config(include_str!( + "../testdata/custom_events_exists_condition.router.yaml" + )) + .build() + .await; + + async { + let ctx = Context::new(); + ctx.insert(OPERATION_NAME, String::from("Test")).unwrap(); + test_harness + .call_supergraph( + supergraph::Request::fake_builder() + .query("query Test { foo }") + .context(ctx) + .build() + .unwrap(), + |_r| { + supergraph::Response::fake_builder() + .data(serde_json::json!({"data": "res"}).to_string()) + .build() + .expect("expecting valid response") + }, + ) + .await + .expect("expecting successful response"); + test_harness + .call_supergraph( + supergraph::Request::fake_builder() + .query("query { foo }") + .build() + .unwrap(), + |_r| { + supergraph::Response::fake_builder() + .data(serde_json::json!({"data": "res"}).to_string()) + .build() + .expect("expecting valid response") + }, + ) + .await + .expect("expecting successful response"); + } + .with_subscriber(assert_snapshot_subscriber!()) + .await + } + #[tokio::test(flavor = "multi_thread")] async fn test_supergraph_events_on_graphql_error() { let test_harness: PluginTestHarness = PluginTestHarness::builder() @@ -1006,6 +1119,7 @@ mod tests { subgraph::Response::fake2_builder() .header("custom-header", "val1") .header("x-log-response", HeaderValue::from_static("log")) + .subgraph_name("subgraph") .data(serde_json::json!({"data": "res"}).to_string()) .build() .expect("expecting valid response") diff --git a/apollo-router/src/plugins/telemetry/config_new/extendable.rs b/apollo-router/src/plugins/telemetry/config_new/extendable.rs index f3c1a4d332..6af5d2bf1c 100644 --- a/apollo-router/src/plugins/telemetry/config_new/extendable.rs +++ b/apollo-router/src/plugins/telemetry/config_new/extendable.rs @@ -17,6 +17,7 @@ use serde_json::Map; use serde_json::Value; use tower::BoxError; +use super::Stage; use crate::plugins::telemetry::config_new::attributes::DefaultAttributeRequirementLevel; use 
crate::plugins::telemetry::config_new::DefaultForLevel; use crate::plugins::telemetry::config_new::Selector; @@ -255,6 +256,23 @@ where } } +impl<A, E> Extendable<A, E> +where + A: Default + Selectors, + E: Selector, +{ + pub(crate) fn validate(&self, restricted_stage: Option<Stage>) -> Result<(), String> { + if let Some(Stage::Request) = &restricted_stage { + for (name, custom) in &self.custom { + if !custom.is_active(Stage::Request) { + return Err(format!("cannot set the attribute {name:?} because it uses a selector computed at a stage other than 'request', so it will not be computed")); + } + } + } + + Ok(()) + } +} #[cfg(test)] mod test { use std::sync::Arc; diff --git a/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.execution/metrics.snap b/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.execution/metrics.snap index d1214d2f8f..0b3e04d9ef 100644 --- a/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.execution/metrics.snap +++ b/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.execution/metrics.snap @@ -10,6 +10,7 @@ info: field.execution: true --- - name: graphql.field.execution + description: Number of times a field is used. data: datapoints: - value: 1 diff --git a/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.length/metrics.snap b/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.length/metrics.snap index cce830a861..d3e0270014 100644 --- a/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.length/metrics.snap +++ b/apollo-router/src/plugins/telemetry/config_new/fixtures/graphql/field.length/metrics.snap @@ -11,6 +11,7 @@ info: list.length: true --- - name: graphql.field.list.length + description: Length of a selected field in the GraphQL response data: datapoints: - sum: 3 diff --git a/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs b/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs index 1178f0a102..7e68446e3d 100644 --- a/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs +++ b/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs @@ -1,32 +1,20 @@ -use std::sync::Arc; - use apollo_compiler::ast::NamedType; use apollo_compiler::executable::Field; use apollo_compiler::ExecutableDocument; -use opentelemetry::metrics::MeterProvider; -use parking_lot::Mutex; use schemars::JsonSchema; use serde::Deserialize; use serde_json_bytes::Value; use tower::BoxError; use super::instruments::CustomCounter; -use super::instruments::CustomCounterInner; use super::instruments::CustomInstruments; -use super::instruments::Increment; -use super::instruments::InstrumentsConfig; -use super::instruments::METER_NAME; use crate::graphql::ResponseVisitor; -use crate::metrics; use crate::plugins::telemetry::config_new::attributes::DefaultAttributeRequirementLevel; -use crate::plugins::telemetry::config_new::conditions::Condition; use crate::plugins::telemetry::config_new::extendable::Extendable; use crate::plugins::telemetry::config_new::graphql::attributes::GraphQLAttributes; use crate::plugins::telemetry::config_new::graphql::selectors::GraphQLSelector; use crate::plugins::telemetry::config_new::graphql::selectors::GraphQLValue; -use crate::plugins::telemetry::config_new::graphql::selectors::ListLength; use crate::plugins::telemetry::config_new::instruments::CustomHistogram; -use crate::plugins::telemetry::config_new::instruments::CustomHistogramInner; use 
crate::plugins::telemetry::config_new::instruments::DefaultedStandardInstrument; use crate::plugins::telemetry::config_new::instruments::Instrumented; use crate::plugins::telemetry::config_new::DefaultForLevel; @@ -37,8 +25,8 @@ use crate::Context; pub(crate) mod attributes; pub(crate) mod selectors; -static FIELD_LENGTH: &str = "graphql.field.list.length"; -static FIELD_EXECUTION: &str = "graphql.field.execution"; +pub(crate) const FIELD_LENGTH: &str = "graphql.field.list.length"; +pub(crate) const FIELD_EXECUTION: &str = "graphql.field.execution"; #[derive(Deserialize, JsonSchema, Clone, Default, Debug)] #[serde(deny_unknown_fields, default)] @@ -98,67 +86,6 @@ pub(crate) struct GraphQLInstruments { pub(crate) custom: GraphQLCustomInstruments, } -impl From<&InstrumentsConfig> for GraphQLInstruments { - fn from(value: &InstrumentsConfig) -> Self { - let meter = metrics::meter_provider().meter(METER_NAME); - GraphQLInstruments { - list_length: value.graphql.attributes.list_length.is_enabled().then(|| { - let mut nb_attributes = 0; - let selectors = match &value.graphql.attributes.list_length { - DefaultedStandardInstrument::Bool(_) | DefaultedStandardInstrument::Unset => { - None - } - DefaultedStandardInstrument::Extendable { attributes } => { - nb_attributes = attributes.custom.len(); - Some(attributes.clone()) - } - }; - CustomHistogram { - inner: Mutex::new(CustomHistogramInner { - increment: Increment::FieldCustom(None), - condition: Condition::True, - histogram: Some(meter.f64_histogram(FIELD_LENGTH).init()), - attributes: Vec::with_capacity(nb_attributes), - selector: Some(Arc::new(GraphQLSelector::ListLength { - list_length: ListLength::Value, - })), - selectors, - updated: false, - }), - } - }), - field_execution: value - .graphql - .attributes - .field_execution - .is_enabled() - .then(|| { - let mut nb_attributes = 0; - let selectors = match &value.graphql.attributes.field_execution { - DefaultedStandardInstrument::Bool(_) - | DefaultedStandardInstrument::Unset => None, - DefaultedStandardInstrument::Extendable { attributes } => { - nb_attributes = attributes.custom.len(); - Some(attributes.clone()) - } - }; - CustomCounter { - inner: Mutex::new(CustomCounterInner { - increment: Increment::FieldUnit, - condition: Condition::True, - counter: Some(meter.f64_counter(FIELD_EXECUTION).init()), - attributes: Vec::with_capacity(nb_attributes), - selector: None, - selectors, - incremented: false, - }), - } - }), - custom: CustomInstruments::new(&value.graphql.custom), - } - } -} - impl Instrumented for GraphQLInstruments { type Request = supergraph::Request; type Response = supergraph::Response; @@ -327,12 +254,11 @@ pub(crate) mod test { .build() .unwrap(); - let harness = PluginTestHarness::::builder() + let harness: PluginTestHarness = PluginTestHarness::::builder() .config(include_str!("fixtures/field_length_enabled.router.yaml")) .schema(schema_str) .build() .await; - harness .call_supergraph(request, |req| { let response: serde_json::Value = serde_json::from_str(include_str!( diff --git a/apollo-router/src/plugins/telemetry/config_new/graphql/selectors.rs b/apollo-router/src/plugins/telemetry/config_new/graphql/selectors.rs index 20a648d465..853681087f 100644 --- a/apollo-router/src/plugins/telemetry/config_new/graphql/selectors.rs +++ b/apollo-router/src/plugins/telemetry/config_new/graphql/selectors.rs @@ -13,6 +13,7 @@ use crate::plugins::telemetry::config_new::instruments::InstrumentValue; use crate::plugins::telemetry::config_new::instruments::StandardUnit; use 
crate::plugins::telemetry::config_new::selectors::OperationName; use crate::plugins::telemetry::config_new::Selector; +use crate::plugins::telemetry::config_new::Stage; use crate::Context; #[derive(Deserialize, JsonSchema, Clone, Debug)] @@ -173,6 +174,10 @@ impl Selector for GraphQLSelector { } } } + + fn is_active(&self, stage: Stage) -> bool { + matches!(stage, Stage::ResponseField) + } } fn name_to_otel_string(name: &apollo_compiler::Name) -> opentelemetry::StringValue { diff --git a/apollo-router/src/plugins/telemetry/config_new/instruments.rs b/apollo-router/src/plugins/telemetry/config_new/instruments.rs index d9b758f42d..341f84ad35 100644 --- a/apollo-router/src/plugins/telemetry/config_new/instruments.rs +++ b/apollo-router/src/plugins/telemetry/config_new/instruments.rs @@ -22,7 +22,14 @@ use tower::BoxError; use super::attributes::HttpServerAttributes; use super::cache::attributes::CacheAttributes; +use super::cache::CacheInstruments; use super::cache::CacheInstrumentsConfig; +use super::cache::CACHE_METRIC; +use super::graphql::selectors::ListLength; +use super::graphql::GraphQLInstruments; +use super::graphql::FIELD_EXECUTION; +use super::graphql::FIELD_LENGTH; +use super::selectors::CacheKind; use super::DefaultForLevel; use super::Selector; use crate::metrics; @@ -86,7 +93,46 @@ pub(crate) struct InstrumentsConfig { >, } +const HTTP_SERVER_REQUEST_DURATION_METRIC: &str = "http.server.request.duration"; +const HTTP_SERVER_REQUEST_BODY_SIZE_METRIC: &str = "http.server.request.body.size"; +const HTTP_SERVER_RESPONSE_BODY_SIZE_METRIC: &str = "http.server.response.body.size"; +const HTTP_SERVER_ACTIVE_REQUESTS: &str = "http.server.active_requests"; + +const HTTP_CLIENT_REQUEST_DURATION_METRIC: &str = "http.client.request.duration"; +const HTTP_CLIENT_REQUEST_BODY_SIZE_METRIC: &str = "http.client.request.body.size"; +const HTTP_CLIENT_RESPONSE_BODY_SIZE_METRIC: &str = "http.client.response.body.size"; + impl InstrumentsConfig { + pub(crate) fn validate(&self) -> Result<(), String> { + for (name, custom) in &self.router.custom { + custom.condition.validate(None).map_err(|err| { + format!("error for custom router instrument {name:?} in condition: {err}") + })?; + } + for (name, custom) in &self.supergraph.custom { + custom.condition.validate(None).map_err(|err| { + format!("error for custom supergraph instrument {name:?} in condition: {err}") + })?; + } + for (name, custom) in &self.subgraph.custom { + custom.condition.validate(None).map_err(|err| { + format!("error for custom subgraph instrument {name:?} in condition: {err}") + })?; + } + for (name, custom) in &self.graphql.custom { + custom.condition.validate(None).map_err(|err| { + format!("error for custom graphql instrument {name:?} in condition: {err}") + })?; + } + for (name, custom) in &self.cache.custom { + custom.condition.validate(None).map_err(|err| { + format!("error for custom cache instrument {name:?} in condition: {err}") + })?; + } + + Ok(()) + } + /// Update the defaults for spans configuration regarding the `default_attribute_requirement_level` pub(crate) fn update_defaults(&mut self) { self.router @@ -100,8 +146,118 @@ impl InstrumentsConfig { .defaults_for_levels(self.default_requirement_level, TelemetryDataKind::Metrics); } - pub(crate) fn new_router_instruments(&self) -> RouterInstruments { + pub(crate) fn new_builtin_router_instruments(&self) -> HashMap { let meter = metrics::meter_provider().meter(METER_NAME); + let mut static_instruments = HashMap::with_capacity(self.router.custom.len()); + + if self + 
.router + .attributes + .http_server_request_duration + .is_enabled() + { + static_instruments.insert( + HTTP_SERVER_REQUEST_DURATION_METRIC.to_string(), + StaticInstrument::Histogram( + meter + .f64_histogram(HTTP_SERVER_REQUEST_DURATION_METRIC) + .with_unit(Unit::new("s")) + .with_description("Duration of HTTP server requests.") + .init(), + ), + ); + } + + if self + .router + .attributes + .http_server_request_body_size + .is_enabled() + { + static_instruments.insert( + HTTP_SERVER_REQUEST_BODY_SIZE_METRIC.to_string(), + StaticInstrument::Histogram( + meter + .f64_histogram(HTTP_SERVER_REQUEST_BODY_SIZE_METRIC) + .with_unit(Unit::new("By")) + .with_description("Size of HTTP server request bodies.") + .init(), + ), + ); + } + + if self + .router + .attributes + .http_server_response_body_size + .is_enabled() + { + static_instruments.insert( + HTTP_SERVER_RESPONSE_BODY_SIZE_METRIC.to_string(), + StaticInstrument::Histogram( + meter + .f64_histogram(HTTP_SERVER_RESPONSE_BODY_SIZE_METRIC) + .with_unit(Unit::new("By")) + .with_description("Size of HTTP server response bodies.") + .init(), + ), + ); + } + + if self + .router + .attributes + .http_server_active_requests + .is_enabled() + { + static_instruments.insert( + HTTP_SERVER_ACTIVE_REQUESTS.to_string(), + StaticInstrument::UpDownCounterI64( + meter + .i64_up_down_counter(HTTP_SERVER_ACTIVE_REQUESTS) + .with_unit(Unit::new("request")) + .with_description("Number of active HTTP server requests.") + .init(), + ), + ); + } + + for (instrument_name, instrument) in &self.router.custom { + match instrument.ty { + InstrumentType::Counter => { + static_instruments.insert( + instrument_name.clone(), + StaticInstrument::CounterF64( + meter + .f64_counter(instrument_name.clone()) + .with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + InstrumentType::Histogram => { + static_instruments.insert( + instrument_name.clone(), + StaticInstrument::Histogram( + meter + .f64_histogram(instrument_name.clone()) + .with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + } + } + + static_instruments + } + + pub(crate) fn new_router_instruments( + &self, + static_instruments: Arc>, + ) -> RouterInstruments { let http_server_request_duration = self .router .attributes @@ -112,11 +268,16 @@ impl InstrumentsConfig { increment: Increment::Duration(Instant::now()), condition: Condition::True, histogram: Some( - meter - .f64_histogram("http.server.request.duration") - .with_unit(Unit::new("s")) - .with_description("Duration of HTTP server requests.") - .init(), + static_instruments + .get(HTTP_SERVER_REQUEST_DURATION_METRIC) + .expect( + "cannot get static instrument for router; this should not happen", + ) + .as_histogram() + .cloned() + .expect( + "cannot convert instrument to histogram for router; this should not happen", + ), ), attributes: Vec::new(), selector: None, @@ -150,11 +311,15 @@ impl InstrumentsConfig { increment: Increment::Custom(None), condition: Condition::True, histogram: Some( - meter - .f64_histogram("http.server.request.body.size") - .with_unit(Unit::new("By")) - .with_description("Size of HTTP server request bodies.") - .init(), + static_instruments + .get(HTTP_SERVER_REQUEST_BODY_SIZE_METRIC) + .expect( + "cannot get static instrument for router; this should not happen", + ) + .as_histogram() + .cloned().expect( + "cannot convert instrument to histogram for router; this should not happen", + ) ), 
attributes: Vec::with_capacity(nb_attributes), selector: Some(Arc::new(RouterSelector::RequestHeader { @@ -188,11 +353,16 @@ impl InstrumentsConfig { increment: Increment::Custom(None), condition: Condition::True, histogram: Some( - meter - .f64_histogram("http.server.response.body.size") - .with_unit(Unit::new("By")) - .with_description("Size of HTTP server response bodies.") - .init(), + static_instruments + .get(HTTP_SERVER_RESPONSE_BODY_SIZE_METRIC) + .expect( + "cannot get static instrument for router; this should not happen", + ) + .as_histogram() + .cloned() + .expect( + "cannot convert instrument to histogram for router; this should not happen", + ) ), attributes: Vec::with_capacity(nb_attributes), selector: Some(Arc::new(RouterSelector::ResponseHeader { @@ -213,11 +383,16 @@ impl InstrumentsConfig { .then(|| ActiveRequestsCounter { inner: Mutex::new(ActiveRequestsCounterInner { counter: Some( - meter - .i64_up_down_counter("http.server.active_requests") - .with_unit(Unit::new("request")) - .with_description("Number of active HTTP server requests.") - .init(), + static_instruments + .get(HTTP_SERVER_ACTIVE_REQUESTS) + .expect( + "cannot get static instrument for router; this should not happen", + ) + .as_up_down_counter_i64() + .cloned() + .expect( + "cannot convert instrument to up and down counter for router; this should not happen", + ), ), attrs_config: match &self.router.attributes.http_server_active_requests { DefaultedStandardInstrument::Bool(_) @@ -234,19 +409,155 @@ impl InstrumentsConfig { http_server_request_body_size, http_server_response_body_size, http_server_active_requests, - custom: CustomInstruments::new(&self.router.custom), + custom: CustomInstruments::new(&self.router.custom, static_instruments), } } - pub(crate) fn new_supergraph_instruments(&self) -> SupergraphInstruments { + pub(crate) fn new_builtin_supergraph_instruments(&self) -> HashMap { + let meter = metrics::meter_provider().meter(METER_NAME); + + let mut static_instruments = HashMap::with_capacity(self.supergraph.custom.len()); + for (instrument_name, instrument) in &self.supergraph.custom { + match instrument.ty { + InstrumentType::Counter => { + static_instruments.insert( + instrument_name.clone(), + StaticInstrument::CounterF64( + meter + .f64_counter(instrument_name.clone()) + .with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + InstrumentType::Histogram => { + static_instruments.insert( + instrument_name.clone(), + StaticInstrument::Histogram( + meter + .f64_histogram(instrument_name.clone()) + .with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + } + } + static_instruments.extend(self.supergraph.attributes.cost.new_static_instruments()); + + static_instruments + } + + pub(crate) fn new_supergraph_instruments( + &self, + static_instruments: Arc>, + ) -> SupergraphInstruments { SupergraphInstruments { - cost: self.supergraph.attributes.cost.to_instruments(), - custom: CustomInstruments::new(&self.supergraph.custom), + cost: self + .supergraph + .attributes + .cost + .to_instruments(static_instruments.clone()), + custom: CustomInstruments::new(&self.supergraph.custom, static_instruments), } } - pub(crate) fn new_subgraph_instruments(&self) -> SubgraphInstruments { + pub(crate) fn new_builtin_subgraph_instruments(&self) -> HashMap { let meter = metrics::meter_provider().meter(METER_NAME); + let mut static_instruments = 
HashMap::with_capacity(self.subgraph.custom.len()); + + if self + .subgraph + .attributes + .http_client_request_duration + .is_enabled() + { + static_instruments.insert( + HTTP_CLIENT_REQUEST_DURATION_METRIC.to_string(), + StaticInstrument::Histogram( + meter + .f64_histogram(HTTP_CLIENT_REQUEST_DURATION_METRIC) + .with_unit(Unit::new("s")) + .with_description("Duration of HTTP client requests.") + .init(), + ), + ); + } + + if self + .subgraph + .attributes + .http_client_request_body_size + .is_enabled() + { + static_instruments.insert( + HTTP_CLIENT_REQUEST_BODY_SIZE_METRIC.to_string(), + StaticInstrument::Histogram( + meter + .f64_histogram(HTTP_CLIENT_REQUEST_BODY_SIZE_METRIC) + .with_unit(Unit::new("By")) + .with_description("Size of HTTP client request bodies.") + .init(), + ), + ); + } + + if self + .subgraph + .attributes + .http_client_response_body_size + .is_enabled() + { + static_instruments.insert( + HTTP_CLIENT_RESPONSE_BODY_SIZE_METRIC.to_string(), + StaticInstrument::Histogram( + meter + .f64_histogram(HTTP_CLIENT_RESPONSE_BODY_SIZE_METRIC) + .with_unit(Unit::new("By")) + .with_description("Size of HTTP client response bodies.") + .init(), + ), + ); + } + + for (instrument_name, instrument) in &self.subgraph.custom { + match instrument.ty { + InstrumentType::Counter => { + static_instruments.insert( + instrument_name.clone(), + StaticInstrument::CounterF64( + meter + .f64_counter(instrument_name.clone()) + .with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + InstrumentType::Histogram => { + static_instruments.insert( + instrument_name.clone(), + StaticInstrument::Histogram( + meter + .f64_histogram(instrument_name.clone()) + .with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + } + } + + static_instruments + } + + pub(crate) fn new_subgraph_instruments( + &self, + static_instruments: Arc>, + ) -> SubgraphInstruments { let http_client_request_duration = self.subgraph .attributes @@ -266,12 +577,16 @@ impl InstrumentsConfig { inner: Mutex::new(CustomHistogramInner { increment: Increment::Duration(Instant::now()), condition: Condition::True, - histogram: Some( - meter - .f64_histogram("http.client.request.duration") - .with_unit(Unit::new("s")) - .with_description("Duration of HTTP client requests.") - .init(), + histogram: Some(static_instruments + .get(HTTP_CLIENT_REQUEST_DURATION_METRIC) + .expect( + "cannot get static instrument for subgraph; this should not happen", + ) + .as_histogram() + .cloned() + .expect( + "cannot convert instrument to histogram for subgraph; this should not happen", + ) ), attributes: Vec::with_capacity(nb_attributes), selector: None, @@ -299,12 +614,16 @@ impl InstrumentsConfig { inner: Mutex::new(CustomHistogramInner { increment: Increment::Custom(None), condition: Condition::True, - histogram: Some( - meter - .f64_histogram("http.client.request.body.size") - .with_unit(Unit::new("By")) - .with_description("Size of HTTP client request bodies.") - .init(), + histogram: Some(static_instruments + .get(HTTP_CLIENT_REQUEST_BODY_SIZE_METRIC) + .expect( + "cannot get static instrument for subgraph; this should not happen", + ) + .as_histogram() + .cloned() + .expect( + "cannot convert instrument to histogram for subgraph; this should not happen", + ) ), attributes: Vec::with_capacity(nb_attributes), selector: Some(Arc::new(SubgraphSelector::SubgraphRequestHeader { @@ -336,12 +655,16 @@ impl InstrumentsConfig 
{ inner: Mutex::new(CustomHistogramInner { increment: Increment::Custom(None), condition: Condition::True, - histogram: Some( - meter - .f64_histogram("http.client.response.body.size") - .with_unit(Unit::new("By")) - .with_description("Size of HTTP client response bodies.") - .init(), + histogram: Some(static_instruments + .get(HTTP_CLIENT_RESPONSE_BODY_SIZE_METRIC) + .expect( + "cannot get static instrument for subgraph; this should not happen", + ) + .as_histogram() + .cloned() + .expect( + "cannot convert instrument to histogram for subgraph; this should not happen", + ) ), attributes: Vec::with_capacity(nb_attributes), selector: Some(Arc::new(SubgraphSelector::SubgraphResponseHeader { @@ -358,7 +681,243 @@ impl InstrumentsConfig { http_client_request_duration, http_client_request_body_size, http_client_response_body_size, - custom: CustomInstruments::new(&self.subgraph.custom), + custom: CustomInstruments::new(&self.subgraph.custom, static_instruments), + } + } + + pub(crate) fn new_builtin_graphql_instruments(&self) -> HashMap { + let meter = metrics::meter_provider().meter(METER_NAME); + let mut static_instruments = HashMap::with_capacity(self.graphql.custom.len()); + if self.graphql.attributes.list_length.is_enabled() { + static_instruments.insert( + FIELD_LENGTH.to_string(), + StaticInstrument::Histogram( + meter + .f64_histogram(FIELD_LENGTH) + .with_description("Length of a selected field in the GraphQL response") + .init(), + ), + ); + } + + if self.graphql.attributes.field_execution.is_enabled() { + static_instruments.insert( + FIELD_EXECUTION.to_string(), + StaticInstrument::CounterF64( + meter + .f64_counter(FIELD_EXECUTION) + .with_description("Number of times a field is used.") + .init(), + ), + ); + } + + for (instrument_name, instrument) in &self.graphql.custom { + match instrument.ty { + InstrumentType::Counter => { + static_instruments.insert( + instrument_name.clone(), + StaticInstrument::CounterF64( + meter + .f64_counter(instrument_name.clone()) + .with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + InstrumentType::Histogram => { + static_instruments.insert( + instrument_name.clone(), + StaticInstrument::Histogram( + meter + .f64_histogram(instrument_name.clone()) + .with_description(instrument.description.clone()) + .with_unit(Unit::new(instrument.unit.clone())) + .init(), + ), + ); + } + } + } + + static_instruments + } + + pub(crate) fn new_graphql_instruments( + &self, + static_instruments: Arc>, + ) -> GraphQLInstruments { + GraphQLInstruments { + list_length: self.graphql.attributes.list_length.is_enabled().then(|| { + let mut nb_attributes = 0; + let selectors = match &self.graphql.attributes.list_length { + DefaultedStandardInstrument::Bool(_) | DefaultedStandardInstrument::Unset => { + None + } + DefaultedStandardInstrument::Extendable { attributes } => { + nb_attributes = attributes.custom.len(); + Some(attributes.clone()) + } + }; + CustomHistogram { + inner: Mutex::new(CustomHistogramInner { + increment: Increment::FieldCustom(None), + condition: Condition::True, + histogram: Some(static_instruments + .get(FIELD_LENGTH) + .expect( + "cannot get static instrument for graphql; this should not happen", + ) + .as_histogram() + .cloned() + .expect( + "cannot convert instrument to counter for graphql; this should not happen", + ) + ), + attributes: Vec::with_capacity(nb_attributes), + selector: Some(Arc::new(GraphQLSelector::ListLength { + list_length: ListLength::Value, + })), + 
selectors,
+                        updated: false,
+                    }),
+                }
+            }),
+            field_execution: self
+                .graphql
+                .attributes
+                .field_execution
+                .is_enabled()
+                .then(|| {
+                    let mut nb_attributes = 0;
+                    let selectors = match &self.graphql.attributes.field_execution {
+                        DefaultedStandardInstrument::Bool(_)
+                        | DefaultedStandardInstrument::Unset => None,
+                        DefaultedStandardInstrument::Extendable { attributes } => {
+                            nb_attributes = attributes.custom.len();
+                            Some(attributes.clone())
+                        }
+                    };
+                    CustomCounter {
+                        inner: Mutex::new(CustomCounterInner {
+                            increment: Increment::FieldUnit,
+                            condition: Condition::True,
+                            counter: Some(static_instruments
+                                .get(FIELD_EXECUTION)
+                                .expect(
+                                    "cannot get static instrument for graphql; this should not happen",
+                                )
+                                .as_counter_f64()
+                                .cloned()
+                                .expect(
+                                    "cannot convert instrument to counter for graphql; this should not happen",
+                                )
+                            ),
+                            attributes: Vec::with_capacity(nb_attributes),
+                            selector: None,
+                            selectors,
+                            incremented: false,
+                        }),
+                    }
+                }),
+            custom: CustomInstruments::new(&self.graphql.custom, static_instruments),
+        }
+    }
+
+    pub(crate) fn new_builtin_cache_instruments(&self) -> HashMap<String, StaticInstrument> {
+        let meter = metrics::meter_provider().meter(METER_NAME);
+        let mut static_instruments: HashMap<String, StaticInstrument> = HashMap::new();
+        if self.cache.attributes.cache.is_enabled() {
+            static_instruments.insert(
+                CACHE_METRIC.to_string(),
+                StaticInstrument::CounterF64(
+                    meter
+                        .f64_counter(CACHE_METRIC)
+                        .with_unit(Unit::new("ops"))
+                        .with_description("Entity cache hit/miss operations at the subgraph level")
+                        .init(),
+                ),
+            );
+        }
+
+        static_instruments
+    }
+
+    pub(crate) fn new_cache_instruments(
+        &self,
+        static_instruments: Arc<HashMap<String, StaticInstrument>>,
+    ) -> CacheInstruments {
+        CacheInstruments {
+            cache_hit: self.cache.attributes.cache.is_enabled().then(|| {
+                let mut nb_attributes = 0;
+                let selectors = match &self.cache.attributes.cache {
+                    DefaultedStandardInstrument::Bool(_) | DefaultedStandardInstrument::Unset => {
+                        None
+                    }
+                    DefaultedStandardInstrument::Extendable { attributes } => {
+                        nb_attributes = attributes.custom.len();
+                        Some(attributes.clone())
+                    }
+                };
+                CustomCounter {
+                    inner: Mutex::new(CustomCounterInner {
+                        increment: Increment::Custom(None),
+                        condition: Condition::True,
+                        counter: Some(static_instruments
+                            .get(CACHE_METRIC)
+                            .expect(
+                                "cannot get static instrument for cache; this should not happen",
+                            )
+                            .as_counter_f64()
+                            .cloned()
+                            .expect(
+                                "cannot convert instrument to counter for cache; this should not happen",
+                            )
+                        ),
+                        attributes: Vec::with_capacity(nb_attributes),
+                        selector: Some(Arc::new(SubgraphSelector::Cache {
+                            cache: CacheKind::Hit,
+                            entity_type: None,
+                        })),
+                        selectors,
+                        incremented: false,
+                    }),
+                }
+            }),
+        }
+    }
+}
+
+#[derive(Debug)]
+pub(crate) enum StaticInstrument {
+    CounterF64(Counter<f64>),
+    UpDownCounterI64(UpDownCounter<i64>),
+    Histogram(Histogram<f64>),
+}
+
+impl StaticInstrument {
+    pub(crate) fn as_counter_f64(&self) -> Option<&Counter<f64>> {
+        if let Self::CounterF64(v) = self {
+            Some(v)
+        } else {
+            None
+        }
+    }
+
+    pub(crate) fn as_up_down_counter_i64(&self) -> Option<&UpDownCounter<i64>> {
+        if let Self::UpDownCounterI64(v) = self {
+            Some(v)
+        } else {
+            None
+        }
+    }
+
+    pub(crate) fn as_histogram(&self) -> Option<&Histogram<f64>> {
+        if let Self::Histogram(v) = self {
+            Some(v)
+        } else {
+            None
+        }
+    }
+}
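The `new_builtin_*` constructors above pre-create one OpenTelemetry instrument per enabled standard metric and per custom instrument declared in configuration. For context, a hedged sketch of what such a custom declaration looks like (instrument name, unit, and description are illustrative; the shape follows the documented `telemetry.instrumentation.instruments` configuration):

```yaml
telemetry:
  instrumentation:
    instruments:
      router:
        acme.request.duration:
          value: duration
          type: histogram
          unit: s
          description: "Duration of processing a request in the router"
```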
@@ -831,10 +1390,10 @@ where
 {
     pub(crate) fn new(
         config: &HashMap<String, Instrument<…>>,
+        static_instruments: Arc<HashMap<String, StaticInstrument>>,
     ) -> Self {
         let mut counters = Vec::new();
         let mut histograms = Vec::new();
-        let meter = metrics::meter_provider().meter(METER_NAME);
 
         for (instrument_name, instrument) in config {
             match instrument.ty {
@@ -864,25 +1423,32 @@
                         },
                     };
-                    let counter = CustomCounterInner {
-                        increment,
-                        condition: instrument.condition.clone(),
-                        counter: Some(
-                            meter
-                                .f64_counter(instrument_name.clone())
-                                .with_description(instrument.description.clone())
-                                .with_unit(Unit::new(instrument.unit.clone()))
-                                .init(),
-                        ),
-                        attributes: Vec::new(),
-                        selector,
-                        selectors: Some(instrument.attributes.clone()),
-                        incremented: false,
-                    };
-
-                    counters.push(CustomCounter {
-                        inner: Mutex::new(counter),
-                    })
+                    match static_instruments
+                        .get(instrument_name)
+                        .expect(
+                            "cannot get static instrument; this should not happen",
+                        )
+                        .as_counter_f64()
+                        .cloned()
+                    {
+                        Some(counter) => {
+                            let counter = CustomCounterInner {
+                                increment,
+                                condition: instrument.condition.clone(),
+                                counter: Some(counter),
+                                attributes: Vec::new(),
+                                selector,
+                                selectors: Some(instrument.attributes.clone()),
+                                incremented: false,
+                            };
+                            counters.push(CustomCounter {
+                                inner: Mutex::new(counter),
+                            })
+                        }
+                        None => {
+                            ::tracing::error!("cannot convert static instrument into a counter; please file an issue on GitHub");
+                        }
+                    }
                 }
                 InstrumentType::Histogram => {
                     let (selector, increment) = match (&instrument.value).into() {
@@ -910,25 +1476,34 @@
                         },
                     };
-                    let histogram = CustomHistogramInner {
-                        increment,
-                        condition: instrument.condition.clone(),
-                        histogram: Some(
-                            meter
-                                .f64_histogram(instrument_name.clone())
-                                .with_description(instrument.description.clone())
-                                .with_unit(Unit::new(instrument.unit.clone()))
-                                .init(),
-                        ),
-                        attributes: Vec::new(),
-                        selector,
-                        selectors: Some(instrument.attributes.clone()),
-                        updated: false,
-                    };
-                    histograms.push(CustomHistogram {
-                        inner: Mutex::new(histogram),
-                    })
+                    match static_instruments
+                        .get(instrument_name)
+                        .expect(
+                            "cannot get static instrument; this should not happen",
+                        )
+                        .as_histogram()
+                        .cloned()
+                    {
+                        Some(histogram) => {
+                            let histogram = CustomHistogramInner {
+                                increment,
+                                condition: instrument.condition.clone(),
+                                histogram: Some(histogram),
+                                attributes: Vec::new(),
+                                selector,
+                                selectors: Some(instrument.attributes.clone()),
+                                updated: false,
+                            };
+
+                            histograms.push(CustomHistogram {
+                                inner: Mutex::new(histogram),
+                            });
+                        }
+                        None => {
+                            ::tracing::error!("cannot convert static instrument into a histogram; please file an issue on GitHub");
+                        }
+                    }
                 }
             }
         }
@@ -2298,7 +2873,10 @@ mod tests {
         let mut supergraph_instruments = None;
         let mut subgraph_instruments = None;
         let mut cache_instruments: Option<CacheInstruments> = None;
-        let graphql_instruments: GraphQLInstruments = (&config).into();
+        let graphql_instruments: GraphQLInstruments = config
+            .new_graphql_instruments(Arc::new(
+                config.new_builtin_graphql_instruments(),
+            ));
         let context = Context::new();
         for event in request {
             match event {
@@ -2316,7 +2894,9 @@
                     .body(body)
                     .build()
                     .unwrap();
-                    router_instruments = Some(config.new_router_instruments());
+                    router_instruments = Some(config.new_router_instruments(
+                        Arc::new(config.new_builtin_router_instruments()),
+                    ));
                     router_instruments
                         .as_mut()
                         .expect("router instruments")
@@ -2352,7 +2932,9 @@
                     headers,
                 } => {
                     supergraph_instruments =
-                        Some(config.new_supergraph_instruments());
+                        Some(config.new_supergraph_instruments(Arc::new(
+                            config.new_builtin_supergraph_instruments(),
+                        )));
 
                     let mut request = supergraph::Request::fake_builder()
                         .context(context.clone())
@@ -2404,8 +2986,12 @@
                     extensions,
                     headers,
                 } => {
-
subgraph_instruments = Some(config.new_subgraph_instruments()); - cache_instruments = Some((&config).into()); + subgraph_instruments = Some(config.new_subgraph_instruments( + Arc::new(config.new_builtin_subgraph_instruments()), + )); + cache_instruments = Some(config.new_cache_instruments( + Arc::new(config.new_builtin_cache_instruments()), + )); let graphql_request = graphql::Request::fake_builder() .query(query) .and_operation_name(operation_name) @@ -2701,7 +3287,8 @@ mod tests { ) .unwrap(); - let router_instruments = config.new_router_instruments(); + let router_instruments = + config.new_router_instruments(Arc::new(config.new_builtin_router_instruments())); let router_req = RouterRequest::fake_builder() .header("conditional-custom", "X") .header("x-my-header-count", "55") @@ -2739,7 +3326,8 @@ mod tests { "acme.my_attribute" = "TEST" ); - let router_instruments = config.new_router_instruments(); + let router_instruments = + config.new_router_instruments(Arc::new(config.new_builtin_router_instruments())); let router_req = RouterRequest::fake_builder() .header("content-length", "35") .header("x-my-header-count", "5") @@ -2780,7 +3368,8 @@ mod tests { "acme.my_attribute" = "unknown" ); - let router_instruments = config.new_router_instruments(); + let router_instruments = + config.new_router_instruments(Arc::new(config.new_builtin_router_instruments())); let router_req = RouterRequest::fake_builder() .header("content-length", "35") .header("content-type", "application/graphql") @@ -2809,7 +3398,8 @@ mod tests { "http.response.status_code" = 400 ); - let router_instruments = config.new_router_instruments(); + let router_instruments = + config.new_router_instruments(Arc::new(config.new_builtin_router_instruments())); let router_req = RouterRequest::fake_builder() .header("content-length", "35") .header("content-type", "application/graphql") @@ -2947,7 +3537,10 @@ mod tests { ) .unwrap(); - let custom_instruments = SupergraphCustomInstruments::new(&config.supergraph.custom); + let custom_instruments = SupergraphCustomInstruments::new( + &config.supergraph.custom, + Arc::new(config.new_builtin_supergraph_instruments()), + ); let context = crate::context::Context::new(); let _ = context.insert(OPERATION_KIND, "query".to_string()).unwrap(); let context_with_error = crate::context::Context::new(); @@ -3012,7 +3605,10 @@ mod tests { ); assert_counter!("acme.request.on_graphql_data", 500.0, response.data = 500); - let custom_instruments = SupergraphCustomInstruments::new(&config.supergraph.custom); + let custom_instruments = SupergraphCustomInstruments::new( + &config.supergraph.custom, + Arc::new(config.new_builtin_supergraph_instruments()), + ); let supergraph_req = supergraph::Request::fake_builder() .header("content-length", "35") .header("x-my-header-count", "5") @@ -3066,7 +3662,10 @@ mod tests { ); assert_counter!("acme.request.on_graphql_data", 1000.0, response.data = 500); - let custom_instruments = SupergraphCustomInstruments::new(&config.supergraph.custom); + let custom_instruments = SupergraphCustomInstruments::new( + &config.supergraph.custom, + Arc::new(config.new_builtin_supergraph_instruments()), + ); let supergraph_req = supergraph::Request::fake_builder() .header("content-length", "35") .header("content-type", "application/graphql") diff --git a/apollo-router/src/plugins/telemetry/config_new/logging.rs b/apollo-router/src/plugins/telemetry/config_new/logging.rs index 0439142c5e..be9aeefdb4 100644 --- a/apollo-router/src/plugins/telemetry/config_new/logging.rs +++ 
b/apollo-router/src/plugins/telemetry/config_new/logging.rs
@@ -18,6 +18,7 @@ use serde::Deserializer;
 
 use crate::configuration::ConfigurationError;
 use crate::plugins::telemetry::config::AttributeValue;
+use crate::plugins::telemetry::config::TraceIdFormat;
 use crate::plugins::telemetry::config_new::experimental_when_header::HeaderLoggingCondition;
 use crate::plugins::telemetry::resource::ConfigResource;
 use crate::services::SupergraphRequest;
@@ -335,11 +336,44 @@ pub(crate) struct JsonFormat {
     /// Include the resource with the log event. (default: true)
     pub(crate) display_resource: bool,
     /// Include the trace id (if any) with the log event. (default: true)
-    pub(crate) display_trace_id: bool,
+    pub(crate) display_trace_id: DisplayTraceIdFormat,
     /// Include the span id (if any) with the log event. (default: true)
     pub(crate) display_span_id: bool,
 }
 
+#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Eq)]
+#[serde(deny_unknown_fields, rename_all = "snake_case", untagged)]
+pub(crate) enum DisplayTraceIdFormat {
+    // /// Format the Trace ID as a hexadecimal number
+    // ///
+    // /// (e.g. Trace ID 16 -> 00000000000000000000000000000010)
+    // #[default]
+    // Hexadecimal,
+    // /// Format the Trace ID as a hexadecimal number
+    // ///
+    // /// (e.g. Trace ID 16 -> 00000000000000000000000000000010)
+    // OpenTelemetry,
+    // /// Format the Trace ID as a decimal number
+    // ///
+    // /// (e.g. Trace ID 16 -> 16)
+    // Decimal,
+    // /// Datadog
+    // Datadog,
+    // /// UUID format with dashes
+    // /// (eg. 67e55044-10b1-426f-9247-bb680e5fe0c8)
+    // Uuid,
+    TraceIdFormat(TraceIdFormat),
+    Bool(bool),
+}
+
+impl Default for DisplayTraceIdFormat {
+    fn default() -> Self {
+        Self::TraceIdFormat(TraceIdFormat::default())
+    }
+}
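Because `DisplayTraceIdFormat` is an `untagged` enum over `TraceIdFormat` and `bool`, the existing boolean form keeps deserializing while any trace ID format is now also accepted. A hedged sketch of both spellings (assuming the usual stdout logging exporter configuration):

```yaml
telemetry:
  exporters:
    logging:
      stdout:
        format:
          json:
            display_trace_id: uuid   # hexadecimal | decimal | datadog | uuid | open_telemetry
            # display_trace_id: true # the boolean form is still supported
```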
 impl Default for JsonFormat {
     fn default() -> Self {
         JsonFormat {
@@ -353,7 +387,7 @@
             display_current_span: false,
             display_span_list: true,
             display_resource: true,
-            display_trace_id: true,
+            display_trace_id: DisplayTraceIdFormat::Bool(true),
             display_span_id: true,
         }
     }
@@ -389,7 +423,7 @@ pub(crate) struct TextFormat {
     /// Include all of the containing span information with the log event. (default: true)
     pub(crate) display_span_list: bool,
     /// Include the trace id (if any) with the log event. (default: false)
-    pub(crate) display_trace_id: bool,
+    pub(crate) display_trace_id: DisplayTraceIdFormat,
     /// Include the span id (if any) with the log event. (default: false)
     pub(crate) display_span_id: bool,
 }
@@ -410,7 +444,7 @@ impl Default for TextFormat {
             display_resource: false,
             display_current_span: true,
             display_span_list: true,
-            display_trace_id: false,
+            display_trace_id: DisplayTraceIdFormat::Bool(false),
             display_span_id: false,
         }
     }
diff --git a/apollo-router/src/plugins/telemetry/config_new/mod.rs b/apollo-router/src/plugins/telemetry/config_new/mod.rs
index 2a3f46edcf..082d0a438e 100644
--- a/apollo-router/src/plugins/telemetry/config_new/mod.rs
+++ b/apollo-router/src/plugins/telemetry/config_new/mod.rs
@@ -1,3 +1,4 @@
+use events::EventOn;
 use opentelemetry::baggage::BaggageExt;
 use opentelemetry::trace::TraceContextExt;
 use opentelemetry::trace::TraceId;
@@ -51,7 +52,42 @@ pub(crate) trait Selectors {
     }
 }
 
-pub(crate) trait Selector {
+#[allow(dead_code)]
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub(crate) enum Stage {
+    Request,
+    Response,
+    ResponseEvent,
+    ResponseField,
+    Error,
+    Drop,
+}
+
+impl std::fmt::Display for Stage {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Stage::Request => write!(f, "request"),
+            Stage::Response => write!(f, "response"),
+            Stage::ResponseEvent => write!(f, "response_event"),
+            Stage::ResponseField => write!(f, "response_field"),
+            Stage::Error => write!(f, "error"),
+            Stage::Drop => write!(f, "drop"),
+        }
+    }
+}
+
+impl From<EventOn> for Stage {
+    fn from(value: EventOn) -> Self {
+        match value {
+            EventOn::Request => Self::Request,
+            EventOn::Response => Self::Response,
+            EventOn::EventResponse => Self::ResponseEvent,
+            EventOn::Error => Self::Error,
+        }
+    }
+}
+
+pub(crate) trait Selector: std::fmt::Debug {
     type Request;
     type Response;
     type EventResponse;
@@ -79,6 +115,8 @@ pub(crate) trait Selector {
     fn on_drop(&self) -> Option<opentelemetry::Value> {
         None
     }
+
+    fn is_active(&self, stage: Stage) -> bool;
 }
 
 pub(crate) trait DefaultForLevel {
diff --git a/apollo-router/src/plugins/telemetry/config_new/selectors.rs b/apollo-router/src/plugins/telemetry/config_new/selectors.rs
index 3f8cde2faf..9047764a80 100644
--- a/apollo-router/src/plugins/telemetry/config_new/selectors.rs
+++ b/apollo-router/src/plugins/telemetry/config_new/selectors.rs
@@ -16,13 +16,13 @@ use crate::plugins::cache::entity::CacheSubgraph;
 use crate::plugins::cache::metrics::CacheMetricContextKey;
 use crate::plugins::demand_control::CostContext;
 use crate::plugins::telemetry::config::AttributeValue;
+use crate::plugins::telemetry::config::TraceIdFormat;
 use crate::plugins::telemetry::config_new::cost::CostValue;
 use crate::plugins::telemetry::config_new::get_baggage;
 use crate::plugins::telemetry::config_new::instruments::Event;
 use crate::plugins::telemetry::config_new::instruments::InstrumentValue;
 use crate::plugins::telemetry::config_new::instruments::Standard;
 use crate::plugins::telemetry::config_new::trace_id;
-use crate::plugins::telemetry::config_new::DatadogId;
 use crate::plugins::telemetry::config_new::Selector;
 use crate::plugins::telemetry::config_new::ToOtelValue;
 use crate::query_planner::APOLLO_OPERATION_ID;
@@ -33,15 +33,6 @@ use crate::services::FIRST_EVENT_CONTEXT_KEY;
 use crate::spec::operation_limits::OperationLimits;
 use crate::Context;
 
-#[derive(Deserialize, JsonSchema, Clone, Debug, PartialEq)]
-#[serde(deny_unknown_fields, rename_all = "snake_case")]
-pub(crate) enum TraceIdFormat {
-    /// Open Telemetry trace ID, a hex string.
-    OpenTelemetry,
-    /// Datadog trace ID, a u64.
- Datadog, -} - #[derive(Deserialize, JsonSchema, Clone, Debug, PartialEq)] #[serde(deny_unknown_fields, rename_all = "snake_case")] pub(crate) enum OperationName { @@ -233,7 +224,12 @@ impl From<&SupergraphValue> for InstrumentValue { fn from(value: &SupergraphValue) -> Self { match value { SupergraphValue::Standard(s) => InstrumentValue::Standard(s.clone()), - SupergraphValue::Custom(selector) => InstrumentValue::Custom(selector.clone()), + SupergraphValue::Custom(selector) => match selector { + SupergraphSelector::Cost { .. } => { + InstrumentValue::Chunked(Event::Custom(selector.clone())) + } + _ => InstrumentValue::Custom(selector.clone()), + }, SupergraphValue::Event(e) => InstrumentValue::Chunked(e.clone()), } } @@ -681,13 +677,7 @@ impl Selector for RouterSelector { .map(opentelemetry::Value::from), RouterSelector::TraceId { trace_id: trace_id_format, - } => trace_id().map(|id| { - match trace_id_format { - TraceIdFormat::OpenTelemetry => id.to_string(), - TraceIdFormat::Datadog => id.to_datadog(), - } - .into() - }), + } => trace_id().map(|id| trace_id_format.format(id).into()), RouterSelector::Baggage { baggage, default, .. } => get_baggage(baggage).or_else(|| default.maybe_to_otel_value()), @@ -815,6 +805,55 @@ impl Selector for RouterSelector { _ => None, } } + + fn is_active(&self, stage: super::Stage) -> bool { + match stage { + super::Stage::Request => { + matches!( + self, + RouterSelector::RequestHeader { .. } + | RouterSelector::RequestMethod { .. } + | RouterSelector::TraceId { .. } + | RouterSelector::StudioOperationId { .. } + | RouterSelector::Baggage { .. } + | RouterSelector::Static(_) + | RouterSelector::Env { .. } + | RouterSelector::StaticField { .. } + ) + } + super::Stage::Response | super::Stage::ResponseEvent => matches!( + self, + RouterSelector::TraceId { .. } + | RouterSelector::StudioOperationId { .. } + | RouterSelector::OperationName { .. } + | RouterSelector::Baggage { .. } + | RouterSelector::Static(_) + | RouterSelector::Env { .. } + | RouterSelector::StaticField { .. } + | RouterSelector::ResponseHeader { .. } + | RouterSelector::ResponseContext { .. } + | RouterSelector::ResponseStatus { .. } + | RouterSelector::OnGraphQLError { .. } + ), + super::Stage::ResponseField => false, + super::Stage::Error => matches!( + self, + RouterSelector::TraceId { .. } + | RouterSelector::StudioOperationId { .. } + | RouterSelector::OperationName { .. } + | RouterSelector::Baggage { .. } + | RouterSelector::Static(_) + | RouterSelector::Env { .. } + | RouterSelector::StaticField { .. } + | RouterSelector::ResponseContext { .. } + | RouterSelector::Error { .. } + ), + super::Stage::Drop => matches!( + self, + RouterSelector::Static(_) | RouterSelector::StaticField { .. } + ), + } + } } impl Selector for SupergraphSelector { @@ -848,7 +887,11 @@ impl Selector for SupergraphSelector { .flatten() .map(opentelemetry::Value::from), - SupergraphSelector::Query { default, .. } => request + SupergraphSelector::Query { + default, + query: Query::String, + .. + } => request .supergraph_request .body() .query @@ -1005,6 +1048,25 @@ impl Selector for SupergraphSelector { ctx: &Context, ) -> Option { match self { + SupergraphSelector::Query { query, .. 
} => { + let limits_opt = ctx + .extensions() + .with_lock(|lock| lock.get::>().cloned()); + match query { + Query::Aliases => { + limits_opt.map(|limits| opentelemetry::Value::I64(limits.aliases as i64)) + } + Query::Depth => { + limits_opt.map(|limits| opentelemetry::Value::I64(limits.depth as i64)) + } + Query::Height => { + limits_opt.map(|limits| opentelemetry::Value::I64(limits.height as i64)) + } + Query::RootFields => limits_opt + .map(|limits| opentelemetry::Value::I64(limits.root_fields as i64)), + Query::String => None, + } + } SupergraphSelector::ResponseData { response_data, default, @@ -1160,6 +1222,66 @@ impl Selector for SupergraphSelector { _ => None, } } + + fn is_active(&self, stage: super::Stage) -> bool { + match stage { + super::Stage::Request => matches!( + self, + SupergraphSelector::OperationName { .. } + | SupergraphSelector::OperationKind { .. } + | SupergraphSelector::Query { .. } + | SupergraphSelector::RequestHeader { .. } + | SupergraphSelector::QueryVariable { .. } + | SupergraphSelector::RequestContext { .. } + | SupergraphSelector::Baggage { .. } + | SupergraphSelector::Env { .. } + | SupergraphSelector::Static(_) + | SupergraphSelector::StaticField { .. } + ), + super::Stage::Response => matches!( + self, + SupergraphSelector::Query { .. } + | SupergraphSelector::ResponseHeader { .. } + | SupergraphSelector::ResponseStatus { .. } + | SupergraphSelector::ResponseContext { .. } + | SupergraphSelector::OnGraphQLError { .. } + | SupergraphSelector::OperationName { .. } + | SupergraphSelector::OperationKind { .. } + | SupergraphSelector::IsPrimaryResponse { .. } + | SupergraphSelector::Static(_) + | SupergraphSelector::StaticField { .. } + ), + super::Stage::ResponseEvent => matches!( + self, + SupergraphSelector::ResponseData { .. } + | SupergraphSelector::ResponseErrors { .. } + | SupergraphSelector::Cost { .. } + | SupergraphSelector::OnGraphQLError { .. } + | SupergraphSelector::OperationName { .. } + | SupergraphSelector::OperationKind { .. } + | SupergraphSelector::IsPrimaryResponse { .. } + | SupergraphSelector::ResponseContext { .. } + | SupergraphSelector::Static(_) + | SupergraphSelector::StaticField { .. } + ), + super::Stage::ResponseField => false, + super::Stage::Error => matches!( + self, + SupergraphSelector::OperationName { .. } + | SupergraphSelector::OperationKind { .. } + | SupergraphSelector::Query { .. } + | SupergraphSelector::Error { .. } + | SupergraphSelector::Static(_) + | SupergraphSelector::StaticField { .. } + | SupergraphSelector::ResponseContext { .. } + | SupergraphSelector::IsPrimaryResponse { .. } + ), + super::Stage::Drop => matches!( + self, + SupergraphSelector::Static(_) | SupergraphSelector::StaticField { .. } + ), + } + } } impl Selector for SubgraphSelector { @@ -1357,6 +1479,39 @@ impl Selector for SubgraphSelector { .canonical_reason() .map(|reason| reason.into()), }, + SubgraphSelector::SubgraphOperationKind { .. } => response + .context + .get::<_, String>(OPERATION_KIND) + .ok() + .flatten() + .map(opentelemetry::Value::from), + SubgraphSelector::SupergraphOperationKind { .. } => response + .context + .get::<_, String>(OPERATION_KIND) + .ok() + .flatten() + .map(opentelemetry::Value::from), + SubgraphSelector::SupergraphOperationName { + supergraph_operation_name, + default, + .. 
+ } => { + let op_name = response.context.get(OPERATION_NAME).ok().flatten(); + match supergraph_operation_name { + OperationName::String => op_name.or_else(|| default.clone()), + OperationName::Hash => op_name.or_else(|| default.clone()).map(|op_name| { + let mut hasher = sha2::Sha256::new(); + hasher.update(op_name.as_bytes()); + let result = hasher.finalize(); + hex::encode(result) + }), + } + .map(opentelemetry::Value::from) + } + SubgraphSelector::SubgraphName { subgraph_name } if *subgraph_name => response + .subgraph_name + .clone() + .map(opentelemetry::Value::from), SubgraphSelector::SubgraphResponseBody { subgraph_response_body, default, @@ -1452,6 +1607,33 @@ impl Selector for SubgraphSelector { fn on_error(&self, error: &tower::BoxError, ctx: &Context) -> Option { match self { + SubgraphSelector::SubgraphOperationKind { .. } => ctx + .get::<_, String>(OPERATION_KIND) + .ok() + .flatten() + .map(opentelemetry::Value::from), + SubgraphSelector::SupergraphOperationKind { .. } => ctx + .get::<_, String>(OPERATION_KIND) + .ok() + .flatten() + .map(opentelemetry::Value::from), + SubgraphSelector::SupergraphOperationName { + supergraph_operation_name, + default, + .. + } => { + let op_name = ctx.get(OPERATION_NAME).ok().flatten(); + match supergraph_operation_name { + OperationName::String => op_name.or_else(|| default.clone()), + OperationName::Hash => op_name.or_else(|| default.clone()).map(|op_name| { + let mut hasher = sha2::Sha256::new(); + hasher.update(op_name.as_bytes()); + let result = hasher.finalize(); + hex::encode(result) + }), + } + .map(opentelemetry::Value::from) + } SubgraphSelector::Error { .. } => Some(error.to_string().into()), SubgraphSelector::Static(val) => Some(val.clone().into()), SubgraphSelector::StaticField { r#static } => Some(r#static.clone().into()), @@ -1475,6 +1657,63 @@ impl Selector for SubgraphSelector { _ => None, } } + + fn is_active(&self, stage: super::Stage) -> bool { + match stage { + super::Stage::Request => matches!( + self, + SubgraphSelector::SubgraphOperationName { .. } + | SubgraphSelector::SupergraphOperationName { .. } + | SubgraphSelector::SubgraphName { .. } + | SubgraphSelector::SubgraphOperationKind { .. } + | SubgraphSelector::SupergraphOperationKind { .. } + | SubgraphSelector::SupergraphQuery { .. } + | SubgraphSelector::SubgraphQuery { .. } + | SubgraphSelector::SubgraphQueryVariable { .. } + | SubgraphSelector::SupergraphQueryVariable { .. } + | SubgraphSelector::SubgraphRequestHeader { .. } + | SubgraphSelector::SupergraphRequestHeader { .. } + | SubgraphSelector::RequestContext { .. } + | SubgraphSelector::Baggage { .. } + | SubgraphSelector::Env { .. } + | SubgraphSelector::Static(_) + | SubgraphSelector::StaticField { .. } + ), + super::Stage::Response => matches!( + self, + SubgraphSelector::SubgraphResponseHeader { .. } + | SubgraphSelector::SubgraphResponseStatus { .. } + | SubgraphSelector::SubgraphOperationKind { .. } + | SubgraphSelector::SupergraphOperationKind { .. } + | SubgraphSelector::SupergraphOperationName { .. } + | SubgraphSelector::SubgraphName { .. } + | SubgraphSelector::SubgraphResponseBody { .. } + | SubgraphSelector::SubgraphResponseData { .. } + | SubgraphSelector::SubgraphResponseErrors { .. } + | SubgraphSelector::ResponseContext { .. } + | SubgraphSelector::OnGraphQLError { .. } + | SubgraphSelector::Static(_) + | SubgraphSelector::StaticField { .. } + | SubgraphSelector::Cache { .. 
} + ), + super::Stage::ResponseEvent => false, + super::Stage::ResponseField => false, + super::Stage::Error => matches!( + self, + SubgraphSelector::SubgraphOperationKind { .. } + | SubgraphSelector::SupergraphOperationKind { .. } + | SubgraphSelector::SupergraphOperationName { .. } + | SubgraphSelector::Error { .. } + | SubgraphSelector::Static(_) + | SubgraphSelector::StaticField { .. } + | SubgraphSelector::ResponseContext { .. } + ), + super::Stage::Drop => matches!( + self, + SubgraphSelector::Static(_) | SubgraphSelector::StaticField { .. } + ), + } + } } #[cfg(test)] @@ -2318,7 +2557,7 @@ mod test { let subscriber = tracing_subscriber::registry().with(otel::layer()); subscriber::with_default(subscriber, || { let selector = RouterSelector::TraceId { - trace_id: TraceIdFormat::OpenTelemetry, + trace_id: TraceIdFormat::Hexadecimal, }; assert_eq!( selector.on_request( @@ -2367,6 +2606,36 @@ mod test { .unwrap(), opentelemetry::Value::String("42".into()) ); + + let selector = RouterSelector::TraceId { + trace_id: TraceIdFormat::Uuid, + }; + + assert_eq!( + selector + .on_request( + &crate::services::RouterRequest::fake_builder() + .build() + .unwrap(), + ) + .unwrap(), + opentelemetry::Value::String("00000000-0000-0000-0000-00000000002a".into()) + ); + + let selector = RouterSelector::TraceId { + trace_id: TraceIdFormat::Decimal, + }; + + assert_eq!( + selector + .on_request( + &crate::services::RouterRequest::fake_builder() + .build() + .unwrap(), + ) + .unwrap(), + opentelemetry::Value::String("42".into()) + ); }); } @@ -2532,6 +2801,14 @@ mod test { assert_eq!( selector.on_request( &crate::services::SubgraphRequest::fake_builder() + .context(context.clone()) + .build(), + ), + Some("query".into()) + ); + assert_eq!( + selector.on_response( + &crate::services::SubgraphResponse::fake_builder() .context(context) .build(), ), @@ -2548,6 +2825,15 @@ mod test { assert_eq!( selector.on_request( &crate::services::SubgraphRequest::fake_builder() + .context(context.clone()) + .subgraph_name("test".to_string()) + .build(), + ), + Some("test".into()) + ); + assert_eq!( + selector.on_response( + &crate::services::SubgraphResponse::fake_builder() .context(context) .subgraph_name("test".to_string()) .build(), @@ -2683,6 +2969,14 @@ mod test { assert_eq!( selector.on_request( &crate::services::SubgraphRequest::fake_builder() + .context(context.clone()) + .build(), + ), + Some("topProducts".into()) + ); + assert_eq!( + selector.on_response( + &crate::services::SubgraphResponse::fake_builder() .context(context) .build(), ), @@ -2910,13 +3204,19 @@ mod test { selector .on_response( &crate::services::SupergraphResponse::fake_builder() - .context(context) + .context(context.clone()) .build() .unwrap() ) .unwrap(), 4.into() ); + assert_eq!( + selector + .on_response_event(&crate::graphql::Response::builder().build(), &context) + .unwrap(), + 4.into() + ); } #[test] diff --git a/apollo-router/src/plugins/telemetry/config_new/snapshots/apollo_router__plugins__telemetry__config_new__events__tests__supergraph_events_with_exists_condition@logs.snap b/apollo-router/src/plugins/telemetry/config_new/snapshots/apollo_router__plugins__telemetry__config_new__events__tests__supergraph_events_with_exists_condition@logs.snap new file mode 100644 index 0000000000..0c9630144c --- /dev/null +++ b/apollo-router/src/plugins/telemetry/config_new/snapshots/apollo_router__plugins__telemetry__config_new__events__tests__supergraph_events_with_exists_condition@logs.snap @@ -0,0 +1,22 @@ +--- +source: 
apollo-router/src/plugins/telemetry/config_new/events.rs +expression: yaml +--- +- fields: + kind: my.event + level: INFO + message: Auditing Router Event + span: + apollo_private.field_level_instrumentation_ratio: 0.01 + apollo_private.graphql.variables: "{}" + graphql.document: "query Test { foo }" + graphql.operation.name: Test + name: supergraph + otel.kind: INTERNAL + spans: + - apollo_private.field_level_instrumentation_ratio: 0.01 + apollo_private.graphql.variables: "{}" + graphql.document: "query Test { foo }" + graphql.operation.name: Test + name: supergraph + otel.kind: INTERNAL diff --git a/apollo-router/src/plugins/telemetry/config_new/spans.rs b/apollo-router/src/plugins/telemetry/config_new/spans.rs index ff4a3b00a0..61dfb0f35c 100644 --- a/apollo-router/src/plugins/telemetry/config_new/spans.rs +++ b/apollo-router/src/plugins/telemetry/config_new/spans.rs @@ -53,6 +53,26 @@ impl Spans { TelemetryDataKind::Traces, ); } + + pub(crate) fn validate(&self) -> Result<(), String> { + for (name, custom) in &self.router.attributes.custom { + custom + .validate() + .map_err(|err| format!("error for router span attribute {name:?}: {err}"))?; + } + for (name, custom) in &self.supergraph.attributes.custom { + custom + .validate() + .map_err(|err| format!("error for supergraph span attribute {name:?}: {err}"))?; + } + for (name, custom) in &self.subgraph.attributes.custom { + custom + .validate() + .map_err(|err| format!("error for subgraph span attribute {name:?}: {err}"))?; + } + + Ok(()) + } } #[derive(Deserialize, JsonSchema, Clone, Debug, Default)] diff --git a/apollo-router/src/plugins/telemetry/formatters/json.rs b/apollo-router/src/plugins/telemetry/formatters/json.rs index 8b0dd7fcf2..b7952c2701 100644 --- a/apollo-router/src/plugins/telemetry/formatters/json.rs +++ b/apollo-router/src/plugins/telemetry/formatters/json.rs @@ -21,6 +21,8 @@ use super::EventFormatter; use super::APOLLO_PRIVATE_PREFIX; use super::EXCLUDED_ATTRIBUTES; use crate::plugins::telemetry::config::AttributeValue; +use crate::plugins::telemetry::config::TraceIdFormat; +use crate::plugins::telemetry::config_new::logging::DisplayTraceIdFormat; use crate::plugins::telemetry::config_new::logging::JsonFormat; use crate::plugins::telemetry::dynamic_attribute::EventAttributes; use crate::plugins::telemetry::dynamic_attribute::LogAttributes; @@ -227,12 +229,29 @@ where if let Some(ref span) = current_span { if let Some((trace_id, span_id)) = get_trace_and_span_id(span) { - if self.config.display_trace_id { + let trace_id = match self.config.display_trace_id { + DisplayTraceIdFormat::Bool(true) + | DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Hexadecimal) + | DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::OpenTelemetry) => { + Some(TraceIdFormat::Hexadecimal.format(trace_id)) + } + DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Decimal) => { + Some(TraceIdFormat::Decimal.format(trace_id)) + } + DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Datadog) => { + Some(TraceIdFormat::Datadog.format(trace_id)) + } + DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Uuid) => { + Some(TraceIdFormat::Uuid.format(trace_id)) + } + DisplayTraceIdFormat::Bool(false) => None, + }; + if let Some(trace_id) = trace_id { serializer - .serialize_entry("trace_id", &trace_id.to_string()) + .serialize_entry("trace_id", &trace_id) .unwrap_or(()); } - if self.config.display_trace_id { + if self.config.display_span_id { serializer .serialize_entry("span_id", &span_id.to_string()) .unwrap_or(()); diff --git 
a/apollo-router/src/plugins/telemetry/formatters/text.rs b/apollo-router/src/plugins/telemetry/formatters/text.rs
index 4ea440bacc..d809496964 100644
--- a/apollo-router/src/plugins/telemetry/formatters/text.rs
+++ b/apollo-router/src/plugins/telemetry/formatters/text.rs
@@ -27,6 +27,8 @@ use super::get_trace_and_span_id;
 use super::EventFormatter;
 use super::APOLLO_PRIVATE_PREFIX;
 use super::EXCLUDED_ATTRIBUTES;
+use crate::plugins::telemetry::config::TraceIdFormat;
+use crate::plugins::telemetry::config_new::logging::DisplayTraceIdFormat;
 use crate::plugins::telemetry::config_new::logging::TextFormat;
 use crate::plugins::telemetry::dynamic_attribute::EventAttributes;
 use crate::plugins::telemetry::dynamic_attribute::LogAttributes;
@@ -324,7 +326,24 @@ where
 
         if let Some(ref span) = current_span {
             if let Some((trace_id, span_id)) = get_trace_and_span_id(span) {
-                if self.config.display_trace_id {
+                let trace_id = match self.config.display_trace_id {
+                    DisplayTraceIdFormat::Bool(true)
+                    | DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Hexadecimal)
+                    | DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::OpenTelemetry) => {
+                        Some(TraceIdFormat::Hexadecimal.format(trace_id))
+                    }
+                    DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Decimal) => {
+                        Some(TraceIdFormat::Decimal.format(trace_id))
+                    }
+                    DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Datadog) => {
+                        Some(TraceIdFormat::Datadog.format(trace_id))
+                    }
+                    DisplayTraceIdFormat::TraceIdFormat(TraceIdFormat::Uuid) => {
+                        Some(TraceIdFormat::Uuid.format(trace_id))
+                    }
+                    DisplayTraceIdFormat::Bool(false) => None,
+                };
+                if let Some(trace_id) = trace_id {
                     write!(writer, "trace_id: {} ", trace_id)?;
                 }
                 if self.config.display_span_id {
diff --git a/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs b/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs
index a5869daad5..6129b6b9e4 100644
--- a/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs
+++ b/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs
@@ -1,6 +1,8 @@
 use std::collections::HashSet;
 use std::time::Instant;
 
+use opentelemetry_api::KeyValue;
+use opentelemetry_api::Value;
 use tracing_core::field::Visit;
 use tracing_core::span;
 use tracing_core::Field;
@@ -70,14 +72,15 @@ where
             let idle: f64 = timings.idle as f64 / 1_000_000_000_f64;
             let busy: f64 = timings.busy as f64 / 1_000_000_000_f64;
             let name = span.metadata().name();
+
             if let Some(subgraph_name) = timings.subgraph.take() {
-                ::tracing::info!(histogram.apollo_router_span = duration, kind = %"duration", span = %name, subgraph = %subgraph_name);
-                ::tracing::info!(histogram.apollo_router_span = idle, kind = %"idle", span = %name, subgraph = %subgraph_name);
-                ::tracing::info!(histogram.apollo_router_span = busy, kind = %"busy", span = %name, subgraph = %subgraph_name);
+                record(duration, "duration", name, Some(&subgraph_name));
+                record(idle, "idle", name, Some(&subgraph_name));
+                record(busy, "busy", name, Some(&subgraph_name));
             } else {
-                ::tracing::info!(histogram.apollo_router_span = duration, kind = %"duration", span = %name);
-                ::tracing::info!(histogram.apollo_router_span = idle, kind = %"idle", span = %name);
-                ::tracing::info!(histogram.apollo_router_span = busy, kind = %"busy", span = %name);
+                record(duration, "duration", name, None);
+                record(idle, "idle", name, None);
+                record(busy, "busy", name, None);
             }
         }
     }
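All three timing kinds land on the single `apollo_router_span` histogram and are distinguished by the `kind` attribute, so tests can assert on them the same way the instrument tests elsewhere in this diff do. A hedged sketch (the `record` helper is introduced in the next hunk; `assert_histogram_sum!` is the histogram assertion macro this codebase uses in its metrics tests):

```rust
// Record one "duration" sample for a subgraph span, then assert on it.
record(0.5, "duration", "subgraph_request", Some("products"));
assert_histogram_sum!(
    "apollo_router_span",
    0.5,
    "kind" = "duration",
    "span" = "subgraph_request",
    "subgraph" = "products"
);
```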
@@ -105,6 +108,29 @@ where
     }
 }
 
+fn record(value: f64, kind: &'static str, name: &str, subgraph_name: Option<&str>) {
+    // Avoid a heap allocation for a vec by building a fixed-size array and slicing it
+    let attrs = [
+        KeyValue::new("kind", kind),
+        KeyValue::new("span", Value::String(name.to_string().into())),
+        KeyValue::new(
+            "subgraph",
+            Value::String(
+                subgraph_name
+                    .map(|s| s.to_string().into())
+                    .unwrap_or_else(|| "".into()),
+            ),
+        ),
+    ];
+    // Drop the empty subgraph attribute when no subgraph name is available
+    let slice = if subgraph_name.is_some() {
+        &attrs
+    } else {
+        &attrs[0..2]
+    };
+
+    f64_histogram!("apollo_router_span", "Duration of span", value, slice);
+}
+
 struct Timings {
     idle: i64,
     busy: i64,
diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs
index af5f78a0de..345d1936ee 100644
--- a/apollo-router/src/plugins/telemetry/mod.rs
+++ b/apollo-router/src/plugins/telemetry/mod.rs
@@ -10,6 +10,8 @@ use ::tracing::info_span;
 use ::tracing::Span;
 use axum::headers::HeaderName;
 use config_new::cache::CacheInstruments;
+use config_new::instruments::InstrumentsConfig;
+use config_new::instruments::StaticInstrument;
 use config_new::Selectors;
 use dashmap::DashMap;
 use futures::future::ready;
@@ -43,6 +45,7 @@ use opentelemetry::KeyValue;
 use opentelemetry_api::trace::TraceId;
 use opentelemetry_semantic_conventions::trace::HTTP_REQUEST_METHOD;
 use parking_lot::Mutex;
+use parking_lot::RwLock;
 use rand::Rng;
 use router_bridge::planner::UsageReporting;
 use serde_json_bytes::json;
@@ -53,6 +56,7 @@ use tokio::runtime::Handle;
 use tower::BoxError;
 use tower::ServiceBuilder;
 use tower::ServiceExt;
+use uuid::Uuid;
 
 use self::apollo::ForwardValues;
 use self::apollo::LicensedOperationCountByType;
@@ -198,7 +202,11 @@ pub(crate) struct Telemetry {
     apollo_metrics_sender: apollo_exporter::Sender,
     field_level_instrumentation_ratio: f64,
     sampling_filter_ratio: SamplerOption,
-
+    pub(crate) graphql_custom_instruments: RwLock<Arc<HashMap<String, StaticInstrument>>>,
+    router_custom_instruments: RwLock<Arc<HashMap<String, StaticInstrument>>>,
+    supergraph_custom_instruments: RwLock<Arc<HashMap<String, StaticInstrument>>>,
+    subgraph_custom_instruments: RwLock<Arc<HashMap<String, StaticInstrument>>>,
+    cache_custom_instruments: RwLock<Arc<HashMap<String, StaticInstrument>>>,
     activation: Mutex<TelemetryActivation>,
 }
@@ -252,6 +260,24 @@ impl Drop for Telemetry {
     }
 }
 
+struct BuiltinInstruments {
+    graphql_custom_instruments: Arc<HashMap<String, StaticInstrument>>,
+    router_custom_instruments: Arc<HashMap<String, StaticInstrument>>,
+    supergraph_custom_instruments: Arc<HashMap<String, StaticInstrument>>,
+    subgraph_custom_instruments: Arc<HashMap<String, StaticInstrument>>,
+    cache_custom_instruments: Arc<HashMap<String, StaticInstrument>>,
+}
+
+fn create_builtin_instruments(config: &InstrumentsConfig) -> BuiltinInstruments {
+    BuiltinInstruments {
+        graphql_custom_instruments: Arc::new(config.new_builtin_graphql_instruments()),
+        router_custom_instruments: Arc::new(config.new_builtin_router_instruments()),
+        supergraph_custom_instruments: Arc::new(config.new_builtin_supergraph_instruments()),
+        subgraph_custom_instruments: Arc::new(config.new_builtin_subgraph_instruments()),
+        cache_custom_instruments: Arc::new(config.new_builtin_cache_instruments()),
+    }
+}
+
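Taken together with the `Telemetry` struct fields above, the intended lifecycle appears to be: build all OpenTelemetry instruments once per configuration, share them behind `RwLock<Arc<...>>`, and let each service pipeline take a cheap `Arc` snapshot instead of re-creating instruments per request. A condensed sketch using only names from these hunks:

```rust
// On plugin creation (and again on hot reload; see the reload hunk further down):
let built = create_builtin_instruments(&config.instrumentation.instruments);
*self.router_custom_instruments.write() = built.router_custom_instruments;

// When a service pipeline is built, snapshot the shared map; cloning the Arc
// is cheap and keeps the underlying instruments alive across requests:
let static_router_instruments = self.router_custom_instruments.read().clone();
```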
explicitly or via defaulting. Set telemetry.instrumentation.spans.mode explicitly in your router.yaml to 'spec_compliant' for log and span attributes that follow OpenTelemetry semantic conventions. This option will be defaulted to 'spec_compliant' in a future release and eventually removed altogether"); } + let BuiltinInstruments { + graphql_custom_instruments, + router_custom_instruments, + supergraph_custom_instruments, + subgraph_custom_instruments, + cache_custom_instruments, + } = create_builtin_instruments(&config.instrumentation.instruments); + Ok(Telemetry { custom_endpoints: metrics_builder.custom_endpoints, apollo_metrics_sender: metrics_builder.apollo_metrics_sender, @@ -292,6 +329,11 @@ impl Plugin for Telemetry { .map(FilterMeterProvider::public), is_active: false, }), + graphql_custom_instruments: RwLock::new(graphql_custom_instruments), + router_custom_instruments: RwLock::new(router_custom_instruments), + supergraph_custom_instruments: RwLock::new(supergraph_custom_instruments), + subgraph_custom_instruments: RwLock::new(subgraph_custom_instruments), + cache_custom_instruments: RwLock::new(cache_custom_instruments), sampling_filter_ratio, config: Arc::new(config), }) @@ -306,6 +348,7 @@ impl Plugin for Telemetry { matches!(config.instrumentation.spans.mode, SpanMode::Deprecated); let field_level_instrumentation_ratio = self.field_level_instrumentation_ratio; let metrics_sender = self.apollo_metrics_sender.clone(); + let static_router_instruments = self.router_custom_instruments.read().clone(); ServiceBuilder::new() .map_response(move |response: router::Response| { @@ -400,7 +443,7 @@ impl Plugin for Telemetry { let custom_instruments: RouterInstruments = config_request .instrumentation .instruments - .new_router_instruments(); + .new_router_instruments(static_router_instruments.clone()); custom_instruments.on_request(request); let custom_events: RouterEvents = @@ -527,6 +570,8 @@ impl Plugin for Telemetry { let config_map_res_first = config.clone(); let config_map_res = config.clone(); let field_level_instrumentation_ratio = self.field_level_instrumentation_ratio; + let static_supergraph_instruments = self.supergraph_custom_instruments.read().clone(); + let static_graphql_instruments = self.graphql_custom_instruments.read().clone(); ServiceBuilder::new() .instrument(move |supergraph_req: &SupergraphRequest| span_mode.create_supergraph( &config_instrument.apollo, @@ -553,9 +598,10 @@ impl Plugin for Telemetry { // Append the trace ID with the right format, based on the config let format_id = |trace_id: TraceId| { let id = match config.exporters.tracing.response_trace_id.format { - TraceIdFormat::Hexadecimal => format!("{:032x}", trace_id), + TraceIdFormat::Hexadecimal | TraceIdFormat::OpenTelemetry => format!("{:032x}", trace_id), TraceIdFormat::Decimal => format!("{}", u128::from_be_bytes(trace_id.to_bytes())), - TraceIdFormat::Datadog => trace_id.to_datadog() + TraceIdFormat::Datadog => trace_id.to_datadog(), + TraceIdFormat::Uuid => Uuid::from_bytes(trace_id.to_bytes()).to_string(), }; HeaderValue::from_str(&id).ok() @@ -591,11 +637,11 @@ impl Plugin for Telemetry { let custom_instruments = config .instrumentation .instruments - .new_supergraph_instruments(); + .new_supergraph_instruments(static_supergraph_instruments.clone()); custom_instruments.on_request(req); - let custom_graphql_instruments:GraphQLInstruments = (&config + let custom_graphql_instruments: GraphQLInstruments = config .instrumentation - .instruments).into(); + 
.instruments.new_graphql_instruments(static_graphql_instruments.clone()); custom_graphql_instruments.on_request(req); let supergraph_events = config.instrumentation.events.new_supergraph_events(); @@ -690,6 +736,8 @@ impl Plugin for Telemetry { let subgraph_metrics_conf_resp = subgraph_metrics_conf_req.clone(); let subgraph_name = ByteString::from(name); let name = name.to_owned(); + let static_subgraph_instruments = self.subgraph_custom_instruments.read().clone(); + let static_cache_instruments = self.cache_custom_instruments.read().clone(); ServiceBuilder::new() .instrument(move |req: &SubgraphRequest| span_mode.create_subgraph(name.as_str(), req)) .map_request(move |req: SubgraphRequest| request_ftv1(req)) @@ -710,13 +758,15 @@ impl Plugin for Telemetry { let custom_instruments = config .instrumentation .instruments - .new_subgraph_instruments(); + .new_subgraph_instruments(static_subgraph_instruments.clone()); custom_instruments.on_request(sub_request); let custom_events = config.instrumentation.events.new_subgraph_events(); custom_events.on_request(sub_request); - let custom_cache_instruments: CacheInstruments = - (&config.instrumentation.instruments).into(); + let custom_cache_instruments: CacheInstruments = config + .instrumentation + .instruments + .new_cache_instruments(static_cache_instruments.clone()); custom_cache_instruments.on_request(sub_request); ( @@ -843,6 +893,20 @@ impl Telemetry { activation.reload_metrics(); + let BuiltinInstruments { + graphql_custom_instruments, + router_custom_instruments, + supergraph_custom_instruments, + subgraph_custom_instruments, + cache_custom_instruments, + } = create_builtin_instruments(&self.config.instrumentation.instruments); + + *self.graphql_custom_instruments.write() = graphql_custom_instruments; + *self.router_custom_instruments.write() = router_custom_instruments; + *self.supergraph_custom_instruments.write() = supergraph_custom_instruments; + *self.subgraph_custom_instruments.write() = subgraph_custom_instruments; + *self.cache_custom_instruments.write() = cache_custom_instruments; + reload_fmt(create_fmt_layer(&self.config)); activation.is_active = true; } @@ -878,6 +942,7 @@ impl Telemetry { if let Some(from_request_header) = &propagation.request.header_name { propagators.push(Box::new(CustomTraceIdPropagator::new( from_request_header.to_string(), + propagation.request.format.clone(), ))); } @@ -1469,9 +1534,11 @@ impl Telemetry { .extensions() .with_lock(|lock| lock.get::().cloned()) .unwrap_or_default(); + // Clear the enum values from responses when we send them in a report so that we properly report enum response + // values for deferred responses and subscriptions. 
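+ // (A hedged sketch of the behavior change, with the type parameter elided as above:
+ // with the previous `get::<..>().cloned()`, the references stayed in the context
+ // extensions, so a subscription or deferred query that emits several responses would
+ // re-report the same enum values in every report; `remove` takes them out of the map,
+ // so each batch of references is reported exactly once.)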
let enum_response_references = context .extensions() - .with_lock(|lock| lock.get::().cloned()) + .with_lock(|mut lock| lock.remove::()) .unwrap_or_default(); SingleStatsReport { @@ -1957,13 +2024,15 @@ fn store_ftv1(subgraph_name: &ByteString, resp: SubgraphResponse) -> SubgraphRes struct CustomTraceIdPropagator { header_name: String, fields: [String; 1], + format: TraceIdFormat, } impl CustomTraceIdPropagator { - fn new(header_name: String) -> Self { + fn new(header_name: String, format: TraceIdFormat) -> Self { Self { fields: [header_name.clone()], header_name, + format, } } @@ -1995,9 +2064,9 @@ impl TextMapPropagator for CustomTraceIdPropagator { fn inject_context(&self, cx: &opentelemetry::Context, injector: &mut dyn Injector) { let span = cx.span(); let span_context = span.span_context(); - if span_context.is_valid() { - let header_value = format!("{}", span_context.trace_id()); - injector.set(&self.header_name, header_value); + if span_context.trace_id() != TraceId::INVALID { + let formatted_trace_id = self.format.format(span_context.trace_id()); + injector.set(&self.header_name, formatted_trace_id); } } @@ -2067,6 +2136,14 @@ mod tests { use http::StatusCode; use insta::assert_snapshot; use itertools::Itertools; + use opentelemetry_api::propagation::Injector; + use opentelemetry_api::propagation::TextMapPropagator; + use opentelemetry_api::trace::SpanContext; + use opentelemetry_api::trace::SpanId; + use opentelemetry_api::trace::TraceContextExt; + use opentelemetry_api::trace::TraceFlags; + use opentelemetry_api::trace::TraceId; + use opentelemetry_api::trace::TraceState; use serde_json::Value; use serde_json_bytes::json; use serde_json_bytes::ByteString; @@ -2088,6 +2165,7 @@ mod tests { use crate::error::FetchError; use crate::graphql; use crate::graphql::Error; + use crate::graphql::IntoGraphQLErrors; use crate::graphql::Request; use crate::http_ext; use crate::json_ext::Object; @@ -2096,6 +2174,9 @@ mod tests { use crate::plugin::test::MockSubgraphService; use crate::plugin::test::MockSupergraphService; use crate::plugin::DynPlugin; + use crate::plugins::demand_control::CostContext; + use crate::plugins::demand_control::DemandControlError; + use crate::plugins::telemetry::config::TraceIdFormat; use crate::plugins::telemetry::handle_error_internal; use crate::services::router::body::get_body_bytes; use crate::services::RouterRequest; @@ -3132,11 +3213,164 @@ mod tests { let trace_id = String::from("04f9e396-465c-4840-bc2b-f493b8b1a7fc"); let expected_trace_id = String::from("04f9e396465c4840bc2bf493b8b1a7fc"); - let propagator = CustomTraceIdPropagator::new(header.clone()); + let propagator = CustomTraceIdPropagator::new(header.clone(), TraceIdFormat::Uuid); let mut headers: HashMap = HashMap::new(); headers.insert(header, trace_id); let span = propagator.extract_span_context(&headers); assert!(span.is_some()); assert_eq!(span.unwrap().trace_id().to_string(), expected_trace_id); } + + #[test] + fn test_header_propagation_format() { + struct Injected(HashMap); + impl Injector for Injected { + fn set(&mut self, key: &str, value: String) { + self.0.insert(key.to_string(), value); + } + } + let mut injected = Injected(HashMap::new()); + let _ctx = opentelemetry::Context::new() + .with_remote_span_context(SpanContext::new( + TraceId::from_u128(0x04f9e396465c4840bc2bf493b8b1a7fc), + SpanId::INVALID, + TraceFlags::default(), + false, + TraceState::default(), + )) + .attach(); + let propagator = CustomTraceIdPropagator::new("my_header".to_string(), TraceIdFormat::Uuid); + 
propagator.inject_context(&opentelemetry::Context::current(), &mut injected); + assert_eq!( + injected.0.get("my_header").unwrap(), + "04f9e396-465c-4840-bc2b-f493b8b1a7fc" + ); + } + + async fn make_failed_demand_control_request(plugin: &dyn DynPlugin, cost_details: CostContext) { + let mut mock_service = MockSupergraphService::new(); + mock_service + .expect_call() + .times(1) + .returning(move |req: SupergraphRequest| { + req.context.extensions().with_lock(|mut lock| { + lock.insert(cost_details.clone()); + }); + + let errors = if cost_details.result == "COST_ESTIMATED_TOO_EXPENSIVE" { + DemandControlError::EstimatedCostTooExpensive { + estimated_cost: cost_details.estimated, + max_cost: (cost_details.estimated - 5.0).max(0.0), + } + .into_graphql_errors() + .unwrap() + } else if cost_details.result == "COST_ACTUAL_TOO_EXPENSIVE" { + DemandControlError::ActualCostTooExpensive { + actual_cost: cost_details.actual, + max_cost: (cost_details.actual - 5.0).max(0.0), + } + .into_graphql_errors() + .unwrap() + } else { + Vec::new() + }; + + SupergraphResponse::fake_builder() + .context(req.context) + .data( + serde_json::to_value(graphql::Response::builder().errors(errors).build()) + .unwrap(), + ) + .build() + }); + + let mut service = plugin.supergraph_service(BoxService::new(mock_service)); + let router_req = SupergraphRequest::fake_builder().build().unwrap(); + let _router_response = service + .ready() + .await + .unwrap() + .call(router_req) + .await + .unwrap() + .next_response() + .await + .unwrap(); + } + + #[tokio::test] + async fn test_demand_control_delta_filter() { + async { + let plugin = create_plugin_with_config(include_str!( + "testdata/demand_control_delta_filter.router.yaml" + )) + .await; + make_failed_demand_control_request( + plugin.as_ref(), + CostContext { + estimated: 10.0, + actual: 8.0, + result: "COST_ACTUAL_TOO_EXPENSIVE", + strategy: "static_estimated", + }, + ) + .await; + + assert_histogram_sum!("cost.rejected.operations", 8.0); + } + .with_metrics() + .await; + } + + #[tokio::test] + async fn test_demand_control_result_filter() { + async { + let plugin = create_plugin_with_config(include_str!( + "testdata/demand_control_result_filter.router.yaml" + )) + .await; + make_failed_demand_control_request( + plugin.as_ref(), + CostContext { + estimated: 10.0, + actual: 0.0, + result: "COST_ESTIMATED_TOO_EXPENSIVE", + strategy: "static_estimated", + }, + ) + .await; + + assert_histogram_sum!("cost.rejected.operations", 10.0); + } + .with_metrics() + .await; + } + + #[tokio::test] + async fn test_demand_control_result_attributes() { + async { + let plugin = create_plugin_with_config(include_str!( + "testdata/demand_control_result_attribute.router.yaml" + )) + .await; + make_failed_demand_control_request( + plugin.as_ref(), + CostContext { + estimated: 10.0, + actual: 0.0, + result: "COST_ESTIMATED_TOO_EXPENSIVE", + strategy: "static_estimated", + }, + ) + .await; + + assert_histogram_sum!( + "cost.estimated", + 10.0, + "cost.result" = "COST_ESTIMATED_TOO_EXPENSIVE" + ); + } + .with_metrics() + .await; + } } diff --git a/apollo-router/src/plugins/telemetry/otel/layer.rs b/apollo-router/src/plugins/telemetry/otel/layer.rs index 495d22f8ec..e1d20ec739 100644 --- a/apollo-router/src/plugins/telemetry/otel/layer.rs +++ b/apollo-router/src/plugins/telemetry/otel/layer.rs @@ -758,7 +758,9 @@ where let parent_cx = self.parent_context(attrs, &ctx); // Record new trace id if there is no active parent span - let trace_id = if parent_cx.span().span_context().is_valid() { + let 
trace_id = if parent_cx.span().span_context().is_valid() + || parent_cx.span().span_context().trace_id() != opentelemetry::trace::TraceId::INVALID + { // It probably means we have a remote parent trace parent_cx.span().span_context().trace_id() } else { diff --git a/apollo-router/src/plugins/telemetry/testdata/custom_events.router.yaml b/apollo-router/src/plugins/telemetry/testdata/custom_events.router.yaml index 1edfcd3abe..c3c23cb68f 100644 --- a/apollo-router/src/plugins/telemetry/testdata/custom_events.router.yaml +++ b/apollo-router/src/plugins/telemetry/testdata/custom_events.router.yaml @@ -116,9 +116,13 @@ telemetry: response: level: warn condition: - eq: - - subgraph_response_header: x-log-response - - "log" + all: + - eq: + - subgraph_response_header: x-log-response + - "log" + - eq: + - subgraph_name: true + - "subgraph" error: error # Custom events diff --git a/apollo-router/src/plugins/telemetry/testdata/custom_events_exists_condition.router.yaml b/apollo-router/src/plugins/telemetry/testdata/custom_events_exists_condition.router.yaml new file mode 100644 index 0000000000..0ee5b021f7 --- /dev/null +++ b/apollo-router/src/plugins/telemetry/testdata/custom_events_exists_condition.router.yaml @@ -0,0 +1,13 @@ +telemetry: + instrumentation: + events: + supergraph: + my.event: + message: "Auditing Router Event" + level: info + on: request + attributes: + graphql.operation.name: true + condition: + exists: + operation_name: string \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/testdata/demand_control_delta_filter.router.yaml b/apollo-router/src/plugins/telemetry/testdata/demand_control_delta_filter.router.yaml new file mode 100644 index 0000000000..5b2e55a772 --- /dev/null +++ b/apollo-router/src/plugins/telemetry/testdata/demand_control_delta_filter.router.yaml @@ -0,0 +1,47 @@ +# Demand control enabled in measure mode. +demand_control: + enabled: true + # Use measure mode to monitor the costs of your operations without rejecting any. + mode: measure + + strategy: + # Static estimated strategy has a fixed cost for elements. + static_estimated: + # The assumed returned list size for operations. Set this to the maximum number of items in a GraphQL list + list_size: 10 + # The maximum cost of a single operation, above which the operation is rejected. + max: 1000 + +# Basic telemetry configuration for cost. +telemetry: + exporters: + metrics: + common: + service_name: apollo-router + views: + # Define a custom view because cost is different than the default latency-oriented view of OpenTelemetry + - name: cost.* + aggregation: + histogram: + buckets: + - 0 + - 10 + - 100 + - 1000 + - 10000 + - 100000 + - 1000000 + + instrumentation: + instruments: + supergraph: + cost.rejected.operations: + type: histogram + value: + cost: actual + description: "Estimated cost per rejected operation." + unit: delta + condition: + gt: + - cost: delta + - 1 \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/testdata/demand_control_result_attribute.router.yaml b/apollo-router/src/plugins/telemetry/testdata/demand_control_result_attribute.router.yaml new file mode 100644 index 0000000000..52e2d42dcd --- /dev/null +++ b/apollo-router/src/plugins/telemetry/testdata/demand_control_result_attribute.router.yaml @@ -0,0 +1,40 @@ +# Demand control enabled in measure mode. +demand_control: + enabled: true + # Use measure mode to monitor the costs of your operations without rejecting any. 
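+ # (The other documented mode is `enforce`, which additionally rejects operations
+ # whose cost exceeds `max`; measure mode only records cost metrics.)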
+ mode: measure + + strategy: + # Static estimated strategy has a fixed cost for elements. + static_estimated: + # The assumed returned list size for operations. Set this to the maximum number of items in a GraphQL list + list_size: 10 + # The maximum cost of a single operation, above which the operation is rejected. + max: 1000 + +# Basic telemetry configuration for cost. +telemetry: + exporters: + metrics: + common: + service_name: apollo-router + views: + # Define a custom view because cost is different than the default latency-oriented view of OpenTelemetry + - name: cost.* + aggregation: + histogram: + buckets: + - 0 + - 10 + - 100 + - 1000 + - 10000 + - 100000 + - 1000000 + + instrumentation: + instruments: + supergraph: + cost.estimated: + attributes: + cost.result: true \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/testdata/demand_control_result_filter.router.yaml b/apollo-router/src/plugins/telemetry/testdata/demand_control_result_filter.router.yaml new file mode 100644 index 0000000000..b01ddf9d81 --- /dev/null +++ b/apollo-router/src/plugins/telemetry/testdata/demand_control_result_filter.router.yaml @@ -0,0 +1,52 @@ +# Demand control enabled in measure mode. +demand_control: + enabled: true + # Use measure mode to monitor the costs of your operations without rejecting any. + mode: measure + + strategy: + # Static estimated strategy has a fixed cost for elements. + static_estimated: + # The assumed returned list size for operations. Set this to the maximum number of items in a GraphQL list + list_size: 10 + # The maximum cost of a single operation, above which the operation is rejected. + max: 1000 + +# Basic telemetry configuration for cost. +telemetry: + exporters: + metrics: + common: + service_name: apollo-router + views: + # Define a custom view because cost is different than the default latency-oriented view of OpenTelemetry + - name: cost.* + aggregation: + histogram: + buckets: + - 0 + - 10 + - 100 + - 1000 + - 10000 + - 100000 + - 1000000 + + instrumentation: + instruments: + supergraph: + # custom instrument + cost.rejected.operations: + type: histogram + value: + # Estimated cost is used to populate the histogram + cost: estimated + description: "Estimated cost per rejected operation." + unit: delta + condition: + eq: + # Only show rejected operations. 
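+ # (`cost: result` selects the demand control result code for the request, e.g.
+ # "COST_ESTIMATED_TOO_EXPENSIVE" as asserted in the tests above; operations with
+ # any other result code fail the `eq` condition and are not recorded.)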
+ - cost: result + - "COST_ESTIMATED_TOO_EXPENSIVE" + attributes: + graphql.operation.name: true # Graphql operation name is added as an attribute \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/v05.rs b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/v05.rs index 8cd3f8e66f..fd1590966e 100644 --- a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/v05.rs +++ b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/exporter/model/v05.rs @@ -128,8 +128,12 @@ fn write_unified_tag<'a>( Ok(()) } -fn get_sampling_priority(_span: &SpanData) -> f64 { - 1.0 +fn get_sampling_priority(span: &SpanData) -> f64 { + if span.span_context.trace_state().priority_sampling_enabled() { + 1.0 + } else { + 0.0 + } } fn get_measuring(span: &SpanData) -> f64 { diff --git a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/mod.rs b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/mod.rs index d632eb5872..1c586d48c8 100644 --- a/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/mod.rs +++ b/apollo-router/src/plugins/telemetry/tracing/datadog_exporter/mod.rs @@ -176,7 +176,7 @@ pub(crate) mod propagator { const DATADOG_SAMPLING_PRIORITY_HEADER: &str = "x-datadog-sampling-priority"; const TRACE_FLAG_DEFERRED: TraceFlags = TraceFlags::new(0x02); - pub(crate) const TRACE_STATE_PRIORITY_SAMPLING: &str = "psr"; + const TRACE_STATE_PRIORITY_SAMPLING: &str = "psr"; pub(crate) const TRACE_STATE_MEASURE: &str = "m"; pub(crate) const TRACE_STATE_TRUE_VALUE: &str = "1"; pub(crate) const TRACE_STATE_FALSE_VALUE: &str = "0"; @@ -243,6 +243,10 @@ pub(crate) mod propagator { fn with_measuring(&self, enabled: bool) -> TraceState; fn measuring_enabled(&self) -> bool; + + fn with_priority_sampling(&self, enabled: bool) -> TraceState; + + fn priority_sampling_enabled(&self) -> bool; } impl DatadogTraceState for TraceState { @@ -256,6 +260,20 @@ pub(crate) mod propagator { .map(trace_flag_to_boolean) .unwrap_or_default() } + + fn with_priority_sampling(&self, enabled: bool) -> TraceState { + self.insert( + TRACE_STATE_PRIORITY_SAMPLING, + boolean_to_trace_state_flag(enabled), + ) + .unwrap_or_else(|_err| self.clone()) + } + + fn priority_sampling_enabled(&self) -> bool { + self.get(TRACE_STATE_PRIORITY_SAMPLING) + .map(trace_flag_to_boolean) + .unwrap_or_default() + } } enum SamplingPriority { @@ -293,7 +311,16 @@ pub(crate) mod propagator { } fn create_trace_state_and_flags(trace_flags: TraceFlags) -> (TraceState, TraceFlags) { - (TraceState::default(), trace_flags) + if trace_flags & TRACE_FLAG_DEFERRED == TRACE_FLAG_DEFERRED { + (TraceState::default(), trace_flags) + } else { + ( + DatadogTraceStateBuilder::default() + .with_priority_sampling(trace_flags.is_sampled()) + .build(), + TraceFlags::SAMPLED, + ) + } } impl DatadogPropagator { @@ -373,7 +400,7 @@ pub(crate) mod propagator { } fn get_sampling_priority(span_context: &SpanContext) -> SamplingPriority { - if span_context.is_sampled() { + if span_context.trace_state().priority_sampling_enabled() { SamplingPriority::AutoKeep } else { SamplingPriority::AutoReject @@ -433,8 +460,8 @@ pub(crate) mod propagator { (vec![(DATADOG_TRACE_ID_HEADER, "garbage")], SpanContext::empty_context()), (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "garbage")], SpanContext::new(TraceId::from_u128(1234), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), (vec![(DATADOG_TRACE_ID_HEADER, "1234"), 
(DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::default(), true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(false).build())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(true).build())), ] } @@ -446,8 +473,8 @@ pub(crate) mod propagator { (vec![], SpanContext::new(TraceId::from_hex("1234").unwrap(), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), (vec![], SpanContext::new(TraceId::from_hex("1234").unwrap(), SpanId::INVALID, TraceFlags::SAMPLED, true, TraceState::default())), (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::default(), true, TraceState::default())), - (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(false).build())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, DatadogTraceStateBuilder::default().with_priority_sampling(true).build())), ] } diff --git a/apollo-router/src/plugins/test.rs b/apollo-router/src/plugins/test.rs index ea155f0cd8..50ceac0110 100644 --- a/apollo-router/src/plugins/test.rs +++ b/apollo-router/src/plugins/test.rs @@ -14,11 +14,13 @@ use crate::plugin::DynPlugin; use crate::plugin::Plugin; use crate::plugin::PluginInit; use crate::query_planner::BridgeQueryPlanner; +use crate::query_planner::PlannerMode; use crate::services::execution; use crate::services::http; use crate::services::router; use crate::services::subgraph; use crate::services::supergraph; +use crate::spec::Schema; use crate::Configuration; use crate::Notify; @@ -89,17 +91,18 @@ impl PluginTestHarness { .unwrap_or(Value::Object(Default::default())); let (supergraph_sdl, parsed_schema, subgraph_schemas) = if let Some(schema) = schema 
{ - let planner = BridgeQueryPlanner::new(schema.to_string(), Arc::new(config), None) - .await - .unwrap(); - ( - schema.to_string(), - planner.schema().supergraph_schema().clone(), - planner.subgraph_schemas(), - ) + let schema = Schema::parse(schema, &config).unwrap(); + let sdl = schema.raw_sdl.clone(); + let supergraph = schema.supergraph_schema().clone(); + let rust_planner = PlannerMode::maybe_rust(&schema, &config).unwrap(); + let planner = + BridgeQueryPlanner::new(schema.into(), Arc::new(config), None, rust_planner) + .await + .unwrap(); + (sdl, supergraph, planner.subgraph_schemas()) } else { ( - "".to_string(), + "".to_string().into(), Valid::assume_valid(apollo_compiler::Schema::new()), Default::default(), ) @@ -107,7 +110,7 @@ impl PluginTestHarness { let plugin_init = PluginInit::builder() .config(config_for_plugin.clone()) - .supergraph_sdl(Arc::new(supergraph_sdl)) + .supergraph_sdl(supergraph_sdl) .supergraph_schema(Arc::new(parsed_schema)) .subgraph_schemas(subgraph_schemas) .notify(Notify::default()) diff --git a/apollo-router/src/plugins/traffic_shaping/mod.rs b/apollo-router/src/plugins/traffic_shaping/mod.rs index a3ddda0e6d..e4e2eabfa1 100644 --- a/apollo-router/src/plugins/traffic_shaping/mod.rs +++ b/apollo-router/src/plugins/traffic_shaping/mod.rs @@ -484,6 +484,7 @@ mod test { use crate::services::PluggableSupergraphServiceBuilder; use crate::services::SupergraphRequest; use crate::services::SupergraphResponse; + use crate::spec::Schema; use crate::Configuration; static EXPECTED_RESPONSE: Lazy = Lazy::new(|| { @@ -568,14 +569,14 @@ mod test { .unwrap(); let config = Arc::new(config); + let schema = Arc::new(Schema::parse(schema, &config).unwrap()); let planner = BridgeQueryPlannerPool::new( - schema.to_string(), + schema.clone(), config.clone(), NonZeroUsize::new(1).unwrap(), ) .await .unwrap(); - let schema = planner.schema(); let subgraph_schemas = planner.subgraph_schemas(); let mut builder = diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index cdd1d710ef..fc4ccce41d 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -10,6 +10,7 @@ use apollo_compiler::ast; use apollo_compiler::validation::Valid; use apollo_compiler::Name; use apollo_federation::error::FederationError; +use apollo_federation::error::SingleFederationError; use apollo_federation::query_plan::query_planner::QueryPlanner; use futures::future::BoxFuture; use opentelemetry_api::metrics::MeterProvider as _; @@ -64,6 +65,10 @@ use crate::Configuration; pub(crate) const RUST_QP_MODE: &str = "rust"; const JS_QP_MODE: &str = "js"; +const UNSUPPORTED_CONTEXT: &str = "context"; +const UNSUPPORTED_OVERRIDES: &str = "overrides"; +const UNSUPPORTED_FED1: &str = "fed1"; +const INTERNAL_INIT_ERROR: &str = "internal"; #[derive(Clone)] /// A query planner that calls out to the nodejs router-bridge query planner. 
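// A hedged sketch of the construction sequence callers now follow (the same calls as in
// the test harness above; error handling elided, and `sdl`/`config` are placeholder
// names assuming `config` is an `Arc<Configuration>`):
//
//     let schema = Arc::new(Schema::parse(sdl, &config)?);
//     let rust_planner = PlannerMode::maybe_rust(&schema, &config)?;
//     let planner = BridgeQueryPlanner::new(schema, config, None, rust_planner).await?;
//
// Parsing the schema once up front lets a pool share one `Schema` and one optional Rust
// planner across all of its planners instead of re-parsing the SDL for each.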
@@ -81,7 +86,7 @@ pub(crate) struct BridgeQueryPlanner { } #[derive(Clone)] -enum PlannerMode { +pub(crate) enum PlannerMode { Js(Arc>), Both { js: Arc>, @@ -115,6 +120,7 @@ impl PlannerMode { schema: &Schema, configuration: &Configuration, old_planner: Option>>, + rust_planner: Option>, ) -> Result { Ok(match configuration.experimental_query_planner_mode { QueryPlannerMode::New => Self::Rust { @@ -124,18 +130,50 @@ impl PlannerMode { old_planner, ) .await?, - rust: Self::rust(schema, configuration)?, + rust: rust_planner + .expect("expected Rust QP instance for `experimental_query_planner_mode: new`"), }, QueryPlannerMode::Legacy => { Self::Js(Self::js(&schema.raw_sdl, configuration, old_planner).await?) } QueryPlannerMode::Both => Self::Both { js: Self::js(&schema.raw_sdl, configuration, old_planner).await?, - rust: Self::rust(schema, configuration)?, + rust: rust_planner.expect( + "expected Rust QP instance for `experimental_query_planner_mode: both`", + ), }, + QueryPlannerMode::BothBestEffort => { + if let Some(rust) = rust_planner { + Self::Both { + js: Self::js(&schema.raw_sdl, configuration, old_planner).await?, + rust, + } + } else { + Self::Js(Self::js(&schema.raw_sdl, configuration, old_planner).await?) + } + } }) } + pub(crate) fn maybe_rust( + schema: &Schema, + configuration: &Configuration, + ) -> Result>, ServiceBuildError> { + match configuration.experimental_query_planner_mode { + QueryPlannerMode::Legacy => Ok(None), + QueryPlannerMode::New | QueryPlannerMode::Both => { + Ok(Some(Self::rust(schema, configuration)?)) + } + QueryPlannerMode::BothBestEffort => match Self::rust(schema, configuration) { + Ok(planner) => Ok(Some(planner)), + Err(error) => { + tracing::info!("Falling back to the legacy query planner: {error}"); + Ok(None) + } + }, + } + } + fn rust( schema: &Schema, configuration: &Configuration, @@ -146,17 +184,41 @@ impl PlannerMode { .reuse_query_fragments .unwrap_or(true), subgraph_graphql_validation: false, - generate_query_fragments: false, + generate_query_fragments: configuration.supergraph.generate_query_fragments, incremental_delivery: apollo_federation::query_plan::query_planner::QueryPlanIncrementalDeliveryConfig { enable_defer: configuration.supergraph.defer_support, }, debug: Default::default(), }; - Ok(Arc::new(QueryPlanner::new( - schema.federation_supergraph(), - config, - )?)) + let result = QueryPlanner::new(schema.federation_supergraph(), config); + + match &result { + Err(FederationError::SingleFederationError { + inner: error, + trace: _, + }) => match error { + SingleFederationError::UnsupportedFederationVersion { .. 
} => { + metric_rust_qp_init(Some(UNSUPPORTED_FED1)); + } + SingleFederationError::UnsupportedFeature { message: _, kind } => match kind { + apollo_federation::error::UnsupportedFeatureKind::ProgressiveOverrides => { + metric_rust_qp_init(Some(UNSUPPORTED_OVERRIDES)) + } + apollo_federation::error::UnsupportedFeatureKind::Context => { + metric_rust_qp_init(Some(UNSUPPORTED_CONTEXT)) + } + _ => metric_rust_qp_init(Some(INTERNAL_INIT_ERROR)), + }, + _ => { + metric_rust_qp_init(Some(INTERNAL_INIT_ERROR)); + } + }, + Err(_) => metric_rust_qp_init(Some(INTERNAL_INIT_ERROR)), + Ok(_) => metric_rust_qp_init(None), + } + + Ok(Arc::new(result.map_err(ServiceBuildError::QpInitError)?)) } async fn js( @@ -323,12 +385,13 @@ impl PlannerMode { impl BridgeQueryPlanner { pub(crate) async fn new( - schema: String, + schema: Arc, configuration: Arc, - old_planner: Option>>, + old_js_planner: Option>>, + rust_planner: Option>, ) -> Result { - let schema = Schema::parse(&schema, &configuration)?; - let planner = PlannerMode::new(&schema, &configuration, old_planner).await?; + let planner = + PlannerMode::new(&schema, &configuration, old_js_planner, rust_planner).await?; let subgraph_schemas = Arc::new(planner.subgraphs().await?); @@ -353,7 +416,7 @@ impl BridgeQueryPlanner { Ok(Self { planner, - schema: Arc::new(schema), + schema, subgraph_schemas, introspection, enable_authorization_directives, @@ -369,6 +432,7 @@ impl BridgeQueryPlanner { .clone() } + #[cfg(test)] pub(crate) fn schema(&self) -> Arc { self.schema.clone() } @@ -606,6 +670,7 @@ impl BridgeQueryPlanner { formatted_query_plan, query: Arc::new(selections), query_metrics, + estimated_size: Default::default(), }), }) } @@ -878,7 +943,7 @@ impl BridgeQueryPlanner { } /// Data coming from the `plan` method on the router_bridge -// Note: Reexported under `apollo_compiler::_private` +// Note: Reexported under `apollo_router::_private` #[derive(Debug, Clone, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct QueryPlanResult { @@ -886,6 +951,12 @@ pub struct QueryPlanResult { pub(super) query_plan: QueryPlan, } +impl QueryPlanResult { + pub fn formatted_query_plan(&self) -> Option<&str> { + self.formatted_query_plan.as_deref().map(String::as_str) + } +} + #[derive(Debug, Clone, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] /// The root query plan container. 
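// A hedged configuration sketch (not part of this diff): the new `BothBestEffort`
// variant corresponds to a router.yaml setting along the lines of
//
//     experimental_query_planner_mode: both_best_effort
//
// which compares both planners when the Rust planner initializes successfully and, as
// `maybe_rust` above shows, logs "Falling back to the legacy query planner" and runs
// the JS planner alone when it does not.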
@@ -894,7 +965,8 @@ pub(super) struct QueryPlan { pub(super) node: Option>, } -pub(crate) fn render_diff(differences: &[diff::Result<&str>]) -> String { +// Note: Reexported under `apollo_router::_private` +pub fn render_diff(differences: &[diff::Result<&str>]) -> String { let mut output = String::new(); for diff_line in differences { match diff_line { @@ -929,6 +1001,25 @@ pub(crate) fn metric_query_planning_plan_duration(planner: &'static str, start: ); } +pub(crate) fn metric_rust_qp_init(init_error_kind: Option<&'static str>) { + if let Some(init_error_kind) = init_error_kind { + u64_counter!( + "apollo.router.lifecycle.query_planner.init", + "Rust query planner initialization", + 1, + "init.error_kind" = init_error_kind, + "init.is_success" = false + ); + } else { + u64_counter!( + "apollo.router.lifecycle.query_planner.init", + "Rust query planner initialization", + 1, + "init.is_success" = true + ); + } +} + #[cfg(test)] mod tests { use std::fs; @@ -981,13 +1072,12 @@ mod tests { #[test(tokio::test)] async fn federation_versions() { async { - let _planner = BridgeQueryPlanner::new( - include_str!("../testdata/minimal_supergraph.graphql").into(), - Default::default(), - None, - ) - .await - .unwrap(); + let sdl = include_str!("../testdata/minimal_supergraph.graphql"); + let config = Arc::default(); + let schema = Schema::parse(sdl, &config).unwrap(); + let _planner = BridgeQueryPlanner::new(schema.into(), config, None, None) + .await + .unwrap(); assert_gauge!( "apollo.router.supergraph.federation", @@ -999,13 +1089,12 @@ mod tests { .await; async { - let _planner = BridgeQueryPlanner::new( - include_str!("../testdata/minimal_fed2_supergraph.graphql").into(), - Default::default(), - None, - ) - .await - .unwrap(); + let sdl = include_str!("../testdata/minimal_fed2_supergraph.graphql"); + let config = Arc::default(); + let schema = Schema::parse(sdl, &config).unwrap(); + let _planner = BridgeQueryPlanner::new(schema.into(), config, None, None) + .await + .unwrap(); assert_gauge!( "apollo.router.supergraph.federation", @@ -1019,10 +1108,10 @@ mod tests { #[test(tokio::test)] async fn empty_query_plan_should_be_a_planner_error() { - let schema = Schema::parse(EXAMPLE_SCHEMA, &Default::default()).unwrap(); + let schema = Arc::new(Schema::parse(EXAMPLE_SCHEMA, &Default::default()).unwrap()); let query = include_str!("testdata/unknown_introspection_query.graphql"); - let planner = BridgeQueryPlanner::new(EXAMPLE_SCHEMA.to_string(), Default::default(), None) + let planner = BridgeQueryPlanner::new(schema.clone(), Default::default(), None, None) .await .unwrap(); @@ -1121,10 +1210,10 @@ mod tests { configuration.supergraph.introspection = true; let configuration = Arc::new(configuration); - let planner = - BridgeQueryPlanner::new(EXAMPLE_SCHEMA.to_string(), configuration.clone(), None) - .await - .unwrap(); + let schema = Schema::parse(EXAMPLE_SCHEMA, &configuration).unwrap(); + let planner = BridgeQueryPlanner::new(schema.into(), configuration.clone(), None, None) + .await + .unwrap(); macro_rules! 
s { ($query: expr) => { @@ -1429,7 +1518,8 @@ mod tests { configuration.supergraph.introspection = true; let configuration = Arc::new(configuration); - let planner = BridgeQueryPlanner::new(schema.to_string(), configuration.clone(), None) + let schema = Schema::parse(schema, &configuration).unwrap(); + let planner = BridgeQueryPlanner::new(schema.into(), configuration.clone(), None, None) .await .unwrap(); @@ -1572,4 +1662,42 @@ mod tests { "planner" = "js" ); } + + #[test] + fn test_metric_rust_qp_initialization() { + metric_rust_qp_init(None); + assert_counter!( + "apollo.router.lifecycle.query_planner.init", + 1, + "init.is_success" = true + ); + metric_rust_qp_init(Some(UNSUPPORTED_CONTEXT)); + assert_counter!( + "apollo.router.lifecycle.query_planner.init", + 1, + "init.error_kind" = "context", + "init.is_success" = false + ); + metric_rust_qp_init(Some(UNSUPPORTED_OVERRIDES)); + assert_counter!( + "apollo.router.lifecycle.query_planner.init", + 1, + "init.error_kind" = "overrides", + "init.is_success" = false + ); + metric_rust_qp_init(Some(UNSUPPORTED_FED1)); + assert_counter!( + "apollo.router.lifecycle.query_planner.init", + 1, + "init.error_kind" = "fed1", + "init.is_success" = false + ); + metric_rust_qp_init(Some(INTERNAL_INIT_ERROR)); + assert_counter!( + "apollo.router.lifecycle.query_planner.init", + 1, + "init.error_kind" = "internal", + "init.is_success" = false + ); + } } diff --git a/apollo-router/src/query_planner/bridge_query_planner_pool.rs b/apollo-router/src/query_planner/bridge_query_planner_pool.rs index 5da661c4d2..bb75124df1 100644 --- a/apollo-router/src/query_planner/bridge_query_planner_pool.rs +++ b/apollo-router/src/query_planner/bridge_query_planner_pool.rs @@ -1,5 +1,7 @@ use std::collections::HashMap; use std::num::NonZeroUsize; +use std::sync::atomic::AtomicU64; +use std::sync::atomic::Ordering; use std::sync::Arc; use std::time::Instant; @@ -8,6 +10,9 @@ use async_channel::bounded; use async_channel::Sender; use futures::future::BoxFuture; use opentelemetry::metrics::MeterProvider; +use opentelemetry::metrics::ObservableGauge; +use opentelemetry::metrics::Unit; +use opentelemetry_api::metrics::Meter; use router_bridge::planner::Planner; use tokio::sync::oneshot; use tokio::task::JoinSet; @@ -19,6 +24,7 @@ use super::QueryPlanResult; use crate::error::QueryPlannerError; use crate::error::ServiceBuildError; use crate::metrics::meter_provider; +use crate::query_planner::PlannerMode; use crate::services::QueryPlannerRequest; use crate::services::QueryPlannerResponse; use crate::spec::Schema; @@ -28,7 +34,7 @@ static CHANNEL_SIZE: usize = 1_000; #[derive(Clone)] pub(crate) struct BridgeQueryPlannerPool { - planners: Vec>>, + js_planners: Vec>>, sender: Sender<( QueryPlannerRequest, oneshot::Sender>, @@ -36,23 +42,29 @@ pub(crate) struct BridgeQueryPlannerPool { schema: Arc, subgraph_schemas: Arc>>>, _pool_size_gauge: opentelemetry::metrics::ObservableGauge, + v8_heap_used: Arc, + _v8_heap_used_gauge: ObservableGauge, + v8_heap_total: Arc, + _v8_heap_total_gauge: ObservableGauge, } impl BridgeQueryPlannerPool { pub(crate) async fn new( - sdl: String, + schema: Arc, configuration: Arc, size: NonZeroUsize, ) -> Result { - Self::new_from_planners(Default::default(), sdl, configuration, size).await + Self::new_from_planners(Default::default(), schema, configuration, size).await } pub(crate) async fn new_from_planners( - old_planners: Vec>>, - schema: String, + old_js_planners: Vec>>, + schema: Arc, configuration: Arc, size: NonZeroUsize, ) -> Result { + let 
rust_planner = PlannerMode::maybe_rust(&schema, &configuration)?; + let mut join_set = JoinSet::new(); let (sender, receiver) = bounded::<( @@ -60,15 +72,16 @@ impl BridgeQueryPlannerPool { oneshot::Sender>, )>(CHANNEL_SIZE); - let mut old_planners_iterator = old_planners.into_iter(); + let mut old_js_planners_iterator = old_js_planners.into_iter(); (0..size.into()).for_each(|_| { - let sdl = schema.clone(); + let schema = schema.clone(); let configuration = configuration.clone(); + let rust_planner = rust_planner.clone(); - let old_planner = old_planners_iterator.next(); + let old_planner = old_js_planners_iterator.next(); join_set.spawn(async move { - BridgeQueryPlanner::new(sdl, configuration, old_planner).await + BridgeQueryPlanner::new(schema, configuration, old_planner, rust_planner).await }); }); @@ -80,15 +93,6 @@ impl BridgeQueryPlannerPool { bridge_query_planners.push(bridge_query_planner); } - let schema = bridge_query_planners - .first() - .ok_or_else(|| { - ServiceBuildError::QueryPlannerError(QueryPlannerError::PoolProcessing( - "There should be at least 1 Query Planner service in pool".to_string(), - )) - })? - .schema(); - let subgraph_schemas = bridge_query_planners .first() .ok_or_else(|| { @@ -98,7 +102,7 @@ impl BridgeQueryPlannerPool { })? .subgraph_schemas(); - let planners = bridge_query_planners + let planners: Vec<_> = bridge_query_planners .iter() .map(|p| p.planner().clone()) .collect(); @@ -124,23 +128,70 @@ impl BridgeQueryPlannerPool { }); } let sender_for_gauge = sender.clone(); - let pool_size_gauge = meter_provider() - .meter("apollo/router") + let meter = meter_provider().meter("apollo/router"); + let pool_size_gauge = meter .u64_observable_gauge("apollo.router.query_planning.queued") + .with_description("Number of queries waiting to be planned") + .with_unit(Unit::new("query")) .with_callback(move |m| m.observe(sender_for_gauge.len() as u64, &[])) .init(); + let (v8_heap_used, _v8_heap_used_gauge) = Self::create_heap_used_gauge(&meter); + let (v8_heap_total, _v8_heap_total_gauge) = Self::create_heap_total_gauge(&meter); + + // initialize v8 metrics + if let Some(bridge_query_planner) = planners.first().cloned() { + Self::get_v8_metrics( + bridge_query_planner, + v8_heap_used.clone(), + v8_heap_total.clone(), + ) + .await; + } + Ok(Self { - planners, + js_planners: planners, sender, schema, subgraph_schemas, _pool_size_gauge: pool_size_gauge, + v8_heap_used, + _v8_heap_used_gauge, + v8_heap_total, + _v8_heap_total_gauge, }) } + fn create_heap_used_gauge(meter: &Meter) -> (Arc, ObservableGauge) { + let current_heap_used = Arc::new(AtomicU64::new(0)); + let current_heap_used_for_gauge = current_heap_used.clone(); + let heap_used_gauge = meter + .u64_observable_gauge("apollo.router.v8.heap.used") + .with_description("V8 heap used, in bytes") + .with_unit(Unit::new("By")) + .with_callback(move |i| { + i.observe(current_heap_used_for_gauge.load(Ordering::SeqCst), &[]) + }) + .init(); + (current_heap_used, heap_used_gauge) + } + + fn create_heap_total_gauge(meter: &Meter) -> (Arc, ObservableGauge) { + let current_heap_total = Arc::new(AtomicU64::new(0)); + let current_heap_total_for_gauge = current_heap_total.clone(); + let heap_total_gauge = meter + .u64_observable_gauge("apollo.router.v8.heap.total") + .with_description("V8 heap total, in bytes") + .with_unit(Unit::new("By")) + .with_callback(move |i| { + i.observe(current_heap_total_for_gauge.load(Ordering::SeqCst), &[]) + }) + .init(); + (current_heap_total, heap_total_gauge) + } + pub(crate) fn 
planners(&self) -> Vec>> { - self.planners.clone() + self.js_planners.clone() } pub(crate) fn schema(&self) -> Arc { @@ -152,6 +203,18 @@ impl BridgeQueryPlannerPool { ) -> Arc>>> { self.subgraph_schemas.clone() } + + async fn get_v8_metrics( + planner: Arc>, + v8_heap_used: Arc, + v8_heap_total: Arc, + ) { + let metrics = planner.get_heap_statistics().await; + if let Ok(metrics) = metrics { + v8_heap_used.store(metrics.heap_used, Ordering::SeqCst); + v8_heap_total.store(metrics.heap_total, Ordering::SeqCst); + } + } } impl tower::Service for BridgeQueryPlannerPool { @@ -178,6 +241,20 @@ impl tower::Service for BridgeQueryPlannerPool { let (response_sender, response_receiver) = oneshot::channel(); let sender = self.sender.clone(); + let get_metrics_future = + if let Some(bridge_query_planner) = self.js_planners.first().cloned() { + let v8_heap_used = self.v8_heap_used.clone(); + let v8_heap_total = self.v8_heap_total.clone(); + + Some(Self::get_v8_metrics( + bridge_query_planner, + v8_heap_used, + v8_heap_total, + )) + } else { + None + }; + Box::pin(async move { let start = Instant::now(); let _ = sender.send((req, response_sender)).await; @@ -192,7 +269,73 @@ impl tower::Service for BridgeQueryPlannerPool { start.elapsed().as_secs_f64() ); + if let Some(f) = get_metrics_future { + // execute in a separate task to avoid blocking the request + tokio::task::spawn(f); + } + res }) } } + +#[cfg(test)] + +mod tests { + use opentelemetry_sdk::metrics::data::Gauge; + + use super::*; + use crate::metrics::FutureMetricsExt; + use crate::spec::Query; + use crate::Context; + + #[tokio::test] + async fn test_v8_metrics() { + let sdl = include_str!("../testdata/supergraph.graphql"); + let config = Arc::default(); + let schema = Arc::new(Schema::parse(sdl, &config).unwrap()); + + async move { + let mut pool = BridgeQueryPlannerPool::new( + schema.clone(), + config.clone(), + NonZeroUsize::new(2).unwrap(), + ) + .await + .unwrap(); + let query = "query { me { name } }".to_string(); + + let doc = Query::parse_document(&query, None, &schema, &config).unwrap(); + let context = Context::new(); + context.extensions().with_lock(|mut lock| lock.insert(doc)); + + pool.call(QueryPlannerRequest::new(query, None, context)) + .await + .unwrap(); + + let metrics = crate::metrics::collect_metrics(); + let heap_used = metrics.find("apollo.router.v8.heap.used").unwrap(); + let heap_total = metrics.find("apollo.router.v8.heap.total").unwrap(); + + println!( + "got heap_used: {:?}, heap_total: {:?}", + heap_used + .data + .as_any() + .downcast_ref::>() + .unwrap() + .data_points[0] + .value, + heap_total + .data + .as_any() + .downcast_ref::>() + .unwrap() + .data_points[0] + .value + ); + } + .with_metrics() + .await; + } +} diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs index db4923f17c..20b2f75342 100644 --- a/apollo-router/src/query_planner/caching_query_planner.rs +++ b/apollo-router/src/query_planner/caching_query_planner.rs @@ -24,7 +24,9 @@ use tower_service::Service; use tracing::Instrument; use super::fetch::QueryHash; +use crate::cache::estimate_size; use crate::cache::storage::InMemoryCache; +use crate::cache::storage::ValueType; use crate::cache::DeduplicatingCache; use crate::error::CacheResolverError; use crate::error::QueryPlannerError; @@ -60,6 +62,7 @@ pub(crate) enum ConfigMode { // for now use the JS config as it expected to be identical to the Rust one Rust(Arc), Both(Arc), + BothBestEffort(Arc), Js(Arc), } @@ -133,6 
+136,9 @@ where crate::configuration::QueryPlannerMode::Both => { ConfigMode::Both(Arc::new(configuration.js_query_planner_config())) } + crate::configuration::QueryPlannerMode::BothBestEffort => { + ConfigMode::BothBestEffort(Arc::new(configuration.js_query_planner_config())) + } }; Ok(Self { cache, @@ -230,10 +236,13 @@ where } else { cache_keys.len() }; - tracing::info!( - "warming up the query plan cache with {} queries, this might take a while", - capacity - ); + + if capacity > 0 { + tracing::info!( + "warming up the query plan cache with {} queries, this might take a while", + capacity + ); + } // persisted queries are added first because they should get a lower priority in the LRU cache, // since a lot of them may be there to support old clients @@ -684,6 +693,17 @@ pub(crate) struct WarmUpCachingQueryKey { pub(crate) introspection: bool, } +impl ValueType for Result> { + fn estimated_size(&self) -> Option { + match self { + Ok(QueryPlannerContent::Plan { plan }) => Some(plan.estimated_size()), + Ok(QueryPlannerContent::Response { response }) => Some(estimate_size(response)), + Ok(QueryPlannerContent::IntrospectionDisabled) => None, + Err(e) => Some(estimate_size(e)), + } + } +} + #[cfg(test)] mod tests { use mockall::mock; @@ -835,6 +855,7 @@ mod tests { .into(), query: Arc::new(Query::empty()), query_metrics: Default::default(), + estimated_size: Default::default(), }; let qp_content = QueryPlannerContent::Plan { plan: Arc::new(query_plan), diff --git a/apollo-router/src/query_planner/dual_query_planner.rs b/apollo-router/src/query_planner/dual_query_planner.rs index 2360b17703..6a880cf538 100644 --- a/apollo-router/src/query_planner/dual_query_planner.rs +++ b/apollo-router/src/query_planner/dual_query_planner.rs @@ -25,6 +25,8 @@ use crate::query_planner::bridge_query_planner::metric_query_planning_plan_duration; use crate::query_planner::bridge_query_planner::RUST_QP_MODE; use crate::query_planner::convert::convert_root_query_plan_node; use crate::query_planner::render_diff; +use crate::query_planner::rewrites::DataRewrite; +use crate::query_planner::selection::Selection; use crate::query_planner::DeferredNode; use crate::query_planner::PlanNode; use crate::query_planner::Primary; @@ -138,19 +140,20 @@ impl BothModeComparisonJob { (Ok(js_plan), Ok(rust_plan)) => { let js_root_node = &js_plan.query_plan.node; let rust_root_node = convert_root_query_plan_node(rust_plan); - is_matched = opt_plan_node_matches(js_root_node, &rust_root_node); - if is_matched { - tracing::debug!("JS and Rust query plans match{operation_desc}! 🎉"); - } else { - tracing::debug!("JS v.s. Rust query plan mismatch{operation_desc}"); - if let Some(formatted) = &js_plan.formatted_query_plan { + let match_result = opt_plan_node_matches(js_root_node, &rust_root_node); + is_matched = match_result.is_ok(); + match match_result { + Ok(_) => tracing::debug!("JS and Rust query plans match{operation_desc}! 🎉"), + Err(err) => { + tracing::debug!("JS vs. 
Rust query plan mismatch{operation_desc}"); + tracing::debug!("{}", err.full_description()); tracing::debug!( "Diff of formatted plans:\n{}", - render_diff(&diff::lines(formatted, &rust_plan.to_string())) + diff_plan(js_plan, rust_plan) ); + tracing::trace!("JS query plan Debug: {js_root_node:#?}"); + tracing::trace!("Rust query plan Debug: {rust_root_node:#?}"); } - tracing::trace!("JS query plan Debug: {js_root_node:#?}"); - tracing::trace!("Rust query plan Debug: {rust_root_node:#?}"); } } } @@ -168,7 +171,62 @@ impl BothModeComparisonJob { // Specific comparison functions -fn fetch_node_matches(this: &FetchNode, other: &FetchNode) -> bool { +pub struct MatchFailure { + description: String, + backtrace: std::backtrace::Backtrace, +} + +impl MatchFailure { + pub fn description(&self) -> String { + self.description.clone() + } + + pub fn full_description(&self) -> String { + format!("{}\n\nBacktrace:\n{}", self.description, self.backtrace) + } + + fn new(description: String) -> MatchFailure { + MatchFailure { + description, + backtrace: std::backtrace::Backtrace::force_capture(), + } + } + + fn add_description(self: MatchFailure, description: &str) -> MatchFailure { + MatchFailure { + description: format!("{}\n{}", self.description, description), + backtrace: self.backtrace, + } + } +} + +macro_rules! check_match { + ($pred:expr) => { + if !$pred { + return Err(MatchFailure::new(format!( + "mismatch at {}", + stringify!($pred) + ))); + } + }; +} + +macro_rules! check_match_eq { + ($a:expr, $b:expr) => { + if $a != $b { + let message = format!( + "mismatch between {} and {}:\nleft: {:?}\nright: {:?}", + stringify!($a), + stringify!($b), + $a, + $b + ); + return Err(MatchFailure::new(message)); + } + }; +} + +fn fetch_node_matches(this: &FetchNode, other: &FetchNode) -> Result<(), MatchFailure> { let FetchNode { service_name, requires, @@ -183,16 +241,18 @@ fn fetch_node_matches(this: &FetchNode, other: &FetchNode) -> bool { schema_aware_hash: _, // ignored authorization, } = this; - *service_name == other.service_name - && *requires == other.requires - && vec_matches_sorted(variable_usages, &other.variable_usages) - && *operation_kind == other.operation_kind - && *id == other.id - && *input_rewrites == other.input_rewrites - && *output_rewrites == other.output_rewrites - && *context_rewrites == other.context_rewrites - && *authorization == other.authorization - && operation_matches(operation, &other.operation) + + check_match_eq!(*service_name, other.service_name); + check_match_eq!(*operation_kind, other.operation_kind); + check_match_eq!(*id, other.id); + check_match_eq!(*authorization, other.authorization); + check_match!(same_selection_set_sorted(requires, &other.requires)); + check_match!(vec_matches_sorted(variable_usages, &other.variable_usages)); + check_match!(same_rewrites(input_rewrites, &other.input_rewrites)); + check_match!(same_rewrites(output_rewrites, &other.output_rewrites)); + check_match!(same_rewrites(context_rewrites, &other.context_rewrites)); + operation_matches(operation, &other.operation)?; + Ok(()) } fn subscription_primary_matches(this: &SubscriptionNode, other: &SubscriptionNode) -> bool { @@ -211,22 +271,27 @@ fn subscription_primary_matches(this: &SubscriptionNode, other: &SubscriptionNod && *operation_kind == other.operation_kind && *input_rewrites == other.input_rewrites && *output_rewrites == other.output_rewrites - && operation_matches(operation, &other.operation) + && operation_matches(operation, &other.operation).is_ok() } -fn operation_matches(this: 
&SubgraphOperation, other: &SubgraphOperation) -> bool { +fn operation_matches( + this: &SubgraphOperation, + other: &SubgraphOperation, +) -> Result<(), MatchFailure> { let this_ast = match ast::Document::parse(this.as_serialized(), "this_operation.graphql") { Ok(document) => document, Err(_) => { - // TODO: log error - return false; + return Err(MatchFailure::new( + "Failed to parse this operation".to_string(), + )); } }; let other_ast = match ast::Document::parse(other.as_serialized(), "other_operation.graphql") { Ok(document) => document, Err(_) => { - // TODO: log error - return false; + return Err(MatchFailure::new( + "Failed to parse other operation".to_string(), + )); } }; same_ast_document(&this_ast, &other_ast) @@ -235,20 +300,49 @@ fn operation_matches(this: &SubgraphOperation, other: &SubgraphOperation) -> boo // The rest is calling the comparison functions above instead of `PartialEq`, // but otherwise behave just like `PartialEq`: -// Note: Reexported under `apollo_compiler::_private` -pub fn plan_matches(js_plan: &QueryPlanResult, rust_plan: &QueryPlan) -> bool { +// Note: Reexported under `apollo_router::_private` +pub fn plan_matches(js_plan: &QueryPlanResult, rust_plan: &QueryPlan) -> Result<(), MatchFailure> { let js_root_node = &js_plan.query_plan.node; let rust_root_node = convert_root_query_plan_node(rust_plan); opt_plan_node_matches(js_root_node, &rust_root_node) } +pub fn diff_plan(js_plan: &QueryPlanResult, rust_plan: &QueryPlan) -> String { + let js_root_node = &js_plan.query_plan.node; + let rust_root_node = convert_root_query_plan_node(rust_plan); + + match (js_root_node, rust_root_node) { + (None, None) => String::from(""), + (None, Some(rust)) => { + let rust = &format!("{rust:#?}"); + let differences = diff::lines("", rust); + render_diff(&differences) + } + (Some(js), None) => { + let js = &format!("{js:#?}"); + let differences = diff::lines(js, ""); + render_diff(&differences) + } + (Some(js), Some(rust)) => { + let rust = &format!("{rust:#?}"); + let js = &format!("{js:#?}"); + let differences = diff::lines(js, rust); + render_diff(&differences) + } + } +} + fn opt_plan_node_matches( this: &Option>, other: &Option>, -) -> bool { +) -> Result<(), MatchFailure> { match (this, other) { - (None, None) => true, - (None, Some(_)) | (Some(_), None) => false, + (None, None) => Ok(()), + (None, Some(_)) | (Some(_), None) => Err(MatchFailure::new(format!( + "mismatch at opt_plan_node_matches\nleft: {:?}\nright: {:?}", + this.is_some(), + other.is_some() + ))), (Some(this), Some(other)) => plan_node_matches(this.borrow(), other.borrow()), } } @@ -258,6 +352,22 @@ fn vec_matches(this: &[T], other: &[T], item_matches: impl Fn(&T, &T) -> bool && std::iter::zip(this, other).all(|(this, other)| item_matches(this, other)) } +fn vec_matches_result( + this: &[T], + other: &[T], + item_matches: impl Fn(&T, &T) -> Result<(), MatchFailure>, +) -> Result<(), MatchFailure> { + check_match_eq!(this.len(), other.len()); + std::iter::zip(this, other) + .enumerate() + .try_fold((), |_acc, (index, (this, other))| { + item_matches(this, other) + .map_err(|err| err.add_description(&format!("under item[{}]", index))) + })?; + assert!(vec_matches(this, other, |a, b| item_matches(a, b).is_ok())); + Ok(()) +} + fn vec_matches_sorted(this: &[T], other: &[T]) -> bool { let mut this_sorted = this.to_owned(); let mut other_sorted = other.to_owned(); @@ -293,16 +403,65 @@ fn vec_matches_as_set(this: &[T], other: &[T], item_matches: impl Fn(&T, &T) }) } -fn plan_node_matches(this: &PlanNode, 
other: &PlanNode) -> bool { +fn vec_matches_result_as_set( + this: &[T], + other: &[T], + item_matches: impl Fn(&T, &T) -> bool, +) -> Result<(), MatchFailure> { + // Set-inclusion test in both directions + check_match_eq!(this.len(), other.len()); + for (index, this_node) in this.iter().enumerate() { + if !other + .iter() + .any(|other_node| item_matches(this_node, other_node)) + { + return Err(MatchFailure::new(format!( + "mismatched set: missing item[{}]", + index + ))); + } + } + for other_node in other.iter() { + if !this + .iter() + .any(|this_node| item_matches(this_node, other_node)) + { + return Err(MatchFailure::new( + "mismatched set: extra item found".to_string(), + )); + } + } + assert!(vec_matches_as_set(this, other, item_matches)); + Ok(()) +} + +fn option_to_string(name: Option) -> String { + name.map_or_else(|| "".to_string(), |name| name.to_string()) +} + +fn plan_node_matches(this: &PlanNode, other: &PlanNode) -> Result<(), MatchFailure> { match (this, other) { (PlanNode::Sequence { nodes: this }, PlanNode::Sequence { nodes: other }) => { - vec_matches(this, other, plan_node_matches) + vec_matches_result(this, other, plan_node_matches) + .map_err(|err| err.add_description("under Sequence node"))?; } (PlanNode::Parallel { nodes: this }, PlanNode::Parallel { nodes: other }) => { - vec_matches_as_set(this, other, plan_node_matches) + vec_matches_result_as_set(this, other, |a, b| plan_node_matches(a, b).is_ok()) + .map_err(|err| err.add_description("under Parallel node"))?; + } + (PlanNode::Fetch(this), PlanNode::Fetch(other)) => { + fetch_node_matches(this, other).map_err(|err| { + err.add_description(&format!( + "under Fetch node (operation name: {})", + option_to_string(this.operation_name.as_ref()) + )) + })?; + } + (PlanNode::Flatten(this), PlanNode::Flatten(other)) => { + flatten_node_matches(this, other).map_err(|err| { + err.add_description(&format!("under Flatten node (path: {})", this.path)) + })?; } - (PlanNode::Fetch(this), PlanNode::Fetch(other)) => fetch_node_matches(this, other), - (PlanNode::Flatten(this), PlanNode::Flatten(other)) => flatten_node_matches(this, other), ( PlanNode::Defer { primary, deferred }, PlanNode::Defer { @@ -310,8 +469,8 @@ fn plan_node_matches(this: &PlanNode, other: &PlanNode) -> bool { deferred: other_deferred, }, ) => { - defer_primary_node_matches(primary, other_primary) - && vec_matches(deferred, other_deferred, deferred_node_matches) + check_match!(defer_primary_node_matches(primary, other_primary)); + check_match!(vec_matches(deferred, other_deferred, deferred_node_matches)); } ( PlanNode::Subscription { primary, rest }, @@ -320,8 +479,9 @@ fn plan_node_matches(this: &PlanNode, other: &PlanNode) -> bool { rest: other_rest, }, ) => { - subscription_primary_matches(primary, other_primary) - && opt_plan_node_matches(rest, other_rest) + check_match!(subscription_primary_matches(primary, other_primary)); + opt_plan_node_matches(rest, other_rest) + .map_err(|err| err.add_description("under Subscription"))?; } ( PlanNode::Condition { @@ -335,17 +495,25 @@ fn plan_node_matches(this: &PlanNode, other: &PlanNode) -> bool { else_clause: other_else_clause, }, ) => { - condition == other_condition - && opt_plan_node_matches(if_clause, other_if_clause) - && opt_plan_node_matches(else_clause, other_else_clause) + check_match_eq!(condition, other_condition); + opt_plan_node_matches(if_clause, other_if_clause) + .map_err(|err| err.add_description("under Condition node (if_clause)"))?; + opt_plan_node_matches(else_clause, other_else_clause) + 
.map_err(|err| err.add_description("under Condition node (else_clause)"))?; } - _ => false, - } + _ => { + return Err(MatchFailure::new(format!( + "mismatched plan node types\nleft: {:?}\nright: {:?}", + this, other + ))) + } + }; + Ok(()) } fn defer_primary_node_matches(this: &Primary, other: &Primary) -> bool { let Primary { subselection, node } = this; - *subselection == other.subselection && opt_plan_node_matches(node, &other.node) + *subselection == other.subselection && opt_plan_node_matches(node, &other.node).is_ok() } fn deferred_node_matches(this: &DeferredNode, other: &DeferredNode) -> bool { @@ -360,18 +528,101 @@ fn deferred_node_matches(this: &DeferredNode, other: &DeferredNode) -> bool { && *label == other.label && *query_path == other.query_path && *subselection == other.subselection - && opt_plan_node_matches(node, &other.node) + && opt_plan_node_matches(node, &other.node).is_ok() } -fn flatten_node_matches(this: &FlattenNode, other: &FlattenNode) -> bool { +fn flatten_node_matches(this: &FlattenNode, other: &FlattenNode) -> Result<(), MatchFailure> { let FlattenNode { path, node } = this; - *path == other.path && plan_node_matches(node, &other.node) + check_match_eq!(*path, other.path); + plan_node_matches(node, &other.node) +} + +// Copied and modified from `apollo_federation::operation::SelectionKey` +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +enum SelectionKey { + Field { + /// The field alias (if specified) or field name in the resulting selection set. + response_name: Name, + directives: ast::DirectiveList, + }, + FragmentSpread { + /// The name of the fragment. + fragment_name: Name, + directives: ast::DirectiveList, + }, + InlineFragment { + /// The optional type condition of the fragment. + type_condition: Option, + directives: ast::DirectiveList, + }, +} + +fn get_selection_key(selection: &Selection) -> SelectionKey { + match selection { + Selection::Field(field) => SelectionKey::Field { + response_name: field.response_name().clone(), + directives: Default::default(), + }, + Selection::InlineFragment(fragment) => SelectionKey::InlineFragment { + type_condition: fragment.type_condition.clone(), + directives: Default::default(), + }, + } +} + +fn hash_value(x: &T) -> u64 { + let mut hasher = DefaultHasher::new(); + x.hash(&mut hasher); + hasher.finish() +} + +fn hash_selection_key(selection: &Selection) -> u64 { + hash_value(&get_selection_key(selection)) +} + +fn same_selection(x: &Selection, y: &Selection) -> bool { + let x_key = get_selection_key(x); + let y_key = get_selection_key(y); + if x_key != y_key { + return false; + } + let x_selections = x.selection_set(); + let y_selections = y.selection_set(); + match (x_selections, y_selections) { + (Some(x), Some(y)) => same_selection_set_sorted(x, y), + (None, None) => true, + _ => false, + } +} + +fn same_selection_set_sorted(x: &[Selection], y: &[Selection]) -> bool { + fn sorted_by_selection_key(s: &[Selection]) -> Vec<&Selection> { + let mut sorted: Vec<&Selection> = s.iter().collect(); + sorted.sort_by_key(|x| hash_selection_key(x)); + sorted + } + + if x.len() != y.len() { + return false; + } + sorted_by_selection_key(x) + .into_iter() + .zip(sorted_by_selection_key(y)) + .all(|(x, y)| same_selection(x, y)) +} + +fn same_rewrites(x: &Option>, y: &Option>) -> bool { + match (x, y) { + (None, None) => true, + (Some(x), Some(y)) => vec_matches_as_set(x, y, |a, b| a == b), + _ => false, + } } //================================================================================================== // AST comparison 
functions -fn same_ast_document(x: &ast::Document, y: &ast::Document) -> bool { +fn same_ast_document(x: &ast::Document, y: &ast::Document) -> Result<(), MatchFailure> { fn split_definitions( doc: &ast::Document, ) -> ( @@ -403,57 +654,57 @@ fn same_ast_document(x: &ast::Document, y: &ast::Document) -> bool { "Different number of operation definitions" ); - x_ops.len() == y_ops.len() - && x_ops - .iter() - .zip(y_ops.iter()) - .all(|(x_op, y_op)| same_ast_operation_definition(x_op, y_op)) - && x_frags.len() == y_frags.len() - && x_frags - .iter() - .zip(y_frags.iter()) - .all(|(x_frag, y_frag)| same_ast_fragment_definition(x_frag, y_frag)) + check_match_eq!(x_ops.len(), y_ops.len()); + x_ops + .iter() + .zip(y_ops.iter()) + .try_fold((), |_, (x_op, y_op)| { + same_ast_operation_definition(x_op, y_op) + .map_err(|err| err.add_description("under operation definition")) + })?; + check_match_eq!(x_frags.len(), y_frags.len()); + x_frags + .iter() + .zip(y_frags.iter()) + .try_fold((), |_, (x_frag, y_frag)| { + same_ast_fragment_definition(x_frag, y_frag) + .map_err(|err| err.add_description("under fragment definition")) + })?; + Ok(()) } fn same_ast_operation_definition( x: &ast::OperationDefinition, y: &ast::OperationDefinition, -) -> bool { +) -> Result<(), MatchFailure> { // Note: Operation names are ignored, since parallel fetches may have different names. - x.operation_type == y.operation_type - && vec_matches_sorted_by(&x.variables, &y.variables, |x, y| x.name.cmp(&y.name)) - && x.directives == y.directives - && same_ast_selection_set_sorted(&x.selection_set, &y.selection_set) + check_match_eq!(x.operation_type, y.operation_type); + check_match!(vec_matches_sorted_by(&x.variables, &y.variables, |x, y| x + .name + .cmp(&y.name))); + check_match_eq!(x.directives, y.directives); + check_match!(same_ast_selection_set_sorted( + &x.selection_set, + &y.selection_set + )); + Ok(()) } -fn same_ast_fragment_definition(x: &ast::FragmentDefinition, y: &ast::FragmentDefinition) -> bool { - x.name == y.name - && x.type_condition == y.type_condition - && x.directives == y.directives - && same_ast_selection_set_sorted(&x.selection_set, &y.selection_set) +fn same_ast_fragment_definition( + x: &ast::FragmentDefinition, + y: &ast::FragmentDefinition, +) -> Result<(), MatchFailure> { + check_match_eq!(x.name, y.name); + check_match_eq!(x.type_condition, y.type_condition); + check_match_eq!(x.directives, y.directives); + check_match!(same_ast_selection_set_sorted( + &x.selection_set, + &y.selection_set + )); + Ok(()) } -// Copied and modified from `apollo_federation::operation::SelectionKey` -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub(crate) enum SelectionKey { - Field { - /// The field alias (if specified) or field name in the resulting selection set. - response_name: Name, - directives: ast::DirectiveList, - }, - FragmentSpread { - /// The name of the fragment. - fragment_name: Name, - directives: ast::DirectiveList, - }, - InlineFragment { - /// The optional type condition of the fragment. - type_condition: Option, - directives: ast::DirectiveList, - }, -} - -fn get_selection_key(selection: &ast::Selection) -> SelectionKey { +fn get_ast_selection_key(selection: &ast::Selection) -> SelectionKey { match selection { ast::Selection::Field(field) => SelectionKey::Field { response_name: field.response_name().clone(), @@ -473,7 +724,7 @@ fn get_selection_key(selection: &ast::Selection) -> SelectionKey { use std::ops::Not; /// Get the sub-selections of a selection. 
-fn get_selection_set(selection: &ast::Selection) -> Option<&Vec> { +fn get_ast_selection_set(selection: &ast::Selection) -> Option<&Vec> { match selection { ast::Selection::Field(field) => field .selection_set @@ -486,13 +737,13 @@ fn get_selection_set(selection: &ast::Selection) -> Option<&Vec> } fn same_ast_selection(x: &ast::Selection, y: &ast::Selection) -> bool { - let x_key = get_selection_key(x); - let y_key = get_selection_key(y); + let x_key = get_ast_selection_key(x); + let y_key = get_ast_selection_key(y); if x_key != y_key { return false; } - let x_selections = get_selection_set(x); - let y_selections = get_selection_set(y); + let x_selections = get_ast_selection_set(x); + let y_selections = get_ast_selection_set(y); match (x_selections, y_selections) { (Some(x), Some(y)) => same_ast_selection_set_sorted(x, y), (None, None) => true, @@ -500,20 +751,14 @@ fn same_ast_selection(x: &ast::Selection, y: &ast::Selection) -> bool { } } -fn hash_value(x: &T) -> u64 { - let mut hasher = DefaultHasher::new(); - x.hash(&mut hasher); - hasher.finish() -} - -fn hash_selection_key(selection: &ast::Selection) -> u64 { - hash_value(&get_selection_key(selection)) +fn hash_ast_selection_key(selection: &ast::Selection) -> u64 { + hash_value(&get_ast_selection_key(selection)) } fn same_ast_selection_set_sorted(x: &[ast::Selection], y: &[ast::Selection]) -> bool { fn sorted_by_selection_key(s: &[ast::Selection]) -> Vec<&ast::Selection> { let mut sorted: Vec<&ast::Selection> = s.iter().collect(); - sorted.sort_by_key(|x| hash_selection_key(x)); + sorted.sort_by_key(|x| hash_ast_selection_key(x)); sorted } @@ -536,7 +781,7 @@ mod ast_comparison_tests { let op_y = r#"query($qv1: Int!, $qv2: String!) { x(arg1: $qv1, arg2: $qv2) }"#; let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y)); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); } #[test] @@ -553,7 +798,7 @@ mod ast_comparison_tests { "#; let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y)); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); } #[test] @@ -562,7 +807,7 @@ mod ast_comparison_tests { let op_y = r#"{ y x { z w } }"#; let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y)); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); } #[test] @@ -571,6 +816,6 @@ mod ast_comparison_tests { let op_y = r#"{ q { ...f1 ...f2 } } fragment f2 on T { w z } fragment f1 on T { x y }"#; let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y)); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); } } diff --git a/apollo-router/src/query_planner/fetch.rs b/apollo-router/src/query_planner/fetch.rs index 05b871e60e..47069283ca 100644 --- a/apollo-router/src/query_planner/fetch.rs +++ b/apollo-router/src/query_planner/fetch.rs @@ -582,6 +582,7 @@ impl FetchNode { errors.push(error); } } else { + error.path = Some(current_dir.clone()); errors.push(error); } } @@ -639,13 +640,17 @@ impl FetchNode { .errors .into_iter() .map(|error| { - let path = error.path.as_ref().map(|path| { - Path::from_iter(current_slice.iter().chain(path.iter()).cloned()) - }); + let path = error + .path + 
.as_ref() + .map(|path| { + Path::from_iter(current_slice.iter().chain(path.iter()).cloned()) + }) + .unwrap_or_else(|| current_dir.clone()); Error { locations: error.locations, - path, + path: Some(path), message: error.message, extensions: error.extensions, } diff --git a/apollo-router/src/query_planner/plan.rs b/apollo-router/src/query_planner/plan.rs index bf4471e23b..447adb7ba7 100644 --- a/apollo-router/src/query_planner/plan.rs +++ b/apollo-router/src/query_planner/plan.rs @@ -1,3 +1,5 @@ +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; use std::sync::Arc; use apollo_compiler::validation::Valid; @@ -9,6 +11,7 @@ use serde::Serialize; pub(crate) use self::fetch::OperationKind; use super::fetch; use super::subscription::SubscriptionNode; +use crate::cache::estimate_size; use crate::configuration::Batching; use crate::error::CacheResolverError; use crate::error::ValidationErrors; @@ -42,6 +45,10 @@ pub struct QueryPlan { pub(crate) formatted_query_plan: Option>, pub(crate) query: Arc, pub(crate) query_metrics: OperationLimits, + + /// The estimated size in bytes of the query plan + #[serde(default)] + pub(crate) estimated_size: Arc, } /// This default impl is useful for test users @@ -64,6 +71,7 @@ impl QueryPlan { formatted_query_plan: Default::default(), query: Arc::new(Query::empty()), query_metrics: Default::default(), + estimated_size: Default::default(), } } } @@ -89,6 +97,14 @@ impl QueryPlan { self.root .query_hashes(batching_config, operation, variables, &self.query) } + + pub(crate) fn estimated_size(&self) -> usize { + if self.estimated_size.load(Ordering::SeqCst) == 0 { + self.estimated_size + .store(estimate_size(self), Ordering::SeqCst); + } + self.estimated_size.load(Ordering::SeqCst) + } } /// Query plans are composed of a set of nodes. @@ -607,3 +623,17 @@ pub(crate) struct DeferredNode { pub(crate) struct Depends { pub(crate) id: String, } + +#[cfg(test)] +mod test { + use crate::query_planner::QueryPlan; + + #[test] + fn test_estimated_size() { + let query_plan = QueryPlan::fake_builder().build(); + let size1 = query_plan.estimated_size(); + let size2 = query_plan.estimated_size(); + assert!(size1 > 0); + assert_eq!(size1, size2); + } +} diff --git a/apollo-router/src/query_planner/selection.rs b/apollo-router/src/query_planner/selection.rs index b6dd46ffa4..e2c5e9b013 100644 --- a/apollo-router/src/query_planner/selection.rs +++ b/apollo-router/src/query_planner/selection.rs @@ -23,6 +23,17 @@ pub(crate) enum Selection { InlineFragment(InlineFragment), } +impl Selection { + pub(crate) fn selection_set(&self) -> Option<&[Selection]> { + match self { + Selection::Field(Field { selections, .. }) => selections.as_deref(), + Selection::InlineFragment(InlineFragment { selections, .. }) => { + Some(selections.as_slice()) + } + } + } +} + /// The field that is used #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(rename_all = "camelCase")] @@ -39,6 +50,13 @@ pub(crate) struct Field { pub(crate) selections: Option>, } +impl Field { + // Mirroring `apollo_compiler::Field::response_name` + pub(crate) fn response_name(&self) -> &Name { + self.alias.as_ref().unwrap_or(&self.name) + } +} + /// An inline fragment. 
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] #[serde(rename_all = "camelCase")] diff --git a/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap b/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap index d49c351866..16ba934103 100644 --- a/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap +++ b/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap @@ -15,7 +15,7 @@ Fetch( output_rewrites: None, context_rewrites: None, schema_aware_hash: QueryHash( - "a4ab3ffe0fd7863aea8cd1e85d019d2c64ec0351d62f9759bed3c9dc707ea315", + "5c5036eef33484e505dd5a8666fd0a802e60d830964a4dbbf662526398563ffd", ), authorization: CacheKeyMetadata { is_authenticated: false, diff --git a/apollo-router/src/query_planner/tests.rs b/apollo-router/src/query_planner/tests.rs index fd7fb6d8b6..cfd44d6d08 100644 --- a/apollo-router/src/query_planner/tests.rs +++ b/apollo-router/src/query_planner/tests.rs @@ -87,6 +87,7 @@ async fn mock_subgraph_service_withf_panics_should_be_reported_as_service_closed referenced_fields_by_type: Default::default(), } .into(), + estimated_size: Default::default(), }; let mut mock_products_service = plugin::test::MockSubgraphService::new(); @@ -142,6 +143,7 @@ async fn fetch_includes_operation_name() { .into(), query: Arc::new(Query::empty()), query_metrics: Default::default(), + estimated_size: Default::default(), }; let succeeded: Arc = Default::default(); @@ -202,6 +204,7 @@ async fn fetch_makes_post_requests() { .into(), query: Arc::new(Query::empty()), query_metrics: Default::default(), + estimated_size: Default::default(), }; let succeeded: Arc = Default::default(); @@ -329,7 +332,8 @@ async fn defer() { referenced_fields_by_type: Default::default(), }.into(), query: Arc::new(Query::empty()), - query_metrics: Default::default() + query_metrics: Default::default(), + estimated_size: Default::default(), }; let mut mock_x_service = plugin::test::MockSubgraphService::new(); @@ -460,6 +464,7 @@ async fn defer_if_condition() { ), formatted_query_plan: None, query_metrics: Default::default(), + estimated_size: Default::default(), }; let mocked_accounts = MockSubgraph::builder() @@ -642,6 +647,7 @@ async fn dependent_mutations() { .into(), query: Arc::new(Query::empty()), query_metrics: Default::default(), + estimated_size: Default::default(), }; let mut mock_a_service = plugin::test::MockSubgraphService::new(); @@ -1826,6 +1832,7 @@ fn broken_plan_does_not_panic() { .into(), query: Arc::new(Query::empty()), query_metrics: Default::default(), + estimated_size: Default::default(), }; let subgraph_schema = apollo_compiler::Schema::parse_and_validate(subgraph_schema, "").unwrap(); let mut subgraph_schemas = HashMap::new(); diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index fe6dca8fb8..4a5ce3f888 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -2,7 +2,6 @@ use std::collections::HashMap; use std::io; use std::sync::Arc; -use apollo_compiler::schema::ExtendedType; use apollo_compiler::validation::Valid; use axum::response::IntoResponse; use http::StatusCode; @@ -52,9 +51,6 @@ use crate::spec::Schema; use crate::ListenAddr; pub(crate) const STARTING_SPAN_NAME: &str = "starting"; -pub(crate) const OVERRIDE_LABEL_ARG_NAME: &str = "overrideLabel"; 
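The new `QueryPlan::estimated_size` above lazily memoizes a deep-size estimate in an `AtomicUsize`, with zero doubling as the "not yet computed" sentinel. A minimal, self-contained sketch of the same pattern follows; `compute_size` is a stand-in for the router's internal `crate::cache::estimate_size`, and the race between the load and the store is benign because every thread would store the same deterministic value.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

struct Cached {
    // Zero doubles as the "unset" marker, mirroring the diff above.
    estimated_size: AtomicUsize,
}

impl Cached {
    fn estimated_size(&self) -> usize {
        // Benign race: two threads may both observe 0 and both compute,
        // but they store the same value, so the result is stable.
        if self.estimated_size.load(Ordering::SeqCst) == 0 {
            self.estimated_size.store(compute_size(), Ordering::SeqCst);
        }
        self.estimated_size.load(Ordering::SeqCst)
    }
}

// Stand-in for `crate::cache::estimate_size`; assumed deterministic and
// non-zero for any real plan.
fn compute_size() -> usize {
    1024
}

fn main() {
    let c = Cached { estimated_size: AtomicUsize::new(0) };
    let first = c.estimated_size();
    assert_eq!(first, c.estimated_size()); // memoized after the first call
}
```

This is also why the `test_estimated_size` test above can assert both that the size is non-zero and that repeated calls agree.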
-pub(crate) const CONTEXT_DIRECTIVE: &str = "context"; -pub(crate) const JOIN_FIELD: &str = "join__field"; #[derive(Clone)] /// A path and a handler to be exposed as a web_endpoint for plugins @@ -141,7 +137,7 @@ pub(crate) trait RouterSuperServiceFactory: Send + Sync + 'static { &'a mut self, is_telemetry_disabled: bool, configuration: Arc, - schema: String, + schema: Arc, previous_router: Option<&'a Self::RouterFactory>, extra_plugins: Option)>>, ) -> Result; @@ -159,7 +155,7 @@ impl RouterSuperServiceFactory for YamlRouterFactory { &'a mut self, _is_telemetry_disabled: bool, configuration: Arc, - schema: String, + schema: Arc, previous_router: Option<&'a Self::RouterFactory>, extra_plugins: Option)>>, ) -> Result { @@ -179,17 +175,13 @@ impl RouterSuperServiceFactory for YamlRouterFactory { .get("telemetry") .cloned(); if let Some(plugin_config) = &mut telemetry_config { - inject_schema_id(Some(&Schema::schema_id(&schema)), plugin_config); + inject_schema_id(Some(&schema.schema_id), plugin_config); match factory .create_instance( PluginInit::builder() .config(plugin_config.clone()) - .supergraph_sdl(Arc::new(schema.clone())) - .supergraph_schema(Arc::new( - apollo_compiler::validation::Valid::assume_valid( - apollo_compiler::Schema::new(), - ), - )) + .supergraph_sdl(schema.raw_sdl.clone()) + .supergraph_schema(Arc::new(schema.supergraph_schema().clone())) .notify(configuration.notify.clone()) .build(), ) @@ -227,7 +219,7 @@ impl YamlRouterFactory { async fn inner_create<'a>( &'a mut self, configuration: Arc, - schema: String, + schema: Arc, previous_router: Option<&'a RouterCreator>, initial_telemetry_plugin: Option>, extra_plugins: Option)>>, @@ -242,13 +234,6 @@ impl YamlRouterFactory { ) .await?; - // Don't let the router start in experimental_query_planner_mode and - // unimplemented Rust QP features. 
- can_use_with_experimental_query_planner( - configuration.clone(), - supergraph_creator.schema(), - )?; - // Instantiate the parser here so we can use it to warm up the planner below let query_analysis_layer = QueryAnalysisLayer::new(supergraph_creator.schema(), Arc::clone(&configuration)).await; @@ -302,7 +287,7 @@ impl YamlRouterFactory { pub(crate) async fn inner_create_supergraph<'a>( &'a mut self, configuration: Arc, - schema: String, + schema: Arc, previous_supergraph: Option<&'a SupergraphCreator>, initial_telemetry_plugin: Option>, extra_plugins: Option)>>, @@ -339,7 +324,7 @@ impl YamlRouterFactory { }; let schema_changed = previous_supergraph - .map(|supergraph_creator| supergraph_creator.schema().raw_sdl.as_ref() == &schema) + .map(|supergraph_creator| supergraph_creator.schema().raw_sdl == schema.raw_sdl) .unwrap_or_default(); let config_changed = previous_supergraph @@ -448,8 +433,7 @@ pub(crate) async fn create_subgraph_services( shaping.enable_subgraph_http2(name), )?; - let http_service_factory = - HttpClientServiceFactory::new(Arc::new(http_service), plugins.clone()); + let http_service_factory = HttpClientServiceFactory::new(http_service, plugins.clone()); let subgraph_service = shaping.subgraph_service_internal( name, @@ -519,16 +503,11 @@ fn load_certs(certificates: &str) -> io::Result> { /// not meant to be used directly pub async fn create_test_service_factory_from_yaml(schema: &str, configuration: &str) { let config: Configuration = serde_yaml::from_str(configuration).unwrap(); + let schema = Arc::new(Schema::parse(schema, &config).unwrap()); let is_telemetry_disabled = false; let service = YamlRouterFactory - .create( - is_telemetry_disabled, - Arc::new(config), - schema.to_string(), - None, - None, - ) + .create(is_telemetry_disabled, Arc::new(config), schema, None, None) .await; assert_eq!( service.map(|_| ()).unwrap_err().to_string().as_str(), @@ -542,6 +521,40 @@ caused by ); } +#[allow(clippy::too_many_arguments)] +pub(crate) async fn add_plugin( + name: String, + factory: &PluginFactory, + plugin_config: &Value, + schema: Arc, + supergraph_schema: Arc>, + subgraph_schemas: Arc>>>, + notify: &crate::notification::Notify, + plugin_instances: &mut Plugins, + errors: &mut Vec, +) { + match factory + .create_instance( + PluginInit::builder() + .config(plugin_config.clone()) + .supergraph_sdl(schema) + .supergraph_schema(supergraph_schema) + .subgraph_schemas(subgraph_schemas) + .notify(notify.clone()) + .build(), + ) + .await + { + Ok(plugin) => { + let _ = plugin_instances.insert(name, plugin); + } + Err(err) => errors.push(ConfigurationError::PluginConfiguration { + plugin: name, + error: err.to_string(), + }), + } +} + pub(crate) async fn create_plugins( configuration: &Configuration, schema: &Schema, @@ -575,26 +588,18 @@ pub(crate) async fn create_plugins( // Use function-like macros to avoid borrow conflicts of captures macro_rules! 
add_plugin { ($name: expr, $factory: expr, $plugin_config: expr) => {{ - match $factory - .create_instance( - PluginInit::builder() - .config($plugin_config) - .supergraph_sdl(schema.as_string().clone()) - .supergraph_schema(supergraph_schema.clone()) - .subgraph_schemas(subgraph_schemas.clone()) - .notify(configuration.notify.clone()) - .build(), - ) - .await - { - Ok(plugin) => { - let _ = plugin_instances.insert($name, plugin); - } - Err(err) => errors.push(ConfigurationError::PluginConfiguration { - plugin: $name, - error: err.to_string(), - }), - } + add_plugin( + $name, + $factory, + &$plugin_config, + schema.as_string().clone(), + supergraph_schema.clone(), + subgraph_schemas.clone(), + &configuration.notify.clone(), + &mut plugin_instances, + &mut errors, + ) + .await; }}; } @@ -602,7 +607,6 @@ pub(crate) async fn create_plugins( ($name: literal, $opt_plugin_config: expr) => {{ let name = concat!("apollo.", $name); let span = tracing::info_span!(concat!("plugin: ", "apollo.", $name)); - async { let factory = apollo_plugin_factories .remove(name) @@ -696,7 +700,7 @@ pub(crate) async fn create_plugins( // This relative ordering is documented in `docs/source/customizations/native.mdx`: add_optional_apollo_plugin!("rhai"); add_optional_apollo_plugin!("coprocessor"); - add_optional_apollo_plugin!("preview_demand_control"); + add_optional_apollo_plugin!("demand_control"); add_user_plugins!(); // Macros above remove from `apollo_plugin_factories`, so anything left at the end @@ -756,107 +760,6 @@ fn inject_schema_id(schema_id: Option<&str>, configuration: &mut Value) { } } -// The Rust QP has not yet implemented setContext -// (`@context` directives), progressive overrides, and it -// doesn't support fed v1 *supergraphs*. -// -// If users are using the Rust QP as standalone (`new`) or in comparison mode (`both`), -// fail to start up the router emitting an error. -fn can_use_with_experimental_query_planner( - configuration: Arc, - schema: Arc, -) -> Result<(), ConfigurationError> { - match configuration.experimental_query_planner_mode { - crate::configuration::QueryPlannerMode::New - | crate::configuration::QueryPlannerMode::Both => { - // We have a *progressive* override when `join__directive` has a - // non-null value for `overrideLabel` field. - // - // This looks at object types' fields and their directive - // applications, looking specifically for `@join__direcitve` - // arguments list. 
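The guard being deleted here detected progressive overrides, i.e. a `@join__field` application whose `overrideLabel` argument is non-null (the removal continues below). For reference, a self-contained sketch of that detection against raw SDL, using only apollo-compiler AST calls that appear elsewhere in this diff (`ast::Document::parse`, `get_all`, `argument_by_name`, `is_null`); the SDL fragment is made up for illustration.

```rust
use apollo_compiler::ast;

// Detect a progressive override: any object field carrying a
// `@join__field` application whose `overrideLabel` argument is non-null.
fn has_progressive_override(sdl: &str) -> bool {
    let doc = match ast::Document::parse(sdl, "supergraph.graphql") {
        Ok(doc) => doc,
        Err(_) => return false,
    };
    doc.definitions.iter().any(|def| match def {
        ast::Definition::ObjectTypeDefinition(obj) => obj.fields.iter().any(|field| {
            field
                .directives
                .get_all("join__field")
                .filter_map(|d| d.argument_by_name("overrideLabel"))
                .any(|value| !value.is_null())
        }),
        _ => false,
    })
}

fn main() {
    let sdl = r#"
        type Product {
          id: ID!
          price: Int
            @join__field(graph: B, override: "A", overrideLabel: "percent(10)")
        }
    "#;
    assert!(has_progressive_override(sdl));
}
```

The detection is removed together with the `OVERRIDE_LABEL_ARG_NAME` and `JOIN_FIELD` constants above, which existed only to serve it.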
- let has_progressive_overrides = schema - .supergraph_schema() - .types - .values() - .filter_map(|extended_type| { - // The override label args can be only on ObjectTypes - if let ExtendedType::Object(object_type) = extended_type { - Some(object_type) - } else { - None - } - }) - .flat_map(|object_type| &object_type.fields) - .filter_map(|(_, field)| { - let join_field_directives = field - .directives - .iter() - .filter(|d| d.name.as_str() == JOIN_FIELD) - .collect::>(); - if !join_field_directives.is_empty() { - Some(join_field_directives) - } else { - None - } - }) - .flatten() - .any(|join_directive| { - if let Some(override_label_arg) = - join_directive.argument_by_name(OVERRIDE_LABEL_ARG_NAME) - { - // Any argument value for `overrideLabel` that's not - // null can be considered as progressive override usage - if !override_label_arg.is_null() { - return true; - } - return false; - } - false - }); - if has_progressive_overrides { - return Err(ConfigurationError::InvalidConfiguration { - message: "`experimental_query_planner_mode` cannot be used with progressive overrides", - error: "remove uses of progressive overrides to try the experimental_query_planner_mode in `both` or `new`, otherwise switch back to `legacy`.".to_string(), - }); - } - - // We will only check for `@context` direcive, since - // `@fromContext` can only be used if `@context` is already - // applied, and we assume a correctly composed supergraph. - // - // `@context` can only be applied on Object Types, Interface - // Types and Unions. For simplicity of this function, we just - // check all 'extended_type` directives. - let has_set_context = schema - .supergraph_schema() - .types - .values() - .any(|extended_type| extended_type.directives().has(CONTEXT_DIRECTIVE)); - if has_set_context { - return Err(ConfigurationError::InvalidConfiguration { - message: "`experimental_query_planner_mode` cannot be used with `@context`", - error: "remove uses of `@context` to try the experimental_query_planner_mode in `both` or `new`, otherwise switch back to `legacy`.".to_string(), - }); - } - - // Fed1 supergraphs will not work with the rust query planner. 
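The `@context` scan removed just above follows the same shape: since `@fromContext` cannot be used without `@context`, checking type definitions for a `context` directive application suffices. A condensed, standalone version under the same assumptions (AST-based, illustrative SDL):

```rust
use apollo_compiler::ast;

fn has_context_directive(directives: &ast::DirectiveList) -> bool {
    directives.get_all("context").next().is_some()
}

// `@context` may only be applied to objects, interfaces, and unions,
// so scanning those definitions is sufficient.
fn uses_context(sdl: &str) -> bool {
    let Ok(doc) = ast::Document::parse(sdl, "supergraph.graphql") else {
        return false;
    };
    doc.definitions.iter().any(|def| match def {
        ast::Definition::ObjectTypeDefinition(t) => has_context_directive(&t.directives),
        ast::Definition::InterfaceTypeDefinition(t) => has_context_directive(&t.directives),
        ast::Definition::UnionTypeDefinition(t) => has_context_directive(&t.directives),
        _ => false,
    })
}

fn main() {
    assert!(uses_context(r#"type T @context(name: "ctx") { id: ID! }"#));
    assert!(!uses_context("type T { id: ID! }"));
}
```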
- let is_fed1_supergraph = match schema.federation_version() { - Some(v) => v == 1, - None => false, - }; - if is_fed1_supergraph { - return Err(ConfigurationError::InvalidConfiguration { - message: "`experimental_query_planner_mode` cannot be used with fed1 supergraph", - error: "switch back to `experimental_query_planner_mode: legacy` to use the router with fed1 supergraph".to_string(), - }); - } - - Ok(()) - } - crate::configuration::QueryPlannerMode::Legacy => Ok(()), - } -} #[cfg(test)] mod test { use std::sync::Arc; @@ -867,11 +770,9 @@ mod test { use tower_http::BoxError; use crate::configuration::Configuration; - use crate::configuration::QueryPlannerMode; use crate::plugin::Plugin; use crate::plugin::PluginInit; use crate::register_plugin; - use crate::router_factory::can_use_with_experimental_query_planner; use crate::router_factory::inject_schema_id; use crate::router_factory::RouterSuperServiceFactory; use crate::router_factory::YamlRouterFactory; @@ -975,13 +876,14 @@ mod test { async fn create_service(config: Configuration) -> Result<(), BoxError> { let schema = include_str!("testdata/supergraph.graphql"); + let schema = Schema::parse(schema, &config)?; let is_telemetry_disabled = false; let service = YamlRouterFactory .create( is_telemetry_disabled, Arc::new(config), - schema.to_string(), + Arc::new(schema), None, None, ) @@ -1003,125 +905,4 @@ mod test { "8e2021d131b23684671c3b85f82dfca836908c6a541bbd5c3772c66e7f8429d8" ); } - - #[test] - fn test_cannot_use_context_with_experimental_query_planner() { - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::Both, - ..Default::default() - }; - let schema = include_str!("testdata/supergraph_with_context.graphql"); - let schema = Arc::new(Schema::parse(schema, &config).unwrap()); - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_err(), - "experimental_query_planner_mode: both cannot be used with @context" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::New, - ..Default::default() - }; - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_err(), - "experimental_query_planner_mode: new cannot be used with @context" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::Legacy, - ..Default::default() - }; - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_ok(), - "experimental_query_planner_mode: legacy should be able to be used with @context" - ); - } - - #[test] - fn test_cannot_use_progressive_overrides_with_experimental_query_planner() { - // PROGRESSIVE OVERRIDES - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::Both, - ..Default::default() - }; - let schema = include_str!("testdata/supergraph_with_override_label.graphql"); - let schema = Arc::new(Schema::parse(schema, &config).unwrap()); - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_err(), - "experimental_query_planner_mode: both cannot be used with progressive overrides" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::New, - ..Default::default() - }; - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_err(), - "experimental_query_planner_mode: new cannot be used with progressive overrides" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::Legacy, - ..Default::default() 
- }; - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_ok(), - "experimental_query_planner_mode: legacy should be able to be used with progressive overrides" - ); - } - - #[test] - fn test_cannot_use_fed1_supergraphs_with_experimental_query_planner() { - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::Both, - ..Default::default() - }; - let schema = include_str!("testdata/supergraph.graphql"); - let schema = Arc::new(Schema::parse(schema, &config).unwrap()); - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_err(), - "experimental_query_planner_mode: both cannot be used with fed1 supergraph" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::New, - ..Default::default() - }; - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_err(), - "experimental_query_planner_mode: new cannot be used with fed1 supergraph" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::Legacy, - ..Default::default() - }; - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_ok(), - "experimental_query_planner_mode: legacy should be able to be used with fed1 supergraph" - ); - } - - #[test] - fn test_can_use_fed2_supergraphs_with_experimental_query_planner() { - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::Both, - ..Default::default() - }; - let schema = include_str!("testdata/minimal_fed2_supergraph.graphql"); - let schema = Arc::new(Schema::parse(schema, &config).unwrap()); - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_ok(), - "experimental_query_planner_mode: both can be used" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::New, - ..Default::default() - }; - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_ok(), - "experimental_query_planner_mode: new can be used" - ); - let config = Configuration { - experimental_query_planner_mode: QueryPlannerMode::Legacy, - ..Default::default() - }; - assert!( - can_use_with_experimental_query_planner(Arc::new(config), schema.clone()).is_ok(), - "experimental_query_planner_mode: legacy can be used" - ); - } } diff --git a/apollo-router/src/services/execution/service.rs b/apollo-router/src/services/execution/service.rs index 1902fe80c6..9486e50ec1 100644 --- a/apollo-router/src/services/execution/service.rs +++ b/apollo-router/src/services/execution/service.rs @@ -353,16 +353,20 @@ impl ExecutionService { nullified_paths.extend(paths); - let referenced_enums = if let (ApolloMetricsReferenceMode::Extended, Some(Value::Object(response_body))) = (metrics_ref_mode, &response.data) { + let mut referenced_enums = context + .extensions() + .with_lock(|lock| lock.get::().cloned()) + .unwrap_or_default(); + if let (ApolloMetricsReferenceMode::Extended, Some(Value::Object(response_body))) = (metrics_ref_mode, &response.data) { extract_enums_from_response( query.clone(), operation_name, schema.api_schema(), response_body, + &mut referenced_enums, ) - } else { - ReferencedEnums::new() }; + context .extensions() .with_lock(|mut lock| lock.insert::(referenced_enums)); diff --git a/apollo-router/src/services/http.rs b/apollo-router/src/services/http.rs index 1579ea155f..7f5d782498 100644 --- a/apollo-router/src/services/http.rs +++ b/apollo-router/src/services/http.rs @@ -34,12 
+34,12 @@ pub(crate) struct HttpResponse { #[derive(Clone)] pub(crate) struct HttpClientServiceFactory { - pub(crate) service: Arc, + pub(crate) service: HttpClientService, pub(crate) plugins: Arc, } impl HttpClientServiceFactory { - pub(crate) fn new(service: Arc, plugins: Arc) -> Self { + pub(crate) fn new(service: HttpClientService, plugins: Arc) -> Self { HttpClientServiceFactory { service, plugins } } @@ -60,17 +60,19 @@ impl HttpClientServiceFactory { .unwrap(); HttpClientServiceFactory { - service: Arc::new(service), + service, plugins: Arc::new(IndexMap::default()), } } pub(crate) fn create(&self, name: &str) -> BoxService { - let service = self.service.make(); + let service = self.service.clone(); self.plugins .iter() .rev() - .fold(service, |acc, (_, e)| e.http_client_service(name, acc)) + .fold(service.boxed(), |acc, (_, e)| { + e.http_client_service(name, acc) + }) } } diff --git a/apollo-router/src/services/subgraph_service.rs b/apollo-router/src/services/subgraph_service.rs index c2274f6bc8..5d8fae1ede 100644 --- a/apollo-router/src/services/subgraph_service.rs +++ b/apollo-router/src/services/subgraph_service.rs @@ -16,6 +16,7 @@ use http::header::{self}; use http::response::Parts; use http::HeaderValue; use http::Request; +use http::StatusCode; use hyper_rustls::ConfigBuilderExt; use itertools::Itertools; use mediatype::names::APPLICATION; @@ -871,9 +872,34 @@ pub(crate) async fn process_batch( // Perform the actual fetch. If this fails then we didn't manage to make the call at all, so we can't do anything with it. tracing::debug!("fetching from subgraph: {service}"); let (parts, content_type, body) = - do_fetch(client, &batch_context, &service, request, display_body) + match do_fetch(client, &batch_context, &service, request, display_body) .instrument(subgraph_req_span) - .await?; + .await + { + Ok(res) => res, + Err(err) => { + let resp = http::Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body(err.to_graphql_error(None)) + .map_err(|err| FetchError::SubrequestHttpError { + status_code: None, + service: service.clone(), + reason: format!("cannot create the http response from error: {err:?}"), + })?; + let (parts, body) = resp.into_parts(); + let body = + serde_json::to_vec(&body).map_err(|err| FetchError::SubrequestHttpError { + status_code: None, + service: service.clone(), + reason: format!("cannot serialize the error: {err:?}"), + })?; + ( + parts, + Ok(ContentType::ApplicationJson), + Some(Ok(body.into())), + ) + } + }; let subgraph_response_event = batch_context .extensions() @@ -1283,9 +1309,21 @@ pub(crate) async fn call_single_http( // Perform the actual fetch. If this fails then we didn't manage to make the call at all, so we can't do anything with it. 
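`process_batch` above now converts a failed `do_fetch` into a synthetic 500 response carrying a GraphQL error instead of bailing out with `?`, and the matching change in `call_single_http`, continuing directly below, does the same via `SubgraphResponse::builder()`. A stripped-down sketch of the batch-side conversion; `GraphqlError` here is a stand-in for the router's `graphql::Error` as produced by `FetchError::to_graphql_error(None)`.

```rust
use http::StatusCode;

// Stand-in for the router's graphql::Error type.
#[derive(serde::Serialize)]
struct GraphqlError {
    message: String,
}

// On a transport failure, synthesize a 500 response whose body is a
// GraphQL error payload, so the rest of the pipeline can proceed.
fn error_response(err: GraphqlError) -> Result<http::Response<Vec<u8>>, http::Error> {
    let body = serde_json::json!({ "errors": [err] });
    http::Response::builder()
        .status(StatusCode::INTERNAL_SERVER_ERROR)
        .body(serde_json::to_vec(&body).expect("json value serializes"))
}

fn main() {
    let resp = error_response(GraphqlError {
        message: "connection closed before message completed".into(),
    })
    .expect("valid response");
    assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR);
}
```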
let (parts, content_type, body) = - do_fetch(client, &context, service_name, request, display_body) + match do_fetch(client, &context, service_name, request, display_body) .instrument(subgraph_req_span) - .await?; + .await + { + Ok(resp) => resp, + Err(err) => { + return Ok(SubgraphResponse::builder() + .subgraph_name(service_name.to_string()) + .error(err.to_graphql_error(None)) + .status_code(StatusCode::INTERNAL_SERVER_ERROR) + .context(context) + .extensions(Object::default()) + .build()); + } + }; let subgraph_response_event = context .extensions() @@ -1705,6 +1743,17 @@ mod tests { server.await.unwrap(); } + // starts a local server emulating a subgraph returning connection closed + async fn emulate_subgraph_panic(listener: TcpListener) { + async fn handle(_request: http::Request) -> Result, Infallible> { + panic!("test") + } + + let make_svc = make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) }); + let server = Server::from_tcp(listener).unwrap().serve(make_svc); + server.await.unwrap(); + } + // starts a local server emulating a subgraph returning bad response format async fn emulate_subgraph_ok_status_invalid_response(listener: TcpListener) { async fn handle(_request: http::Request) -> Result, Infallible> { @@ -2421,6 +2470,44 @@ mod tests { assert!(response.response.body().errors.is_empty()); } + #[tokio::test(flavor = "multi_thread")] + async fn test_subgraph_service_panic() { + let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + let socket_addr = listener.local_addr().unwrap(); + tokio::task::spawn(emulate_subgraph_panic(listener)); + let subgraph_service = SubgraphService::new( + "test", + true, + None, + Notify::default(), + HttpClientServiceFactory::from_config( + "test", + &Configuration::default(), + Http2Config::Enable, + ), + ) + .expect("can create a SubgraphService"); + + let url = Uri::from_str(&format!("http://{socket_addr}")).unwrap(); + let response = subgraph_service + .oneshot( + SubgraphRequest::builder() + .supergraph_request(supergraph_request("query")) + .subgraph_request(subgraph_http_request(url, "query")) + .operation_kind(OperationKind::Query) + .subgraph_name(String::from("test")) + .context(Context::new()) + .build(), + ) + .await + .unwrap(); + assert!(!response.response.body().errors.is_empty()); + assert_eq!( + response.response.body().errors[0].message, + "HTTP fetch failed from 'test': HTTP fetch failed from 'test': connection closed before message completed" + ); + } + #[tokio::test(flavor = "multi_thread")] async fn test_subgraph_service_invalid_response() { let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); diff --git a/apollo-router/src/services/supergraph/service.rs b/apollo-router/src/services/supergraph/service.rs index a5ea8403d5..dec84074f8 100644 --- a/apollo-router/src/services/supergraph/service.rs +++ b/apollo-router/src/services/supergraph/service.rs @@ -454,6 +454,7 @@ async fn subscription_task( formatted_query_plan: query_plan.formatted_query_plan.clone(), query: query_plan.query.clone(), query_metrics: query_plan.query_metrics, + estimated_size: Default::default(), }) }), _ => { diff --git a/apollo-router/src/services/supergraph/snapshots/apollo_router__services__supergraph__tests__missing_entities.snap b/apollo-router/src/services/supergraph/snapshots/apollo_router__services__supergraph__tests__missing_entities.snap index 33f7508979..a4366f1d9a 100644 --- a/apollo-router/src/services/supergraph/snapshots/apollo_router__services__supergraph__tests__missing_entities.snap +++ 
b/apollo-router/src/services/supergraph/snapshots/apollo_router__services__supergraph__tests__missing_entities.snap @@ -1,5 +1,5 @@ --- -source: apollo-router/src/services/supergraph_service.rs +source: apollo-router/src/services/supergraph/tests.rs expression: stream.next_response().await.unwrap() --- { @@ -14,7 +14,11 @@ expression: stream.next_response().await.unwrap() }, "errors": [ { - "message": "error" + "message": "error", + "path": [ + "currentUser", + "activeOrganization" + ] } ] } diff --git a/apollo-router/src/spec/schema.rs b/apollo-router/src/spec/schema.rs index 07f13f746a..05546910ca 100644 --- a/apollo-router/src/spec/schema.rs +++ b/apollo-router/src/spec/schema.rs @@ -5,7 +5,6 @@ use std::str::FromStr; use std::sync::Arc; use std::time::Instant; -use apollo_compiler::ast; use apollo_compiler::schema::Implementers; use apollo_compiler::validation::Valid; use apollo_compiler::Name; @@ -38,32 +37,30 @@ pub(crate) struct Schema { pub(crate) struct ApiSchema(pub(crate) ValidFederationSchema); impl Schema { - pub(crate) fn parse_ast(sdl: &str) -> Result { + pub(crate) fn parse(raw_sdl: &str, config: &Configuration) -> Result { + Self::parse_arc(raw_sdl.to_owned().into(), config) + } + + pub(crate) fn parse_arc( + raw_sdl: Arc, + config: &Configuration, + ) -> Result { + let start = Instant::now(); let mut parser = apollo_compiler::parser::Parser::new(); - let result = parser.parse_ast(sdl, "schema.graphql"); + let result = parser.parse_ast(raw_sdl.as_ref(), "schema.graphql"); // Trace log recursion limit data let recursion_limit = parser.recursion_reached(); tracing::trace!(?recursion_limit, "recursion limit data"); - result.map_err(|invalid| { - SchemaError::Parse(ParseErrors { - errors: invalid.errors, - }) - }) - } - - pub(crate) fn parse_compiler_schema( - sdl: &str, - ) -> Result, SchemaError> { - Self::parse_ast(sdl)? + let definitions = result + .map_err(|invalid| { + SchemaError::Parse(ParseErrors { + errors: invalid.errors, + }) + })? .to_schema_validate() - .map_err(|errors| SchemaError::Validate(errors.into())) - } - - pub(crate) fn parse(sdl: &str, config: &Configuration) -> Result { - let start = Instant::now(); - let definitions = Self::parse_compiler_schema(sdl)?; + .map_err(|errors| SchemaError::Validate(errors.into()))?; let mut subgraphs = HashMap::new(); // TODO: error if not found? 
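The `spec/schema.rs` hunk above splits schema construction into an `Arc<String>`-taking entry point (`parse_arc`) with `parse` kept as a `&str` convenience wrapper, so the raw SDL buffer can be shared (state machine, telemetry, `schema_id` hashing) without another copy. A minimal sketch of that constructor split, with parsing elided and names illustrative:

```rust
use std::sync::Arc;

struct ParsedSchema {
    raw_sdl: Arc<String>,
}

impl ParsedSchema {
    // Convenience wrapper for callers that only have a `&str`.
    fn parse(raw_sdl: &str) -> Self {
        Self::parse_arc(Arc::new(raw_sdl.to_owned()))
    }

    // Owning entry point: retains the shared SDL without copying it again.
    fn parse_arc(raw_sdl: Arc<String>) -> Self {
        // Real parsing/validation would happen here.
        ParsedSchema { raw_sdl }
    }
}

fn main() {
    let sdl: Arc<String> = Arc::new("type Query { x: Int }".to_owned());
    let schema = ParsedSchema::parse_arc(sdl.clone());
    // The SDL buffer is shared, not duplicated.
    assert!(Arc::ptr_eq(&schema.raw_sdl, &sdl));
}
```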
@@ -111,7 +108,7 @@ impl Schema { let implementers_map = definitions.implementers_map(); let supergraph = Supergraph::from_schema(definitions)?; - let schema_id = Arc::new(Schema::schema_id(sdl)); + let schema_id = Arc::new(Schema::schema_id(&raw_sdl)); let api_schema = supergraph .to_api_schema(ApiSchemaOptions { @@ -125,7 +122,7 @@ impl Schema { })?; Ok(Schema { - raw_sdl: Arc::new(sdl.to_owned()), + raw_sdl, supergraph, subgraphs, implementers_map, diff --git a/apollo-router/src/state_machine.rs b/apollo-router/src/state_machine.rs index 0a141e1669..e3ce6c3a67 100644 --- a/apollo-router/src/state_machine.rs +++ b/apollo-router/src/state_machine.rs @@ -308,7 +308,7 @@ impl State { server_handle: &mut Option, previous_router_service_factory: Option<&FA::RouterFactory>, configuration: Arc, - schema: Arc, + sdl: Arc, license: LicenseState, listen_addresses_guard: &mut OwnedRwLockWriteGuard, mut all_connections_stopped_signals: Vec>, @@ -317,12 +317,12 @@ impl State { S: HttpServerFactory, FA: RouterSuperServiceFactory, { - let report = { - let ast = Schema::parse_ast(&schema) - .map_err(|e| ServiceCreationError(e.to_string().into()))?; - // Check the license - LicenseEnforcementReport::build(&configuration, &ast) - }; + let schema = Arc::new( + Schema::parse_arc(sdl.clone(), &configuration) + .map_err(|e| ServiceCreationError(e.to_string().into()))?, + ); + // Check the license + let report = LicenseEnforcementReport::build(&configuration, &schema); match license { LicenseState::Licensed => { @@ -362,7 +362,7 @@ impl State { .create( state_machine.is_telemetry_disabled, configuration.clone(), - schema.to_string(), + schema, previous_router_service_factory, None, ) @@ -422,7 +422,7 @@ impl State { Ok(Running { configuration, _metrics: metrics, - schema, + schema: sdl, license, server_handle: Some(server_handle), router_service_factory, @@ -1119,7 +1119,7 @@ mod tests { &'a mut self, is_telemetry_disabled: bool, configuration: Arc, - schema: String, + schema: Arc, previous_router_service_factory: Option<&'a MockMyRouterFactory>, extra_plugins: Option)>>, ) -> Result; diff --git a/apollo-router/src/test_harness.rs b/apollo-router/src/test_harness.rs index 7e921ffaf3..a0b5384489 100644 --- a/apollo-router/src/test_harness.rs +++ b/apollo-router/src/test_harness.rs @@ -34,6 +34,7 @@ use crate::services::subgraph; use crate::services::supergraph; use crate::services::HasSchema; use crate::services::SupergraphCreator; +use crate::spec::Schema; use crate::uplink::license_enforcement::LicenseState; /// Mocks for services the Apollo Router must integrate with. 
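The `state_machine.rs` change above ties this together: previously the state machine parsed the SDL into an AST just for license enforcement and the router factory re-parsed the same string again, whereas now a single `Arc<Schema>` is produced up front and handed to both consumers. A condensed sketch of the resulting call-site shape, with simplified stand-ins for `crate::spec::Schema` and the factory entry point:

```rust
use std::sync::Arc;

struct Schema {
    raw_sdl: Arc<String>,
}

fn parse(sdl: Arc<String>) -> Result<Schema, String> {
    Ok(Schema { raw_sdl: sdl })
}

fn license_report(_schema: &Schema) { /* uses the parsed schema */ }
fn create_router(_schema: Arc<Schema>) { /* shares it, no re-parse */ }

fn main() -> Result<(), String> {
    let sdl = Arc::new("type Query { x: Int }".to_owned());
    // One parse serves both the license check and service creation.
    let schema = Arc::new(parse(sdl)?);
    license_report(&schema);
    create_router(schema);
    Ok(())
}
```

The same pattern shows up in `test_harness.rs` below, where the harness now parses its canned schema once before calling `inner_create_supergraph`.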
@@ -291,10 +292,11 @@ impl<'a> TestHarness<'a> { let config = builder.configuration.unwrap_or_default(); let canned_schema = include_str!("../testing_schema.graphql"); let schema = builder.schema.unwrap_or(canned_schema); + let schema = Arc::new(Schema::parse(schema, &config)?); let supergraph_creator = YamlRouterFactory .inner_create_supergraph( config.clone(), - schema.to_string(), + schema, None, None, Some(builder.extra_plugins), diff --git a/apollo-router/src/uplink/license_enforcement.rs b/apollo-router/src/uplink/license_enforcement.rs index 21ab68d970..743fbbe543 100644 --- a/apollo-router/src/uplink/license_enforcement.rs +++ b/apollo-router/src/uplink/license_enforcement.rs @@ -11,9 +11,8 @@ use std::time::Duration; use std::time::SystemTime; use std::time::UNIX_EPOCH; -use apollo_compiler::ast::Definition; use apollo_compiler::schema::Directive; -use apollo_compiler::Node; +use apollo_compiler::schema::ExtendedType; use buildstructor::Builder; use displaydoc::Display; use itertools::Itertools; @@ -31,6 +30,7 @@ use thiserror::Error; use url::Url; use crate::plugins::authentication::convert_key_algorithm; +use crate::spec::Schema; use crate::spec::LINK_AS_ARGUMENT; use crate::spec::LINK_DIRECTIVE_NAME; use crate::spec::LINK_URL_ARGUMENT; @@ -101,7 +101,7 @@ struct ParsedLinkSpec { impl ParsedLinkSpec { fn from_link_directive( - link_directive: &Node, + link_directive: &Directive, ) -> Option> { link_directive .argument_by_name(LINK_URL_ARGUMENT) @@ -157,7 +157,7 @@ impl LicenseEnforcementReport { pub(crate) fn build( configuration: &Configuration, - schema: &apollo_compiler::ast::Document, + schema: &Schema, ) -> LicenseEnforcementReport { LicenseEnforcementReport { restricted_config_in_use: Self::validate_configuration( @@ -197,14 +197,14 @@ impl LicenseEnforcementReport { } fn validate_schema( - schema: &apollo_compiler::ast::Document, + schema: &Schema, schema_restrictions: &Vec, ) -> Vec { let link_specs = schema - .definitions - .iter() - .filter_map(|def| def.as_schema_definition()) - .flat_map(|def| def.directives.get_all(LINK_DIRECTIVE_NAME)) + .supergraph_schema() + .schema_definition + .directives + .get_all(LINK_DIRECTIVE_NAME) .filter_map(|link| { ParsedLinkSpec::from_link_directive(link).map(|maybe_spec| { maybe_spec.ok().map(|spec| (spec.spec_url.to_owned(), spec)) @@ -214,18 +214,8 @@ impl LicenseEnforcementReport { let mut schema_violations: Vec = Vec::new(); - for subgraph_url in schema - .definitions - .iter() - .filter_map(|def| def.as_enum_type_definition()) - .filter(|def| def.name == "join__Graph") - .flat_map(|def| def.values.iter()) - .flat_map(|val| val.directives.iter()) - .filter(|d| d.name == "join__graph") - .filter_map(|dir| (dir.arguments.iter().find(|arg| arg.name == "url"))) - .filter_map(|arg| arg.value.as_str()) - { - if subgraph_url.starts_with("unix://") { + for (_subgraph_name, subgraph_url) in schema.subgraphs() { + if subgraph_url.scheme_str() == Some("unix") { schema_violations.push(SchemaViolation::DirectiveArgument { url: "https://specs.apollo.dev/join/v0.3".to_string(), name: "join__Graph".to_string(), @@ -262,16 +252,19 @@ impl LicenseEnforcementReport { if version_req.matches(&link_spec.version) { let directive_name = link_spec.directive_name(name); if schema - .definitions - .iter() + .supergraph_schema() + .types + .values() .flat_map(|def| match def { // To traverse additional directive locations, add match arms for the respective definition types required. 
// As of writing this, this is only implemented for finding usages of progressive override on object type fields, but it can be extended to other directive locations trivially. - Definition::ObjectTypeDefinition(object_type_def) => { - let directives_on_object = - object_type_def.directives.get_all(&directive_name); + ExtendedType::Object(object_type_def) => { + let directives_on_object = object_type_def + .directives + .get_all(&directive_name) + .map(|component| &component.node); let directives_on_fields = - object_type_def.fields.iter().flat_map(|field| { + object_type_def.fields.values().flat_map(|field| { field.directives.get_all(&directive_name) }); @@ -391,7 +384,7 @@ impl LicenseEnforcementReport { .name("Batching support") .build(), ConfigurationRestriction::builder() - .path("$.preview_demand_control") + .path("$.demand_control") .name("Demand control plugin") .build(), ConfigurationRestriction::builder() @@ -682,9 +675,11 @@ mod test { use crate::uplink::license_enforcement::OneOrMany; use crate::Configuration; + #[track_caller] fn check(router_yaml: &str, supergraph_schema: &str) -> LicenseEnforcementReport { let config = Configuration::from_str(router_yaml).expect("router config must be valid"); - let schema = Schema::parse_ast(supergraph_schema).expect("supergraph schema must be valid"); + let schema = + Schema::parse(supergraph_schema, &config).expect("supergraph schema must be valid"); LicenseEnforcementReport::build(&config, &schema) } @@ -730,6 +725,7 @@ mod test { } #[test] + #[cfg(not(windows))] // http::uri::Uri parsing appears to reject unix:// on Windows fn test_restricted_unix_socket_via_schema() { let report = check( include_str!("testdata/oss.router.yaml"), diff --git a/apollo-router/src/uplink/mod.rs b/apollo-router/src/uplink/mod.rs index d6eb3262c5..6a8974699e 100644 --- a/apollo-router/src/uplink/mod.rs +++ b/apollo-router/src/uplink/mod.rs @@ -414,7 +414,7 @@ where tracing::info!( histogram.apollo_router_uplink_fetch_duration_seconds = now.elapsed().as_secs_f64(), - query = std::any::type_name::(), + query, url = url.to_string(), "kind" = "http_error", error = e.to_string(), @@ -441,7 +441,7 @@ fn query_name() -> &'static str { let mut query = std::any::type_name::(); query = query .strip_suffix("Query") - .expect("Uplink structs mut be named xxxQuery") + .expect("Uplink structs must be named xxxQuery") .get(query.rfind("::").map(|index| index + 2).unwrap_or_default()..) 
.expect("cannot fail"); query diff --git a/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap b/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap index 70f682b0ca..baa48d4a8a 100644 --- a/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap +++ b/apollo-router/src/uplink/snapshots/apollo_router__uplink__license_enforcement__test__restricted_features_via_config.snap @@ -55,7 +55,7 @@ Configuration yaml: .preview_file_uploads * Demand control plugin - .preview_demand_control + .demand_control * Apollo metrics extended references .telemetry.apollo.experimental_apollo_metrics_reference_mode diff --git a/apollo-router/src/uplink/testdata/restricted.router.yaml b/apollo-router/src/uplink/testdata/restricted.router.yaml index 14aa7bd994..b354a9a239 100644 --- a/apollo-router/src/uplink/testdata/restricted.router.yaml +++ b/apollo-router/src/uplink/testdata/restricted.router.yaml @@ -54,11 +54,14 @@ plugins: preview_entity_cache: enabled: true - redis: - urls: - - https://example.com + invalidation: + listen: 127.0.0.1:4000 + path: /invalidation subgraph: all: + redis: + urls: + - https://example.com enabled: false subgraphs: product: @@ -89,7 +92,7 @@ preview_file_uploads: enabled: true mode: stream -preview_demand_control: +demand_control: enabled: true mode: measure strategy: diff --git a/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_in_range.graphql b/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_in_range.graphql index 563cc14c98..381d34ddd5 100644 --- a/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_in_range.graphql +++ b/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_in_range.graphql @@ -4,6 +4,51 @@ schema query: Query } + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field( + graph: join__Graph + requires: join__FieldSet + provides: join__FieldSet + type: String + external: Boolean + override: String + usedOverridden: Boolean + overrideLabel: String +) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +scalar join__FieldSet + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "products", url: "http://localhost:4001/") + SUBGRAPH2 @join__graph(name: "reviews", url: "http://localhost:4002/") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. 
+ """ + EXECUTION +} + type Query @join__type(graph: SUBGRAPH1) @join__type(graph: SUBGRAPH2) { t: T @join__field(graph: SUBGRAPH1) } diff --git a/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_out_of_range.graphql b/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_out_of_range.graphql index a62a24953a..586124f47a 100644 --- a/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_out_of_range.graphql +++ b/apollo-router/src/uplink/testdata/schema_enforcement_directive_arg_version_out_of_range.graphql @@ -4,6 +4,41 @@ schema query: Query } +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +scalar join__FieldSet + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "products", url: "http://localhost:4001/") + SUBGRAPH2 @join__graph(name: "reviews", url: "http://localhost:4002/") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + type Query @join__type(graph: SUBGRAPH1) @join__type(graph: SUBGRAPH2) { t: T @join__field(graph: SUBGRAPH1) } diff --git a/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_in_range.graphql b/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_in_range.graphql index a265252bd4..ded63469ef 100644 --- a/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_in_range.graphql +++ b/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_in_range.graphql @@ -1,5 +1,26 @@ schema @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/authenticated/v0.1", for: SECURITY) { query: Query } + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. 
+ """ + EXECUTION +} + +scalar link__Import + +type Query { + field: Int +} diff --git a/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_out_of_range.graphql b/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_out_of_range.graphql index 5266cfeb47..231cb51c48 100644 --- a/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_out_of_range.graphql +++ b/apollo-router/src/uplink/testdata/schema_enforcement_spec_version_out_of_range.graphql @@ -1,5 +1,26 @@ schema @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) @link(url: "https://specs.apollo.dev/authenticated/v0.2", for: SECURITY) { query: Query } + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar link__Import + +type Query { + field: Int +} diff --git a/apollo-router/src/uplink/testdata/unix_socket.graphql b/apollo-router/src/uplink/testdata/unix_socket.graphql index 910a221a90..5ea6507f76 100644 --- a/apollo-router/src/uplink/testdata/unix_socket.graphql +++ b/apollo-router/src/uplink/testdata/unix_socket.graphql @@ -20,6 +20,11 @@ directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA +directive @authenticated on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM + +scalar federation__Scope +directive @requiresScopes(scopes: [[federation__Scope!]!]!) on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM + scalar join__FieldSet enum join__Graph { diff --git a/apollo-router/tests/apollo_reports.rs b/apollo-router/tests/apollo_reports.rs index a46f303188..006c3d8e97 100644 --- a/apollo-router/tests/apollo_reports.rs +++ b/apollo-router/tests/apollo_reports.rs @@ -92,10 +92,10 @@ async fn config( Some(serde_json::Value::Bool(use_legacy_request_span)) }) .expect("Could not sub in endpoint"); - config = jsonpath_lib::replace_with(config, "$.preview_demand_control.enabled", &mut |_| { + config = jsonpath_lib::replace_with(config, "$.demand_control.enabled", &mut |_| { Some(serde_json::Value::Bool(demand_control)) }) - .expect("Could not sub in preview_demand_control"); + .expect("Could not sub in demand_control"); config = jsonpath_lib::replace_with( config, diff --git a/apollo-router/tests/common.rs b/apollo-router/tests/common.rs index 6eddc3dc70..35a1115495 100644 --- a/apollo-router/tests/common.rs +++ b/apollo-router/tests/common.rs @@ -491,6 +491,7 @@ impl IntegrationTest { self.execute_query_internal( &json!({"query":"query {topProducts{name}}","variables":{}}), None, + None, ) } @@ -499,34 +500,44 @@ impl IntegrationTest { &self, query: &Value, ) -> impl std::future::Future { - self.execute_query_internal(query, None) + self.execute_query_internal(query, None, None) } #[allow(dead_code)] pub fn execute_bad_query( &self, ) -> impl std::future::Future { - self.execute_query_internal(&json!({"garbage":{}}), None) + self.execute_query_internal(&json!({"garbage":{}}), None, None) } #[allow(dead_code)] pub fn execute_huge_query( &self, ) -> impl std::future::Future { - self.execute_query_internal(&json!({"query":"query {topProducts{name, name, name, name, name, name, name, name, name, 
name}}","variables":{}}), None) + self.execute_query_internal(&json!({"query":"query {topProducts{name, name, name, name, name, name, name, name, name, name}}","variables":{}}), None, None) } #[allow(dead_code)] pub fn execute_bad_content_type( &self, ) -> impl std::future::Future { - self.execute_query_internal(&json!({"garbage":{}}), Some("garbage")) + self.execute_query_internal(&json!({"garbage":{}}), Some("garbage"), None) + } + + #[allow(dead_code)] + pub fn execute_query_with_headers( + &self, + query: &Value, + headers: HashMap, + ) -> impl std::future::Future { + self.execute_query_internal(query, None, Some(headers)) } fn execute_query_internal( &self, query: &Value, content_type: Option<&'static str>, + headers: Option>, ) -> impl std::future::Future { assert!( self.router.is_some(), @@ -540,11 +551,10 @@ impl IntegrationTest { async move { let span = info_span!("client_request"); let span_id = span.context().span().span_context().trace_id(); - async move { let client = reqwest::Client::new(); - let mut request = client + let mut builder = client .post(url) .header( CONTENT_TYPE, @@ -553,10 +563,15 @@ impl IntegrationTest { .header("apollographql-client-name", "custom_name") .header("apollographql-client-version", "1.0") .header("x-my-header", "test") - .header("head", "test") - .json(&query) - .build() - .unwrap(); + .header("head", "test"); + + if let Some(headers) = headers { + for (name, value) in headers { + builder = builder.header(name, value); + } + } + + let mut request = builder.json(&query).build().unwrap(); telemetry.inject_context(&mut request); request.headers_mut().remove(ACCEPT); match client.execute(request).await { diff --git a/apollo-router/tests/fixtures/apollo_reports.router.yaml b/apollo-router/tests/fixtures/apollo_reports.router.yaml index 81bcf4cd49..644e286ee7 100644 --- a/apollo-router/tests/fixtures/apollo_reports.router.yaml +++ b/apollo-router/tests/fixtures/apollo_reports.router.yaml @@ -3,7 +3,7 @@ include_subgraph_errors: rhai: scripts: tests/fixtures main: test_callbacks.rhai -preview_demand_control: +demand_control: mode: measure enabled: false strategy: diff --git a/apollo-router/tests/fixtures/apollo_reports_batch.router.yaml b/apollo-router/tests/fixtures/apollo_reports_batch.router.yaml index 387085b17e..e60791ebbc 100644 --- a/apollo-router/tests/fixtures/apollo_reports_batch.router.yaml +++ b/apollo-router/tests/fixtures/apollo_reports_batch.router.yaml @@ -6,7 +6,7 @@ rhai: main: test_callbacks.rhai include_subgraph_errors: all: true -preview_demand_control: +demand_control: mode: measure enabled: false strategy: diff --git a/apollo-router/tests/fixtures/broken-supergraph.graphql b/apollo-router/tests/fixtures/broken-supergraph.graphql new file mode 100644 index 0000000000..eafc474b2b --- /dev/null +++ b/apollo-router/tests/fixtures/broken-supergraph.graphql @@ -0,0 +1,127 @@ +schema + # this is missing a link directive spec definition + # @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) { + query: Query + mutation: Mutation +} + +directive @link( + url: String + as: String + for: link__Purpose + import: [link__Import] +) repeatable on SCHEMA + +directive @join__field( + graph: join__Graph! + requires: join__FieldSet + provides: join__FieldSet + type: String + external: Boolean +) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__type( + graph: join__Graph! 
+ key: join__FieldSet +) repeatable on OBJECT | INTERFACE + +directive @join__owner(graph: join__Graph!) on OBJECT | INTERFACE + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements( + graph: join__Graph! + interface: String! +) repeatable on OBJECT | INTERFACE + +directive @join__unionMember( + graph: join__Graph! + member: String! +) repeatable on UNION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @tag( + name: String! +) repeatable on FIELD_DEFINITION | INTERFACE | OBJECT | UNION + +directive @inaccessible on OBJECT | FIELD_DEFINITION | INTERFACE | UNION + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar join__FieldSet + +scalar federation__Scope + +enum join__Graph { + ACCOUNTS + @join__graph(name: "accounts", url: "https://accounts.demo.starstuff.dev") + INVENTORY + @join__graph(name: "inventory", url: "https://inventory.demo.starstuff.dev") + PRODUCTS + @join__graph(name: "products", url: "https://products.demo.starstuff.dev") + REVIEWS + @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev") +} +type Mutation @join__type(graph: PRODUCTS) @join__type(graph: REVIEWS) { + createProduct(name: String, upc: ID!): Product @join__field(graph: PRODUCTS) + createReview(body: String, id: ID!, upc: ID!): Review + @join__field(graph: REVIEWS) +} + +type Product + @join__type(graph: PRODUCTS, key: "upc") + @join__type(graph: INVENTORY, key: "upc") + @join__type(graph: REVIEWS, key: "upc") { + inStock: Boolean + @join__field(graph: INVENTORY) + @tag(name: "private") + @inaccessible + name: String @join__field(graph: PRODUCTS) + price: Int @join__field(graph: PRODUCTS) + reviews: [Review] @join__field(graph: REVIEWS) + reviewsForAuthor(authorID: ID!): [Review] @join__field(graph: REVIEWS) + upc: String! + @join__field(graph: PRODUCTS) + @join__field(graph: INVENTORY, external: true) + @join__field(graph: REVIEWS, external: true) + weight: Int @join__field(graph: PRODUCTS) +} + +type Query @join__type(graph: ACCOUNTS) @join__type(graph: PRODUCTS) { + me: User @join__field(graph: ACCOUNTS) + topProducts(first: Int = 5): [Product] @join__field(graph: PRODUCTS) +} + +type Review + @join__owner(graph: REVIEWS) + @join__type(graph: REVIEWS, key: "id") { + author: User @join__field(graph: REVIEWS) + body: String @join__field(graph: REVIEWS) + id: ID! + product: Product @join__field(graph: REVIEWS) +} + +type User + @join__owner(graph: ACCOUNTS) + @join__type(graph: ACCOUNTS, key: "id") + @join__type(graph: REVIEWS, key: "id") { + id: ID! + name: String @join__field(graph: ACCOUNTS) + + reviews: [Review] @join__field(graph: REVIEWS) + username: String @join__field(graph: ACCOUNTS) +} diff --git a/apollo-router/tests/fixtures/valid-supergraph.graphql b/apollo-router/tests/fixtures/valid-supergraph.graphql new file mode 100644 index 0000000000..fe43cc6964 --- /dev/null +++ b/apollo-router/tests/fixtures/valid-supergraph.graphql @@ -0,0 +1,126 @@ +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) { + query: Query + mutation: Mutation +} + +directive @link( + url: String + as: String + for: link__Purpose + import: [link__Import] +) repeatable on SCHEMA + +directive @join__field( + graph: join__Graph! 
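+  # Illustrative note: `external: true` marks a field this subgraph references
+  # but does not resolve itself, as on `Product.upc` for INVENTORY and REVIEWS below.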
+ requires: join__FieldSet + provides: join__FieldSet + type: String + external: Boolean +) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__type( + graph: join__Graph! + key: join__FieldSet +) repeatable on OBJECT | INTERFACE + +directive @join__owner(graph: join__Graph!) on OBJECT | INTERFACE + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements( + graph: join__Graph! + interface: String! +) repeatable on OBJECT | INTERFACE + +directive @join__unionMember( + graph: join__Graph! + member: String! +) repeatable on UNION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @tag( + name: String! +) repeatable on FIELD_DEFINITION | INTERFACE | OBJECT | UNION + +directive @inaccessible on OBJECT | FIELD_DEFINITION | INTERFACE | UNION + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +scalar join__FieldSet + +scalar federation__Scope + +enum join__Graph { + ACCOUNTS + @join__graph(name: "accounts", url: "https://accounts.demo.starstuff.dev") + INVENTORY + @join__graph(name: "inventory", url: "https://inventory.demo.starstuff.dev") + PRODUCTS + @join__graph(name: "products", url: "https://products.demo.starstuff.dev") + REVIEWS + @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev") +} +type Mutation @join__type(graph: PRODUCTS) @join__type(graph: REVIEWS) { + createProduct(name: String, upc: ID!): Product @join__field(graph: PRODUCTS) + createReview(body: String, id: ID!, upc: ID!): Review + @join__field(graph: REVIEWS) +} + +type Product + @join__type(graph: PRODUCTS, key: "upc") + @join__type(graph: INVENTORY, key: "upc") + @join__type(graph: REVIEWS, key: "upc") { + inStock: Boolean + @join__field(graph: INVENTORY) + @tag(name: "private") + @inaccessible + name: String @join__field(graph: PRODUCTS) + price: Int @join__field(graph: PRODUCTS) + reviews: [Review] @join__field(graph: REVIEWS) + reviewsForAuthor(authorID: ID!): [Review] @join__field(graph: REVIEWS) + upc: String! + @join__field(graph: PRODUCTS) + @join__field(graph: INVENTORY, external: true) + @join__field(graph: REVIEWS, external: true) + weight: Int @join__field(graph: PRODUCTS) +} + +type Query @join__type(graph: ACCOUNTS) @join__type(graph: PRODUCTS) { + me: User @join__field(graph: ACCOUNTS) + topProducts(first: Int = 5): [Product] @join__field(graph: PRODUCTS) +} + +type Review + @join__owner(graph: REVIEWS) + @join__type(graph: REVIEWS, key: "id") { + author: User @join__field(graph: REVIEWS) + body: String @join__field(graph: REVIEWS) + id: ID! + product: Product @join__field(graph: REVIEWS) +} + +type User + @join__owner(graph: ACCOUNTS) + @join__type(graph: ACCOUNTS, key: "id") + @join__type(graph: REVIEWS, key: "id") { + id: ID! 
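+  # Illustrative note: `id` is the shared entity key declared in the @join__type
+  # directives above, so both the accounts and reviews subgraphs can resolve a User reference.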
+ name: String @join__field(graph: ACCOUNTS) + + reviews: [Review] @join__field(graph: REVIEWS) + username: String @join__field(graph: ACCOUNTS) +} diff --git a/apollo-router/tests/integration/batching.rs b/apollo-router/tests/integration/batching.rs index c50c85054b..071ca5cb7a 100644 --- a/apollo-router/tests/integration/batching.rs +++ b/apollo-router/tests/integration/batching.rs @@ -140,19 +140,20 @@ async fn it_batches_with_errors_in_single_graph() -> Result<(), BoxError> { if test_is_enabled() { // Make sure that we got back what we wanted assert_yaml_snapshot!(responses, @r###" - --- - - data: - entryA: - index: 0 - - errors: - - message: expected error in A - - data: - entryA: - index: 2 - - data: - entryA: - index: 3 - "###); + --- + - data: + entryA: + index: 0 + - errors: + - message: expected error in A + path: [] + - data: + entryA: + index: 2 + - data: + entryA: + index: 3 + "###); } Ok(()) @@ -189,24 +190,26 @@ async fn it_batches_with_errors_in_multi_graph() -> Result<(), BoxError> { if test_is_enabled() { assert_yaml_snapshot!(responses, @r###" - --- - - data: - entryA: - index: 0 - - data: - entryB: - index: 0 - - errors: - - message: expected error in A - - errors: - - message: expected error in B - - data: - entryA: - index: 2 - - data: - entryB: - index: 2 - "###); + --- + - data: + entryA: + index: 0 + - data: + entryB: + index: 0 + - errors: + - message: expected error in A + path: [] + - errors: + - message: expected error in B + path: [] + - data: + entryA: + index: 2 + - data: + entryB: + index: 2 + "###); } Ok(()) @@ -250,6 +253,7 @@ async fn it_handles_short_timeouts() -> Result<(), BoxError> { index: 0 - errors: - message: Request timed out + path: [] extensions: code: REQUEST_TIMEOUT - data: @@ -257,6 +261,7 @@ async fn it_handles_short_timeouts() -> Result<(), BoxError> { index: 1 - errors: - message: Request timed out + path: [] extensions: code: REQUEST_TIMEOUT "###); @@ -323,14 +328,17 @@ async fn it_handles_indefinite_timeouts() -> Result<(), BoxError> { index: 2 - errors: - message: Request timed out + path: [] extensions: code: REQUEST_TIMEOUT - errors: - message: Request timed out + path: [] extensions: code: REQUEST_TIMEOUT - errors: - message: Request timed out + path: [] extensions: code: REQUEST_TIMEOUT "###); @@ -554,22 +562,24 @@ async fn it_handles_cancelled_by_coprocessor() -> Result<(), BoxError> { if test_is_enabled() { assert_yaml_snapshot!(responses, @r###" - --- - - errors: - - message: Subgraph A is not allowed - extensions: - code: ERR_NOT_ALLOWED - - data: - entryB: - index: 0 - - errors: - - message: Subgraph A is not allowed - extensions: - code: ERR_NOT_ALLOWED - - data: - entryB: - index: 1 - "###); + --- + - errors: + - message: Subgraph A is not allowed + path: [] + extensions: + code: ERR_NOT_ALLOWED + - data: + entryB: + index: 0 + - errors: + - message: Subgraph A is not allowed + path: [] + extensions: + code: ERR_NOT_ALLOWED + - data: + entryB: + index: 1 + "###); } Ok(()) @@ -697,33 +707,34 @@ async fn it_handles_single_request_cancelled_by_coprocessor() -> Result<(), BoxE if test_is_enabled() { assert_yaml_snapshot!(responses, @r###" - --- - - data: - entryA: - index: 0 - - data: - entryB: - index: 0 - - data: - entryA: - index: 1 - - data: - entryB: - index: 1 - - errors: - - message: Subgraph A index 2 is not allowed - extensions: - code: ERR_NOT_ALLOWED - - data: - entryB: - index: 2 - - data: - entryA: - index: 3 - - data: - entryB: - index: 3 - "###); + --- + - data: + entryA: + index: 0 + - data: + entryB: + index: 0 
+ - data: + entryA: + index: 1 + - data: + entryB: + index: 1 + - errors: + - message: Subgraph A index 2 is not allowed + path: [] + extensions: + code: ERR_NOT_ALLOWED + - data: + entryB: + index: 2 + - data: + entryA: + index: 3 + - data: + entryB: + index: 3 + "###); } Ok(()) diff --git a/apollo-router/tests/integration/mod.rs b/apollo-router/tests/integration/mod.rs index 7ab2f50d95..f4c840d9e4 100644 --- a/apollo-router/tests/integration/mod.rs +++ b/apollo-router/tests/integration/mod.rs @@ -8,6 +8,7 @@ mod docs; mod file_upload; mod lifecycle; mod operation_limits; +mod query_planner; mod subgraph_response; mod traffic_shaping; diff --git a/apollo-router/tests/integration/query_planner.rs b/apollo-router/tests/integration/query_planner.rs new file mode 100644 index 0000000000..9c85c99690 --- /dev/null +++ b/apollo-router/tests/integration/query_planner.rs @@ -0,0 +1,466 @@ +use std::path::PathBuf; + +use crate::integration::common::graph_os_enabled; +use crate::integration::IntegrationTest; + +const PROMETHEUS_METRICS_CONFIG: &str = include_str!("telemetry/fixtures/prometheus.router.yaml"); +const LEGACY_QP: &str = "experimental_query_planner_mode: legacy"; +const NEW_QP: &str = "experimental_query_planner_mode: new"; +const BOTH_QP: &str = "experimental_query_planner_mode: both"; +const BOTH_BEST_EFFORT_QP: &str = "experimental_query_planner_mode: both_best_effort"; + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_legacy_qp() { + let mut router = IntegrationTest::builder() + .config(LEGACY_QP) + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_new_qp() { + let mut router = IntegrationTest::builder() + .config(NEW_QP) + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + failed to initialize the query planner: \ + Supergraphs composed with federation version 1 are not supported.", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_both_qp() { + let mut router = IntegrationTest::builder() + .config(BOTH_QP) + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + failed to initialize the query planner: \ + Supergraphs composed with federation version 1 are not supported.", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_both_best_effort_qp() { + let mut router = IntegrationTest::builder() + .config(BOTH_BEST_EFFORT_QP) + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "Falling back to the legacy query planner: \ + failed to initialize the query planner: \ + Supergraphs composed with federation version 1 are not supported. 
\ + Please recompose your supergraph with federation version 2 or greater", + ) + .await; + router.assert_started().await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_legacy_qp_reload_to_new_keep_previous_config() { + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{LEGACY_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{NEW_QP}"); + router.update_config(&config).await; + router + .assert_log_contains("error while reloading, continuing with previous configuration") + .await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_error_kind="fed1",init_is_success="false",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn fed1_schema_with_legacy_qp_reload_to_both_best_effort_keep_previous_config() { + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{LEGACY_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("../examples/graphql/local.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{BOTH_BEST_EFFORT_QP}"); + router.update_config(&config).await; + router + .assert_log_contains( + "Falling back to the legacy query planner: \ + failed to initialize the query planner: \ + Supergraphs composed with federation version 1 are not supported. 
\ + Please recompose your supergraph with federation version 2 or greater", + ) + .await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_error_kind="fed1",init_is_success="false",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn fed2_schema_with_new_qp() { + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{NEW_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("../examples/graphql/supergraph-fed2.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_is_success="true",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn progressive_override_with_legacy_qp() { + if !graph_os_enabled() { + return; + } + let mut router = IntegrationTest::builder() + .config(LEGACY_QP) + .supergraph("src/plugins/progressive_override/testdata/supergraph.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn progressive_override_with_new_qp() { + if !graph_os_enabled() { + return; + } + let mut router = IntegrationTest::builder() + .config(NEW_QP) + .supergraph("src/plugins/progressive_override/testdata/supergraph.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + failed to initialize the query planner: \ + `experimental_query_planner_mode: new` or `both` cannot yet \ + be used with progressive overrides. 
\ + Remove uses of progressive overrides to try the experimental query planner, \ + otherwise switch back to `legacy` or `both_best_effort`.", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn progressive_override_with_legacy_qp_change_to_new_qp_keeps_old_config() { + if !graph_os_enabled() { + return; + } + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{LEGACY_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("src/plugins/progressive_override/testdata/supergraph.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{NEW_QP}"); + router.update_config(&config).await; + router + .assert_log_contains("error while reloading, continuing with previous configuration") + .await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_error_kind="overrides",init_is_success="false",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn progressive_override_with_legacy_qp_reload_to_both_best_effort_keep_previous_config() { + if !graph_os_enabled() { + return; + } + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{LEGACY_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("src/plugins/progressive_override/testdata/supergraph.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{BOTH_BEST_EFFORT_QP}"); + router.update_config(&config).await; + router + .assert_log_contains( + "Falling back to the legacy query planner: \ + failed to initialize the query planner: \ + `experimental_query_planner_mode: new` or `both` cannot yet \ + be used with progressive overrides. \ + Remove uses of progressive overrides to try the experimental query planner, \ + otherwise switch back to `legacy` or `both_best_effort`.", + ) + .await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_error_kind="overrides",init_is_success="false",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn context_with_legacy_qp() { + if !graph_os_enabled() { + return; + } + let mut router = IntegrationTest::builder() + .config(PROMETHEUS_METRICS_CONFIG) + .supergraph("tests/fixtures/set_context/supergraph.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn context_with_new_qp() { + if !graph_os_enabled() { + return; + } + let mut router = IntegrationTest::builder() + .config(NEW_QP) + .supergraph("tests/fixtures/set_context/supergraph.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + failed to initialize the query planner: \ + `experimental_query_planner_mode: new` or `both` cannot yet \ + be used with `@context`. 
\ + Remove uses of `@context` to try the experimental query planner, \ + otherwise switch back to `legacy` or `both_best_effort`.", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn context_with_legacy_qp_change_to_new_qp_keeps_old_config() { + if !graph_os_enabled() { + return; + } + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{LEGACY_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("tests/fixtures/set_context/supergraph.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{NEW_QP}"); + router.update_config(&config).await; + router + .assert_log_contains("error while reloading, continuing with previous configuration") + .await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_error_kind="context",init_is_success="false",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn context_with_legacy_qp_reload_to_both_best_effort_keep_previous_config() { + if !graph_os_enabled() { + return; + } + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{LEGACY_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("tests/fixtures/set_context/supergraph.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router.execute_default_query().await; + + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{BOTH_BEST_EFFORT_QP}"); + router.update_config(&config).await; + router + .assert_log_contains( + "Falling back to the legacy query planner: \ + failed to initialize the query planner: \ + `experimental_query_planner_mode: new` or `both` cannot yet \ + be used with `@context`. 
\ + Remove uses of `@context` to try the experimental query planner, \ + otherwise switch back to `legacy` or `both_best_effort`.", + ) + .await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_error_kind="context",init_is_success="false",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn invalid_schema_with_legacy_qp_fails_startup() { + let mut router = IntegrationTest::builder() + .config(LEGACY_QP) + .supergraph("tests/fixtures/broken-supergraph.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + Federation error: Invalid supergraph: must be a core schema", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn invalid_schema_with_new_qp_fails_startup() { + let mut router = IntegrationTest::builder() + .config(NEW_QP) + .supergraph("tests/fixtures/broken-supergraph.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + Federation error: Invalid supergraph: must be a core schema", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn invalid_schema_with_both_qp_fails_startup() { + let mut router = IntegrationTest::builder() + .config(BOTH_QP) + .supergraph("tests/fixtures/broken-supergraph.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + Federation error: Invalid supergraph: must be a core schema", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn invalid_schema_with_both_best_effort_qp_fails_startup() { + let mut router = IntegrationTest::builder() + .config(BOTH_BEST_EFFORT_QP) + .supergraph("tests/fixtures/broken-supergraph.graphql") + .build() + .await; + router.start().await; + router + .assert_log_contains( + "could not create router: \ + Federation error: Invalid supergraph: must be a core schema", + ) + .await; + router.assert_shutdown().await; +} + +#[tokio::test(flavor = "multi_thread")] +async fn valid_schema_with_new_qp_change_to_broken_schema_keeps_old_config() { + let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{NEW_QP}"); + let mut router = IntegrationTest::builder() + .config(config) + .supergraph("tests/fixtures/valid-supergraph.graphql") + .build() + .await; + router.start().await; + router.assert_started().await; + router + .assert_metrics_contains( + r#"apollo_router_lifecycle_query_planner_init_total{init_is_success="true",otel_scope_name="apollo/router"} 1"#, + None, + ) + .await; + router.execute_default_query().await; + router + .update_schema(&PathBuf::from("tests/fixtures/broken-supergraph.graphql")) + .await; + router + .assert_log_contains("error while reloading, continuing with previous configuration") + .await; + router.execute_default_query().await; + router.graceful_shutdown().await; +} diff --git a/apollo-router/tests/integration/redis.rs b/apollo-router/tests/integration/redis.rs index 110b850857..f95ba37c65 100644 --- a/apollo-router/tests/integration/redis.rs +++ b/apollo-router/tests/integration/redis.rs @@ -26,7 +26,7 @@ async fn query_planner_cache() -> Result<(), BoxError> { // 2. run `docker compose up -d` and connect to the redis container by running `docker-compose exec redis /bin/bash`. // 3. 
Run the `redis-cli` command from the shell and start the redis `monitor` command. // 4. Run this test and yank the updated cache key from the redis logs. - let known_cache_key = "plan:0:v2.8.5:16385ebef77959fcdc520ad507eb1f7f7df28f1d54a0569e3adabcb4cd00d7ce:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:3106dfc3339d8c3f3020434024bff0f566a8be5995199954db5a7525a7d7e67a"; + let known_cache_key = "plan:0:v2.9.0:16385ebef77959fcdc520ad507eb1f7f7df28f1d54a0569e3adabcb4cd00d7ce:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:8ecc6cbc98bab2769e6666a72ba47a4ebd90e6f62256ddcbdc7f352a805e0fe6"; let config = RedisConfig::from_url("redis://127.0.0.1:6379").unwrap(); let client = RedisClient::new(config, None, None, None); @@ -360,13 +360,17 @@ async fn entity_cache() -> Result<(), BoxError> { .configuration_json(json!({ "preview_entity_cache": { "enabled": true, - "redis": { - "urls": ["redis://127.0.0.1:6379"], - "ttl": "2s" + "invalidation": { + "listen": "127.0.0.1:4000", + "path": "/invalidation" }, "subgraph": { "all": { - "enabled": false + "enabled": false, + "redis": { + "urls": ["redis://127.0.0.1:6379"], + "ttl": "2s" + }, }, "subgraphs": { "products": { @@ -407,13 +411,13 @@ async fn entity_cache() -> Result<(), BoxError> { insta::assert_json_snapshot!(response); let s:String = client - .get("version:1.0:subgraph:products:type:Query:hash:0df945dc1bc08f7fc02e8905b4c72aa9112f29bb7a214e4a38d199f0aa635b48:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") + .get("version:1.0:subgraph:products:type:Query:hash:0b4d791a3403d76643db0a9e4a8d304b1cd1f8c4ab68cb58ab7ccdc116a1da1c:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") .await .unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); insta::assert_json_snapshot!(v.as_object().unwrap().get("data").unwrap()); - let s: String = client.get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:1de543dab57fde0f00247922ccc4f76d4c916ae26a89dd83cd1a62300d0cda20:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c").await.unwrap(); + let s: String = client.get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:04c47a3b857394fb0feef5b999adc073b8ab7416e3bc871f54c0b885daae8359:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c").await.unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); insta::assert_json_snapshot!(v.as_object().unwrap().get("data").unwrap()); @@ -470,13 +474,17 @@ async fn entity_cache() -> Result<(), BoxError> { .configuration_json(json!({ "preview_entity_cache": { "enabled": true, - "redis": { - "urls": ["redis://127.0.0.1:6379"], - "ttl": "2s" + "invalidation": { + "listen": "127.0.0.1:4000", + "path": "/invalidation" }, "subgraph": { "all": { "enabled": false, + "redis": { + "urls": ["redis://127.0.0.1:6379"], + "ttl": "2s" + }, }, "subgraphs": { "products": { @@ -517,7 +525,7 @@ async fn entity_cache() -> Result<(), BoxError> { insta::assert_json_snapshot!(response); let s:String = client - .get("version:1.0:subgraph:reviews:type:Product:entity:d9a4cd73308dd13ca136390c10340823f94c335b9da198d2339c886c738abf0d:hash:1de543dab57fde0f00247922ccc4f76d4c916ae26a89dd83cd1a62300d0cda20:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") + 
.get("version:1.0:subgraph:reviews:type:Product:entity:d9a4cd73308dd13ca136390c10340823f94c335b9da198d2339c886c738abf0d:hash:04c47a3b857394fb0feef5b999adc073b8ab7416e3bc871f54c0b885daae8359:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") .await .unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); @@ -673,13 +681,17 @@ async fn entity_cache_authorization() -> Result<(), BoxError> { .configuration_json(json!({ "preview_entity_cache": { "enabled": true, - "redis": { - "urls": ["redis://127.0.0.1:6379"], - "ttl": "2s" + "invalidation": { + "listen": "127.0.0.1:4000", + "path": "/invalidation" }, "subgraph": { "all": { "enabled": false, + "redis": { + "urls": ["redis://127.0.0.1:6379"], + "ttl": "2s" + }, }, "subgraphs": { "products": { @@ -734,7 +746,7 @@ async fn entity_cache_authorization() -> Result<(), BoxError> { insta::assert_json_snapshot!(response); let s:String = client - .get("version:1.0:subgraph:products:type:Query:hash:0df945dc1bc08f7fc02e8905b4c72aa9112f29bb7a214e4a38d199f0aa635b48:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") + .get("version:1.0:subgraph:products:type:Query:hash:0b4d791a3403d76643db0a9e4a8d304b1cd1f8c4ab68cb58ab7ccdc116a1da1c:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") .await .unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); @@ -755,7 +767,7 @@ async fn entity_cache_authorization() -> Result<(), BoxError> { ); let s: String = client - .get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:1de543dab57fde0f00247922ccc4f76d4c916ae26a89dd83cd1a62300d0cda20:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") + .get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:04c47a3b857394fb0feef5b999adc073b8ab7416e3bc871f54c0b885daae8359:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") .await .unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); @@ -799,7 +811,7 @@ async fn entity_cache_authorization() -> Result<(), BoxError> { insta::assert_json_snapshot!(response); let s:String = client - .get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:3b6ef3c8fd34c469d59f513942c5f4c8f91135e828712de2024e2cd4613c50ae:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") + .get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:f7d6d3af2706afe346e3d5fd353e61bd186d2fc64cb7b3c13a62162189519b5f:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") .await .unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); @@ -909,7 +921,7 @@ async fn connection_failure_blocks_startup() { async fn query_planner_redis_update_query_fragments() { test_redis_query_plan_config_update( include_str!("fixtures/query_planner_redis_config_update_query_fragments.router.yaml"), - "plan:0:v2.8.5:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:9054d19854e1d9e282ac7645c612bc70b8a7143d43b73d44dade4a5ec43938b4", + "plan:0:v2.9.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:cda2b4e476fdce9c4c435627b26cedd177cfbe04ab335fc3e3d895c0d79d965e", ) .await; } @@ -928,7 +940,7 @@ async fn 
query_planner_redis_update_planner_mode() { async fn query_planner_redis_update_introspection() { test_redis_query_plan_config_update( include_str!("fixtures/query_planner_redis_config_update_introspection.router.yaml"), - "plan:0:v2.8.5:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:04b3051125b5994fba6b0a22b2d8b4246cadc145be030c491a3431655d2ba07a", + "plan:0:v2.9.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:259dd917e4de09b5469629849b91e8ffdfbed2587041fad68b5963369bb13283", ) .await; } @@ -937,7 +949,7 @@ async fn query_planner_redis_update_introspection() { async fn query_planner_redis_update_defer() { test_redis_query_plan_config_update( include_str!("fixtures/query_planner_redis_config_update_defer.router.yaml"), - "plan:0:v2.8.5:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:3b7241b0db2cd878b79c0810121953ba544543f3cb2692aaf1a59184470747b0", + "plan:0:v2.9.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:e4376fe032160ce16399e520c6e815da6cb5cf4dc94a06175b86b64a9bf80201", ) .await; } @@ -948,7 +960,7 @@ async fn query_planner_redis_update_type_conditional_fetching() { include_str!( "fixtures/query_planner_redis_config_update_type_conditional_fetching.router.yaml" ), - "plan:0:v2.8.5:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:0ca695a8c4c448b65fa04229c663f44150af53b184ebdcbb0ad6862290efed76", + "plan:0:v2.9.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:83d899fcb42d2202c39fc8350289b8247021da00ecf3d844553c190c49410507", ) .await; } @@ -959,7 +971,7 @@ async fn query_planner_redis_update_reuse_query_fragments() { include_str!( "fixtures/query_planner_redis_config_update_reuse_query_fragments.router.yaml" ), - "plan:0:v2.8.5:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:f7c04319556397ec4b550aa5aaa96c73689cee09026b661b6a9fc20b49e6fa77", + "plan:0:v2.9.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:d48f92f892bd67071694c0538a7e657ff8e0c52e1718f475190c17b503e9e8c3", ) .await; } @@ -982,7 +994,7 @@ async fn test_redis_query_plan_config_update(updated_config: &str, new_cache_key router.assert_started().await; router.clear_redis_cache().await; - let starting_key = "plan:0:v2.8.5:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:4a5827854a6d2efc85045f0d5bede402e15958390f1073d2e77df56188338e5a"; + let starting_key = "plan:0:v2.9.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:0966f1528d47cee30b6140a164be16148dd360ee10b87744991e9d35af8e8a27"; router.execute_default_query().await; router.assert_redis_cache_contains(starting_key, None).await; router.update_config(updated_config).await; diff --git a/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__query_planner_cache.snap 
b/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__query_planner_cache.snap index f90305be82..d7330676f2 100644 --- a/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__query_planner_cache.snap +++ b/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__query_planner_cache.snap @@ -13,7 +13,7 @@ expression: query_plan "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "121b9859eba2d8fa6dde0a54b6e3781274cf69f7ffb0af912e92c01c6bfff6ca", + "schemaAwareHash": "d38dcce02eea33b3834447eefedabb09d3b14f3b01ad512e881f9e65137f0565", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_rate_limit-2.snap b/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_rate_limit-2.snap index 584b125252..07df294289 100644 --- a/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_rate_limit-2.snap +++ b/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_rate_limit-2.snap @@ -2,4 +2,4 @@ source: apollo-router/tests/integration/traffic_shaping.rs expression: response --- -"{\"data\":null,\"errors\":[{\"message\":\"Your request has been rate limited\",\"extensions\":{\"code\":\"REQUEST_RATE_LIMITED\"}}]}" +"{\"data\":null,\"errors\":[{\"message\":\"Your request has been rate limited\",\"path\":[],\"extensions\":{\"code\":\"REQUEST_RATE_LIMITED\"}}]}" diff --git a/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_timeout.snap b/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_timeout.snap index 671e207784..407674dfff 100644 --- a/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_timeout.snap +++ b/apollo-router/tests/integration/snapshots/integration_tests__integration__traffic_shaping__subgraph_timeout.snap @@ -1,5 +1,5 @@ --- source: apollo-router/tests/integration/traffic_shaping.rs -expression: response.text().await? 
+expression: response --- -"{\"data\":null,\"errors\":[{\"message\":\"Request timed out\",\"extensions\":{\"code\":\"REQUEST_TIMEOUT\"}}]}" +"{\"data\":null,\"errors\":[{\"message\":\"Request timed out\",\"path\":[],\"extensions\":{\"code\":\"REQUEST_TIMEOUT\"}}]}" diff --git a/apollo-router/tests/integration/subgraph_response.rs b/apollo-router/tests/integration/subgraph_response.rs index 5e6e831d3c..2dd8fc68d6 100644 --- a/apollo-router/tests/integration/subgraph_response.rs +++ b/apollo-router/tests/integration/subgraph_response.rs @@ -118,6 +118,7 @@ async fn test_invalid_error_locations() -> Result<(), BoxError> { "data": null, "errors": [{ "message":"service 'products' response was malformed: invalid `locations` within error: invalid type: boolean `true`, expected u32", + "path": [], "extensions": { "service": "products", "reason": "invalid `locations` within error: invalid type: boolean `true`, expected u32", diff --git a/apollo-router/tests/integration/telemetry/fixtures/json.uuid.router.yaml b/apollo-router/tests/integration/telemetry/fixtures/json.uuid.router.yaml new file mode 100644 index 0000000000..7b9a97af99 --- /dev/null +++ b/apollo-router/tests/integration/telemetry/fixtures/json.uuid.router.yaml @@ -0,0 +1,30 @@ +telemetry: + instrumentation: + spans: + mode: spec_compliant + events: + router: + # Standard events + request: info + response: info + error: info + exporters: + tracing: + propagation: + trace_context: true + jaeger: true + jaeger: + enabled: true + batch_processor: + scheduled_delay: 100ms + agent: + endpoint: default + logging: + experimental_when_header: + - name: content-type + value: "application/json" + body: true + stdout: + format: + json: + display_trace_id: uuid diff --git a/apollo-router/tests/integration/telemetry/fixtures/text.uuid.router.yaml b/apollo-router/tests/integration/telemetry/fixtures/text.uuid.router.yaml new file mode 100644 index 0000000000..13b6084b49 --- /dev/null +++ b/apollo-router/tests/integration/telemetry/fixtures/text.uuid.router.yaml @@ -0,0 +1,31 @@ +telemetry: + instrumentation: + spans: + mode: spec_compliant + events: + router: + # Standard events + request: info + response: info + error: info + exporters: + tracing: + propagation: + trace_context: true + jaeger: true + jaeger: + enabled: true + batch_processor: + scheduled_delay: 100ms + agent: + endpoint: default + logging: + experimental_when_header: + - name: content-type + value: "application/json" + body: true + stdout: + format: + text: + display_trace_id: uuid + display_span_id: true diff --git a/apollo-router/tests/integration/telemetry/fixtures/trace_id_via_header.router.yaml b/apollo-router/tests/integration/telemetry/fixtures/trace_id_via_header.router.yaml new file mode 100644 index 0000000000..a213522b36 --- /dev/null +++ b/apollo-router/tests/integration/telemetry/fixtures/trace_id_via_header.router.yaml @@ -0,0 +1,33 @@ +telemetry: + + instrumentation: + + spans: + + mode: spec_compliant + router: + attributes: + # This should match the trace ID in the request + id_from_header: + trace_id: open_telemetry + events: + router: + # Standard events + request: info + + apollo: + field_level_instrumentation_sampler: always_off + exporters: + tracing: + propagation: + request: + header_name: "id_from_header" + logging: + stdout: + format: + text: + display_trace_id: true + display_span_id: true + ansi_escape_codes: false + display_current_span: true + diff --git a/apollo-router/tests/integration/telemetry/logging.rs 
b/apollo-router/tests/integration/telemetry/logging.rs index c0d8998f51..74cecef1c5 100644 --- a/apollo-router/tests/integration/telemetry/logging.rs +++ b/apollo-router/tests/integration/telemetry/logging.rs @@ -1,5 +1,6 @@ use serde_json::json; use tower::BoxError; +use uuid::Uuid; use crate::integration::common::graph_os_enabled; use crate::integration::common::IntegrationTest; @@ -8,6 +9,7 @@ use crate::integration::common::Telemetry; #[tokio::test(flavor = "multi_thread")] async fn test_json() -> Result<(), BoxError> { if !graph_os_enabled() { + eprintln!("test skipped"); return Ok(()); } @@ -34,6 +36,66 @@ async fn test_json() -> Result<(), BoxError> { Ok(()) } +#[tokio::test(flavor = "multi_thread")] +async fn test_json_uuid_format() -> Result<(), BoxError> { + if !graph_os_enabled() { + eprintln!("test skipped"); + return Ok(()); + } + + let mut router = IntegrationTest::builder() + .telemetry(Telemetry::Jaeger) + .config(include_str!("fixtures/json.uuid.router.yaml")) + .build() + .await; + + router.start().await; + router.assert_started().await; + + let query = json!({"query":"query ExampleQuery {topProducts{name}}","variables":{}}); + router.execute_query(&query).await; + router.assert_log_contains("trace_id").await; + let (trace_id, _) = router.execute_query(&query).await; + router + .assert_log_contains(&format!("{}", Uuid::from_bytes(trace_id.to_bytes()))) + .await; + router.execute_query(&query).await; + router.assert_log_contains("span_id").await; + router.graceful_shutdown().await; + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_text_uuid_format() -> Result<(), BoxError> { + if !graph_os_enabled() { + eprintln!("test skipped"); + return Ok(()); + } + + let mut router = IntegrationTest::builder() + .telemetry(Telemetry::Jaeger) + .config(include_str!("fixtures/text.uuid.router.yaml")) + .build() + .await; + + router.start().await; + router.assert_started().await; + + let query = json!({"query":"query ExampleQuery {topProducts{name}}","variables":{}}); + router.execute_query(&query).await; + router.assert_log_contains("trace_id").await; + let (trace_id, _) = router.execute_query(&query).await; + router + .assert_log_contains(&format!("{}", Uuid::from_bytes(trace_id.to_bytes()))) + .await; + router.execute_query(&query).await; + router.assert_log_contains("span_id").await; + router.graceful_shutdown().await; + + Ok(()) +} + #[tokio::test(flavor = "multi_thread")] async fn test_json_sampler_off() -> Result<(), BoxError> { if !graph_os_enabled() { @@ -66,6 +128,7 @@ async fn test_json_sampler_off() -> Result<(), BoxError> { #[tokio::test(flavor = "multi_thread")] async fn test_text() -> Result<(), BoxError> { if !graph_os_enabled() { + eprintln!("test skipped"); return Ok(()); } @@ -93,6 +156,7 @@ async fn test_text() -> Result<(), BoxError> { #[tokio::test(flavor = "multi_thread")] async fn test_text_sampler_off() -> Result<(), BoxError> { if !graph_os_enabled() { + eprintln!("test skipped"); return Ok(()); } diff --git a/apollo-router/tests/integration/telemetry/mod.rs b/apollo-router/tests/integration/telemetry/mod.rs index 0a31187c58..8df0a1d753 100644 --- a/apollo-router/tests/integration/telemetry/mod.rs +++ b/apollo-router/tests/integration/telemetry/mod.rs @@ -5,5 +5,6 @@ mod jaeger; mod logging; mod metrics; mod otlp; +mod propagation; #[cfg(any(not(feature = "ci"), all(target_arch = "x86_64", target_os = "linux")))] mod zipkin; diff --git a/apollo-router/tests/integration/telemetry/propagation.rs 
b/apollo-router/tests/integration/telemetry/propagation.rs new file mode 100644 index 0000000000..e458f1986c --- /dev/null +++ b/apollo-router/tests/integration/telemetry/propagation.rs @@ -0,0 +1,40 @@ +use serde_json::json; +use tower::BoxError; + +use crate::integration::common::graph_os_enabled; +use crate::integration::common::IntegrationTest; +use crate::integration::common::Telemetry; + +#[tokio::test(flavor = "multi_thread")] +async fn test_trace_id_via_header() -> Result<(), BoxError> { + if !graph_os_enabled() { + eprintln!("test skipped"); + return Ok(()); + } + async fn make_call(router: &mut IntegrationTest, trace_id: &str) { + let _ = router.execute_query_with_headers(&json!({"query":"query {topProducts{name, name, name, name, name, name, name, name, name, name}}","variables":{}}), + [("id_from_header".to_string(), trace_id.to_string())].into()).await; + } + + let mut router = IntegrationTest::builder() + .telemetry(Telemetry::None) + .config(include_str!("fixtures/trace_id_via_header.router.yaml")) + .build() + .await; + + let trace_id = "00000000000000000000000000000001"; + router.start().await; + router.assert_started().await; + make_call(&mut router, trace_id).await; + router + .assert_log_contains(&format!("trace_id: {}", trace_id)) + .await; + + make_call(&mut router, trace_id).await; + router + .assert_log_contains(&format!("\"id_from_header\": \"{}\"", trace_id)) + .await; + + router.graceful_shutdown().await; + Ok(()) +} diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/configuration.yaml b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/configuration.yaml index b297fee443..e283bbdace 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/configuration.yaml +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/configuration.yaml @@ -12,6 +12,12 @@ preview_entity_cache: all: enabled: true subgraphs: - reviews: + invalidation-entity-key-reviews: ttl: 120s - enabled: true \ No newline at end of file + enabled: true + +telemetry: + exporters: + logging: + stdout: + format: text \ No newline at end of file diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/skipped.json b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/plan.json similarity index 72% rename from apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/skipped.json rename to apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/plan.json index b505259570..1bb1bc0210 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/skipped.json +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/plan.json @@ -7,11 +7,14 @@ "schema_path": "./supergraph.graphql", "configuration_path": "./configuration.yaml", "subgraphs": { - "products": { + "invalidation-entity-key-products": { "requests": [ { "request": { - "body": {"query":"{topProducts{__typename upc}}"} + "body": { + "query":"query InvalidationEntityKey__invalidation_entity_key_products__0{topProducts{__typename upc}}", + "operationName": "InvalidationEntityKey__invalidation_entity_key_products__0" + } }, "response": { "headers": { @@ -23,12 +26,13 @@ } ] }, - "reviews": { + "invalidation-entity-key-reviews": { "requests": [ { "request": { "body": { - "query":"query($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{body}}}}", + 
"query":"query InvalidationEntityKey__invalidation_entity_key_reviews__1($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{body}}}}", + "operationName": "InvalidationEntityKey__invalidation_entity_key_reviews__1", "variables":{"representations":[{"upc":"0","__typename":"Product"},{"upc":"1","__typename":"Product"}]} } }, @@ -59,7 +63,7 @@ { "type": "Request", "request": { - "query": "{ topProducts { reviews { body } } }" + "query": "query InvalidationEntityKey { topProducts { reviews { body } } }" }, "expected_response": { "data":{ @@ -81,11 +85,13 @@ { "type": "ReloadSubgraphs", "subgraphs": { - "reviews": { + "invalidation-entity-key-reviews": { "requests": [ { "request": { - "body": {"query":"mutation{invalidateProductReview}"} + "body": { + "query":"mutation InvalidationEntityKey__invalidation_entity_key_reviews__0{invalidateProductReview}" + } }, "response": { "headers": { @@ -96,7 +102,7 @@ "extensions": { "invalidation": [{ "kind": "entity", - "subgraph": "reviews", + "subgraph": "invalidation-entity-key-reviews", "type": "Product", "key": { "upc": "1" @@ -109,7 +115,7 @@ { "request": { "body": { - "query":"query($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{body}}}}", + "query":"query InvalidationEntityKey__invalidation_entity_key_reviews__1($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{body}}}}", "variables":{"representations":[{"upc":"1","__typename":"Product"}]} } }, @@ -129,7 +135,7 @@ { "type": "Request", "request": { - "query": "{ topProducts { reviews { body } } }" + "query": "query InvalidationEntityKey { topProducts { reviews { body } } }" }, "expected_response": { "data":{ @@ -151,7 +157,7 @@ { "type": "Request", "request": { - "query": "mutation { invalidateProductReview }" + "query": "mutation InvalidationEntityKey { invalidateProductReview }" }, "expected_response": { "data":{ @@ -162,20 +168,28 @@ { "type": "Request", "request": { - "query": "{ topProducts { reviews { body } } }" + "query": "query InvalidationEntityKey { topProducts { reviews { body } } }" }, "expected_response":{ "data":{ - "topProducts":[{"reviews":null},{"reviews":null}] + "topProducts":[ + {"reviews": [{ + "body": "A" + },{ + "body": "B" + }]}, + {"reviews":null}] }, "errors":[ { - "message":"HTTP fetch failed from 'reviews': 500: Internal Server Error", - "extensions":{"code":"SUBREQUEST_HTTP_ERROR","service":"reviews","reason":"500: Internal Server Error","http":{"status":500}} + "message":"HTTP fetch failed from 'invalidation-entity-key-reviews': 500: Internal Server Error", + "path": ["topProducts", 1], + "extensions":{"code":"SUBREQUEST_HTTP_ERROR","service":"invalidation-entity-key-reviews","reason":"500: Internal Server Error","http":{"status":500}} }, { - "message":"service 'reviews' response was malformed: {}", - "extensions":{"service":"reviews","reason":"{}","code":"SUBREQUEST_MALFORMED_RESPONSE"} + "message":"service 'invalidation-entity-key-reviews' response was malformed: {}", + "path": ["topProducts", 1], + "extensions":{"service":"invalidation-entity-key-reviews","reason":"{}","code":"SUBREQUEST_MALFORMED_RESPONSE"} } ] } @@ -183,12 +197,12 @@ { "type": "ReloadSubgraphs", "subgraphs": { - "reviews": { + "invalidation-entity-key-reviews": { "requests": [ { "request": { "body": { - "query":"query($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{body}}}}", + "query":"query 
InvalidationEntityKey__invalidation_entity_key_reviews__1($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{body}}}}", "variables":{"representations":[{"upc":"1","__typename":"Product"}]} } }, @@ -213,7 +227,7 @@ { "type": "Request", "request": { - "query": "{ topProducts { reviews { body } } }" + "query": "query InvalidationEntityKey { topProducts { reviews { body } } }" }, "expected_response": { "data":{ diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/supergraph.graphql b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/supergraph.graphql index 8f4b1aa05b..630e59c38b 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/supergraph.graphql +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/supergraph.graphql @@ -37,10 +37,10 @@ enum core__Purpose { scalar join__FieldSet enum join__Graph { - ACCOUNTS @join__graph(name: "accounts", url: "https://accounts.demo.starstuff.dev") - INVENTORY @join__graph(name: "inventory", url: "https://inventory.demo.starstuff.dev") - PRODUCTS @join__graph(name: "products", url: "https://products.demo.starstuff.dev") - REVIEWS @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev") + ACCOUNTS @join__graph(name: "invalidation-entity-key-accounts", url: "https://accounts.demo.starstuff.dev") + INVENTORY @join__graph(name: "invalidation-entity-key-inventory", url: "https://inventory.demo.starstuff.dev") + PRODUCTS @join__graph(name: "invalidation-entity-key-products", url: "https://products.demo.starstuff.dev") + REVIEWS @join__graph(name: "invalidation-entity-key-reviews", url: "https://reviews.demo.starstuff.dev") } type Mutation { updateMyAccount: User @join__field(graph: ACCOUNTS) diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/README.md b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/README.md similarity index 100% rename from apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/README.md rename to apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/README.md diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/configuration.yaml b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/configuration.yaml similarity index 63% rename from apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/configuration.yaml rename to apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/configuration.yaml index b297fee443..85e106df9f 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/configuration.yaml +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/configuration.yaml @@ -5,6 +5,9 @@ include_subgraph_errors: preview_entity_cache: enabled: true + invalidation: + listen: 127.0.0.1:4000 + path: /invalidation redis: urls: ["redis://localhost:6379",] @@ -14,4 +17,10 @@ preview_entity_cache: subgraphs: reviews: ttl: 120s - enabled: true \ No newline at end of file + enabled: true + +telemetry: + exporters: + logging: + stdout: + format: text \ No newline at end of file diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/skipped.json b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/plan.json similarity index 77% rename from 
apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/skipped.json rename to apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/plan.json index cadc7ac809..9bbbd1d90c 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/skipped.json +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/plan.json @@ -7,11 +7,11 @@ "schema_path": "./supergraph.graphql", "configuration_path": "./configuration.yaml", "subgraphs": { - "accounts": { + "invalidation-subgraph-name-accounts": { "requests": [ { "request": { - "body": {"query":"{me{name}}"} + "body": {"query":"query InvalidationSubgraphName__invalidation_subgraph_name_accounts__0{me{name}}"} }, "response": { "headers": { @@ -28,7 +28,7 @@ { "type": "Request", "request": { - "query": "{ me { name } }" + "query": "query InvalidationSubgraphName { me { name } }" }, "expected_response": { "data":{ @@ -41,11 +41,11 @@ { "type": "ReloadSubgraphs", "subgraphs": { - "accounts": { + "invalidation-subgraph-name-accounts": { "requests": [ { "request": { - "body": {"query":"mutation{updateMyAccount{name}}"} + "body": {"query":"mutation InvalidationSubgraphName__invalidation_subgraph_name_accounts__0{updateMyAccount{name}}"} }, "response": { "headers": { @@ -56,7 +56,7 @@ "extensions": { "invalidation": [{ "kind": "subgraph", - "subgraph": "accounts" + "subgraph": "invalidation-subgraph-name-accounts" }] } } @@ -69,7 +69,7 @@ { "type": "Request", "request": { - "query": "{ me { name } }" + "query": "query InvalidationSubgraphName { me { name } }" }, "expected_response": { "data":{ @@ -82,7 +82,7 @@ { "type": "Request", "request": { - "query": "mutation { updateMyAccount { name } }" + "query": "mutation InvalidationSubgraphName { updateMyAccount { name } }" }, "expected_response": { "data":{ @@ -95,15 +95,15 @@ { "type": "ReloadSubgraphs", "subgraphs": { - "accounts": { + "invalidation-subgraph-name-accounts": { "requests": [ { "request": { - "body": {"query":"{me{name}}"} + "body": {"query":"query InvalidationSubgraphName__invalidation_subgraph_name_accounts__0{me{name}}"} }, "response": { "headers": { - "Cache-Control": "public, max-age=10", + "Cache-Control": "no-store, max-age=0", "Content-Type": "application/json" }, "body": {"data": { "me": { "name": "invalidation-subgraph2" } } } @@ -116,7 +116,7 @@ { "type": "Request", "request": { - "query": "{ me { name } }" + "query": "query InvalidationSubgraphName{ me { name } }" }, "expected_response": { "data":{ diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/supergraph.graphql b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/supergraph.graphql new file mode 100644 index 0000000000..c8184433b1 --- /dev/null +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/supergraph.graphql @@ -0,0 +1,91 @@ + +schema + @core(feature: "https://specs.apollo.dev/core/v0.2"), + @core(feature: "https://specs.apollo.dev/join/v0.1", for: EXECUTION) + @core(feature: "https://specs.apollo.dev/inaccessible/v0.1", for: SECURITY) +{ + query: Query + mutation: Mutation +} + +directive @core(as: String, feature: String!, for: core__Purpose) repeatable on SCHEMA + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet) on FIELD_DEFINITION + +directive @join__type(graph: join__Graph!, key: join__FieldSet) repeatable on OBJECT | INTERFACE + +directive @join__owner(graph: join__Graph!) 
on OBJECT | INTERFACE + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @tag(name: String!) repeatable on FIELD_DEFINITION | INTERFACE | OBJECT | UNION + +directive @inaccessible on OBJECT | FIELD_DEFINITION | INTERFACE | UNION + +enum core__Purpose { + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION + + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY +} + +scalar join__FieldSet + +enum join__Graph { + ACCOUNTS @join__graph(name: "invalidation-subgraph-name-accounts", url: "https://accounts.demo.starstuff.dev") + INVENTORY @join__graph(name: "inventory", url: "https://inventory.demo.starstuff.dev") + PRODUCTS @join__graph(name: "products", url: "https://products.demo.starstuff.dev") + REVIEWS @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev") +} +type Mutation { + updateMyAccount: User @join__field(graph: ACCOUNTS) + createProduct(name: String, upc: ID!): Product @join__field(graph: PRODUCTS) + createReview(body: String, id: ID!, upc: ID!): Review @join__field(graph: REVIEWS) +} + +type Product + @join__owner(graph: PRODUCTS) + @join__type(graph: PRODUCTS, key: "upc") + @join__type(graph: INVENTORY, key: "upc") + @join__type(graph: REVIEWS, key: "upc") +{ + inStock: Boolean @join__field(graph: INVENTORY) @tag(name: "private") @inaccessible + name: String @join__field(graph: PRODUCTS) + price: Int @join__field(graph: PRODUCTS) + reviews: [Review] @join__field(graph: REVIEWS) + reviewsForAuthor(authorID: ID!): [Review] @join__field(graph: REVIEWS) + shippingEstimate: Int @join__field(graph: INVENTORY, requires: "price weight") + upc: String! @join__field(graph: PRODUCTS) + weight: Int @join__field(graph: PRODUCTS) +} + +type Query { + me: User @join__field(graph: ACCOUNTS) + topProducts(first: Int = 5): [Product] @join__field(graph: PRODUCTS) +} + +type Review + @join__owner(graph: REVIEWS) + @join__type(graph: REVIEWS, key: "id") +{ + author: User @join__field(graph: REVIEWS, provides: "username") + body: String @join__field(graph: REVIEWS) + id: ID! @join__field(graph: REVIEWS) + product: Product @join__field(graph: REVIEWS) +} + +type User + @join__owner(graph: ACCOUNTS) + @join__type(graph: ACCOUNTS, key: "id") + @join__type(graph: REVIEWS, key: "id") +{ + id: ID! 
@join__field(graph: ACCOUNTS) + name: String @join__field(graph: ACCOUNTS) + reviews: [Review] @join__field(graph: REVIEWS) + username: String @join__field(graph: ACCOUNTS) +} diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/configuration.yaml b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/configuration.yaml index b297fee443..96577bbb28 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/configuration.yaml +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/configuration.yaml @@ -8,10 +8,23 @@ preview_entity_cache: redis: urls: ["redis://localhost:6379",] + invalidation: + # FIXME: right now we cannot configure it to use the same port used for the GraphQL endpoint if it is chosen at random + listen: 127.0.0.1:12345 + path: /invalidation-sample-subgraph-type subgraph: all: enabled: true + invalidation: + enabled: true + shared_key: "1234" subgraphs: reviews: ttl: 120s - enabled: true \ No newline at end of file + enabled: true + +telemetry: + exporters: + logging: + stdout: + format: text \ No newline at end of file diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/skipped.json b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/plan.json similarity index 65% rename from apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/skipped.json rename to apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/plan.json index f6996f21b8..72e39a7b80 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/skipped.json +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/plan.json @@ -7,11 +7,11 @@ "schema_path": "./supergraph.graphql", "configuration_path": "./configuration.yaml", "subgraphs": { - "accounts": { + "invalidation-subgraph-type-accounts": { "requests": [ { "request": { - "body": {"query":"query InvalidationSubgraphType__accounts__0{me{name id}}","operationName":"InvalidationSubgraphType__accounts__0"} + "body": {"query":"query InvalidationSubgraphType__invalidation_subgraph_type_accounts__0{me{name id}}","operationName":"InvalidationSubgraphType__invalidation_subgraph_type_accounts__0"} }, "response": { "headers": { @@ -42,29 +42,8 @@ { "type": "ReloadSubgraphs", "subgraphs": { - "accounts": { - "requests": [ - { - "request": { - "body": {"query":"mutation{updateMyAccount{name}}"} - }, - "response": { - "headers": { - "Content-Type": "application/json" - }, - "body": { - "data": { "updateMyAccount": { "name": "invalidation-subgraph-type2" } }, - "extensions": { - "invalidation": [{ - "kind": "type", - "subgraph": "accounts", - "type": "Query" - }] - } - } - } - } - ] + "invalidation-subgraph-type-accounts": { + "requests": [] } } }, @@ -83,26 +62,28 @@ } }, { - "type": "Request", + "type": "EndpointRequest", + "url": "http://127.0.0.1:12345/invalidation-sample-subgraph-type", "request": { - "query": "mutation { updateMyAccount { name } }" - }, - "expected_response": { - "data":{ - "updateMyAccount":{ - "name":"invalidation-subgraph-type2" - } - } + "method": "POST", + "headers": { + "Authorization": "1234" + }, + "body": [{ + "kind": "type", + "subgraph": "invalidation-subgraph-type-accounts", + "type": "Query" + }] } }, { "type": "ReloadSubgraphs", "subgraphs": { - "accounts": { + "invalidation-subgraph-type-accounts": { "requests": [ { "request": { - 
"body": {"query":"query InvalidationSubgraphType__accounts__0{me{name id}}", "operationName":"InvalidationSubgraphType__accounts__0"} + "body": {"query":"query InvalidationSubgraphType__invalidation_subgraph_type_accounts__0{me{name id}}", "operationName":"InvalidationSubgraphType__invalidation_subgraph_type_accounts__0"} }, "response": { "headers": { @@ -130,6 +111,21 @@ } } }, + { + "type": "EndpointRequest", + "url": "http://127.0.0.1:12345/invalidation-sample-subgraph-type", + "request": { + "method": "POST", + "headers": { + "Authorization": "1234" + }, + "body": [{ + "kind": "type", + "subgraph": "invalidation-subgraph-type-accounts", + "type": "Query" + }] + } + }, { "type": "Stop" } diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/supergraph.graphql b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/supergraph.graphql index 1196414b6f..a9554a070d 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/supergraph.graphql +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/supergraph.graphql @@ -37,7 +37,7 @@ enum core__Purpose { scalar join__FieldSet enum join__Graph { - ACCOUNTS @join__graph(name: "accounts", url: "https://accounts.demo.starstuff.dev") + ACCOUNTS @join__graph(name: "invalidation-subgraph-type-accounts", url: "https://accounts.demo.starstuff.dev") INVENTORY @join__graph(name: "inventory", url: "https://inventory.demo.starstuff.dev") PRODUCTS @join__graph(name: "products", url: "https://products.demo.starstuff.dev") REVIEWS @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev") diff --git a/apollo-router/tests/samples/enterprise/entity-cache/private/README.md b/apollo-router/tests/samples/enterprise/entity-cache/private/README.md new file mode 100644 index 0000000000..5e9504f9bb --- /dev/null +++ b/apollo-router/tests/samples/enterprise/entity-cache/private/README.md @@ -0,0 +1,3 @@ +# Entity cache: private data caching + +This tests private data caching in the entity cache: diff --git a/apollo-router/tests/samples/enterprise/entity-cache/private/configuration.yaml b/apollo-router/tests/samples/enterprise/entity-cache/private/configuration.yaml new file mode 100644 index 0000000000..65dd9ebad1 --- /dev/null +++ b/apollo-router/tests/samples/enterprise/entity-cache/private/configuration.yaml @@ -0,0 +1,21 @@ +override_subgraph_url: + products: http://localhost:4005 +include_subgraph_errors: + all: true + +rhai: + scripts: "tests/samples/enterprise/entity-cache/private" + main: "private.rhai" + +preview_entity_cache: + enabled: true + redis: + urls: + ["redis://localhost:6379",] + subgraph: + all: + enabled: true + ttl: 10s + subgraphs: + accounts: + private_id: "user" \ No newline at end of file diff --git a/apollo-router/tests/samples/enterprise/entity-cache/private/plan.json b/apollo-router/tests/samples/enterprise/entity-cache/private/plan.json new file mode 100644 index 0000000000..b466291766 --- /dev/null +++ b/apollo-router/tests/samples/enterprise/entity-cache/private/plan.json @@ -0,0 +1,125 @@ +{ + "enterprise": true, + "redis": true, + "actions": [ + { + "type": "Start", + "schema_path": "./supergraph.graphql", + "configuration_path": "./configuration.yaml", + "subgraphs": { + "accounts": { + "requests": [ + { + "request": { + "body": {"query":"query private__accounts__0{me{name}}"} + }, + "response": { + "headers": { + "Cache-Control": "private, max-age=10", + "Content-Type": "application/json" + }, 
+ "body": {"data": { "me": { "name": "test" } } } + } + } + ] + } + } + }, + { + "type": "Request", + "request": { + "query": "query private { me { name } }" + }, + "headers": { + "x-user": "1" + }, + "expected_response": { + "data":{ + "me":{ + "name":"test" + } + } + } + }, + { + "type": "ReloadSubgraphs", + "subgraphs": { + "accounts": { + "requests": [ + { + "request": { + "body": {"query":"query private__accounts__0{me{name}}"} + }, + "response": { + "headers": { + "Cache-Control": "private, max-age=10", + "Content-Type": "application/json" + }, + "body": {"data": { "me": { "name": "test2" } } } + } + } + ] + } + } + }, + { + "type": "Request", + "request": { + "query": "query private { me { name } }" + }, + "headers": { + "x-user": "2" + }, + "expected_response": { + "data":{ + "me":{ + "name":"test2" + } + } + } + }, + { + "type": "ReloadSubgraphs", + "subgraphs": { + "accounts": { + "requests": [] + } + } + }, + { + "type": "Request", + "request": { + "query": "query private { me { name } }" + }, + "headers": { + "x-user": "1" + }, + "expected_response": { + "data":{ + "me":{ + "name":"test" + } + } + } + }, + { + "type": "Request", + "request": { + "query": "query private { me { name } }" + }, + "headers": { + "x-user": "2" + }, + "expected_response": { + "data":{ + "me":{ + "name":"test2" + } + } + } + }, + { + "type": "Stop" + } + ] +} \ No newline at end of file diff --git a/apollo-router/tests/samples/enterprise/entity-cache/private/private.rhai b/apollo-router/tests/samples/enterprise/entity-cache/private/private.rhai new file mode 100644 index 0000000000..d45c355969 --- /dev/null +++ b/apollo-router/tests/samples/enterprise/entity-cache/private/private.rhai @@ -0,0 +1,21 @@ +fn supergraph_service(service) { + const request_callback = Fn("process_request"); + service.map_request(request_callback); +} + +// This will convert all cookie pairs into headers. +// If you only wish to convert certain cookies, you +// can add logic to modify the processing. 
+fn process_request(request) { + + print(`headers: ${request.headers}`); + // Look for the x-user header that identifies the user + if "x-user" in request.headers { + let user = request.headers["x-user"]; + print(`found user ${user}`); + + request.context["user"] = user; + } else { + print("no user found"); + } +} diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/supergraph.graphql b/apollo-router/tests/samples/enterprise/entity-cache/private/supergraph.graphql similarity index 100% rename from apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph/supergraph.graphql rename to apollo-router/tests/samples/enterprise/entity-cache/private/supergraph.graphql diff --git a/apollo-router/tests/samples/enterprise/query-planning-redis/configuration.yaml b/apollo-router/tests/samples/enterprise/query-planning-redis/configuration.yaml index 97518c7956..b00c6e7d58 100644 --- a/apollo-router/tests/samples/enterprise/query-planning-redis/configuration.yaml +++ b/apollo-router/tests/samples/enterprise/query-planning-redis/configuration.yaml @@ -8,3 +8,9 @@ supergraph: cache: redis: urls: ["redis://localhost:6379",] + +telemetry: + exporters: + logging: + stdout: + format: text \ No newline at end of file diff --git a/apollo-router/tests/samples_tests.rs b/apollo-router/tests/samples_tests.rs index 4507089a66..e3fd0d5264 100644 --- a/apollo-router/tests/samples_tests.rs +++ b/apollo-router/tests/samples_tests.rs @@ -165,17 +165,22 @@ impl TestExecution { Action::Request { request, query_path, + headers, expected_response, } => { self.request( request.clone(), query_path.as_deref(), + headers, expected_response, path, out, ) .await } + Action::EndpointRequest { url, request } => { + self.endpoint_request(url, request.clone(), out).await + } Action::Stop => self.stop(out).await, } } @@ -407,6 +412,7 @@ impl TestExecution { &mut self, mut request: Value, query_path: Option<&str>, + headers: &HashMap<String, String>, expected_response: &Value, path: &Path, out: &mut String, @@ -431,7 +437,9 @@ impl TestExecution { } writeln!(out, "query: {}\n", serde_json::to_string(&request).unwrap()).unwrap(); - let (_, response) = router.execute_query(&request).await; + let (_, response) = router + .execute_query_with_headers(&request, headers.clone()) + .await; let body = response.bytes().await.map_err(|e| { writeln!(out, "could not get graphql response data: {e}").unwrap(); let f: Failed = out.clone().into(); @@ -464,13 +472,13 @@ writeln!(out, "assertion `left == right` failed").unwrap(); writeln!( out, - " left: {}", + "expected: {}", serde_json::to_string(&expected_response).unwrap() ) .unwrap(); writeln!( out, - "right: {}", + "received: {}", serde_json::to_string(&graphql_response).unwrap() ) .unwrap(); @@ -479,6 +487,43 @@ Ok(()) } + + async fn endpoint_request( + &mut self, + url: &url::Url, + request: HttpRequest, + out: &mut String, + ) -> Result<(), Failed> { + let client = reqwest::Client::new(); + + let mut builder = client.request( + request + .method + .as_deref() + .unwrap_or("POST") + .try_into() + .unwrap(), + url.clone(), + ); + for (name, value) in request.headers { + builder = builder.header(name, value); + } + + let request = builder.json(&request.body).build().unwrap(); + let response = client.execute(request).await.map_err(|e| { + writeln!( + out, + "could not send request to Router endpoint at {url}: {e}" + ) + .unwrap(); + let f: Failed = out.clone().into(); + f + })?; + + writeln!(out, "Endpoint returned: {response:?}").unwrap(); + + Ok(()) + } } fn open_file(path: &Path, out: &mut String) -> Result<String, Failed> { @@ -535,8 +580,14 @@ enum Action { Request { request: Value, query_path: Option<String>, + #[serde(default)] + headers: HashMap<String, String>, expected_response: Value, }, + EndpointRequest { + url: url::Url, + request: HttpRequest, + }, Stop, } @@ -547,12 +598,12 @@ struct Subgraph { #[derive(Clone, Debug, Deserialize)] struct SubgraphRequestMock { - request: SubgraphRequest, - response: SubgraphResponse, + request: HttpRequest, + response: HttpResponse, } #[derive(Clone, Debug, Deserialize)] -struct SubgraphRequest { +struct HttpRequest { method: Option<String>, path: Option<String>, #[serde(default)] @@ -561,7 +612,7 @@ } #[derive(Clone, Debug, Deserialize)] -struct SubgraphResponse { +struct HttpResponse { status: Option<u16>, #[serde(default)] headers: HashMap<String, String>, diff --git a/apollo-router/tests/snapshots/set_context__set_context.snap b/apollo-router/tests/snapshots/set_context__set_context.snap index 2e11680753..18bfcbfcc9 100644 --- a/apollo-router/tests/snapshots/set_context__set_context.snap +++ b/apollo-router/tests/snapshots/set_context__set_context.snap @@ -34,7 +34,7 @@ expression: response "operationKind": "query", "operationName": "Query__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "d7cb2d1809789d49360ca0a60570555f83855f00547675f366915c9d9d90fef9", + "schemaAwareHash": "0163c552923b61fbde6dbcd879ffc2bb887175dc41bbf75a272875524e664e8d", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -80,7 +80,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "66b954f39aead8436321c671eb71e56ce15bbe0c7b82f06b2f8f70473ce1cb6e", + "schemaAwareHash": "e64d79913c52a4a8b95bfae44986487a1ac73118f27df3b602972a5cbb1f360a", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure.snap b/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure.snap index 703d8f9c59..099d36a7cb 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure.snap @@ -25,7 +25,7 @@ expression: response "operationKind": "query", "operationName": "Query_fetch_dependent_failure__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "595c36c322602fefc4658fc0070973b51800c2d2debafae5571a7c9811d80745", + "schemaAwareHash": "6bcaa7a2d52a416d5278eaef6be102427f328b6916075f193c87459516a7fb6d", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -71,7 +71,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "37bef7ad43bb477cdec4dfc02446bd2e11a6919dc14ab90e266af85fefde4abd", + "schemaAwareHash": "0e56752501c8cbf53429c5aa2df95765ea2c7cba95db9213ce42918699232651", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_list.snap b/apollo-router/tests/snapshots/set_context__set_context_list.snap index 095326167e..d6dd312f0a 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_list.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_list.snap @@ -40,7 +40,7 @@ expression: response "operationKind": "query", "operationName": "Query__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "4f746b9319e3ca4f234269464b6815eb97782f2ffe36774b998e7fb78f30abef", + "schemaAwareHash": "805348468cefee0e3e745cb1bcec0ab4bd44ba55f6ddb91e52e0bc9b437c2dee", "serviceName": "Subgraph1",
"variableUsages": [] }, @@ -86,7 +86,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "66b954f39aead8436321c671eb71e56ce15bbe0c7b82f06b2f8f70473ce1cb6e", + "schemaAwareHash": "e64d79913c52a4a8b95bfae44986487a1ac73118f27df3b602972a5cbb1f360a", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_list_of_lists.snap b/apollo-router/tests/snapshots/set_context__set_context_list_of_lists.snap index e7fbee2a8b..c390c1db88 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_list_of_lists.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_list_of_lists.snap @@ -44,7 +44,7 @@ expression: response "operationKind": "query", "operationName": "QueryLL__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "babf88ea82c1330e535966572a55b03a2934097cd1cf905303b86ae7c197ccaf", + "schemaAwareHash": "53e85332dda78d566187c8886c207b81acfe3ab5ea0cafd3d71fb0b153026d80", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -90,7 +90,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "a9b24549250c12e38c398c32e9218134fab000be3b934ebc6bb38ea096343646", + "schemaAwareHash": "8ed6f85b6a77c293c97171b4a98f7dd563e98a737d4c3a9f5c54911248498ec7", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_no_typenames.snap b/apollo-router/tests/snapshots/set_context__set_context_no_typenames.snap index 8eaa5b0202..e9743a7902 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_no_typenames.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_no_typenames.snap @@ -32,7 +32,7 @@ expression: response "operationKind": "query", "operationName": "Query__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "d7cb2d1809789d49360ca0a60570555f83855f00547675f366915c9d9d90fef9", + "schemaAwareHash": "0163c552923b61fbde6dbcd879ffc2bb887175dc41bbf75a272875524e664e8d", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -78,7 +78,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "66b954f39aead8436321c671eb71e56ce15bbe0c7b82f06b2f8f70473ce1cb6e", + "schemaAwareHash": "e64d79913c52a4a8b95bfae44986487a1ac73118f27df3b602972a5cbb1f360a", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_type_mismatch.snap b/apollo-router/tests/snapshots/set_context__set_context_type_mismatch.snap index 1df052723e..3208b9bf0a 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_type_mismatch.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_type_mismatch.snap @@ -32,7 +32,7 @@ expression: response "operationKind": "query", "operationName": "Query_type_mismatch__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "7eae890e61f5ae512e112f5260abe0de3504041c92dbcc7aae0891c9bdf2222b", + "schemaAwareHash": "34c8f7c0f16220c5d4b589c8da405f49510e092756fa98629c73dea06fd7c243", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -78,7 +78,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "d8ea99348ab32931371c85c09565cfb728d2e48cf017201cd79cb9ef860eb9c2", + "schemaAwareHash": "feb578fd1831280f376d8961644e670dd8c3508d0a18fcf69a6de651e25e9ca8", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_union.snap 
b/apollo-router/tests/snapshots/set_context__set_context_union.snap index e382988a8b..6c995c1e8b 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_union.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_union.snap @@ -31,7 +31,7 @@ expression: response "operationKind": "query", "operationName": "QueryUnion__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "b9124cd1daa6e8347175ffe2108670a31c73cbc983e7812ee39f415235541005", + "schemaAwareHash": "3e768a1879f4ced427937721980688052b471dbfee0d653b212c85f2732591cc", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -80,7 +80,7 @@ expression: response "typeCondition": "V" } ], - "schemaAwareHash": "c50ca82d402a330c1b35a6d76332094c40b00d6dec6f6b2a9b0a32ced68f4e95", + "schemaAwareHash": "0c190d5db5b15f89fa45de844d2cec59725986e44fcb0dbdb9ab870a197cf026", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_1" @@ -134,7 +134,7 @@ expression: response "typeCondition": "V" } ], - "schemaAwareHash": "ec99886497fee9b4f13565e19cadb13ae85c83de93acb53f298944b7a29e630e", + "schemaAwareHash": "2d7376a8d1f7f2a929361e838bb0435ed4c4a6194fa8754af52d4b6dc7140508", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_1" diff --git a/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure.snap b/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure.snap index 605fd4570a..49dcf6bf9b 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure.snap @@ -34,7 +34,7 @@ expression: response "operationKind": "query", "operationName": "Query_fetch_failure__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "1813ba1c272be0201096b4c4c963a07638e4f4b4ac1b97e0d90d634f2fcbac11", + "schemaAwareHash": "84a7305d62d79b5bbca976c5522d6b32c5bbcbf76b495e4430f9cdcb51c80a57", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -73,7 +73,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "1fdff97ad7facf07690c3e75e3dc7f1b11ff509268ef999250912a728e7a94c9", + "schemaAwareHash": "acb960692b01a756fcc627cafef1c47ead8afa60fa70828e5011ba9f825218ab", "serviceName": "Subgraph2", "variableUsages": [] }, @@ -125,7 +125,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "c9c571eac5df81ff34e5e228934d029ed322640c97ab6ad061cbee3cd81040dc", + "schemaAwareHash": "9fd65f6f213899810bce20180de6754354a25dc3c1bc97d0b7214a177cf8b0bb", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_with_null.snap b/apollo-router/tests/snapshots/set_context__set_context_with_null.snap index 1e361f0a83..badc32bc8a 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_with_null.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_with_null.snap @@ -29,7 +29,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "19bd66a3ecc2d9495dffce2279774de3275cb027254289bb61b0c1937a7738b4", + "schemaAwareHash": "4c0c9f83a57e9a50ff1f6dd601ec0a1588f1485d5cfb1015822af4017263e807", "authorization": { "is_authenticated": false, "scopes": [], @@ -82,7 +82,7 @@ expression: response "renameKeyTo": "contextualArgument_1_0" } ], - "schemaAwareHash": "010ba25ca76f881bd9f0d5e338f9c07829d4d00e183828b6577d593aea0cf21e", + "schemaAwareHash": 
"8db802e78024d406645f1ddc8972255e917bc738bfbed281691a45e34c92debb", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions__type_conditions_disabled.snap b/apollo-router/tests/snapshots/type_conditions__type_conditions_disabled.snap index 84b137aa01..224cd2fb09 100644 --- a/apollo-router/tests/snapshots/type_conditions__type_conditions_disabled.snap +++ b/apollo-router/tests/snapshots/type_conditions__type_conditions_disabled.snap @@ -79,7 +79,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "0144f144d271437ed45f9d20706be86ffbf1e124d77c7add3db17d4a1498ce97", + "schemaAwareHash": "5201830580c9c5fadd9c59aea072878f84465c1ae9d905207fa281aa7c1d5340", "authorization": { "is_authenticated": false, "scopes": [], @@ -137,7 +137,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "23759b36e5149924c757a8b9586adec2c0f6be04ecdf2c3c3ea277446daa690b", + "schemaAwareHash": "62ff891f6971184d3e42b98f8166be72027b5479f9ec098af460a48ea6f6cbf4", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled.snap b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled.snap index e41aeefee5..da66cee5c2 100644 --- a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled.snap +++ b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled.snap @@ -79,7 +79,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "0144f144d271437ed45f9d20706be86ffbf1e124d77c7add3db17d4a1498ce97", + "schemaAwareHash": "5201830580c9c5fadd9c59aea072878f84465c1ae9d905207fa281aa7c1d5340", "authorization": { "is_authenticated": false, "scopes": [], @@ -141,7 +141,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "23759b36e5149924c757a8b9586adec2c0f6be04ecdf2c3c3ea277446daa690b", + "schemaAwareHash": "62ff891f6971184d3e42b98f8166be72027b5479f9ec098af460a48ea6f6cbf4", "authorization": { "is_authenticated": false, "scopes": [], @@ -201,7 +201,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "8ee58ad8b4823bcbda9126d2565e1cb04bf91ff250b1098476a1d7614a870121", + "schemaAwareHash": "7e6f6850777335eb1421a30a45f6888bb9e5d0acf8f55d576d55d1c4b7d23ec7", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_generate_query_fragments.snap b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_generate_query_fragments.snap index d92517b39d..e5e2cc616a 100644 --- a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_generate_query_fragments.snap +++ b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_generate_query_fragments.snap @@ -79,7 +79,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "844dc4e409cdca1334abe37c347bd4e330123078dd7e65bda8dbb57ea5bdf59c", + "schemaAwareHash": "0e1644746fe4beab7def35ec8cc12bde39874c6bb8b9dfd928456196b814a111", "authorization": { "is_authenticated": false, "scopes": [], @@ -141,7 +141,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - 
"schemaAwareHash": "ad82ce0af279c6a012d6b349ff823ba1467902223312aed1cdfc494ec3100b3e", + "schemaAwareHash": "6510f6b9672829bd9217618b78ef6f329fbddb125f88184d04e6faaa982ff8bb", "authorization": { "is_authenticated": false, "scopes": [], @@ -201,7 +201,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "7c267302cf4a44a4463820237830155ab50be32c8860371d8a5c8ca905476360", + "schemaAwareHash": "6bc34c108f7cf81896971bffad76dc5275d46231b4dfe492ccc205dda9a4aa16", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list.snap b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list.snap index acffc62599..9d70336225 100644 --- a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list.snap +++ b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list.snap @@ -141,7 +141,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "1343b4972ec8be54afe990c69711ce790992a814f9654e34e2ee2b25e4097e45", + "schemaAwareHash": "51a7aadec14b66d9f6c737be7418bac0be1af89fcc55dac55d9e9b125bc3682d", "authorization": { "is_authenticated": false, "scopes": [], @@ -204,7 +204,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "23759b36e5149924c757a8b9586adec2c0f6be04ecdf2c3c3ea277446daa690b", + "schemaAwareHash": "62ff891f6971184d3e42b98f8166be72027b5479f9ec098af460a48ea6f6cbf4", "authorization": { "is_authenticated": false, "scopes": [], @@ -265,7 +265,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "8ee58ad8b4823bcbda9126d2565e1cb04bf91ff250b1098476a1d7614a870121", + "schemaAwareHash": "7e6f6850777335eb1421a30a45f6888bb9e5d0acf8f55d576d55d1c4b7d23ec7", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list_of_list.snap b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list_of_list.snap index 2b8feaafc3..5a6a4b30bc 100644 --- a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list_of_list.snap +++ b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_list_of_list_of_list.snap @@ -145,7 +145,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "3698f4e74ead34f43a949e1e8459850337a1a07245f8ed627b9203904b4cfff4", + "schemaAwareHash": "e6f45a784fb669930586f13fc587f55798089a87ee4b23a7d1736e0516367a6a", "authorization": { "is_authenticated": false, "scopes": [], @@ -209,7 +209,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "23759b36e5149924c757a8b9586adec2c0f6be04ecdf2c3c3ea277446daa690b", + "schemaAwareHash": "62ff891f6971184d3e42b98f8166be72027b5479f9ec098af460a48ea6f6cbf4", "authorization": { "is_authenticated": false, "scopes": [], @@ -271,7 +271,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "8ee58ad8b4823bcbda9126d2565e1cb04bf91ff250b1098476a1d7614a870121", + "schemaAwareHash": "7e6f6850777335eb1421a30a45f6888bb9e5d0acf8f55d576d55d1c4b7d23ec7", "authorization": { "is_authenticated": false, 
"scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_shouldnt_make_article_fetch.snap b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_shouldnt_make_article_fetch.snap index 5020d447b4..acd8fb6676 100644 --- a/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_shouldnt_make_article_fetch.snap +++ b/apollo-router/tests/snapshots/type_conditions__type_conditions_enabled_shouldnt_make_article_fetch.snap @@ -54,7 +54,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "0144f144d271437ed45f9d20706be86ffbf1e124d77c7add3db17d4a1498ce97", + "schemaAwareHash": "5201830580c9c5fadd9c59aea072878f84465c1ae9d905207fa281aa7c1d5340", "authorization": { "is_authenticated": false, "scopes": [], @@ -116,7 +116,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "23759b36e5149924c757a8b9586adec2c0f6be04ecdf2c3c3ea277446daa690b", + "schemaAwareHash": "62ff891f6971184d3e42b98f8166be72027b5479f9ec098af460a48ea6f6cbf4", "authorization": { "is_authenticated": false, "scopes": [], @@ -176,7 +176,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "8ee58ad8b4823bcbda9126d2565e1cb04bf91ff250b1098476a1d7614a870121", + "schemaAwareHash": "7e6f6850777335eb1421a30a45f6888bb9e5d0acf8f55d576d55d1c4b7d23ec7", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index 8c5397d82c..83c20bb40e 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.52.1 + image: ghcr.io/apollographql/router:v1.53.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index e3f00a1416..94900947d1 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.52.1 + image: ghcr.io/apollographql/router:v1.53.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index df90c46e89..f719e3de99 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.52.1 + image: ghcr.io/apollographql/router:v1.53.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/docs/source/configuration/distributed-caching.mdx b/docs/source/configuration/distributed-caching.mdx index 1c97c9b47a..e1c365e0a3 100644 --- a/docs/source/configuration/distributed-caching.mdx +++ b/docs/source/configuration/distributed-caching.mdx @@ -137,7 +137,7 @@ supergraph: urls: ["redis://..."] #highlight-line username: admin/123 # Optional, can 
be part of the URLs directly; mainly useful if your username contains special characters like '/' that don't work in a URL. This field takes precedence over the username in the URL password: admin # Optional, can be part of the URLs directly; mainly useful if your password contains special characters like '/' that don't work in a URL. This field takes precedence over the password in the URL - timeout: 5ms # Optional, by default: 2ms + timeout: 2s # Optional, by default: 500ms ttl: 24h # Optional namespace: "prefix" # Optional #tls: @@ -147,7 +147,7 @@ supergraph: #### Timeout -Connecting and sending commands to Redis are subject to a timeout, set by default to 2ms, that can be overriden. +Connecting and sending commands to Redis are subject to a timeout, 500ms by default, which can be overridden. #### TTL diff --git a/docs/source/configuration/entity-caching.mdx b/docs/source/configuration/entity-caching.mdx index 125eb28b56..a1f1c2610e 100644 --- a/docs/source/configuration/entity-caching.mdx +++ b/docs/source/configuration/entity-caching.mdx @@ -91,28 +91,29 @@ For example: # Enable entity caching globally preview_entity_cache: enabled: true - - # Configure Redis - redis: - urls: ["redis://..."] - timeout: 5ms # Optional, by default: 2ms - ttl: 24h # Optional, by default no expiration - subgraph: all: enabled: true + # Configure Redis + redis: + urls: ["redis://..."] + timeout: 2s # Optional, by default: 500ms + ttl: 24h # Optional, by default no expiration # Configure entity caching per subgraph, overrides options from the "all" section subgraphs: products: ttl: 120s # overrides the global TTL inventory: enabled: false # disable for a specific subgraph + accounts: + private_id: "user_id" ``` ### Configure time to live (TTL) -Besides configuring a global TTL for all the entries in Redis, the GraphOS Router also honors the [`Cache-Control` header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control) returned with the subgraph response. It generates a `Cache-Control` header for the client response by aggregating the TTL information from all response parts. -A TTL has to be configured for all subgraphs using entity caching, either defined in the per subgraph configuration or inherited from the global configuration. +To decide whether to cache an entity, the router honors the [`Cache-Control` header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control) returned with the subgraph response. Because `Cache-Control` might not contain a `max-age` or `s-maxage` directive, a default TTL must be defined, either per subgraph or inherited from the global configuration. + +The router also generates a `Cache-Control` header for the client response by aggregating the TTL information from all response parts. If a subgraph doesn't return the header, its response is assumed to be `no-store`.
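+
+For illustration, here is a hypothetical aggregation across two response parts (a simplified sketch; the header values and the exact directive merging are illustrative only):
+
+```yaml
+# Hypothetical illustration: the client response keeps the smallest TTL and
+# the most restrictive privacy setting among the response parts.
+subgraph_response_headers:
+  products: "public, max-age=60"
+  reviews: "private, max-age=30"
+client_response_header: "private, max-age=30"
+```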
### Customize Redis cache key @@ -131,6 +132,118 @@ This entry contains an object with the `all` field to affect all subgraph requests ``` + +### Private information caching + +A subgraph can return a response with the header `Cache-Control: private`, indicating that it contains user-personalized data. Although this usually forbids intermediate servers from storing data, the router may be able to recognize different users and store their data in different parts of the cache. + +To set up private information caching, you can configure the `private_id` option. `private_id` is a string pointing to the request context entry that contains the data used to recognize users (for example, a user ID, or the `sub` claim of a JWT). + +As an example, if you are using the router's JWT authentication plugin, you can first configure the `private_id` option in the `accounts` subgraph to point to the `user_id` key in context, then use a Rhai script to set that key from the JWT's `sub` claim: + +```yaml title="router.yaml" +preview_entity_cache: + enabled: true + subgraph: + all: + enabled: true + redis: + urls: ["redis://..."] + subgraphs: + accounts: + private_id: "user_id" +authentication: + router: + jwt: + jwks: + - url: https://auth-server/jwks.json +``` + +```rhai title="main.rhai" +fn supergraph_service(service) { + let request_callback = |request| { + let claims = request.context[Router.APOLLO_AUTHENTICATION_JWT_CLAIMS]; + + if claims != () { + let private_id = claims["sub"]; + request.context["user_id"] = private_id; + } + }; + + service.map_request(request_callback); +} +``` + +The router implements the following sequence to determine whether a particular query returns private data (a hypothetical sketch of the resulting cache entries follows the list): + +- Upon seeing a query for the first time, the router queries the cache as if it were a public-only query. +- When the subgraph returns the response with private data, the router recognizes it and stores the data in a user-specific part of the cache. +- The router stores the query in a list of known queries with private data. +- When the router subsequently sees a known query: + - If the private id isn't provided, the router skips the cache and returns the subgraph response directly. + - If the private id is provided, the router queries the user-specific part of the cache, falling back to the subgraph if nothing is cached.
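+
+For example, with hypothetical key shapes (illustrative only; the router's actual Redis key format differs), a known-private query cached for two users could produce entries like:
+
+```yaml
+# Hypothetical sketch: one cache entry per user for the same known-private query.
+"entity-cache:accounts:query-abc123:user:1": '{"me": {"name": "test"}}'
+"entity-cache:accounts:query-abc123:user:2": '{"me": {"name": "test2"}}'
+```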
+### Observability + +The router supports a [`cache` selector](./telemetry/instrumentation/selectors#subgraph) in telemetry for the subgraph service. The selector returns the number of cache hits or misses for entities in a subgraph request. + +## Spans + +You can add a new attribute to the subgraph span for the number of cache hits. For example: + +```yaml title="router.yaml" +telemetry: + instrumentation: + spans: + subgraph: + attributes: + cache.hit: + cache: hit +``` + +## Metrics + +The router provides the `telemetry.instrumentation.instruments.cache` instrument to enable cache metrics: + +```yaml title="router.yaml" +telemetry: + instrumentation: + instruments: + cache: # Cache instruments configuration + apollo.router.operations.entity.cache: # A counter of cache hits and misses for subgraph requests + attributes: + entity.type: true # Include the entity type name. default: false + subgraph.name: # Custom attribute to include the subgraph name in the metric + subgraph_name: true + supergraph.operation.name: # Custom attribute to include the supergraph operation name + supergraph_operation_name: string + # You can add more custom attributes using subgraph selectors +``` + +You can use custom instruments to create metrics for the subgraph service. The following example creates a custom instrument to generate a histogram that measures the subgraph request duration when there's at least one cache hit for the "inventory" subgraph: + +```yaml title="router.yaml" +telemetry: + instrumentation: + instruments: + subgraph: + only_cache_hit_on_subgraph_inventory: + type: histogram + value: duration + unit: s + description: Histogram of subgraph request duration when there is a cache hit for the inventory subgraph + condition: + all: + - eq: + - subgraph_name: true # subgraph selector + - inventory + - gt: # If the number of cache hits is greater than 0 + - cache: hit + # entity_type: Product # You could also check only the Product entity type; all entity types are checked by default. + - 0 + +``` + + ## Implementation notes ### Cache-Control header requirement @@ -141,6 +254,10 @@ The Router currently cannot know which types or fields should be cached, so it r To prevent transient errors from affecting the cache for a long duration, subgraph responses with errors are not cached. +### Cached entities with an unavailable subgraph + +If some entities were obtained from the cache but the subgraphs that provided them are unavailable, the router returns a response with the cached entities and the other entities nullified (schema permitting), along with an error message for the nullified entities. + ### Authorization and entity caching When used alongside the router's [authorization directives](./authorization), cache entries are separated by authorization context. If a query contains fields that need a specific scope, the requests providing that scope have different cache entries from those not providing the scope. This means that data requiring authorization can still be safely cached and even shared across users, without needing invalidation when a user's roles change because their requests are automatically directed to a different part of the cache. diff --git a/docs/source/configuration/experimental_query_planner_mode.mdx b/docs/source/configuration/experimental_query_planner_mode.mdx new file mode 100644 index 0000000000..67e1a2556d --- /dev/null +++ b/docs/source/configuration/experimental_query_planner_mode.mdx @@ -0,0 +1,34 @@ +--- +title: Experimental Query Planner Mode +subtitle: Switch between legacy and native query planning +noIndex: true +--- + +The router (GraphOS Router and Apollo Router Core) is in the early stages of +transitioning to a native query planner, replacing the existing legacy planner. + +As part of the effort to ensure the correctness and stability of the new planner, +v1.53.0 of the router runs both planners in parallel in order to +compare them. After the comparison, the router discards the native planner's results and +uses only the legacy planner to execute requests. + +The native planner uses a single thread in the cold path of the router and has a +bounded queue of 10 queries. If the queue is full, the router skips the +comparison to avoid excessive resource consumption. + +You can disable the native query planner by configuring your `router.yaml` to use only +`legacy` planning. You may want to disable it to avoid spikes in CPU utilization, for +example if an erroneous operation fails to complete planning in the native planner's +background thread. + +```yaml title="router.yaml" +experimental_query_planner_mode: legacy +``` + +The supported modes of `experimental_query_planner_mode` are the following (see the example after the list): +* `new`: enables only the native query planner. +* `both_best_effort` (default): runs both the legacy and native query planners and + compares them; the legacy planner is used for execution. If any unsupported + features are detected, the router falls back to legacy with an `info` log. +* `legacy`: enables only the legacy query planner.
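+
+For example, to spell out the default mode explicitly:
+
+```yaml title="router.yaml"
+# Equivalent to the default behavior: plan with both planners, compare the
+# results in the background, and execute requests with the legacy planner.
+experimental_query_planner_mode: both_best_effort
+```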
+
+The supported modes of `experimental_query_planner_mode` are the following:
+* `new`: enables only the native query planner.
+* `both_best_effort` (default): enables comparison between the legacy and new native
+  query planners. The legacy query planner is used for execution. If any
+  unsupported features are detected, the router falls back to legacy with an
+  `info` log.
+* `legacy`: enables only the legacy query planner.
diff --git a/docs/source/configuration/in-memory-caching.mdx b/docs/source/configuration/in-memory-caching.mdx
index 6c2e129ad5..000fade2e5 100644
--- a/docs/source/configuration/in-memory-caching.mdx
+++ b/docs/source/configuration/in-memory-caching.mdx
@@ -66,15 +66,18 @@ supergraph:
 To get more information on the planning and warm-up process use the following metrics (where `<storage>` can be `redis` for distributed cache or `memory`):

 * counters:
-  * `apollo_router_cache_size{kind="query planner", storage="<storage>}`: current size of the cache (only for in-memory cache)
-  * `apollo_router_cache_hit_count{kind="query planner", storage="<storage>}`
-  * `apollo_router_cache_miss_count{kind="query planner", storage="<storage>}`
+  * `apollo_router_cache_hit_count{kind="query planner", storage="<storage>"}`
+  * `apollo_router_cache_miss_count{kind="query planner", storage="<storage>"}`

 * histograms:
   * `apollo.router.query_planning.plan.duration`: time spent planning queries
   * `apollo_router_schema_loading_time`: time spent loading a schema
-  * `apollo_router_cache_hit_time{kind="query planner", storage="<storage>}`: time to get a value from the cache
-  * `apollo_router_cache_miss_time{kind="query planner", storage="<storage>}`
+  * `apollo_router_cache_hit_time{kind="query planner", storage="<storage>"}`: time to get a value from the cache
+  * `apollo_router_cache_miss_time{kind="query planner", storage="<storage>"}`
+
+* gauges:
+  * `apollo_router_cache_size{kind="query planner", storage="memory"}`: current size of the cache (only for in-memory cache)
+  * `apollo.router.cache.storage.estimated_size{kind="query planner", storage="memory"}`: estimated storage size of the cache (only for the in-memory query planner cache)

 Typically, we would look at `apollo_router_cache_size` and the cache hit rate to define the right size of the in memory cache, then look at `apollo_router_schema_loading_time` and `apollo.router.query_planning.plan.duration` to decide how much time we want to spend warming up queries.
diff --git a/docs/source/configuration/telemetry/exporters/tracing/overview.mdx b/docs/source/configuration/telemetry/exporters/tracing/overview.mdx
index 1bab45a2c2..c7b81cca70 100644
--- a/docs/source/configuration/telemetry/exporters/tracing/overview.mdx
+++ b/docs/source/configuration/telemetry/exporters/tracing/overview.mdx
@@ -144,9 +144,32 @@ telemetry:
       # If you have your own way to generate a trace id and you want to pass it via a custom request header
       request:
+        # The name of the header to read the trace ID from
         header_name: my-trace-id
+        # The format of the trace ID when propagating to subgraphs.
+        format: uuid
 ```

+#### `request` configuration reference
+
+| Option        | Values                                                        | Default       | Description                                         |
+|---------------|---------------------------------------------------------------|---------------|-----------------------------------------------------|
+| `header_name` |                                                               |               | The name of the HTTP header to use for propagation. |
+| `format`      | `hexadecimal`\|`open_telemetry`\|`decimal`\|`datadog`\|`uuid` | `hexadecimal` | The output format of the `trace_id`.                |
+
+Valid values for `format`:
+* `hexadecimal` - 32-character hexadecimal string (e.g. `0123456789abcdef0123456789abcdef`)
+* `open_telemetry` - 32-character hexadecimal string (e.g. `0123456789abcdef0123456789abcdef`)
+* `decimal` - 16-character decimal string (e.g. `1234567890123456`)
+* `datadog` - 16-character decimal string (e.g. `1234567890123456`)
+* `uuid` - 36-character UUID string (e.g. `01234567-89ab-cdef-0123-456789abcdef`)
+
+Incoming trace IDs must be in `open_telemetry` or `uuid` format.
+
 ### Limits

 You may set limits on spans to prevent sending too much data to your APM. For example:
diff --git a/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx b/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx
index 1a3358a38e..d29cbf1fca 100644
--- a/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx
+++ b/docs/source/configuration/telemetry/instrumentation/standard-instruments.mdx
@@ -38,6 +38,7 @@ These instruments can be consumed by configuring a [metrics exporter](../exporte
 - `apollo_router_cache_miss_count` - Number of cache misses
 - `apollo_router_cache_hit_time` - Time to hit the cache in seconds
 - `apollo_router_cache_miss_time` - Time to miss the cache in seconds
+- `apollo.router.cache.storage.estimated_size` - The estimated storage size of the cache in bytes (in-memory query planner cache only).

 All cache metrics listed above have the following attributes:

@@ -65,6 +66,8 @@ The coprocessor operations metric has the following attributes:
 - `apollo.router.query_planning.plan.duration` - Histogram of plan durations isolated to query planning time only.
 - `apollo.router.query_planning.total.duration` - Histogram of plan durations including queue time.
 - `apollo.router.query_planning.queued` - A gauge of the number of queued plans requests.
+- `apollo.router.v8.heap.used` - Heap memory used by V8, in bytes.
+- `apollo.router.v8.heap.total` - Total heap allocated by V8, in bytes.

 ### Uplink
diff --git a/docs/source/configuration/traffic-shaping.mdx b/docs/source/configuration/traffic-shaping.mdx
index dfeb0cb5e1..c6d027fbca 100644
--- a/docs/source/configuration/traffic-shaping.mdx
+++ b/docs/source/configuration/traffic-shaping.mdx
@@ -192,9 +192,9 @@ Traffic shaping always executes these steps in the same order, to ensure a consi
 - preparing the subgraph request
 - variable deduplication
-- rate limiting
-- request retry
-- timeout
 - query deduplication
+- timeout
+- request retry
+- rate limiting
 - compression
 - sending the request to the subgraph
diff --git a/docs/source/containerization/kubernetes.mdx b/docs/source/containerization/kubernetes.mdx
index a2393c5225..591e30db0d 100644
--- a/docs/source/containerization/kubernetes.mdx
+++ b/docs/source/containerization/kubernetes.mdx
@@ -285,3 +285,26 @@ The gateway propagates subgraph errors to clients, but the router doesn't by def
 include_subgraph_errors:
   all: true
 ```
+
+## Troubleshooting
+
+### Pods terminating due to memory pressure
+
+If your router pods are terminating due to memory pressure, you can add router cache metrics to monitor and remediate your system:
+
+1. Add and track the following metrics in your monitoring system:
+
+   * `apollo.router.cache.storage.estimated_size`
+   * `apollo_router_cache_size`
+   * The ratio of `apollo_router_cache_hit_count` to `apollo_router_cache_miss_count`
+
+2. Observe and monitor the metrics:
+
+   * Observe `apollo.router.cache.storage.estimated_size` to see whether it grows over time and correlates with pod memory usage.
+   * Observe the ratio of cache hits to misses to determine whether the cache is effective.
+
+3. Based on your observations, try the following remediation steps:
+
+   * Lower the cache size if the cache reaches a near-100% hit rate but its size keeps growing.
+   * Increase the pod memory if the cache hit rate is low and the cache size keeps growing.
+   * Lower the cache size if the latency of query planning cache misses is acceptable and memory availability is limited.
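+These steps assume the cache metrics are already exported to your monitoring system. As a minimal sketch, you can expose them on the router's Prometheus endpoint and point your scrape configuration at the router pods (the listen address and path below are illustrative):
+
+```yaml title="router.yaml"
+telemetry:
+  exporters:
+    metrics:
+      prometheus:
+        enabled: true
+        # Illustrative address and path; configure your Prometheus
+        # scrape job (or PodMonitor) to collect from this endpoint.
+        listen: 0.0.0.0:9090
+        path: /metrics
+```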
diff --git a/docs/source/executing-operations/demand-control.mdx b/docs/source/executing-operations/demand-control.mdx
index 1aa0957b81..abc7983256 100644
--- a/docs/source/executing-operations/demand-control.mdx
+++ b/docs/source/executing-operations/demand-control.mdx
@@ -13,465 +13,210 @@ minVersion: 1.48.0

-
+## What is demand control?

-The Demand Control feature is in [preview](/resources/product-launch-stages/#product-launch-stages) for organizations with an Enterprise plan. Get in touch with your Apollo contact to request access.
+Demand control provides a way to secure your supergraph from overly complex operations, based on the [IBM GraphQL Cost Directive specification](https://ibm.github.io/graphql-specs/cost-spec.html).

-We welcome your feedback during the preview, especially feedback about the following:
+Application clients can send overly costly operations that overload your supergraph infrastructure. These operations may be costly because of their complexity, their need for expensive resolvers, or both. In either case, demand control can help you protect your infrastructure from these expensive operations. When your router receives a request, it calculates a cost for that operation. If the cost is greater than your configured maximum, the operation is rejected.
-
+## Calculating cost -- Whether the available tools are sufficient to enable you to understand how users are querying your supergraph. +When calculating the cost of an operation, the router sums the costs of the sub-requests that it plans to send to your subgraphs. +* For each operation, the cost is the sum of its base cost plus the costs of its fields. +* For each field, the cost is defined recursively as its own base cost plus the cost of its selections. In the IBM specification, this is called [field cost](https://ibm.github.io/graphql-specs/cost-spec.html#sec-Field-Cost). -- Whether the demand control workflow is easy to follow and implement. - -- Whether any features are missing that preclude you from using demand control in production. - -
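+To make the recursion concrete, here is a small illustrative sketch of the scoring rules above (in Rust, since the router is a Rust project). This is an aid to understanding, not the router's actual implementation, and it ignores list-size estimation, which is covered below; the weights match the default tables that follow.
+
+```rust
+// Illustrative sketch of the cost model described above;
+// not the router's actual implementation.
+enum TypeKind {
+    Object,
+    Interface,
+    Union,
+    Scalar,
+    Enum,
+}
+
+struct Field {
+    kind: TypeKind,
+    selections: Vec<Field>,
+}
+
+// Default base weight of a field, per the element-type table below.
+fn base_weight(kind: &TypeKind) -> u64 {
+    match kind {
+        TypeKind::Object | TypeKind::Interface | TypeKind::Union => 1,
+        TypeKind::Scalar | TypeKind::Enum => 0,
+    }
+}
+
+// Field cost: the field's own base cost plus the cost of its selections.
+fn field_cost(field: &Field) -> u64 {
+    base_weight(&field.kind) + field.selections.iter().map(field_cost).sum::<u64>()
+}
+
+// Operation cost: the operation's base cost (e.g. 10 for a mutation,
+// 0 for a query) plus the costs of its root fields.
+fn operation_cost(operation_base: u64, root_fields: &[Field]) -> u64 {
+    operation_base + root_fields.iter().map(field_cost).sum::<u64>()
+}
+```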
- -Protect your graph from malicious or demanding clients with GraphOS Router's demand control features. Estimate, calculate, observe, and reject high cost GraphQL operations. - -## About demand control - -Applications clients can send complex operations through your router that can overload your supergraph's infrastructure. The clients may be unintentionally or maliciously overloading your supergraph. - -When a client makes a request to the router, the router makes requests to your subgraphs to gather data for the final response. A client, however, may send an operation that's too complex for your subgraphs to process without degrading performance. - -Complex operations include operations that are deeply nested or have many results. Too many complex operations might overload your subgraphs and degrade the responsiveness and latency of your supergraph. - -To prevent complex operations from degrading performance, the GraphOS Router supports analyzing and rejecting requests based on operation complexity. Like [safelisting operations with persisted query lists (PQL)](/graphos/operations/persisted-queries), demand control enables you to reject operations that you don't want to be served by your graph. - -With demand control configured, the router computes a complexity value, or _cost_, per operation. You can collect telemetry and metrics to determine the range of costs of operations served by the router. You can then configure a maximum cost limit per operation, above which the router rejects the operation. - -## Demand control workflow - -Follow this workflow to configure and tune demand control for your router: - -1. Measure the cost of your existing operations. -2. Improve the cost estimation model. -3. Adjust your `preview_demand_control` configuration and enforce cost limits. - -### Measure cost of existing operations - -Start by measuring the costs of the operations served by your router. - -1. In your `router.yaml`, configure demand control to `measure` mode and define telemetry to monitor the results. For example: - - Set `preview_demand_control.mode` to `measure`. - - Define a custom histogram of operation costs. - -```yaml title="Example router.yaml to measure operation costs" -# Demand control enabled in measure mode. -preview_demand_control: - enabled: true - # Use measure mode to monitor the costs of your operations without rejecting any. - mode: measure - - strategy: - # Static estimated strategy has a fixed cost for elements. - static_estimated: - # The assumed returned list size for operations. Set this to the maximum number of items in a GraphQL list - list_size: 10 - # The maximum cost of a single operation, above which the operation is rejected. - max: 1000 - -# Basic telemetry configuration for cost. -telemetry: - exporters: - metrics: - common: - views: - # Define a custom view because cost is different than the default latency-oriented view of OpenTelemetry - - name: cost.* - aggregation: - histogram: - buckets: - - 0 - - 10 - - 100 - - 1000 - - 10000 - - 100000 - - 1000000 - - # Example configured for Prometheus. Customize for your APM. 
- prometheus: - enabled: true +The cost of each operation type: - # Basic instrumentation - instrumentation: - instruments: - supergraph: - cost.actual: true # The actual cost - cost.estimated: # The estimated cost - attributes: - cost.result: true # Of the estimated costs which of these would have been rejected - cost.delta: true # Actual - estimated +| | Mutation | Query | Subscription | +| ---- | -------- | ----- | ------------ | +| type | 10 | 0 | 0 | -``` +The cost of each GraphQL element type, per operation type: - +| | Mutation | Query | Subscription | +| --------- | -------- | ----- | ------------ | +| Object | 1 | 1 | 1 | +| Interface | 1 | 1 | 1 | +| Union | 1 | 1 | 1 | +| Scalar | 0 | 0 | 0 | +| Enum | 0 | 0 | 0 | -When analyzing the costs of operations, if your histograms are not granular enough or don't cover a sufficient range, you can modify the views in your telemetry configuration: +Using these defaults, the following operation would have a cost of 4. -```yaml -telemetry: - exporters: - metrics: - common: - views: - - name: cost.* - aggregation: - histogram: - buckets: - - 0 # Define the buckets here - - 10 - - 100 - - 1000 # More granularity for costs in the 1000s - - 2000 - - 3000 - - 4000 +```graphql +query BookQuery { + book(id: 1) { + title + author { + name + } + publisher { + name + address { + zipCode + } + } + } +} ``` - - -2. Send some requests through your router and observe the `cost.*` metrics via your APM. - -You should be able to configure your APM to look for `cost.*` histograms and get the proportion of requests that would be rejected via the `cost.result` attribute on the `cost.estimated` total. This will allow you to see histograms of cost. - -An example histogram of operation costs from a Prometheus endpoint: + ```text disableCopy=true showLineNumbers=false -# TYPE cost_actual histogram -cost_actual_bucket{otel_scope_name="apollo/router",le="0"} 0 -cost_actual_bucket{otel_scope_name="apollo/router",le="10"} 3 -cost_actual_bucket{otel_scope_name="apollo/router",le="100"} 5 -cost_actual_bucket{otel_scope_name="apollo/router",le="1000"} 11 -cost_actual_bucket{otel_scope_name="apollo/router",le="10000"} 19 -cost_actual_bucket{otel_scope_name="apollo/router",le="100000"} 20 -cost_actual_bucket{otel_scope_name="apollo/router",le="1000000"} 20 -cost_actual_bucket{otel_scope_name="apollo/router",le="+Inf"} 20 -cost_actual_sum{otel_scope_name="apollo/router"} 1097 -cost_actual_count{otel_scope_name="apollo/router"} 20 -# TYPE cost_delta histogram -cost_delta_bucket{otel_scope_name="apollo/router",le="0"} 0 -cost_delta_bucket{otel_scope_name="apollo/router",le="10"} 2 -cost_delta_bucket{otel_scope_name="apollo/router",le="100"} 9 -cost_delta_bucket{otel_scope_name="apollo/router",le="1000"} 7 -cost_delta_bucket{otel_scope_name="apollo/router",le="10000"} 19 -cost_delta_bucket{otel_scope_name="apollo/router",le="100000"} 20 -cost_delta_bucket{otel_scope_name="apollo/router",le="1000000"} 20 -cost_delta_bucket{otel_scope_name="apollo/router",le="+Inf"} 20 -cost_delta_sum{otel_scope_name="apollo/router"} 21934 -cost_delta_count{otel_scope_name="apollo/router"} 1 -# TYPE cost_estimated histogram -cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="0"} 0 -cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="10"} 5 -cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="100"} 5 -cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="1000"} 9 
-cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="10000"} 11 -cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="100000"} 20 -cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="1000000"} 20 -cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="+Inf"} 20 -cost_estimated_sum{cost_result="COST_OK",otel_scope_name="apollo/router"} -cost_estimated_count{cost_result="COST_OK",otel_scope_name="apollo/router"} 20 +1 Query (0) + 1 book object (1) + 1 author object (1) + 1 publisher object (1) + 1 address object (1) = 4 total cost ``` -An example chart of a histogram: - - - - -You can also chart the percentage of operations that would be allowed or rejected with the current configuration: - - - -Although estimated costs won't necessarily match actual costs, you can use the metrics to ascertain the following: -- Whether any operations have underestimated costs -- What to set `static_estimated.list_size` as the actual maximum list size -- What to set `static_estimated.max` as the maximum cost of an allowed operation - -In this example, just under half of the requests would be rejected with the current configuration. The cost of queries are also underestimated because `cost.delta` is non-zero. - -3. To figure out what operations are being rejected, define a telemetry custom instrument that reports when an operation has been rejected because its cost exceeded the configured cost limit: - -```yaml title="router.yaml" -telemetry: - instrumentation: - instruments: - supergraph: - # custom instrument - cost.rejected.operations: - type: histogram - value: - # Estimated cost is used to populate the histogram - cost: estimated - description: "Estimated cost per rejected operation." - unit: delta - condition: - eq: - # Only show rejected operations. - - cost: result - - "COST_ESTIMATED_TOO_EXPENSIVE" - attributes: - graphql.operation.name: true # Graphql operation name is added as an attribute - -``` - -This custom instrument may not be suitable when you have many operation names, such as a public internet-facing API. You can add conditions to reduce the number of returned operations. For example, use a condition that outputs results only when the cost delta is greater than a threshold: - -```yaml title="router.yaml" -telemetry: - instrumentation: - instruments: - supergraph: - # custom instrument - cost.rejected.operations: - type: histogram - value: - # Estimated cost is used to populate the histogram - cost: estimated - description: "Estimated cost per rejected operation." - unit: delta - condition: - all: - - eq: # Only show rejected operations - - cost: result - - "COST_ESTIMATED_TOO_EXPENSIVE" -#highlight-start - - gt: # Only show cost delta > 100 - - cost: delta - - 100 -#highlight-end -``` + -4. You should now be able to configure your APM to see which operations are too costly. Visualizing the histogram can be useful, such as with top-N or heatmap tools. +### Customizing cost -For example, the following table has the estimated cost of operations: +Since version 1.53.0, the router supports customizing the cost calculation with the `@cost` directive. The `@cost` directive has a single argument, `weight`, which overrides the default weights from the table above. 
-| Operation name | Estimated cost | -|----------------------|----------------| -| `ExtractAll` | 9020 | -| `GetAllProducts` | 1435 | -| `GetLatestProducts` | 120 | -| `GetRecentlyUpdated` | 99 | -| `FindProductByName` | 87 | + -The `ExtractAll` operation has a very large estimated cost, so it's a good candidate to be rejected. +The Apollo Federation [`@cost` directive](/federation/federated-schemas/federated-directives/#cost) differs from the IBM specification in that the `weight` argument is of type `Int!` instead of `String!`. -Also, the value of the `cost.delta` metric—the difference between the actual and estimated cost—shows whether the assumed list size used for cost estimation is too large or small. In this example, the positive `cost.delta` means that the actual list size is greater than the estimated list size. Therefore the `static_estimated.list_size` can be reduced to closer match the actual. + -### Improve cost estimation model +Annotating your schema with the `@cost` directive customizes how the router scores operations. For example, imagine that the `Address` resolver for an example query is particularly expensive. We can annotate the schema with the `@cost` directive with a larger weight: -You should iteratively improve your cost estimation model. Accurate cost estimation is critical to identifying and preventing queries that could harm your subgraphs. +```graphql +type Query { + book(id: ID): Book +} -The previous step identified a noticeable difference between actual and estimated costs with the example operations. You can better understand the difference—and consequently tune the configured list size—by adding telemetry instruments for fields in your GraphQL schema. +type Book { + title: String + author: Author + publisher: Publisher +} -For example, you can generate a histogram for every field in your GraphQL schema: +type Author { + name: String +} -```yaml title="router.yaml" -telemetry: - exporters: - metrics: - common: - views: - - name: graphql.* - aggregation: - histogram: - buckets: - - 0 - - 10 - - 100 - - 1000 - - 10000 - - 100000 - - 1000000 - instrumentation: - instruments: - graphql: - list.length: true +type Publisher { + name: String + address: Address +} +type Address + @cost(weight: 5) { #highlight-line + zipCode: Int! +} ``` -This configuration generates many metrics and may be too costly for your APM. To reduce the amount of metrics generated, you can set conditions on the instrument. +This increases the cost of `BookQuery` from 4 to 8. -For this example, you can set a condition that restricts the instrument to an operation with a certain name. 
You can also show only histograms of list sizes of GraphQL fields: - -```yaml title="router.yaml" -telemetry: - instrumentation: - instruments: - graphql: - graphql.list.length.restricted: # custom instrument - unit: length - description: "histogram of list lengths" - type: histogram - value: - list_length: value - condition: - all: - - eq: - - operation_name: string - - "GetAllProducts" -``` - -The output from a Prometheus endpoint may look like the following: + ```text disableCopy=true showLineNumbers=false -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="0"} 0 -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="10"} 9 -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="100"} 20 -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="1000"} 20 -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="10000"} 20 -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="100000"} 20 -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="1000000"} 20 -graphql_list_length_restricted_bucket{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router",le="+Inf"} 20 -graphql_list_length_restricted_sum{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router"} 218 -graphql_list_length_restricted_count{graphql_field_name="allProducts",graphql_type_name="Query",otel_scope_name="apollo/router"} 20 -``` - -You can configure your APM to chart the histogram: - - - -The chart shows that the actual list sizes for the `allProducts` field are at most 100, so you should update your `static_estimated.list_size` to be 100: - -```yaml title="router.yaml" -preview_demand_control: - enabled: true - mode: measure - strategy: - static_estimated: - list_size: 100 # Updated to measured actual max list size - max: 1000 +1 Query (0) + 1 book object (1) + 1 author object (1) + 1 publisher object (1) + 1 address object (5) = 8 total cost ``` -Rerunning the router and remeasuring costs with the updated `static_estimated.list_size` should result in new histograms and percentages of rejected operations. For example: - - + +### Handling list fields - +During the static analysis phase of demand control, the router doesn't know the size of the list fields in a given query. It must use estimates for list sizes. The closer the estimated list size is to the actual list size for a field, the closer the estimated cost will be to the actual cost. -Although there are no more cost deltas reported, the estimated costs have increased. You still have to adjust the maximum cost. + -Looking at the top N operations, you may see that the estimated costs have been updated. For example: +The difference between estimated and actual operation cost calculations is due only to the difference between assumed and actual sizes of list fields. 
-| Operation name | Estimated cost | -|----------------------|----------------| -| `ExtractAll` | 390200 | -| `GetAllProducts` | 44350 | -| `GetLatestProducts` | 11200 | -| `GetRecentlyUpdated` | 4990 | -| `FindProductByName` | 1870 | + -All operations except `ExtractAll` are in a range of acceptable costs. +There are two ways to indicate the expected list sizes to the router: +* Set the global maximum in your router configuration file (see [Configuring demand control](#configuring-demand-control)). -### Enforce cost limits +* Use the Apollo Federation [@listSize directive](/federation/federated-schemas/federated-directives/#listsize). -After determining the cost estimation model of your operations, you should update and enforce the new cost limits. +The `@listSize` directive supports field-level granularity in setting list size. By using its `assumedSize` argument, you can set a statically defined list size for a field. If you are using paging parameters which control the size of the list, use the `slicingArguments` argument. -From the previous step, you can set the maximum cost to a value that allows all operations except `ExtractAll`: +Continuing with our example above, let's add two queryable fields. First, we will add a field which returns the top five best selling books: -```yaml title="router.yaml" -preview_demand_control: - enabled: true - mode: enforce # Change mode from measure to enforce - strategy: - static_estimated: - list_size: 100 - max: 50000 # Updated max cost allows all operations except ExtractAll +```graphql +type Query { + book(id: ID): Book + bestsellers: [Book] @listSize(assumedSize: 5) +} ``` -## Next steps - - -Continue to monitor the costs of operations and take action if the estimation model becomes inaccurate. For example, update the estimation model if the maximum number of list items changes. - -You can set alerts in your APM for events that may require changing your demand control settings. Events to alert include: -- Unexpected increase in the number of requests rejected by demand control. -- Increased max list size of your data. -- Increased delta metric. - - - -Using paging APIs can help avoid situations where a list field returns an arbitrarily large number of elements. - - - -## Calculating operation cost - -When your router receives a request, its query planner generates and sends a series of sub-requests to subgraphs. - -To calculate the total cost of an operation, the router sums the total costs based on sub-request's operation type and the types of GraphQL elements of its fields. - -The cost of each operation type: - -| | Mutation | Query | Subscription | -| --------- | -------- | ----- | ------------ | -| type | 10 | 0 | 0 | - - -The cost of each GraphQL element type, per operation type: - -| | Mutation | Query | Subscription | -| --------- | -------- | ----- | ------------ | -| Object | 1 | 1 | 1 | -| Interface | 1 | 1 | 1 | -| Union | 1 | 1 | 1 | -| Scalar | 0 | 0 | 0 | -| Enum | 0 | 0 | 0 | - -For example, assume the following query gets a response with six products and ten reviews: +With this schema, the following query has a cost of 40: ```graphql -query ExampleQuery { - topProducts { - name - reviews { - author { - name +query BestsellersQuery { + bestsellers { + title + author { + name + } + publisher { + name + address { + zipCode } } } } ``` -Assuming each review having exactly one author, the total cost of the query is 26. 
-
-
+```text disableCopy=true showLineNumbers=false
+1 Query (0) + 5 book objects (5 * (1 book object (1) + 1 author object (1) + 1 publisher object (1) + 1 address object (5))) = 40 total cost
+```
+
+The second field we will add is a paginated resolver. It returns the latest additions to the inventory:
-### Estimated and actual costs
+```graphql
+type Query {
+  book(id: ID): Book
+  bestsellers: [Book] @listSize(assumedSize: 5)
+  #highlight-start
+  newestAdditions(after: ID, limit: Int!): [Book]
+    @listSize(slicingArguments: ["limit"])
+  #highlight-end
+}
+```
-For an operation with list fields, the router must run the operation to get the actual number of items in its lists. Without actual list sizes, the cost of an operation can only be estimated before it's executed, where you assume the size of lists.
-
-After an operation is executed, the actual cost per operation can be calculated with the actual list sizes.
+The number of books returned by this resolver is determined by the `limit` argument.
-
-The difference between estimated and actual operation cost calculations is due only to the difference between assumed and actual sizes of list fields.
+```graphql
+query NewestAdditions {
+  newestAdditions(limit: 3) {
+    title
+    author {
+      name
+    }
+    publisher {
+      name
+      address {
+        zipCode
+      }
+    }
+  }
+}
+```
-
-### Measurement and enforcement modes
+The router will estimate the cost of this query as 24. If the limit were increased to 7, the cost would increase to 56.
-
-When rolling out demand control, you first need to gather information about the queries that are already being executed against your graph so you can decide when to reject requests.
+```text disableCopy=true showLineNumbers=false
+When requesting 3 books:
+1 Query (0) + 3 book objects (3 * (1 book object (1) + 1 author object (1) + 1 publisher object (1) + 1 address object (5))) = 24 total cost
+
+When requesting 7 books:
+1 Query (0) + 7 book objects (7 * (1 book object (1) + 1 author object (1) + 1 publisher object (1) + 1 address object (5))) = 56 total cost
+```
-The router's demand control features support a measurement mode that enables you to gather this information without impacting your running services. You can define telemetry instruments to monitor your operations and decide on their maximum cost threshold.
-
-After gathering enough data, you can then configure your router with maximum cost and list size limits and set demand control to enforcement mode, where it rejects operations with costs exceeding the limit.

 ## Configuring demand control

-To enable demand control in the router, configure the `preview_demand_control` option in `router.yaml`:
+To enable demand control in the router, configure the `demand_control` option in `router.yaml`:

 ```yaml title="router.yaml"
-preview_demand_control:
+demand_control:
   enabled: true
   mode: measure
   strategy:
@@ -480,18 +225,19 @@ preview_demand_control:
       max: 1000
 ```
-Customize `preview_demand_control` with the following settings: +Customize `demand_control` with the following settings: -| Option | Valid values | Default value | Description | -| ------------------- | ----------------------- | ------------- | ---------------------------------------------------------------------------------------------------- | -| `enabled` | boolean | `false` | Set `true` to measure operation costs or enforce operation cost limits. | -| `mode` | `measure`, `enforce` | -- | - `measure` collects information about the cost of operations.
- `enforce` rejects operations exceeding configured cost limits | -| `strategy` | `static_estimated` | -- | `static_estimated` estimates the cost of an operation before it is sent to a subgraph | -| `static_estimated.list_size` | integer | -- | The assumed maximum size of a list for fields that return lists. | -| `static_estimated.max` | integer | -- | The maximum cost of an accepted operation. An operation with a higher cost than this is rejected. | +| Option | Valid values | Default value | Description | +| ---------------------------- | -------------------- | ------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| `enabled` | boolean | `false` | Set `true` to measure operation costs or enforce operation cost limits. | +| `mode` | `measure`, `enforce` | -- | - `measure` collects information about the cost of operations.
- `enforce` rejects operations exceeding configured cost limits | +| `strategy` | `static_estimated` | -- | `static_estimated` estimates the cost of an operation before it is sent to a subgraph | +| `static_estimated.list_size` | integer | -- | The assumed maximum size of a list for fields that return lists. | +| `static_estimated.max` | integer | -- | The maximum cost of an accepted operation. An operation with a higher cost than this is rejected. | +When enabling `demand_control` for the first time, set it to `measure` mode. This will allow you to observe the cost of your operations before setting your maximum cost. ## Telemetry for demand control @@ -511,30 +257,29 @@ You can define router telemetry to gather cost information and gain insights int | Instrument | Description | | ---------------- | ---------------------------------------------------------- | -| `cost.actual` | The actual cost of an operation, measured after execution. | -| `cost.estimated` | The estimated cost of an operation before execution. | -| `cost.delta` | The difference between the actual and estimated cost. | +| `cost.actual` | The actual cost of an operation, measured after execution. | +| `cost.estimated` | The estimated cost of an operation before execution. | +| `cost.delta` | The difference between the actual and estimated cost. | ### Attributes Attributes for `cost` can be applied to instruments, spans, and events—anywhere `supergraph` attributes are used. -| Attribute | Value | Description | -| --------------- | ----- | ---------------------------------------------------------- | -| `cost.actual` | boolean | The actual cost of an operation, measured after execution. | -| `cost.estimated` | boolean | The estimated cost of an operation before execution. | -| `cost.delta` | boolean | The difference between the actual and estimated cost. | -| `cost.result` | boolean | The return code of the cost calculation. `COST_OK` or an [error code](../errors/#demand-control) | +| Attribute | Value | Description | +| ---------------- | ------- | ------------------------------------------------------------------------------------------------ | +| `cost.actual` | boolean | The actual cost of an operation, measured after execution. | +| `cost.estimated` | boolean | The estimated cost of an operation before execution. | +| `cost.delta` | boolean | The difference between the actual and estimated cost. | +| `cost.result` | boolean | The return code of the cost calculation. `COST_OK` or an [error code](../errors/#demand-control) | ### Selectors Selectors for `cost` can be applied to instruments, spans, and events—anywhere `supergraph` attributes are used. -| Key | Value | Default | Description | -| ---- | ---------- | ------- | -------------------------------------------------- | +| Key | Value | Default | Description | +| ------ | ---------------------------------------- | ------- | ----------------------------------------------------------------- | | `cost` | `estimated`, `actual`, `delta`, `result` | | The estimated, actual, or delta cost values, or the result string | - ### Examples #### Example instrument @@ -586,3 +331,111 @@ telemetry: graphql.operation.name: true cost.delta: true ``` + +#### Filtering by cost result + +In router telemetry, you can customize instruments that filter their output based on cost results. 
+ +For example, you can record the estimated cost when `cost.result` is `COST_ESTIMATED_TOO_EXPENSIVE`: + +```yaml title="router.yaml" +telemetry: + instrumentation: + instruments: + supergraph: + # custom instrument + cost.rejected.operations: + type: histogram + value: + # Estimated cost is used to populate the histogram + cost: estimated + description: "Estimated cost per rejected operation." + unit: delta + condition: + eq: + # Only show rejected operations. + - cost: result + - "COST_ESTIMATED_TOO_EXPENSIVE" + attributes: + graphql.operation.name: true # Graphql operation name is added as an attribute +``` + +### Configuring instrument output + +When analyzing the costs of operations, if your histograms are not granular enough or don't cover a sufficient range, you can modify the views in your telemetry configuration: + +```yaml +telemetry: + exporters: + metrics: + common: + views: + # Define a custom view because cost is different than the default latency-oriented view of OpenTelemetry + - name: cost.* + aggregation: + histogram: + buckets: + - 0 + - 10 + - 100 + - 1000 + - 10000 + - 100000 + - 1000000 +``` + + + +```text disableCopy=true showLineNumbers=false +# TYPE cost_actual histogram +cost_actual_bucket{otel_scope_name="apollo/router",le="0"} 0 +cost_actual_bucket{otel_scope_name="apollo/router",le="10"} 3 +cost_actual_bucket{otel_scope_name="apollo/router",le="100"} 5 +cost_actual_bucket{otel_scope_name="apollo/router",le="1000"} 11 +cost_actual_bucket{otel_scope_name="apollo/router",le="10000"} 19 +cost_actual_bucket{otel_scope_name="apollo/router",le="100000"} 20 +cost_actual_bucket{otel_scope_name="apollo/router",le="1000000"} 20 +cost_actual_bucket{otel_scope_name="apollo/router",le="+Inf"} 20 +cost_actual_sum{otel_scope_name="apollo/router"} 1097 +cost_actual_count{otel_scope_name="apollo/router"} 20 +# TYPE cost_delta histogram +cost_delta_bucket{otel_scope_name="apollo/router",le="0"} 0 +cost_delta_bucket{otel_scope_name="apollo/router",le="10"} 2 +cost_delta_bucket{otel_scope_name="apollo/router",le="100"} 9 +cost_delta_bucket{otel_scope_name="apollo/router",le="1000"} 7 +cost_delta_bucket{otel_scope_name="apollo/router",le="10000"} 19 +cost_delta_bucket{otel_scope_name="apollo/router",le="100000"} 20 +cost_delta_bucket{otel_scope_name="apollo/router",le="1000000"} 20 +cost_delta_bucket{otel_scope_name="apollo/router",le="+Inf"} 20 +cost_delta_sum{otel_scope_name="apollo/router"} 21934 +cost_delta_count{otel_scope_name="apollo/router"} 1 +# TYPE cost_estimated histogram +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="0"} 0 +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="10"} 5 +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="100"} 5 +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="1000"} 9 +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="10000"} 11 +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="100000"} 20 +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="1000000"} 20 +cost_estimated_bucket{cost_result="COST_OK",otel_scope_name="apollo/router",le="+Inf"} 20 +cost_estimated_sum{cost_result="COST_OK",otel_scope_name="apollo/router"} +cost_estimated_count{cost_result="COST_OK",otel_scope_name="apollo/router"} 20 +``` + + + +An example chart of a histogram: + + + +You can also chart the percentage of operations that would be allowed or rejected with 
the current configuration:
+
+
diff --git a/examples/supergraph-sdl/rust/Cargo.toml b/examples/supergraph-sdl/rust/Cargo.toml
index 2326e049e7..18e0c2d85c 100644
--- a/examples/supergraph-sdl/rust/Cargo.toml
+++ b/examples/supergraph-sdl/rust/Cargo.toml
@@ -5,7 +5,7 @@ edition = "2021"
 [dependencies]
 anyhow = "1"
-apollo-compiler = "=1.0.0-beta.19"
+apollo-compiler = "=1.0.0-beta.20"
 apollo-router = { path = "../../../apollo-router" }
 async-trait = "0.1"
 tower = { version = "0.4", features = ["full"] }
diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml
index d332194093..f01ccac721 100644
--- a/fuzz/Cargo.toml
+++ b/fuzz/Cargo.toml
@@ -12,15 +12,15 @@ cargo-fuzz = true
 [dependencies]
 libfuzzer-sys = "0.4"
 apollo-compiler.workspace = true
-apollo-parser = "0.7.6"
-apollo-smith = "0.9.0"
+apollo-parser.workspace = true
+apollo-smith.workspace = true
 env_logger = "0.10.2"
 log = "0.4"
 reqwest = { workspace = true, features = ["json", "blocking"] }
 serde_json.workspace = true
 tokio.workspace = true
 # note: this dependency should _always_ be pinned, prefix the version with an `=`
-router-bridge = "=0.5.31+v2.8.5"
+router-bridge = "=0.6.0+v2.9.0"

 [dev-dependencies]
 anyhow = "1"
diff --git a/fuzz/subgraph/Cargo.toml b/fuzz/subgraph/Cargo.toml
index a6be7c3c72..e04f35066e 100644
--- a/fuzz/subgraph/Cargo.toml
+++ b/fuzz/subgraph/Cargo.toml
@@ -4,10 +4,10 @@ version = "0.1.0"
 edition = "2021"

 [dependencies]
-actix-web = { version = "4", features = ["default"] }
-async-graphql = "5"
-async-graphql-actix-web = "5"
-env_logger = "0.9.0"
+axum = "0.6.20"
+async-graphql = "6"
+async-graphql-axum = "6"
+env_logger = "0.10"
 futures = "0.3.17"
 lazy_static = "1.4.0"
 log = "0.4.16"
@@ -15,3 +15,4 @@ moka = { version = "0.8.5", features = ["future"] }
 rand = { version = "0.8.5", features = ["std_rng"] }
 serde_json = "1.0.79"
 tokio = { version = "1.22.0", features = ["time", "full"] }
+tower = "0.4.0"
diff --git a/fuzz/subgraph/src/main.rs b/fuzz/subgraph/src/main.rs
index d6e497a9a6..0be9550c7c 100644
--- a/fuzz/subgraph/src/main.rs
+++ b/fuzz/subgraph/src/main.rs
@@ -1,66 +1,37 @@
-use std::time::Duration;
-
-use actix_web::get;
-use actix_web::post;
-use actix_web::web;
-use actix_web::web::Data;
-use actix_web::App;
-use actix_web::HttpResponse;
-use actix_web::HttpServer;
-use actix_web::Result;
-use async_graphql::http::playground_source;
-use async_graphql::http::GraphQLPlaygroundConfig;
 use async_graphql::EmptySubscription;
-use async_graphql::Schema;
-use async_graphql_actix_web::GraphQLRequest;
+use async_graphql_axum::GraphQLRequest;
+use async_graphql_axum::GraphQLResponse;
+use axum::routing::post;
+use axum::Extension;
+use axum::Router;
+use tower::ServiceBuilder;

 use crate::model::Mutation;
 use crate::model::Query;

 mod model;

-#[post("/")]
-async fn index(
-    schema: web::Data<Schema<Query, Mutation, EmptySubscription>>,
-    mut req: GraphQLRequest,
-) -> HttpResponse {
+type Schema = async_graphql::Schema<Query, Mutation, EmptySubscription>;
+
+async fn graphql_handler(schema: Extension<Schema>, mut req: GraphQLRequest) -> GraphQLResponse {
     //Zero out the random variable
     req.0.variables.remove(&async_graphql::Name::new("random"));
     println!("query: {}", req.0.query);
-
-    let response = schema.execute(req.into_inner()).await;
-    let response_json = serde_json::to_string(&response).unwrap();
-
-    HttpResponse::Ok()
-        .content_type("application/json")
-        .body(response_json)
-}
-
-#[get("*")]
-async fn index_playground() -> Result<HttpResponse> {
-    Ok(HttpResponse::Ok()
-        .content_type("text/html; charset=utf-8")
-        .body(playground_source(
-            GraphQLPlaygroundConfig::new("/").subscription_endpoint("/"),
-        )))
+ schema.execute(req.into_inner()).await.into() } #[tokio::main] -async fn main() -> std::io::Result<()> { +async fn main() { env_logger::init(); println!("about to listen to http://localhost:4005"); - HttpServer::new(move || { - let schema = Schema::build(Query, Mutation, EmptySubscription).finish(); - App::new() - .app_data(Data::new(schema)) - //.wrap(EnsureKeepAlive) - //.wrap(DelayFor::default()) - .service(index) - .service(index_playground) - }) - .keep_alive(Duration::from_secs(75)) - .bind("0.0.0.0:4005")? - .run() - .await + let schema = Schema::build(Query, Mutation, EmptySubscription).finish(); + let router = Router::new() + .route("/", post(graphql_handler)) + .layer(ServiceBuilder::new().layer(Extension(schema))); + + axum::Server::bind(&"0.0.0.0:4005".parse().expect("Fixed address is valid")) + .serve(router.into_make_service()) + .await + .expect("Server failed to start") } diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index 4d7abff27b..40ba75ba53 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.52.1 +version: 1.53.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "v1.52.1" +appVersion: "v1.53.0" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index 706e25fe89..e962582547 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.52.1](https://img.shields.io/badge/Version-1.52.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.52.1](https://img.shields.io/badge/AppVersion-v1.52.1-informational?style=flat-square) +![Version: 1.53.0](https://img.shields.io/badge/Version-1.53.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.53.0](https://img.shields.io/badge/AppVersion-v1.53.0-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.52.1 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.52.1 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.52.1 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.53.0 --values my-values.yaml ``` _See [configuration](#configuration) below._ @@ -79,6 +79,7 @@ helm show values oci://ghcr.io/apollographql/helm-charts/router | probes.readiness | object | `{"initialDelaySeconds":0}` | Configure readiness probe | | replicaCount | int | `1` | | | resources | object | `{}` | | +| rollingUpdate | object | `{}` | Sets the [rolling 
update strategy parameters](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment). Can take absolute values or % values. | | router | object | `{"args":["--hot-reload"],"configuration":{"health_check":{"listen":"0.0.0.0:8088"},"supergraph":{"listen":"0.0.0.0:4000"}}}` | See https://www.apollographql.com/docs/router/configuration/overview/#yaml-config-file for yaml structure | | securityContext | object | `{}` | | | service.annotations | object | `{}` | | diff --git a/helm/chart/router/templates/deployment.yaml b/helm/chart/router/templates/deployment.yaml index d6af1a19b4..3268dd443e 100644 --- a/helm/chart/router/templates/deployment.yaml +++ b/helm/chart/router/templates/deployment.yaml @@ -172,3 +172,13 @@ spec: topologySpreadConstraints: {{- toYaml . | nindent 8 }} {{- end }} + {{- if .Values.rollingUpdate }} + strategy: + rollingUpdate: + {{- if (hasKey .Values.rollingUpdate "maxUnavailable") }} + maxUnavailable: {{ .Values.rollingUpdate.maxUnavailable }} + {{- end }} + {{- if (hasKey .Values.rollingUpdate "maxSurge") }} + maxSurge: {{ .Values.rollingUpdate.maxSurge }} + {{- end }} + {{- end }} diff --git a/helm/chart/router/values.yaml b/helm/chart/router/values.yaml index b90f46dc66..b4d437bb7c 100644 --- a/helm/chart/router/values.yaml +++ b/helm/chart/router/values.yaml @@ -221,6 +221,13 @@ autoscaling: # type: cpu # targetUtilizationPercentage: 75 +# -- Sets the [rolling update strategy parameters](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment). Can take absolute values or % values. +rollingUpdate: + {} +# Defaults if not set are: +# maxUnavailable: 25% +# maxSurge: 25% + nodeSelector: {} tolerations: [] diff --git a/licenses.html b/licenses.html index 32b2ee310b..37df6f6803 100644 --- a/licenses.html +++ b/licenses.html @@ -44,7 +44,7 @@

Third Party Licenses

Overview of licenses:

    -
  • Apache License 2.0 (488)
  • +
  • Apache License 2.0 (468)
  • MIT License (163)
  • BSD 3-Clause "New" or "Revised" License (11)
  • ISC License (8)
  • @@ -65,6 +65,7 @@

    Used by:

  • aws-config
  • aws-credential-types
  • aws-runtime
  • +
  • aws-sigv4
  • aws-smithy-async
  • aws-smithy-http
  • aws-smithy-json
  • @@ -3575,230 +3576,6 @@

    Used by:

    Copyright 2017 Juniper Networks, Inc. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -
-  • Apache License 2.0
-
-    Used by:
-
    -      result of this License or out of the use or inability to use the
    -      Work (including but not limited to damages for loss of goodwill,
    -      work stoppage, computer failure or malfunction, or any and all
    -      other commercial damages or losses), even if such Contributor
    -      has been advised of the possibility of such damages.
    -
    -   9. Accepting Warranty or Additional Liability. While redistributing
    -      the Work or Derivative Works thereof, You may choose to offer,
    -      and charge a fee for, acceptance of support, warranty, indemnity,
    -      or other liability obligations and/or rights consistent with this
    -      License. However, in accepting such obligations, You may act only
    -      on Your own behalf and on Your sole responsibility, not on behalf
    -      of any other Contributor, and only if You agree to indemnify,
    -      defend, and hold each Contributor harmless for any liability
    -      incurred by, or claims asserted against, such Contributor by reason
    -      of your accepting any such warranty or additional liability.
    -
    -   END OF TERMS AND CONDITIONS
    -
    -   APPENDIX: How to apply the Apache License to your work.
    -
    -      To apply the Apache License to your work, attach the following
    -      boilerplate notice, with the fields enclosed by brackets "{}"
    -      replaced with your own identifying information. (Don't include
    -      the brackets!)  The text should be enclosed in the appropriate
    -      comment syntax for the file format. We also recommend that a
    -      file or class name and description of purpose be included on the
    -      same "printed page" as the copyright notice for easier
    -      identification within third-party archives.
    -
    -   Copyright 2017-NOW Actix Team
    -
        Licensed under the Apache License, Version 2.0 (the "License");
        you may not use this file except in compliance with the License.
        You may obtain a copy of the License at
    @@ -6734,7 +6511,6 @@ 

    Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
@@ -8642,6 +8418,7 @@

Used by:

 cc
 cfg-if
 cfg-if
+ci_info
 cmake
 concurrent-queue
 const-random
@@ -8658,7 +8435,6 @@

Used by:

 derive_arbitrary
 displaydoc
 either
-env_logger
 envmnt
 equivalent
 error-chain
@@ -8682,7 +8458,6 @@

Used by:

 hdrhistogram
 heck
 heck
-hermit-abi
 hermit-abi
 httparse
 humantime-serde
  • @@ -11509,6 +11284,16 @@

    Used by:

    additional terms or conditions.
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    ../../LICENSE-APACHE
    +
  • Apache License 2.0

    Used by:

@@ -12156,16 +11941,10 @@

Used by:

 Apache License 2.0

 Used by:

-apollo-compiler
-apollo-encoder
-apollo-parser
-apollo-smith
-apollo-smith
-async-graphql-actix-web
+async-graphql-axum
 async-graphql-derive
 async-graphql-parser
 async-graphql-value
-buildstructor
 deno-proc-macro-rules
 deno-proc-macro-rules-macros
 dunce
@@ -12175,7 +11954,6 @@

Used by:

 graphql_query_derive
 http-serde
 ident_case
-language-tags
 libssh2-sys
 linkme-impl
 md5
@@ -12285,6 +12063,27 @@

Used by:

 http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
+Apache License 2.0
+
+Used by:
+
+
+Copyright [2022] [Bryn Cooke]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -13648,34 +13447,6 @@

Used by:

 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-
-MIT License
-
-Used by:
-
-
-Copyright (c) 2015-2019 Doug Tangren
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
@@ -14059,6 +13830,66 @@

Used by:

 shall be included in all copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
+
+
+MIT License
+
+Used by:
+
+
+Copyright (c) 2019 Carl Lerche
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
+Copyright (c) 2018 David Tolnay
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
 ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
 TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
@@ -15065,8 +14896,6 @@

Used by:

 MIT License

 Used by:

-async-stream
-async-stream-impl
 base64-simd
 convert_case
 cookie-factory
      • @@ -15506,7 +15335,6 @@

        Used by:

        MIT License

        Used by:

        The MIT License (MIT)
@@ -16000,6 +15828,8 @@

Used by:

 MIT License

 Used by:

+aho-corasick
+byteorder
 globset
 memchr
 regex-automata
@@ -16408,7 +16238,6 @@

Used by:

 Mozilla Public License 2.0

 Used by:

 Mozilla Public License Version 2.0
@@ -16791,6 +16620,7 @@

Used by:

 Mozilla Public License 2.0

 Used by:

 Mozilla Public License Version 2.0
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index b94b409b13..0c7dc7c811 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -1,4 +1,4 @@
 [toolchain]
 # renovate-automation: rustc version
-channel = "1.76.0"
+channel = "1.76.0" # If updated, remove `rowan` dependency from apollo-router/Cargo.toml
 components = [ "rustfmt", "clippy" ]
diff --git a/scripts/install.sh b/scripts/install.sh
index a6e078d154..f250d420a1 100755
--- a/scripts/install.sh
+++ b/scripts/install.sh
@@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa
 
 # Router version defined in apollo-router's Cargo.toml
 # Note: Change this line manually during the release steps.
-PACKAGE_VERSION="v1.52.1"
+PACKAGE_VERSION="v1.53.0"
 
 download_binary() {
     downloader --check