diff --git a/.circleci/config.yml b/.circleci/config.yml index 50d08f1d50..051f824bd0 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -5,8 +5,8 @@ version: 2.1 # These "CircleCI Orbs" are reusable bits of configuration that can be shared # across projects. See https://circleci.com/orbs/ for more information. orbs: - gh: circleci/github-cli@2.3.0 - slack: circleci/slack@4.12.6 + gh: circleci/github-cli@2.6.0 + slack: circleci/slack@5.1.1 secops: apollo/circleci-secops-orb@2.0.7 executors: @@ -24,9 +24,9 @@ executors: amd_linux_test: &amd_linux_test_executor docker: - image: cimg/base:stable - - image: cimg/redis:7.2.4 + - image: cimg/redis:7.4.1 - image: jaegertracing/all-in-one:1.54.0 - - image: openzipkin/zipkin:2.23.2 + - image: openzipkin/zipkin:3.4.3 - image: ghcr.io/datadog/dd-apm-test-agent/ddapm-test-agent:v1.17.0 resource_class: xlarge environment: @@ -376,9 +376,12 @@ commands: - run: name: Install cargo deny, about, edit command: | + # Until we are able to update rustc to at least 1.81.0, + # we need special handling of the cargo-about command. + rustup install 1.83.0 + cargo +1.83.0 install --locked --version 0.6.6 cargo-about if [[ ! -f "$HOME/.cargo/bin/cargo-deny$EXECUTABLE_SUFFIX" ]]; then cargo install --locked --version 0.14.21 cargo-deny - cargo install --locked --version 0.6.1 cargo-about cargo install --locked --version 0.12.2 cargo-edit cargo install --locked --version 0.12.0 cargo-fuzz fi diff --git a/.github/workflows/update_apollo_protobuf.yaml b/.github/workflows/update_apollo_protobuf.yaml index cdb6aa84b4..fc9659c1b8 100644 --- a/.github/workflows/update_apollo_protobuf.yaml +++ b/.github/workflows/update_apollo_protobuf.yaml @@ -9,13 +9,13 @@ jobs: Update-Protobuf-Schema: runs-on: ubuntu-latest steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Make changes to pull request run: | curl -f https://usage-reporting.api.apollographql.com/proto/reports.proto > ./apollo-router/src/plugins/telemetry/proto/reports.proto - name: Create Pull Request id: cpr - uses: peter-evans/create-pull-request@v6 + uses: peter-evans/create-pull-request@v7 with: commit-message: Update Apollo Protobuf schema committer: GitHub diff --git a/.github/workflows/update_uplink_schema.yml b/.github/workflows/update_uplink_schema.yml index dd89b1ecdb..2b80c65946 100644 --- a/.github/workflows/update_uplink_schema.yml +++ b/.github/workflows/update_uplink_schema.yml @@ -9,7 +9,7 @@ jobs: Update-Uplink-Schema: runs-on: ubuntu-latest steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: Install Rover run: | curl -sSL https://rover.apollo.dev/nix/v0.14.1 | sh @@ -19,7 +19,7 @@ jobs: rover graph introspect https://uplink.api.apollographql.com/ | perl -pe 'chomp if eof' > ./apollo-router/src/uplink/uplink.graphql - name: Create Pull Request id: cpr - uses: peter-evans/create-pull-request@v6 + uses: peter-evans/create-pull-request@v7 with: commit-message: Update Uplink schema committer: GitHub diff --git a/CHANGELOG.md b/CHANGELOG.md index cbab02293c..39162412b6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,266 @@ All notable changes to Router will be documented in this file. This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html). 
+# [1.59.0] - 2024-12-17
+
+> [!IMPORTANT]
+> If you have enabled [distributed query plan caching](https://www.apollographql.com/docs/router/configuration/distributed-caching/#distributed-query-plan-caching), updates to the query planner in this release will result in query plan caches being regenerated rather than reused. On account of this, you should anticipate additional cache regeneration cost when updating to this router version while the new query plans come into service.
+
+## 🚀 Features
+
+### General availability of native query planner
+
+The router's native, Rust-based query planner is now [generally available](https://www.apollographql.com/docs/graphos/reference/feature-launch-stages#general-availability) and enabled by default.
+
+The native query planner achieves better performance for a variety of graphs. In our tests, we observe:
+
+* 10x median improvement in query planning time (observed via `apollo.router.query_planning.plan.duration`)
+* 2.9x improvement in the router's CPU utilization
+* 2.2x improvement in the router's memory usage
+
+> Note: you can expect generated plans and subgraph operations in the native query planner to have slight differences when compared to the legacy, JavaScript-based query planner. We've ascertained these differences to be semantically insignificant, based on comparing ~2.5 million known unique user operations in GraphOS, as well as comparing ~630 million operations across actual router deployments in shadow mode over a four-month period.
+
+The native query planner supports Federation v2 supergraphs. If you are using Federation v1 today, see our [migration guide](https://www.apollographql.com/docs/graphos/reference/migration/to-federation-version-2) on how to update your composition build step. Subgraph changes are typically not needed.
+
+The legacy, JavaScript-based query planner is deprecated in this release, but you can switch back to it if you are still using a Federation v1 supergraph:
+
+```
+experimental_query_planner_mode: legacy
+```
+
+> Note: The subgraph operations generated by the query planner are not guaranteed to be consistent from release to release. We strongly recommend against relying on the shape of planned subgraph operations, as new router features and optimizations will continuously affect them.
+
+By [@sachindshinde](https://github.com/sachindshinde),
+[@goto-bus-stop](https://github.com/goto-bus-stop),
+[@duckki](https://github.com/duckki),
+[@TylerBloom](https://github.com/TylerBloom),
+[@SimonSapin](https://github.com/SimonSapin),
+[@dariuszkuc](https://github.com/dariuszkuc),
+[@lrlna](https://github.com/lrlna), [@clenfest](https://github.com/clenfest),
+and [@o0Ignition0o](https://github.com/o0Ignition0o).
+
+### Ability to skip persisted query list safelisting enforcement via plugin ([PR #6403](https://github.com/apollographql/router/pull/6403))
+
+If safelisting is enabled, a `router_service` plugin can skip enforcement of the safelist (including the `require_id` check) by adding the key `apollo_persisted_queries::safelist::skip_enforcement` with value `true` to the request context.
+
+> Note: this doesn't affect the logging of unknown operations by the `persisted_queries.log_unknown` option.
+
+In cases where an operation would have been denied but is allowed because this context key is set, the attribute `persisted_queries.safelist.enforcement_skipped` is set on the `apollo.router.operations.persisted_queries` metric with value `true`.
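+
+For illustration, below is a minimal sketch of a native Rust plugin that sets this context key on every request. It is written against the standard `apollo-router` plugin API as we understand it; the plugin struct and registration namespace are hypothetical, and a real plugin would typically decide per request whether to skip enforcement:
+
+```rust
+use apollo_router::plugin::{Plugin, PluginInit};
+use apollo_router::register_plugin;
+use apollo_router::services::router;
+use schemars::JsonSchema;
+use serde::Deserialize;
+use tower::{BoxError, ServiceBuilder, ServiceExt};
+
+#[derive(Debug, Default, Deserialize, JsonSchema)]
+struct Conf {}
+
+// Hypothetical plugin; a real one would carry configuration deciding
+// which requests are exempt from safelist enforcement.
+struct SkipSafelist {}
+
+#[async_trait::async_trait]
+impl Plugin for SkipSafelist {
+    type Config = Conf;
+
+    async fn new(_init: PluginInit<Self::Config>) -> Result<Self, BoxError> {
+        Ok(Self {})
+    }
+
+    fn router_service(&self, service: router::BoxService) -> router::BoxService {
+        ServiceBuilder::new()
+            .map_request(|req: router::Request| {
+                // Ask the persisted queries layer to skip safelist enforcement
+                // (including the `require_id` check) for this request.
+                let _ = req
+                    .context
+                    .insert("apollo_persisted_queries::safelist::skip_enforcement", true);
+                req
+            })
+            .service(service)
+            .boxed()
+    }
+}
+
+// Hypothetical registration under an "example" namespace.
+register_plugin!("example", "skip_safelist", SkipSafelist);
+```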
+
+By [@glasser](https://github.com/glasser) in https://github.com/apollographql/router/pull/6403
+
+### Add fleet awareness plugin ([PR #6151](https://github.com/apollographql/router/pull/6151))
+
+A new `fleet_awareness` plugin has been added that reports telemetry to Apollo about the configuration and deployment of the router.
+
+The reported telemetry includes CPU and memory usage, CPU frequency, and other deployment characteristics such as operating system and cloud provider. For more details, along with a full list of the data captured and how to opt out, see our
+[data privacy policy](https://www.apollographql.com/docs/graphos/reference/data-privacy).
+
+By [@jonathanrainer](https://github.com/jonathanrainer), [@nmoutschen](https://github.com/nmoutschen), [@loshz](https://github.com/loshz)
+in https://github.com/apollographql/router/pull/6151
+
+### Add fleet awareness schema metric ([PR #6283](https://github.com/apollographql/router/pull/6283))
+
+The router now supports the `apollo.router.instance.schema` metric for its `fleet_detector` plugin. It has two attributes: `schema_hash` and `launch_id`.
+
+By [@loshz](https://github.com/loshz) and [@nmoutschen](https://github.com/nmoutschen) in https://github.com/apollographql/router/pull/6283
+
+### Support client name for persisted query lists ([PR #6198](https://github.com/apollographql/router/pull/6198))
+
+The persisted query manifest fetched from Apollo Uplink can now contain a `clientName` field in each operation. Two operations with the same `id` but different `clientName` values are considered distinct operations, and they may have distinct bodies.
+
+The router resolves the client name by taking the first of the following that exists:
+- Reading the `apollo_persisted_queries::client_name` context key that may be set by a `router_service` plugin
+- Reading the HTTP header named by `telemetry.apollo.client_name_header`, which defaults to `apollographql-client-name`
+
+If a client name can be resolved for a request, the router first tries to find a persisted query with the specified ID and the resolved client name.
+
+If there is no operation with that ID and client name, or if a client name cannot be resolved, the router tries to find a persisted query with the specified ID and no client name specified. This means that existing PQ lists that don't contain client names will continue to work.
+
+To learn more, see the [persisted queries](https://www.apollographql.com/docs/graphos/routing/security/persisted-queries#apollo_persisted_queriesclient_name) docs.
+
+By [@glasser](https://github.com/glasser) in https://github.com/apollographql/router/pull/6198
+
+## 🐛 Fixes
+
+### Fix coprocessor empty body object panic ([PR #6398](https://github.com/apollographql/router/pull/6398))
+
+Previously, the router would panic if a coprocessor responded with an empty body object at the supergraph stage:
+
+```json
+{
+  ... // other fields
+  "body": {} // empty object
+}
+```
+
+This has been fixed in this release.
+
+> Note: the previous issue didn't affect coprocessors that responded with well-formed responses.
+
+By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/6398
+
+### Ensure cost directives are picked up when not explicitly imported ([PR #6328](https://github.com/apollographql/router/pull/6328))
+
+With the recent composition changes, importing `@cost` results in a supergraph schema with the cost specification import at the top.
The `@cost` directive itself is not explicitly imported, as it's expected to be available as the default export from the cost link. In contrast, uses of `@listSize` translate to an explicit import in the supergraph.
+
+Old SDL link:
+
+```
+@link(
+  url: "https://specs.apollo.dev/cost/v0.1"
+  import: ["@cost", "@listSize"]
+)
+```
+
+New SDL link:
+
+```
+@link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@listSize"])
+```
+
+Instead of using the directive names from the import list in the link, the directive names now come from `SpecDefinition::directive_name_in_schema`, which is equivalent to the change we made on the composition side.
+
+By [@tninesling](https://github.com/tninesling) in https://github.com/apollographql/router/pull/6328
+
+### Fix query hashing algorithm ([PR #6205](https://github.com/apollographql/router/pull/6205))
+
+The router includes a schema-aware query hashing algorithm designed to return the same hash across schema updates if the query remains unaffected. This update enhances the algorithm by addressing various corner cases to improve its reliability and consistency.
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/6205
+
+### Fix typo in persisted query metric attribute ([PR #6332](https://github.com/apollographql/router/pull/6332))
+
+The `apollo.router.operations.persisted_queries` metric reports an attribute when a persisted query was not found.
+Previously, the attribute name was `persisted_quieries.not_found`, with one `i` too many. Now it's `persisted_queries.not_found`.
+
+By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/6332
+
+### Fix telemetry instrumentation using supergraph query selector ([PR #6324](https://github.com/apollographql/router/pull/6324))
+
+Previously, router telemetry instrumentation that used query selectors could log errors with messages such as `this is a bug and should not happen`.
+
+These errors have now been fixed, and configurations with query selectors such as the following work properly:
+
+```yaml title=router.yaml
+telemetry:
+  exporters:
+    metrics:
+      common:
+        views:
+          # Define a custom view because operation limits are different from the default latency-oriented view of OpenTelemetry
+          - name: oplimits.*
+            aggregation:
+              histogram:
+                buckets:
+                  - 0
+                  - 5
+                  - 10
+                  - 25
+                  - 50
+                  - 100
+                  - 500
+                  - 1000
+  instrumentation:
+    instruments:
+      supergraph:
+        oplimits.aliases:
+          value:
+            query: aliases
+          type: histogram
+          unit: number
+          description: "Aliases for an operation"
+        oplimits.depth:
+          value:
+            query: depth
+          type: histogram
+          unit: number
+          description: "Depth for an operation"
+        oplimits.height:
+          value:
+            query: height
+          type: histogram
+          unit: number
+          description: "Height for an operation"
+        oplimits.root_fields:
+          value:
+            query: root_fields
+          type: histogram
+          unit: number
+          description: "Root fields for an operation"
+```
+
+By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/6324
+
+### More consistent attributes on `apollo.router.operations.persisted_queries` metric ([PR #6403](https://github.com/apollographql/router/pull/6403))
+
+Version 1.28.1 added several *unstable* metrics, including `apollo.router.operations.persisted_queries`.
+
+When an operation is rejected, the router includes a `persisted_queries.safelist.rejected.unknown` attribute on the metric.
Previously, this attribute had the value `true` if the operation was logged (via `log_unknown`), and `false` if the operation was not logged. (The attribute is not included at all if the operation is not rejected.) This appears to have been a mistake, as you can also tell whether the operation was logged via the `persisted_queries.logged` attribute.
+
+The router now only sets this attribute to `true`, never to `false`. Note that these metrics are unstable and will continue to change.
+
+By [@glasser](https://github.com/glasser) in https://github.com/apollographql/router/pull/6403
+
+### Drop experimental reuse fragment query optimization option ([PR #6354](https://github.com/apollographql/router/pull/6354))
+
+Support for the experimental reuse fragment query optimization has been dropped. This implementation was not only very slow but also very buggy due to its complexity.
+
+Auto-generation of fragments is a much simpler (and faster) algorithm that in most cases produces better results. Fragment auto-generation has been the default optimization since the v1.58 release.
+
+By [@dariuszkuc](https://github.com/dariuszkuc) in https://github.com/apollographql/router/pull/6353
+
+## 📃 Configuration
+
+### Add version number to distributed query plan cache keys ([PR #6406](https://github.com/apollographql/router/pull/6406))
+
+The router now includes its version number in the cache keys of distributed cache entries. Given that a new router release may change how query plans are generated or represented, including the router version in a cache key enables the router to use separate cache entries for different versions.
+
+If you have enabled [distributed query plan caching](https://www.apollographql.com/docs/router/configuration/distributed-caching/#distributed-query-plan-caching), expect additional cache regeneration when updating to this router release.
+
+By [@SimonSapin](https://github.com/SimonSapin) in https://github.com/apollographql/router/pull/6406
+
+## 🛠 Maintenance
+
+### Remove catch_unwind wrapper around the native query planner ([PR #6397](https://github.com/apollographql/router/pull/6397))
+
+As part of internal maintenance of the query planner, the `catch_unwind` wrapper around the native query planner has been removed. This wrapper served as an extra safeguard against potential panics the native planner could produce. The native query planner, however, no longer has any code paths that could panic. We have also not witnessed a panic in the last four months, having processed 560 million real user operations through the native planner.
+
+This maintenance work also removes backtrace capture for federation errors, which was used for debugging and is no longer necessary as we have confidence in the native planner's implementation.
+
+By [@lrlna](https://github.com/lrlna) in https://github.com/apollographql/router/pull/6397
+
+### Deprecate various metrics ([PR #6350](https://github.com/apollographql/router/pull/6350))
+
+Several metrics have been deprecated in this release, in favor of OpenTelemetry-compatible alternatives:
+
+- `apollo_router_deduplicated_subscriptions_total` - use the `apollo.router.operations.subscriptions` metric's `subscriptions.deduplicated` attribute.
+- `apollo_authentication_failure_count` - use the `apollo.router.operations.authentication.jwt` metric's `authentication.jwt.failed` attribute.
+- `apollo_authentication_success_count` - use the `apollo.router.operations.authentication.jwt` metric instead.
If the `authentication.jwt.failed` attribute is *absent* or `false`, the authentication succeeded.
+- `apollo_require_authentication_failure_count` - use the `http.server.request.duration` metric's `http.response.status_code` attribute. Requests with authentication failures have HTTP status code 401.
+- `apollo_router_timeout` - this metric conflates timed-out requests from the client to the router with timed-out requests from the router to subgraphs. Timed-out requests have HTTP status code 504. Use the `http.response.status_code` attribute on the `http.server.request.duration` metric to identify timed-out router requests, and the same attribute on the `http.client.request.duration` metric to identify timed-out subgraph requests.
+
+The deprecated metrics will continue to work in the 1.x release line.
+
+By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/6350
+
+
+
 # [1.58.1] - 2024-12-05
 
 > [!IMPORTANT]
diff --git a/Cargo.lock b/Cargo.lock
index 4b1ab30f24..2e367d7c41 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -96,6 +96,21 @@ version = "0.2.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
 
+[[package]]
+name = "android-tzdata"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
+
+[[package]]
+name = "android_system_properties"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
+dependencies = [
+ "libc",
+]
+
 [[package]]
 name = "anes"
 version = "0.1.6"
@@ -176,9 +191,20 @@ dependencies = [
  "uuid",
 ]
 
+[[package]]
+name = "apollo-environment-detector"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c628346f10c7615f1dd9e3f486d55bcad9edb667f4444dcbcb9cb5943815583a"
+dependencies = [
+ "libc",
+ "serde",
+ "wmi",
+]
+
 [[package]]
 name = "apollo-federation"
-version = "1.58.1"
+version = "1.59.0"
 dependencies = [
  "apollo-compiler",
  "derive_more",
@@ -231,12 +257,13 @@ dependencies = [
 
 [[package]]
 name = "apollo-router"
-version = "1.58.1"
+version = "1.59.0"
 dependencies = [
  "access-json",
  "ahash",
  "anyhow",
  "apollo-compiler",
+ "apollo-environment-detector",
  "apollo-federation",
  "arc-swap",
  "async-channel 1.9.0",
@@ -362,6 +389,7 @@ dependencies = [
  "static_assertions",
  "strum_macros 0.26.4",
  "sys-info",
+ "sysinfo",
  "tempfile",
  "test-log",
  "thiserror",
@@ -399,7 +427,7 @@ dependencies = [
 
 [[package]]
 name = "apollo-router-benchmarks"
-version = "1.58.1"
+version = "1.59.0"
 dependencies = [
  "apollo-parser",
  "apollo-router",
@@ -415,7 +443,7 @@ dependencies = [
 
 [[package]]
 name = "apollo-router-scaffold"
-version = "1.58.1"
+version = "1.59.0"
 dependencies = [
  "anyhow",
  "cargo-scaffold",
@@ -651,7 +679,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "strum 0.25.0",
- "syn 2.0.76",
+ "syn 2.0.90",
  "thiserror",
 ]
 
@@ -819,7 +847,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.76",
+ "syn 2.0.90",
 ]
 
 [[package]]
@@ -836,7 +864,7 @@ checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.76",
+ "syn 2.0.90",
 ]
 
 [[package]]
@@ -1410,7 +1438,7 @@ dependencies = [
 "proc-macro2",
 "quote",
 "str_inflector",
- "syn 2.0.76",
+ "syn 2.0.90",
"thiserror", "try_match", ] @@ -1517,6 +1545,19 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "chrono" +version = "0.4.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "num-traits", + "serde", + "windows-targets 0.52.6", +] + [[package]] name = "ci_info" version = "0.14.14" @@ -1586,7 +1627,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -1926,7 +1967,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -1937,7 +1978,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -1997,7 +2038,7 @@ checksum = "3c65c2ffdafc1564565200967edc4851c7b55422d3913466688907efd05ea26f" dependencies = [ "deno-proc-macro-rules-macros", "proc-macro2", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -2009,7 +2050,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -2064,7 +2105,7 @@ dependencies = [ "strum 0.25.0", "strum_macros 0.25.3", "syn 1.0.109", - "syn 2.0.76", + "syn 2.0.90", "thiserror", ] @@ -2145,7 +2186,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -2158,7 +2199,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -2249,7 +2290,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -2355,7 +2396,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -2629,7 +2670,7 @@ checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -2797,7 +2838,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -2896,7 +2937,7 @@ checksum = "b0e085ded9f1267c32176b40921b9754c474f7dd96f7e808d4a982e48aa1e854" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -3446,6 +3487,147 @@ dependencies = [ "tokio", ] +[[package]] +name = "iana-time-zone" +version = "0.1.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core 0.52.0", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + 
"displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -3464,12 +3646,23 @@ dependencies = [ [[package]] name = "idna" -version = "0.5.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", ] [[package]] @@ -3823,9 +4016,9 @@ checksum = 
"bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.155" +version = "0.2.167" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc" [[package]] name = "libfuzzer-sys" @@ -3933,7 +4126,7 @@ checksum = "f8dccda732e04fa3baf2e17cf835bfe2601c7c2edafd64417c627dabae3a8cda" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -3948,6 +4141,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" + [[package]] name = "lock_api" version = "0.4.12" @@ -4122,7 +4321,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -4194,6 +4393,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "ntapi" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" +dependencies = [ + "winapi", +] + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -4783,7 +4991,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -4826,7 +5034,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -4904,7 +5112,7 @@ checksum = "52a40bc70c2c58040d2d8b167ba9a5ff59fc9dab7ad44771cfde3dcfde7a09c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -5013,9 +5221,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -5115,7 +5323,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -5513,7 +5721,7 @@ checksum = "a5a11a05ee1ce44058fa3d5961d05194fdbe3ad6b40f904af764d81b86450e6b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -5645,7 +5853,7 @@ dependencies = [ "proc-macro2", "quote", "rust-embed-utils", - "syn 2.0.76", + "syn 2.0.90", "walkdir", ] @@ -5821,7 +6029,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -5906,9 +6114,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.204" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] @@ -5924,13 +6132,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -5941,7 +6149,7 @@ checksum = "afb2522c2a87137bf6c2b3493127fed12877ef1b9476f074d6664edc98acd8a7" dependencies = [ "quote", "regex", - "syn 2.0.76", + "syn 2.0.90", "thiserror", ] @@ -5953,7 +6161,7 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -6075,7 +6283,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -6302,7 +6510,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -6315,7 +6523,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -6349,9 +6557,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.76" +version = "2.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578e081a14e0cefc3279b0472138c513f37b41a08d5a3cca9b6e4e8ceb6cd525" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" dependencies = [ "proc-macro2", "quote", @@ -6364,6 +6572,17 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "sys-info" version = "0.9.1" @@ -6374,6 +6593,20 @@ dependencies = [ "libc", ] +[[package]] +name = "sysinfo" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5ae3f4f7d64646c46c4cae4e3f01d1c5d255c7406fdd7c7f999a94e488791" +dependencies = [ + "core-foundation-sys", + "libc", + "memchr", + "ntapi", + "rayon", + "windows 0.57.0", +] + [[package]] name = "system-configuration" version = "0.5.1" @@ -6450,7 +6683,7 @@ checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -6485,7 +6718,7 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -6601,6 +6834,16 @@ dependencies = [ "crunchy", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -6663,7 +6906,7 @@ checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -6932,7 +7175,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -7044,7 +7287,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -7080,7 +7323,7 @@ checksum = "b0a91713132798caecb23c977488945566875e7b61b902fb111979871cbff34e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", ] [[package]] @@ -7265,12 +7508,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 0.5.0", + "idna 1.0.3", "percent-encoding", "serde", ] @@ -7300,6 +7543,18 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -7308,9 +7563,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" dependencies = [ "getrandom 0.2.15", "serde", @@ -7423,7 +7678,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", "wasm-bindgen-shared", ] @@ -7457,7 +7712,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -7546,6 +7801,132 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" +dependencies = [ + "windows-core 0.57.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core 0.58.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" +dependencies = [ + "windows-implement 0.57.0", + "windows-interface 0.57.0", + "windows-result 0.1.2", + "windows-targets 0.52.6", +] + 
+[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement 0.58.0", + "windows-interface 0.58.0", + "windows-result 0.2.0", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-implement" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "windows-interface" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + +[[package]] +name = "windows-result" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result 0.2.0", + "windows-targets 0.52.6", +] + [[package]] name = "windows-sys" version = "0.45.0" @@ -7801,6 +8182,33 @@ dependencies = [ "tokio", ] +[[package]] +name = "wmi" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70df482bbec7017ce4132154233642de658000b24b805345572036782a66ad55" +dependencies = [ + "chrono", + "futures", + "log", + "serde", + "thiserror", + "windows 0.58.0", + "windows-core 0.58.0", +] + +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "wsl" version = "0.1.0" @@ -7828,6 +8236,30 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + 
"zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", + "synstructure", +] + [[package]] name = "zerocopy" version = "0.7.35" @@ -7845,7 +8277,28 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.76", + "syn 2.0.90", +] + +[[package]] +name = "zerofrom" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", + "synstructure", ] [[package]] @@ -7854,6 +8307,28 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "zstd" version = "0.13.2" diff --git a/about.toml b/about.toml index 094647afae..23c6c3ef58 100644 --- a/about.toml +++ b/about.toml @@ -9,6 +9,7 @@ accepted = [ "LicenseRef-ring", "MIT", "MPL-2.0", + "Unicode-3.0", "Unicode-DFS-2016", "Zlib" ] diff --git a/apollo-federation/Cargo.toml b/apollo-federation/Cargo.toml index 739d379f60..9469277646 100644 --- a/apollo-federation/Cargo.toml +++ b/apollo-federation/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-federation" -version = "1.58.1" +version = "1.59.0" authors = ["The Apollo GraphQL Contributors"] edition = "2021" description = "Apollo Federation" diff --git a/apollo-federation/cli/src/main.rs b/apollo-federation/cli/src/main.rs index 28bb5f7921..c89afb689c 100644 --- a/apollo-federation/cli/src/main.rs +++ b/apollo-federation/cli/src/main.rs @@ -23,12 +23,12 @@ struct QueryPlannerArgs { /// Enable @defer support. #[arg(long, default_value_t = false)] enable_defer: bool, - /// Reuse fragments to compress subgraph queries. - #[arg(long, default_value_t = false)] - reuse_fragments: bool, /// Generate fragments to compress subgraph queries. #[arg(long, default_value_t = false)] generate_fragments: bool, + /// Enable type conditioned fetching. + #[arg(long, default_value_t = false)] + type_conditioned_fetching: bool, /// Run GraphQL validation check on generated subgraph queries. (default: true) #[arg(long, default_missing_value = "true", require_equals = true, num_args = 0..=1)] subgraph_validation: Option, @@ -38,10 +38,6 @@ struct QueryPlannerArgs { /// Set the `debug.paths_limit` option. #[arg(long)] paths_limit: Option, - /// If the supergraph only represents a single subgraph, pass through queries directly without - /// planning. 
- #[arg(long, default_value_t = false)] - single_subgraph_passthrough: bool, } /// CLI arguments. See @@ -109,15 +105,13 @@ enum Command { impl QueryPlannerArgs { fn apply(&self, config: &mut QueryPlannerConfig) { config.incremental_delivery.enable_defer = self.enable_defer; - // --generate-fragments trumps --reuse-fragments - config.reuse_query_fragments = self.reuse_fragments && !self.generate_fragments; config.generate_query_fragments = self.generate_fragments; + config.type_conditioned_fetching = self.type_conditioned_fetching; config.subgraph_graphql_validation = self.subgraph_validation.unwrap_or(true); if let Some(max_evaluated_plans) = self.max_evaluated_plans { config.debug.max_evaluated_plans = max_evaluated_plans; } config.debug.paths_limit = self.paths_limit; - config.debug.bypass_planner_for_single_subgraph = self.single_subgraph_passthrough; } } diff --git a/apollo-federation/src/display_helpers.rs b/apollo-federation/src/display_helpers.rs index 330ec64003..898e7efd3e 100644 --- a/apollo-federation/src/display_helpers.rs +++ b/apollo-federation/src/display_helpers.rs @@ -1,9 +1,6 @@ use std::fmt; -use std::fmt::Debug; use std::fmt::Display; -use serde::Serializer; - pub(crate) struct State<'fmt, 'fmt2> { indent_level: usize, output: &'fmt mut fmt::Formatter<'fmt2>, @@ -98,30 +95,3 @@ impl Display for DisplayOption { } } } - -pub(crate) fn serialize_as_debug_string(data: &T, ser: S) -> Result -where - T: Debug, - S: Serializer, -{ - ser.serialize_str(&format!("{data:?}")) -} - -pub(crate) fn serialize_as_string(data: &T, ser: S) -> Result -where - T: ToString, - S: Serializer, -{ - ser.serialize_str(&data.to_string()) -} - -pub(crate) fn serialize_optional_vec_as_string( - data: &Option>, - ser: S, -) -> Result -where - T: Display, - S: Serializer, -{ - serialize_as_string(&DisplayOption(data.as_deref().map(DisplaySlice)), ser) -} diff --git a/apollo-federation/src/error/mod.rs b/apollo-federation/src/error/mod.rs index 8d1ceb156b..caeecf0a4a 100644 --- a/apollo-federation/src/error/mod.rs +++ b/apollo-federation/src/error/mod.rs @@ -1,10 +1,8 @@ -use std::backtrace::Backtrace; use std::cmp::Ordering; use std::fmt::Display; use std::fmt::Formatter; use std::fmt::Write; -use apollo_compiler::executable::GetOperationError; use apollo_compiler::validation::DiagnosticList; use apollo_compiler::validation::WithErrors; use apollo_compiler::InvalidNameError; @@ -84,7 +82,7 @@ macro_rules! ensure { #[cfg(not(debug_assertions))] if !$expr { - $crate::internal_error!( $( $arg )+ ); + $crate::bail!( $( $arg )+ ); } } } @@ -109,10 +107,6 @@ impl From for String { #[derive(Clone, Debug, strum_macros::Display, PartialEq, Eq)] pub enum UnsupportedFeatureKind { - #[strum(to_string = "defer")] - Defer, - #[strum(to_string = "context")] - Context, #[strum(to_string = "alias")] Alias, } @@ -126,6 +120,9 @@ pub enum SingleFederationError { #[error("An internal error has occurred, please report this bug to Apollo. Details: {0}")] #[allow(private_interfaces)] // users should not inspect this. InternalRebaseError(#[from] crate::operation::RebaseError), + // This is a known bug that will take time to fix, and does not require reporting. 
+ #[error("{message}")] + InternalUnmergeableFields { message: String }, #[error("{diagnostics}")] InvalidGraphQL { diagnostics: DiagnosticList }, #[error(transparent)] @@ -134,6 +131,8 @@ pub enum SingleFederationError { InvalidSubgraph { message: String }, #[error("Operation name not found")] UnknownOperation, + #[error("Must provide operation name if query contains multiple operations")] + OperationNameNotProvided, #[error("Unsupported custom directive @{name} on fragment spread. Due to query transformations during planning, the router requires directives on fragment spreads to support both the FRAGMENT_SPREAD and INLINE_FRAGMENT locations.")] UnsupportedSpreadDirective { name: Name }, #[error("{message}")] @@ -302,15 +301,17 @@ impl SingleFederationError { match self { SingleFederationError::Internal { .. } => ErrorCode::Internal, SingleFederationError::InternalRebaseError { .. } => ErrorCode::Internal, + SingleFederationError::InternalUnmergeableFields { .. } => ErrorCode::Internal, SingleFederationError::InvalidGraphQL { .. } | SingleFederationError::InvalidGraphQLName(_) => ErrorCode::InvalidGraphQL, SingleFederationError::InvalidSubgraph { .. } => ErrorCode::InvalidGraphQL, - // TODO(@goto-bus-stop): this should have a different error code: it's not the graphql - // that's invalid, but the operation name - SingleFederationError::UnknownOperation => ErrorCode::InvalidGraphQL, // TODO(@goto-bus-stop): this should have a different error code: it's not invalid, // just unsupported due to internal limitations. SingleFederationError::UnsupportedSpreadDirective { .. } => ErrorCode::InvalidGraphQL, + // TODO(@goto-bus-stop): this should have a different error code: it's not the graphql + // that's invalid, but the operation name + SingleFederationError::UnknownOperation => ErrorCode::InvalidGraphQL, + SingleFederationError::OperationNameNotProvided => ErrorCode::InvalidGraphQL, SingleFederationError::DirectiveDefinitionInvalid { .. } => { ErrorCode::DirectiveDefinitionInvalid } @@ -492,12 +493,6 @@ impl From for FederationError { } } -impl From for FederationError { - fn from(_: GetOperationError) -> Self { - SingleFederationError::UnknownOperation.into() - } -} - impl From for FederationError { fn from(err: FederationSpecError) -> Self { // TODO: When we get around to finishing the composition port, we should really switch it to @@ -523,8 +518,8 @@ pub struct MultipleFederationErrors { impl MultipleFederationErrors { pub fn push(&mut self, error: FederationError) { match error { - FederationError::SingleFederationError { inner, .. } => { - self.errors.push(inner); + FederationError::SingleFederationError(error) => { + self.errors.push(error); } FederationError::MultipleFederationErrors(errors) => { self.errors.extend(errors.errors); @@ -591,22 +586,14 @@ impl Display for AggregateFederationError { } } -/// Work around thiserror, which when an error field has a type named `Backtrace` -/// "helpfully" implements `Error::provides` even though that API is not stable yet: -/// -type ThiserrorTrustMeThisIsTotallyNotABacktrace = Backtrace; - // PORT_NOTE: Often times, JS functions would either throw/return a GraphQLError, return a vector // of GraphQLErrors, or take a vector of GraphQLErrors and group them together under an // AggregateGraphQLError which itself would have a specific error message and code, and throw that. // We represent all these cases with an enum, and delegate to the members. 
-#[derive(thiserror::Error)] +#[derive(Clone, thiserror::Error)] pub enum FederationError { - #[error("{inner}")] - SingleFederationError { - inner: SingleFederationError, - trace: ThiserrorTrustMeThisIsTotallyNotABacktrace, - }, + #[error(transparent)] + SingleFederationError(#[from] SingleFederationError), #[error(transparent)] MultipleFederationErrors(#[from] MultipleFederationErrors), #[error(transparent)] @@ -616,22 +603,13 @@ pub enum FederationError { impl std::fmt::Debug for FederationError { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { - Self::SingleFederationError { inner, trace } => write!(f, "{inner}\n{trace}"), + Self::SingleFederationError(inner) => std::fmt::Debug::fmt(inner, f), Self::MultipleFederationErrors(inner) => std::fmt::Debug::fmt(inner, f), Self::AggregateFederationError(inner) => std::fmt::Debug::fmt(inner, f), } } } -impl From for FederationError { - fn from(inner: SingleFederationError) -> Self { - Self::SingleFederationError { - inner, - trace: Backtrace::capture(), - } - } -} - impl From for FederationError { fn from(value: DiagnosticList) -> Self { SingleFederationError::from(value).into() diff --git a/apollo-federation/src/link/argument.rs b/apollo-federation/src/link/argument.rs index d8ae1987f2..d6f9748438 100644 --- a/apollo-federation/src/link/argument.rs +++ b/apollo-federation/src/link/argument.rs @@ -100,22 +100,6 @@ pub(crate) fn directive_optional_boolean_argument( } } -#[allow(dead_code)] -pub(crate) fn directive_required_boolean_argument( - application: &Node, - name: &Name, -) -> Result { - directive_optional_boolean_argument(application, name)?.ok_or_else(|| { - SingleFederationError::Internal { - message: format!( - "Required argument \"{}\" of directive \"@{}\" was not present.", - name, application.name - ), - } - .into() - }) -} - pub(crate) fn directive_optional_variable_boolean_argument( application: &Node, name: &Name, diff --git a/apollo-federation/src/link/cost_spec_definition.rs b/apollo-federation/src/link/cost_spec_definition.rs index 9e2aed07cc..bd0894c392 100644 --- a/apollo-federation/src/link/cost_spec_definition.rs +++ b/apollo-federation/src/link/cost_spec_definition.rs @@ -1,16 +1,20 @@ +use std::collections::HashSet; + use apollo_compiler::ast::Argument; use apollo_compiler::ast::Directive; -use apollo_compiler::collections::IndexMap; +use apollo_compiler::ast::DirectiveList; +use apollo_compiler::ast::FieldDefinition; +use apollo_compiler::ast::InputValueDefinition; use apollo_compiler::name; use apollo_compiler::schema::Component; -use apollo_compiler::schema::EnumType; -use apollo_compiler::schema::ObjectType; -use apollo_compiler::schema::ScalarType; +use apollo_compiler::schema::ExtendedType; use apollo_compiler::Name; use apollo_compiler::Node; use lazy_static::lazy_static; use crate::error::FederationError; +use crate::internal_error; +use crate::link::federation_spec_definition::get_federation_spec_definition_from_subgraph; use crate::link::spec::Identity; use crate::link::spec::Url; use crate::link::spec::Version; @@ -21,14 +25,17 @@ use crate::schema::position::ObjectTypeDefinitionPosition; use crate::schema::position::ScalarTypeDefinitionPosition; use crate::schema::FederationSchema; -pub(crate) const COST_DIRECTIVE_NAME_IN_SPEC: Name = name!("cost"); -pub(crate) const COST_DIRECTIVE_NAME_DEFAULT: Name = name!("federation__cost"); - -pub(crate) const LIST_SIZE_DIRECTIVE_NAME_IN_SPEC: Name = name!("listSize"); -pub(crate) const LIST_SIZE_DIRECTIVE_NAME_DEFAULT: Name = 
name!("federation__listSize"); +const COST_DIRECTIVE_NAME: Name = name!("cost"); +const COST_DIRECTIVE_WEIGHT_ARGUMENT_NAME: Name = name!("weight"); +const LIST_SIZE_DIRECTIVE_NAME: Name = name!("listSize"); +const LIST_SIZE_DIRECTIVE_ASSUMED_SIZE_ARGUMENT_NAME: Name = name!("assumedSize"); +const LIST_SIZE_DIRECTIVE_SLICING_ARGUMENTS_ARGUMENT_NAME: Name = name!("slicingArguments"); +const LIST_SIZE_DIRECTIVE_SIZED_FIELDS_ARGUMENT_NAME: Name = name!("sizedFields"); +const LIST_SIZE_DIRECTIVE_REQUIRE_ONE_SLICING_ARGUMENT_ARGUMENT_NAME: Name = + name!("requireOneSlicingArgument"); #[derive(Clone)] -pub(crate) struct CostSpecDefinition { +pub struct CostSpecDefinition { url: Url, minimum_federation_version: Option, } @@ -36,27 +43,24 @@ pub(crate) struct CostSpecDefinition { macro_rules! propagate_demand_control_directives { ($func_name:ident, $directives_ty:ty, $wrap_ty:expr) => { pub(crate) fn $func_name( - &self, - subgraph_schema: &FederationSchema, + supergraph_schema: &FederationSchema, source: &$directives_ty, + subgraph_schema: &FederationSchema, dest: &mut $directives_ty, - original_directive_names: &IndexMap, ) -> Result<(), FederationError> { - let cost_directive_name = original_directive_names.get(&COST_DIRECTIVE_NAME_IN_SPEC); - let cost_directive = cost_directive_name.and_then(|name| source.get(name.as_str())); + let cost_directive = Self::cost_directive_name(supergraph_schema)? + .and_then(|name| source.get(name.as_str())); if let Some(cost_directive) = cost_directive { - dest.push($wrap_ty(self.cost_directive( + dest.push($wrap_ty(Self::cost_directive( subgraph_schema, cost_directive.arguments.clone(), )?)); } - let list_size_directive_name = - original_directive_names.get(&LIST_SIZE_DIRECTIVE_NAME_IN_SPEC); - let list_size_directive = - list_size_directive_name.and_then(|name| source.get(name.as_str())); + let list_size_directive = Self::list_size_directive_name(supergraph_schema)? + .and_then(|name| source.get(name.as_str())); if let Some(list_size_directive) = list_size_directive { - dest.push($wrap_ty(self.list_size_directive( + dest.push($wrap_ty(Self::list_size_directive( subgraph_schema, list_size_directive.arguments.clone(), )?)); @@ -68,34 +72,31 @@ macro_rules! propagate_demand_control_directives { } macro_rules! propagate_demand_control_directives_to_position { - ($func_name:ident, $source_ty:ty, $dest_ty:ty) => { + ($func_name:ident, $source_ty:ty, $pos_ty:ty) => { pub(crate) fn $func_name( - &self, + supergraph_schema: &FederationSchema, subgraph_schema: &mut FederationSchema, - source: &Node<$source_ty>, - dest: &$dest_ty, - original_directive_names: &IndexMap, + pos: &$pos_ty, ) -> Result<(), FederationError> { - let cost_directive_name = original_directive_names.get(&COST_DIRECTIVE_NAME_IN_SPEC); - let cost_directive = - cost_directive_name.and_then(|name| source.directives.get(name.as_str())); + let source = pos.get(supergraph_schema.schema())?; + let cost_directive = Self::cost_directive_name(supergraph_schema)? 
+                .and_then(|name| source.directives.get(name.as_str()));
             if let Some(cost_directive) = cost_directive {
-                dest.insert_directive(
+                pos.insert_directive(
                     subgraph_schema,
-                    Component::from(
-                        self.cost_directive(subgraph_schema, cost_directive.arguments.clone())?,
-                    ),
+                    Component::from(Self::cost_directive(
+                        subgraph_schema,
+                        cost_directive.arguments.clone(),
+                    )?),
                 )?;
             }
-            let list_size_directive_name =
-                original_directive_names.get(&LIST_SIZE_DIRECTIVE_NAME_IN_SPEC);
-            let list_size_directive =
-                list_size_directive_name.and_then(|name| source.directives.get(name.as_str()));
+            let list_size_directive = Self::list_size_directive_name(supergraph_schema)?
+                .and_then(|name| source.directives.get(name.as_str()));
             if let Some(list_size_directive) = list_size_directive {
-                dest.insert_directive(
+                pos.insert_directive(
                     subgraph_schema,
-                    Component::from(self.list_size_directive(
+                    Component::from(Self::list_size_directive(
                         subgraph_schema,
                         list_size_directive.arguments.clone(),
                     )?),
@@ -119,35 +120,32 @@ impl CostSpecDefinition {
     }
 
     pub(crate) fn cost_directive(
-        &self,
         schema: &FederationSchema,
         arguments: Vec<Node<Argument>>,
     ) -> Result<Directive, FederationError> {
-        let name = self
-            .directive_name_in_schema(schema, &COST_DIRECTIVE_NAME_IN_SPEC)?
-            .unwrap_or(COST_DIRECTIVE_NAME_DEFAULT);
+        let name = Self::cost_directive_name(schema)?.ok_or_else(|| {
+            internal_error!("The \"@cost\" directive is undefined in the target schema")
+        })?;
 
         Ok(Directive { name, arguments })
     }
 
     pub(crate) fn list_size_directive(
-        &self,
         schema: &FederationSchema,
         arguments: Vec<Node<Argument>>,
     ) -> Result<Directive, FederationError> {
-        let name = self
-            .directive_name_in_schema(schema, &LIST_SIZE_DIRECTIVE_NAME_IN_SPEC)?
-            .unwrap_or(LIST_SIZE_DIRECTIVE_NAME_DEFAULT);
+        let name = Self::list_size_directive_name(schema)?.ok_or_else(|| {
+            internal_error!("The \"@listSize\" directive is undefined in the target schema")
+        })?;
 
         Ok(Directive { name, arguments })
     }
 
     propagate_demand_control_directives!(
         propagate_demand_control_directives,
-        apollo_compiler::ast::DirectiveList,
+        DirectiveList,
         Node::new
     );
-
     propagate_demand_control_directives_to_position!(
         propagate_demand_control_directives_for_enum,
         EnumType,
@@ -163,6 +161,81 @@ impl CostSpecDefinition {
         ScalarType,
         ScalarTypeDefinitionPosition
     );
+
+    fn for_federation_schema(schema: &FederationSchema) -> Option<&'static Self> {
+        let link = schema
+            .metadata()?
+            .for_identity(&Identity::cost_identity())?;
+        COST_VERSIONS.find(&link.url.version)
+    }
+
+    /// Returns the name of the `@cost` directive in the given schema, accounting for import aliases or specification name
+    /// prefixes such as `@federation__cost`. This checks the linked cost specification, if there is one, and falls back
+    /// to the federation spec.
+    fn cost_directive_name(schema: &FederationSchema) -> Result<Option<Name>, FederationError> {
+        if let Some(spec) = Self::for_federation_schema(schema) {
+            spec.directive_name_in_schema(schema, &COST_DIRECTIVE_NAME)
+        } else if let Ok(fed_spec) = get_federation_spec_definition_from_subgraph(schema) {
+            fed_spec.directive_name_in_schema(schema, &COST_DIRECTIVE_NAME)
+        } else {
+            Ok(None)
+        }
+    }
+
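The helpers above resolve directive names through the schema's `@link` metadata, so import aliases keep working end to end. As a usage illustration only (the helper name `field_weight` and its 1.0 default are assumptions, not part of this diff), a demand-control caller could combine the new public accessors added in this file like so:

```rust
use apollo_compiler::ast::FieldDefinition;
use apollo_compiler::schema::ExtendedType;

use crate::error::FederationError;
use crate::link::cost_spec_definition::CostSpecDefinition;
use crate::schema::FederationSchema;

// Hypothetical sketch: read a field's effective weight via the accessors added
// in this diff, defaulting to 1.0 when no `@cost` application is found. Name
// resolution (aliases, `federation__` prefixes) happens inside the accessor.
fn field_weight(
    schema: &FederationSchema,
    field: &FieldDefinition,
    ty: &ExtendedType,
) -> Result<f64, FederationError> {
    let cost = CostSpecDefinition::cost_directive_from_field(schema, field, ty)?;
    Ok(cost.map_or(1.0, |directive| directive.weight()))
}
```

`list_size_directive_name`, which follows, uses the same two-step lookup as `cost_directive_name`: prefer a linked cost specification, then fall back to the federation spec.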
+    /// Returns the name of the `@listSize` directive in the given schema, accounting for import aliases or specification name
+    /// prefixes such as `@federation__listSize`. This checks the linked cost specification, if there is one, and falls back
+    /// to the federation spec.
+    fn list_size_directive_name(
+        schema: &FederationSchema,
+    ) -> Result<Option<Name>, FederationError> {
+        if let Some(spec) = Self::for_federation_schema(schema) {
+            spec.directive_name_in_schema(schema, &LIST_SIZE_DIRECTIVE_NAME)
+        } else if let Ok(fed_spec) = get_federation_spec_definition_from_subgraph(schema) {
+            fed_spec.directive_name_in_schema(schema, &LIST_SIZE_DIRECTIVE_NAME)
+        } else {
+            Ok(None)
+        }
+    }
+
+    pub fn cost_directive_from_argument(
+        schema: &FederationSchema,
+        argument: &InputValueDefinition,
+        ty: &ExtendedType,
+    ) -> Result<Option<CostDirective>, FederationError> {
+        let directive_name = Self::cost_directive_name(schema)?;
+        if let Some(name) = directive_name.as_ref() {
+            Ok(CostDirective::from_directives(name, &argument.directives)
+                .or(CostDirective::from_schema_directives(name, ty.directives())))
+        } else {
+            Ok(None)
+        }
+    }
+
+    pub fn cost_directive_from_field(
+        schema: &FederationSchema,
+        field: &FieldDefinition,
+        ty: &ExtendedType,
+    ) -> Result<Option<CostDirective>, FederationError> {
+        let directive_name = Self::cost_directive_name(schema)?;
+        if let Some(name) = directive_name.as_ref() {
+            Ok(CostDirective::from_directives(name, &field.directives)
+                .or(CostDirective::from_schema_directives(name, ty.directives())))
+        } else {
+            Ok(None)
+        }
+    }
+
+    pub fn list_size_directive_from_field_definition(
+        schema: &FederationSchema,
+        field: &FieldDefinition,
+    ) -> Result<Option<ListSizeDirective>, FederationError> {
+        let directive_name = Self::list_size_directive_name(schema)?;
+        if let Some(name) = directive_name.as_ref() {
+            Ok(ListSizeDirective::from_field_definition(name, field))
+        } else {
+            Ok(None)
+        }
+    }
 }
 
 impl SpecDefinition for CostSpecDefinition {
@@ -185,3 +258,96 @@ lazy_static! {
         definitions
     };
 }
+
+pub struct CostDirective {
+    weight: i32,
+}
+
+impl CostDirective {
+    pub fn weight(&self) -> f64 {
+        self.weight as f64
+    }
+
+    fn from_directives(directive_name: &Name, directives: &DirectiveList) -> Option<Self> {
+        directives
+            .get(directive_name)?
+            .specified_argument_by_name(&COST_DIRECTIVE_WEIGHT_ARGUMENT_NAME)?
+            .to_i32()
+            .map(|weight| Self { weight })
+    }
+
+    fn from_schema_directives(
+        directive_name: &Name,
+        directives: &apollo_compiler::schema::DirectiveList,
+    ) -> Option<Self> {
+        directives
+            .get(directive_name)?
+            .specified_argument_by_name(&COST_DIRECTIVE_WEIGHT_ARGUMENT_NAME)?
+            .to_i32()
+            .map(|weight| Self { weight })
+    }
+}
+
+pub struct ListSizeDirective {
+    pub assumed_size: Option<i32>,
+    pub slicing_argument_names: Option<HashSet<String>>,
+    pub sized_fields: Option<HashSet<String>>,
+    pub require_one_slicing_argument: bool,
+}
+
+impl ListSizeDirective {
+    pub fn from_field_definition(
+        directive_name: &Name,
+        definition: &FieldDefinition,
+    ) -> Option<Self> {
+        let directive = definition.directives.get(directive_name)?;
+        let assumed_size = Self::assumed_size(directive);
+        let slicing_argument_names = Self::slicing_argument_names(directive);
+        let sized_fields = Self::sized_fields(directive);
+        let require_one_slicing_argument =
+            Self::require_one_slicing_argument(directive).unwrap_or(true);
+
+        Some(Self {
+            assumed_size,
+            slicing_argument_names,
+            sized_fields,
+            require_one_slicing_argument,
+        })
+    }
+
+    fn assumed_size(directive: &Directive) -> Option<i32> {
+        directive
+            .specified_argument_by_name(&LIST_SIZE_DIRECTIVE_ASSUMED_SIZE_ARGUMENT_NAME)?
+            .to_i32()
+    }
+
+    fn slicing_argument_names(directive: &Directive) -> Option<HashSet<String>> {
+        let names = directive
+            .specified_argument_by_name(&LIST_SIZE_DIRECTIVE_SLICING_ARGUMENTS_ARGUMENT_NAME)?
+            .as_list()?
+            .iter()
+            .flat_map(|arg| arg.as_str())
+            .map(String::from)
+            .collect();
+        Some(names)
+    }
+
+    fn sized_fields(directive: &Directive) -> Option<HashSet<String>> {
+        let fields = directive
+            .specified_argument_by_name(&LIST_SIZE_DIRECTIVE_SIZED_FIELDS_ARGUMENT_NAME)?
+            .as_list()?
+            .iter()
+            .flat_map(|arg| arg.as_str())
+            .map(String::from)
+            .collect();
+        Some(fields)
+    }
+
+    fn require_one_slicing_argument(directive: &Directive) -> Option<bool> {
+        directive
+            .specified_argument_by_name(
+                &LIST_SIZE_DIRECTIVE_REQUIRE_ONE_SLICING_ARGUMENT_ARGUMENT_NAME,
+            )?
+            .to_bool()
+    }
+}
diff --git a/apollo-federation/src/link/database.rs b/apollo-federation/src/link/database.rs
index ced0dc7b07..13e786f2c4 100644
--- a/apollo-federation/src/link/database.rs
+++ b/apollo-federation/src/link/database.rs
@@ -3,6 +3,7 @@ use std::sync::Arc;
 
 use apollo_compiler::ast::Directive;
 use apollo_compiler::ast::DirectiveLocation;
+use apollo_compiler::collections::HashSet;
 use apollo_compiler::collections::IndexMap;
 use apollo_compiler::schema::DirectiveDefinition;
 use apollo_compiler::ty;
@@ -14,6 +15,28 @@ use crate::link::Link;
 use crate::link::LinkError;
 use crate::link::LinksMetadata;
 use crate::link::DEFAULT_LINK_NAME;
+use crate::subgraph::spec::FEDERATION_V2_DIRECTIVE_NAMES;
+use crate::subgraph::spec::FEDERATION_V2_ELEMENT_NAMES;
+
+fn validate_federation_imports(link: &Link) -> Result<(), LinkError> {
+    let federation_directives: HashSet<_> = FEDERATION_V2_DIRECTIVE_NAMES.into_iter().collect();
+    let federation_elements: HashSet<_> = FEDERATION_V2_ELEMENT_NAMES.into_iter().collect();
+
+    for imp in &link.imports {
+        if imp.is_directive && !federation_directives.contains(&imp.element) {
+            return Err(LinkError::InvalidImport(format!(
+                "Cannot import unknown federation directive \"@{}\".",
+                imp.element,
+            )));
+        } else if !imp.is_directive && !federation_elements.contains(&imp.element) {
+            return Err(LinkError::InvalidImport(format!(
+                "Cannot import unknown federation element \"{}\".",
+                imp.element,
+            )));
+        }
+    }
+    Ok(())
+}
 
 /// Extract @link metadata from a schema.
 pub fn links_metadata(schema: &Schema) -> Result<Option<LinksMetadata>, LinkError> {
@@ -80,6 +103,10 @@
     // We do a 2nd pass to collect and validate all the imports (it's a separate path so we
     // know all the names of the spec linked in the schema).
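The second pass below invokes `validate_federation_imports` for every federation `@link`. A minimal reproduction of the new error path, modeled directly on the tests added later in this diff (the `assert_eq!` style here is illustrative; the real tests use insta snapshots):

```rust
#[test]
fn rejects_unknown_federation_directive_import() {
    // Importing a directive the federation spec does not define ("@foo")
    // is now rejected during @link metadata extraction.
    let schema = r#"
        extend schema @link(url: "https://specs.apollo.dev/link/v1.0")
        extend schema @link(
            url: "https://specs.apollo.dev/federation/v2.0",
            import: [ "@foo" ]
        )

        type Query {
            q: Int
        }

        directive @link(url: String, as: String, import: [Import], for: link__Purpose) repeatable on SCHEMA
    "#;

    let schema = Schema::parse(schema, "testSchema").unwrap();
    let error = links_metadata(&schema).expect_err("should error");
    assert_eq!(
        error.to_string(),
        r#"Unknown import: Cannot import unknown federation directive "@foo"."#
    );
}
```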
for link in &links { + if link.url.identity == Identity::federation_identity() { + validate_federation_imports(link)?; + } + for import in &link.imports { let imported_name = import.imported_name(); let element_map = if import.is_directive { @@ -509,7 +536,7 @@ mod tests { let schema = Schema::parse(schema, "testSchema").unwrap(); let errors = links_metadata(&schema).expect_err("should error"); // TODO Multiple errors - insta::assert_snapshot!(errors, @r###"Invalid use of @link in schema: invalid sub-value for @link(import:) argument: values should be either strings or input object values of the form { name: "", as: "" }."###); + insta::assert_snapshot!(errors, @r###"Invalid use of @link in schema: in "2", invalid sub-value for @link(import:) argument: values should be either strings or input object values of the form { name: "", as: "" }."###); } #[test] @@ -534,11 +561,9 @@ mod tests { let schema = Schema::parse(schema, "testSchema").unwrap(); let errors = links_metadata(&schema).expect_err("should error"); // TODO Multiple errors - insta::assert_snapshot!(errors, @"Invalid use of @link in schema: invalid alias 'myKey' for import name '@key': should start with '@' since the imported name does"); + insta::assert_snapshot!(errors, @r###"Invalid use of @link in schema: in "{name: "@key", as: "myKey"}", invalid alias 'myKey' for import name '@key': should start with '@' since the imported name does"###); } - // TODO Implement - /* #[test] fn errors_on_importing_unknown_elements_for_known_features() { let schema = r#" @@ -557,8 +582,44 @@ mod tests { let schema = Schema::parse(schema, "testSchema").unwrap(); let errors = links_metadata(&schema).expect_err("should error"); - insta::assert_snapshot!(errors, @""); + insta::assert_snapshot!(errors, @"Unknown import: Cannot import unknown federation directive \"@foo\"."); + + // TODO Support multiple errors, in the meantime we'll just clone the code and run again + let schema = r#" + extend schema @link(url: "https://specs.apollo.dev/link/v1.0") + extend schema @link( + url: "https://specs.apollo.dev/federation/v2.0", + import: [ "key", { name: "@sharable" } ] + ) + + type Query { + q: Int + } + + directive @link(url: String, as: String, import: [Import], for: link__Purpose) repeatable on SCHEMA + "#; + + let schema = Schema::parse(schema, "testSchema").unwrap(); + let errors = links_metadata(&schema).expect_err("should error"); + insta::assert_snapshot!(errors, @"Unknown import: Cannot import unknown federation element \"key\"."); + + let schema = r#" + extend schema @link(url: "https://specs.apollo.dev/link/v1.0") + extend schema @link( + url: "https://specs.apollo.dev/federation/v2.0", + import: [ { name: "@sharable" } ] + ) + + type Query { + q: Int + } + + directive @link(url: String, as: String, import: [Import], for: link__Purpose) repeatable on SCHEMA + "#; + + let schema = Schema::parse(schema, "testSchema").unwrap(); + let errors = links_metadata(&schema).expect_err("should error"); + insta::assert_snapshot!(errors, @"Unknown import: Cannot import unknown federation directive \"@sharable\"."); } - */ } } diff --git a/apollo-federation/src/link/federation_spec_definition.rs b/apollo-federation/src/link/federation_spec_definition.rs index 5e3e09512c..e89ce9f7ff 100644 --- a/apollo-federation/src/link/federation_spec_definition.rs +++ b/apollo-federation/src/link/federation_spec_definition.rs @@ -14,8 +14,6 @@ use crate::error::SingleFederationError; use crate::link::argument::directive_optional_boolean_argument; use 
crate::link::argument::directive_optional_string_argument; use crate::link::argument::directive_required_string_argument; -use crate::link::cost_spec_definition::CostSpecDefinition; -use crate::link::cost_spec_definition::COST_VERSIONS; use crate::link::spec::Identity; use crate::link::spec::Url; use crate::link::spec::Version; @@ -539,17 +537,6 @@ impl FederationSpecDefinition { )?, }) } - - pub(crate) fn get_cost_spec_definition( - &self, - schema: &FederationSchema, - ) -> Option<&'static CostSpecDefinition> { - schema - .metadata() - .and_then(|metadata| metadata.for_identity(&Identity::cost_identity())) - .and_then(|link| COST_VERSIONS.find(&link.url.version)) - .or_else(|| COST_VERSIONS.find_for_federation_version(self.version())) - } } impl SpecDefinition for FederationSpecDefinition { diff --git a/apollo-federation/src/link/mod.rs b/apollo-federation/src/link/mod.rs index 39f51e9499..0b883fb36d 100644 --- a/apollo-federation/src/link/mod.rs +++ b/apollo-federation/src/link/mod.rs @@ -23,7 +23,7 @@ use crate::link::spec::Url; pub(crate) mod argument; pub(crate) mod context_spec_definition; -pub(crate) mod cost_spec_definition; +pub mod cost_spec_definition; pub mod database; pub(crate) mod federation_spec_definition; pub(crate) mod graphql_definition; @@ -45,6 +45,8 @@ pub enum LinkError { InvalidName(#[from] InvalidNameError), #[error("Invalid use of @link in schema: {0}")] BootstrapError(String), + #[error("Unknown import: {0}")] + InvalidImport(String), } // TODO: Replace LinkError usages with FederationError. @@ -65,13 +67,12 @@ pub enum Purpose { impl Purpose { pub fn from_value(value: &Value) -> Result { - if let Value::Enum(value) = value { - Ok(value.parse::()?) - } else { - Err(LinkError::BootstrapError( - "invalid `purpose` value, should be an enum".to_string(), - )) - } + value + .as_enum() + .ok_or_else(|| { + LinkError::BootstrapError("invalid `purpose` value, should be an enum".to_string()) + }) + .and_then(|value| value.parse()) } } @@ -83,8 +84,7 @@ impl str::FromStr for Purpose { "SECURITY" => Ok(Purpose::SECURITY), "EXECUTION" => Ok(Purpose::EXECUTION), _ => Err(LinkError::BootstrapError(format!( - "invalid/unrecognized `purpose` value '{}'", - s + "invalid/unrecognized `purpose` value '{s}'" ))), } } @@ -131,11 +131,19 @@ impl Import { match value { Value::String(str) => { if let Some(directive_name) = str.strip_prefix('@') { - Ok(Import { element: Name::new(directive_name)?, is_directive: true, alias: None }) + Ok(Import { + element: Name::new(directive_name)?, + is_directive: true, + alias: None, + }) } else { - Ok(Import { element: Name::new(str)?, is_directive: false, alias: None }) + Ok(Import { + element: Name::new(str)?, + is_directive: false, + alias: None, + }) } - }, + } Value::Object(fields) => { let mut name: Option<&str> = None; let mut alias: Option<&str> = None; @@ -143,47 +151,58 @@ impl Import { match k.as_str() { "name" => { name = Some(v.as_str().ok_or_else(|| { - LinkError::BootstrapError("invalid value for `name` field in @link(import:) argument: must be a string".to_string()) + LinkError::BootstrapError(format!(r#"in "{}", invalid value for `name` field in @link(import:) argument: must be a string"#, value.serialize().no_indent())) })?) 
}, "as" => { alias = Some(v.as_str().ok_or_else(|| { - LinkError::BootstrapError("invalid value for `as` field in @link(import:) argument: must be a string".to_string()) + LinkError::BootstrapError(format!(r#"in "{}", invalid value for `as` field in @link(import:) argument: must be a string"#, value.serialize().no_indent())) })?) }, - _ => Err(LinkError::BootstrapError(format!("unknown field `{k}` in @link(import:) argument")))? + _ => Err(LinkError::BootstrapError(format!(r#"in "{}", unknown field `{k}` in @link(import:) argument"#, value.serialize().no_indent())))? } } - if let Some(element) = name { - if let Some(directive_name) = element.strip_prefix('@') { - if let Some(alias_str) = alias.as_ref() { - let Some(alias_str) = alias_str.strip_prefix('@') else { - return Err(LinkError::BootstrapError(format!("invalid alias '{}' for import name '{}': should start with '@' since the imported name does", alias_str, element))); - }; - alias = Some(alias_str); - } - Ok(Import { - element: Name::new(directive_name)?, - is_directive: true, - alias: alias.map(Name::new).transpose()?, - }) - } else { - if let Some(alias) = &alias { - if alias.starts_with('@') { - return Err(LinkError::BootstrapError(format!("invalid alias '{}' for import name '{}': should not start with '@' (or, if {} is a directive, then the name should start with '@')", alias, element, element))); - } - } - Ok(Import { - element: Name::new(element)?, - is_directive: false, - alias: alias.map(Name::new).transpose()?, - }) + let Some(element) = name else { + return Err(LinkError::BootstrapError(format!( + r#"in "{}", invalid entry in @link(import:) argument, missing mandatory `name` field"#, + value.serialize().no_indent() + ))); + }; + if let Some(directive_name) = element.strip_prefix('@') { + if let Some(alias_str) = alias.as_ref() { + let Some(alias_str) = alias_str.strip_prefix('@') else { + return Err(LinkError::BootstrapError(format!( + r#"in "{}", invalid alias '{alias_str}' for import name '{element}': should start with '@' since the imported name does"#, + value.serialize().no_indent() + ))); + }; + alias = Some(alias_str); } + Ok(Import { + element: Name::new(directive_name)?, + is_directive: true, + alias: alias.map(Name::new).transpose()?, + }) } else { - Err(LinkError::BootstrapError("invalid entry in @link(import:) argument, missing mandatory `name` field".to_string())) + if let Some(alias) = &alias { + if alias.starts_with('@') { + return Err(LinkError::BootstrapError(format!( + r#"in "{}", invalid alias '{alias}' for import name '{element}': should not start with '@' (or, if {element} is a directive, then the name should start with '@')"#, + value.serialize().no_indent() + ))); + } + } + Ok(Import { + element: Name::new(element)?, + is_directive: false, + alias: alias.map(Name::new).transpose()?, + }) } - }, - _ => Err(LinkError::BootstrapError("invalid sub-value for @link(import:) argument: values should be either strings or input object values of the form { name: \"\", as: \"\" }.".to_string())) + } + _ => Err(LinkError::BootstrapError(format!( + r#"in "{}", invalid sub-value for @link(import:) argument: values should be either strings or input object values of the form {{ name: "", as: "" }}."#, + value.serialize().no_indent() + ))), } } @@ -195,7 +214,7 @@ impl Import { } pub fn imported_name(&self) -> &Name { - return self.alias.as_ref().unwrap_or(&self.element); + self.alias.as_ref().unwrap_or(&self.element) } pub fn imported_display_name(&self) -> impl fmt::Display + '_ { diff --git 
a/apollo-federation/src/link/spec_definition.rs b/apollo-federation/src/link/spec_definition.rs index 1fb084afe5..5826f8f4d9 100644 --- a/apollo-federation/src/link/spec_definition.rs +++ b/apollo-federation/src/link/spec_definition.rs @@ -182,17 +182,6 @@ impl SpecDefinitions { self.definitions.get(requested) } - pub(crate) fn find_for_federation_version(&self, federation_version: &Version) -> Option<&T> { - for definition in self.definitions.values() { - if let Some(minimum_federation_version) = definition.minimum_federation_version() { - if minimum_federation_version >= federation_version { - return Some(definition); - } - } - } - None - } - pub(crate) fn versions(&self) -> Keys { self.definitions.keys() } diff --git a/apollo-federation/src/operation/directive_list.rs b/apollo-federation/src/operation/directive_list.rs index ad716dd1b4..bd2f021f1a 100644 --- a/apollo-federation/src/operation/directive_list.rs +++ b/apollo-federation/src/operation/directive_list.rs @@ -10,6 +10,7 @@ use std::sync::OnceLock; use apollo_compiler::executable; use apollo_compiler::Name; use apollo_compiler::Node; +use serde::Serialize; use super::sort_arguments; @@ -102,16 +103,23 @@ fn compare_sorted_arguments( static EMPTY_DIRECTIVE_LIST: executable::DirectiveList = executable::DirectiveList(vec![]); /// Contents for a non-empty directive list. -#[derive(Debug, Clone)] +// NOTE: For serialization, we skip everything but the directives. This will require manually +// implementing `Deserialize` as all other fields are derived from the directives. This could also +// mean flattening the serialization and making this type deserialize from +// `executable::DirectiveList` directly. +#[derive(Debug, Clone, Serialize)] struct DirectiveListInner { // Cached hash: hashing may be expensive with deeply nested values or very many directives, // so we only want to do it once. // The hash is eagerly precomputed because we expect to, most of the time, hash a DirectiveList // at least once (when inserting its selection into a selection map). + #[serde(skip)] hash: u64, // Mutable access to the underlying directive list should not be handed out because `sort_order` // may get out of sync. + #[serde(serialize_with = "crate::utils::serde_bridge::serialize_exe_directive_list")] directives: executable::DirectiveList, + #[serde(skip)] sort_order: Vec, } @@ -166,7 +174,7 @@ impl DirectiveListInner { /// /// This list is cheaply cloneable, but not intended for frequent mutations. /// When the list is empty, it does not require an allocation. -#[derive(Debug, Clone, PartialEq, Eq, Default)] +#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize)] pub(crate) struct DirectiveList { inner: Option>, } diff --git a/apollo-federation/src/operation/merging.rs b/apollo-federation/src/operation/merging.rs index 6b8e89193c..79ebec9fe3 100644 --- a/apollo-federation/src/operation/merging.rs +++ b/apollo-federation/src/operation/merging.rs @@ -18,6 +18,7 @@ use super::SelectionValue; use crate::bail; use crate::ensure; use crate::error::FederationError; +use crate::error::SingleFederationError; impl<'a> FieldSelectionValue<'a> { /// Merges the given field selections into this one. 
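The hunk that follows replaces a hard `ensure!` assertion with a structured `InternalUnmergeableFields` error. Schematically (an illustration of the colliding shape described in the error text below, not a guaranteed reproduction), the problematic pattern is two selections that share a response name but point at different fields:

```rust
// Two field selections with the same response name "name" but different
// field positions; merging them is what the new error now reports.
let _operation = r#"
    {
        user {
            name: id    # alias "name" collides with the sibling field below
            name
        }
    }
"#;
```

The recommended workaround, as the error text below says, is to rename the alias in the operation.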
@@ -42,12 +43,23 @@ impl<'a> FieldSelectionValue<'a> {
             other_field.schema == self_field.schema,
             "Cannot merge field selections from different schemas",
         );
-        ensure!(
-            other_field.field_position == self_field.field_position,
-            "Cannot merge field selection for field \"{}\" into a field selection for field \"{}\"",
-            other_field.field_position,
-            self_field.field_position,
-        );
+        if other_field.field_position != self_field.field_position {
+            return Err(SingleFederationError::InternalUnmergeableFields {
+                message: format!(
+                    "Cannot merge field selection for field \"{}\" into a field selection for \
+                     field \"{}\". This is a known query planning bug in the old Javascript \
+                     query planner that was silently ignored. The Rust-native query planner \
+                     does not address this bug at this time, but in some cases does catch when \
+                     this bug occurs. If you're seeing this message, this bug was likely \
+                     triggered by one of the field selections mentioned previously having an \
+                     alias that was the same name as the field in the other field selection. \
+                     The recommended workaround is to change this alias to a different one in \
+                     your operation.",
+                    other_field.field_position, self_field.field_position,
+                ),
+            }
+            .into());
+        }
         if self.get().selection_set.is_some() {
             let Some(other_selection_set) = &other.selection_set else {
                 bail!(
diff --git a/apollo-federation/src/operation/mod.rs b/apollo-federation/src/operation/mod.rs
index 1fe1f16287..9454842f1d 100644
--- a/apollo-federation/src/operation/mod.rs
+++ b/apollo-federation/src/operation/mod.rs
@@ -29,7 +29,6 @@ use apollo_compiler::validation::Valid;
 use apollo_compiler::Name;
 use apollo_compiler::Node;
 use itertools::Itertools;
-use serde::Serialize;
 
 use crate::compat::coerce_executable_values;
 use crate::error::FederationError;
@@ -71,12 +70,11 @@ static NEXT_ID: atomic::AtomicUsize = atomic::AtomicUsize::new(1);
 
 /// Opaque wrapper of the unique selection ID type.
 ///
-/// Note that we shouldn't add `derive(Serialize, Deserialize)` to this without changing the types
-/// to be something like UUIDs.
-#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
-// NOTE(@TylerBloom): This feature gate can be removed once the condition in the comment above is
-// met. Note that there are `serde(skip)` statements that should be removed once this is removed.
-#[cfg_attr(feature = "snapshot_tracing", derive(Serialize))]
+/// NOTE: This ID does not ensure that IDs are unique because its internal counter resets on
+/// startup. It currently implements `Serialize` for debugging purposes. It should not implement
+/// `Deserialize`, and, more specifically, it should not be used for caching until uniqueness is
+/// provided (i.e. the inner type is a `Uuid` or the like).
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, serde::Serialize)]
 pub(crate) struct SelectionId(usize);
 
 impl SelectionId {
@@ -91,9 +89,12 @@ impl SelectionId {
 /// All arguments and input object values are sorted in a consistent order.
 ///
 /// This type is immutable and cheaply cloneable.
-#[derive(Clone, PartialEq, Eq, Default)]
+#[derive(Clone, PartialEq, Eq, Default, serde::Serialize)]
 pub(crate) struct ArgumentList {
     /// The inner list *must* be sorted with `sort_arguments`.
+    #[serde(
+        serialize_with = "crate::utils::serde_bridge::serialize_optional_slice_of_exe_argument_nodes"
+    )]
     inner: Option<Arc<[Node<executable::Argument>]>>,
 }
 
@@ -242,7 +243,7 @@ impl Operation {
 /// - For the type, stores the schema and the position in that schema instead of just the
 ///   `NamedType`.
/// - Stores selections in a map so they can be normalized efficiently. -#[derive(Debug, Clone, Serialize)] +#[derive(Debug, Clone, serde::Serialize)] pub(crate) struct SelectionSet { #[serde(skip)] pub(crate) schema: ValidFederationSchema, @@ -270,7 +271,7 @@ pub(crate) use selection_map::SelectionValue; /// An analogue of the apollo-compiler type `Selection` that stores our other selection analogues /// instead of the apollo-compiler types. -#[derive(Debug, Clone, PartialEq, Eq, derive_more::IsVariant, Serialize)] +#[derive(Debug, Clone, PartialEq, Eq, derive_more::IsVariant, serde::Serialize)] pub(crate) enum Selection { Field(Arc), FragmentSpread(Arc), @@ -658,9 +659,7 @@ mod field_selection { pub(crate) schema: ValidFederationSchema, pub(crate) field_position: FieldDefinitionPosition, pub(crate) alias: Option, - #[serde(serialize_with = "crate::display_helpers::serialize_as_debug_string")] pub(crate) arguments: ArgumentList, - #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] pub(crate) directives: DirectiveList, pub(crate) sibling_typename: Option, } @@ -868,16 +867,13 @@ mod fragment_spread_selection { pub(crate) fragment_name: Name, pub(crate) type_condition_position: CompositeTypeDefinitionPosition, // directives applied on the fragment spread selection - #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] pub(crate) directives: DirectiveList, // directives applied within the fragment definition // // PORT_NOTE: The JS codebase combined the fragment spread's directives with the fragment // definition's directives. This was invalid GraphQL as those directives may not be applicable // on different locations. While we now keep track of those references, they are currently ignored. - #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] pub(crate) fragment_directives: DirectiveList, - #[cfg_attr(not(feature = "snapshot_tracing"), serde(skip))] pub(crate) selection_id: SelectionId, } @@ -925,17 +921,6 @@ impl FragmentSpreadSelection { }) } - pub(crate) fn from_fragment( - fragment: &Node, - directives: &executable::DirectiveList, - ) -> Self { - let spread = FragmentSpread::from_fragment(fragment, directives); - Self { - spread, - selection_set: fragment.selection_set.clone(), - } - } - /// Creates a fragment spread selection (in an optimized operation). /// - `named_fragments`: Named fragment definitions that are rebased for the element's schema. pub(crate) fn new( @@ -1062,9 +1047,7 @@ mod inline_fragment_selection { pub(crate) schema: ValidFederationSchema, pub(crate) parent_type_position: CompositeTypeDefinitionPosition, pub(crate) type_condition_position: Option, - #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] pub(crate) directives: DirectiveList, - #[cfg_attr(not(feature = "snapshot_tracing"), serde(skip))] pub(crate) selection_id: SelectionId, } @@ -2177,15 +2160,6 @@ impl SelectionSet { }) } - /// In a normalized selection set containing only fields and inline fragments, - /// iterate over all the fields that may be selected. - /// - /// # Preconditions - /// The selection set must not contain named fragment spreads. - pub(crate) fn field_selections(&self) -> FieldSelectionsIter<'_> { - FieldSelectionsIter::new(self.selections.values()) - } - /// # Preconditions /// The selection set must not contain named fragment spreads. 
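A note on the serialization changes in the hunks above: the `serialize_with` attributes point at bridge functions in `crate::utils::serde_bridge`, which this diff references but does not show. Presumably each bridge adapts a foreign `apollo_compiler` type that lacks a `Serialize` impl; a minimal sketch of that shape, assuming the real bridges render values through their GraphQL `Display` form:

```rust
use serde::Serializer;

// Hypothetical bridge (assumption, not from this diff): apollo_compiler's
// DirectiveList has no Serialize impl, so serialize it via its Display form,
// i.e. its GraphQL syntax.
fn serialize_exe_directive_list<S: Serializer>(
    list: &apollo_compiler::executable::DirectiveList,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    serializer.collect_str(list)
}
```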
fn fields_in_set(&self) -> Vec { @@ -2326,36 +2300,6 @@ impl<'a> IntoIterator for &'a SelectionSet { } } -pub(crate) struct FieldSelectionsIter<'sel> { - stack: Vec>, -} - -impl<'sel> FieldSelectionsIter<'sel> { - fn new(iter: selection_map::Values<'sel>) -> Self { - Self { stack: vec![iter] } - } -} - -impl<'sel> Iterator for FieldSelectionsIter<'sel> { - type Item = &'sel Arc; - - fn next(&mut self) -> Option { - match self.stack.last_mut()?.next() { - None if self.stack.len() == 1 => None, - None => { - self.stack.pop(); - self.next() - } - Some(Selection::Field(field)) => Some(field), - Some(Selection::InlineFragment(frag)) => { - self.stack.push(frag.selection_set.selections.values()); - self.next() - } - Some(Selection::FragmentSpread(_frag)) => unreachable!(), - } - } -} - #[derive(Clone, Debug)] pub(crate) struct SelectionSetAtPath { path: Vec, @@ -2631,16 +2575,6 @@ impl Field { pub(crate) fn parent_type_position(&self) -> CompositeTypeDefinitionPosition { self.field_position.parent() } - - pub(crate) fn types_can_be_merged(&self, other: &Self) -> Result { - let self_definition = self.field_position.get(self.schema().schema())?; - let other_definition = other.field_position.get(self.schema().schema())?; - types_can_be_merged( - &self_definition.ty, - &other_definition.ty, - self.schema().schema(), - ) - } } impl InlineFragmentSelection { @@ -2738,23 +2672,6 @@ impl InlineFragmentSelection { )) } - /// Construct a new InlineFragmentSelection out of a selection set. - /// - The new type condition will be the same as the selection set's type. - pub(crate) fn from_selection_set( - parent_type_position: CompositeTypeDefinitionPosition, - selection_set: SelectionSet, - directives: DirectiveList, - ) -> Self { - let inline_fragment_data = InlineFragment { - schema: selection_set.schema.clone(), - parent_type_position, - type_condition_position: selection_set.type_position.clone().into(), - directives, - selection_id: SelectionId::new(), - }; - InlineFragmentSelection::new(inline_fragment_data, selection_set) - } - pub(crate) fn casted_type(&self) -> &CompositeTypeDefinitionPosition { self.inline_fragment .type_condition_position @@ -2817,31 +2734,10 @@ impl NamedFragments { NamedFragments::initialize_in_dependency_order(fragments, schema) } - pub(crate) fn is_empty(&self) -> bool { - self.fragments.len() == 0 - } - - pub(crate) fn len(&self) -> usize { - self.fragments.len() - } - pub(crate) fn iter(&self) -> impl Iterator> { self.fragments.values() } - pub(crate) fn iter_rev(&self) -> impl Iterator> { - self.fragments.values().rev() - } - - pub(crate) fn iter_mut(&mut self) -> indexmap::map::IterMut<'_, Name, Node> { - Arc::make_mut(&mut self.fragments).iter_mut() - } - - // Calls `retain` on the underlying `IndexMap`. - pub(crate) fn retain(&mut self, mut predicate: impl FnMut(&Name, &Node) -> bool) { - Arc::make_mut(&mut self.fragments).retain(|name, fragment| predicate(name, fragment)); - } - fn insert(&mut self, fragment: Fragment) { Arc::make_mut(&mut self.fragments).insert(fragment.name.clone(), Node::new(fragment)); } @@ -2935,32 +2831,6 @@ impl NamedFragments { } }) } - - /// When we rebase named fragments on a subgraph schema, only a subset of what the fragment handles may belong - /// to that particular subgraph. And there are a few sub-cases where that subset is such that we basically need or - /// want to consider to ignore the fragment for that subgraph, and that is when: - /// 1. the subset that apply is actually empty. The fragment wouldn't be valid in this case anyway. 
- /// 2. the subset is a single leaf field: in that case, using the one field directly is just shorter than using - /// the fragment, so we consider the fragment don't really apply to that subgraph. Technically, using the - /// fragment could still be of value if the fragment name is a lot smaller than the one field name, but it's - /// enough of a niche case that we ignore it. Note in particular that one sub-case of this rule that is likely - /// to be common is when the subset ends up being just `__typename`: this would basically mean the fragment - /// don't really apply to the subgraph, and that this will ensure this is the case. - pub(crate) fn is_selection_set_worth_using(selection_set: &SelectionSet) -> bool { - if selection_set.selections.len() == 0 { - return false; - } - if selection_set.selections.len() == 1 { - // true if NOT field selection OR non-leaf field - return if let Some(Selection::Field(field_selection)) = selection_set.selections.first() - { - field_selection.selection_set.is_some() - } else { - true - }; - } - true - } } // @defer handling: removing and normalization @@ -3399,49 +3269,6 @@ impl Operation { } } -// Collect fragment usages from operation types. - -impl Selection { - fn collect_used_fragment_names(&self, aggregator: &mut IndexMap) { - match self { - Selection::Field(field_selection) => { - if let Some(s) = &field_selection.selection_set { - s.collect_used_fragment_names(aggregator) - } - } - Selection::InlineFragment(inline) => { - inline.selection_set.collect_used_fragment_names(aggregator); - } - Selection::FragmentSpread(fragment) => { - let current_count = aggregator - .entry(fragment.spread.fragment_name.clone()) - .or_default(); - *current_count += 1; - } - } - } -} - -impl SelectionSet { - pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut IndexMap) { - for s in self.selections.values() { - s.collect_used_fragment_names(aggregator); - } - } - - pub(crate) fn used_fragments(&self) -> IndexMap { - let mut usages = IndexMap::default(); - self.collect_used_fragment_names(&mut usages); - usages - } -} - -impl Fragment { - pub(crate) fn collect_used_fragment_names(&self, aggregator: &mut IndexMap) { - self.selection_set.collect_used_fragment_names(aggregator) - } -} - // Collect used variables from operation types. pub(crate) struct VariableCollector<'s> { @@ -3539,16 +3366,6 @@ impl<'s> VariableCollector<'s> { } } -impl Fragment { - /// Returns the variable names that are used by this fragment. - pub(crate) fn used_variables(&self) -> IndexSet<&'_ Name> { - let mut collector = VariableCollector::new(); - collector.visit_directive_list(&self.directives); - collector.visit_selection_set(&self.selection_set); - collector.into_inner() - } -} - impl SelectionSet { /// Returns the variable names that are used by this selection set, including through fragment /// spreads. 
@@ -3911,7 +3728,9 @@ pub(crate) fn normalize_operation( variables: Arc::new(operation.variables.clone()), directives: operation.directives.clone().into(), selection_set: normalized_selection_set, - named_fragments, + // fragments were already expanded into selection sets + // new ones will be generated when optimizing the final subgraph fetch operations + named_fragments: Default::default(), }; Ok(normalized_operation) } diff --git a/apollo-federation/src/operation/optimize.rs b/apollo-federation/src/operation/optimize.rs index c7e54b23f7..7bdd0842a2 100644 --- a/apollo-federation/src/operation/optimize.rs +++ b/apollo-federation/src/operation/optimize.rs @@ -38,17 +38,10 @@ use std::sync::Arc; use apollo_compiler::collections::IndexMap; -use apollo_compiler::collections::IndexSet; use apollo_compiler::executable; -use apollo_compiler::executable::VariableDefinition; use apollo_compiler::Name; use apollo_compiler::Node; -use super::Containment; -use super::ContainmentOptions; -use super::DirectiveList; -use super::Field; -use super::FieldSelection; use super::Fragment; use super::FragmentSpreadSelection; use super::HasSelectionKey; @@ -62,152 +55,6 @@ use super::SelectionSet; use crate::error::FederationError; use crate::operation::FragmentSpread; use crate::operation::SelectionValue; -use crate::schema::position::CompositeTypeDefinitionPosition; - -#[derive(Debug)] -struct ReuseContext<'a> { - fragments: &'a NamedFragments, - operation_variables: Option>, -} - -impl<'a> ReuseContext<'a> { - fn for_fragments(fragments: &'a NamedFragments) -> Self { - Self { - fragments, - operation_variables: None, - } - } - - // Taking two separate parameters so the caller can still mutate the operation's selection set. - fn for_operation( - fragments: &'a NamedFragments, - operation_variables: &'a [Node], - ) -> Self { - Self { - fragments, - operation_variables: Some(operation_variables.iter().map(|var| &var.name).collect()), - } - } -} - -//============================================================================= -// Add __typename field for abstract types in named fragment definitions - -impl NamedFragments { - // - Expands all nested fragments - // - Applies the provided `mapper` to each selection set of the expanded fragments. - // - Finally, re-fragments the nested fragments. - // - `mapper` must return a fragment-spread-free selection set. - fn map_to_expanded_selection_sets( - &self, - mut mapper: impl FnMut(&SelectionSet) -> Result, - ) -> Result { - let mut result = NamedFragments::default(); - // Note: `self.fragments` has insertion order topologically sorted. - for fragment in self.fragments.values() { - let expanded_selection_set = fragment - .selection_set - .expand_all_fragments()? - .flatten_unnecessary_fragments( - &fragment.type_condition_position, - &Default::default(), - &fragment.schema, - )?; - let mut mapped_selection_set = mapper(&expanded_selection_set)?; - // `mapped_selection_set` must be fragment-spread-free. - mapped_selection_set.reuse_fragments(&ReuseContext::for_fragments(&result))?; - let updated = Fragment { - selection_set: mapped_selection_set, - schema: fragment.schema.clone(), - name: fragment.name.clone(), - type_condition_position: fragment.type_condition_position.clone(), - directives: fragment.directives.clone(), - }; - result.insert(updated); - } - Ok(result) - } - - pub(crate) fn add_typename_field_for_abstract_types_in_named_fragments( - &self, - ) -> Result { - // This method is a bit tricky due to potentially nested fragments. 
More precisely, suppose that - // we have: - // fragment MyFragment on T { - // a { - // b { - // ...InnerB - // } - // } - // } - // - // fragment InnerB on B { - // __typename - // x - // y - // } - // then if we were to "naively" add `__typename`, the first fragment would end up being: - // fragment MyFragment on T { - // a { - // __typename - // b { - // __typename - // ...InnerX - // } - // } - // } - // but that's not ideal because the inner-most `__typename` is already within `InnerX`. And that - // gets in the way to re-adding fragments (the `SelectionSet::reuse_fragments` method) because if we start - // with: - // { - // a { - // __typename - // b { - // __typename - // x - // y - // } - // } - // } - // and add `InnerB` first, we get: - // { - // a { - // __typename - // b { - // ...InnerB - // } - // } - // } - // and it becomes tricky to recognize the "updated-with-typename" version of `MyFragment` now (we "seem" - // to miss a `__typename`). - // - // Anyway, to avoid this issue, what we do is that for every fragment, we: - // 1. expand any nested fragments in its selection. - // 2. add `__typename` where we should in that expanded selection. - // 3. re-optimize all fragments (using the "updated-with-typename" versions). - // which is what `mapToExpandedSelectionSets` gives us. - - if self.is_empty() { - // PORT_NOTE: This was an assertion failure in JS version. But, it's actually ok to - // return unchanged if empty. - return Ok(self.clone()); - } - let updated = self.map_to_expanded_selection_sets(|ss| { - // Note: Since `ss` won't have any fragment spreads, `add_typename_field_for_abstract_types`'s return - // value won't have any fragment spreads. - ss.add_typename_field_for_abstract_types(/*parent_type_if_abstract*/ None) - })?; - // PORT_NOTE: The JS version asserts if `updated` is empty or not. But, we really want to - // check the `updated` has the same set of fragments. To avoid performance hit, only the - // size is checked here. - if updated.len() != self.len() { - return Err(FederationError::internal( - "Unexpected change in the number of fragments", - )); - } - Ok(updated) - } -} //============================================================================= // Selection/SelectionSet intersection/minus operations @@ -234,26 +81,6 @@ impl Selection { } Ok(None) } - - /// Computes the set-intersection of self and other - /// - If there are respective sub-selections, then we compute their intersections and add them - /// (if not empty). - /// - Otherwise, the intersection is same as `self`. - fn intersection(&self, other: &Selection) -> Result, FederationError> { - if let (Some(self_sub_selection), Some(other_sub_selection)) = - (self.selection_set(), other.selection_set()) - { - let common = self_sub_selection.intersection(other_sub_selection)?; - if common.is_empty() { - return Ok(None); - } else { - return self - .with_updated_selections(self_sub_selection.type_position.clone(), common) - .map(Some); - } - } - Ok(Some(self.clone())) - } } impl SelectionSet { @@ -279,1549 +106,279 @@ impl SelectionSet { iter, )) } - - /// Computes the set-intersection of self and other - fn intersection(&self, other: &SelectionSet) -> Result { - if self.is_empty() { - return Ok(self.clone()); - } - if other.is_empty() { - return Ok(other.clone()); - } - - let iter = self - .selections - .values() - .map(|v| { - if let Some(other_v) = other.selections.get(v.key()) { - v.intersection(other_v) - } else { - Ok(None) - } - }) - .collect::, _>>()? 
// early break in case of Err - .into_iter() - .flatten(); - Ok(SelectionSet::from_raw_selections( - self.schema.clone(), - self.type_position.clone(), - iter, - )) - } } //============================================================================= -// Collect applicable fragments at given type. - -impl Fragment { - /// Whether this fragment may apply _directly_ at the provided type, meaning that the fragment - /// sub-selection (_without_ the fragment condition, hence the "directly") can be normalized at - /// `ty` without overly "widening" the runtime types. - /// - /// * `ty` - the type at which we're looking at applying the fragment - // - // The runtime types of the fragment condition must be at least as general as those of the - // provided `ty`. Otherwise, putting it at `ty` without its condition would "generalize" - // more than the fragment meant to (and so we'd "widen" the runtime types more than what the - // query meant to. - fn can_apply_directly_at_type( - &self, - ty: &CompositeTypeDefinitionPosition, - ) -> Result { - // Short-circuit #1: the same type => trivially true. - if self.type_condition_position == *ty { - return Ok(true); - } - - // Short-circuit #2: The type condition is not an abstract type (too restrictive). - // - It will never cover all of the runtime types of `ty` unless it's the same type, which is - // already checked by short-circuit #1. - if !self.type_condition_position.is_abstract_type() { - return Ok(false); - } - - // Short-circuit #3: The type condition is not an object (due to short-circuit #2) nor a - // union type, but the `ty` may be too general. - // - In other words, the type condition must be an interface but `ty` is a (different) - // interface or a union. - // PORT_NOTE: In JS, this check was later on the return statement (negated). But, this - // should be checked before `possible_runtime_types` check, since this is - // cheaper to execute. - // PORT_NOTE: This condition may be too restrictive (potentially a bug leading to - // suboptimal compression). If ty is a union whose members all implements the - // type condition (interface). Then, this function should've returned true. - // Thus, `!ty.is_union_type()` might be needed. - if !self.type_condition_position.is_union_type() && !ty.is_object_type() { - return Ok(false); - } +// Matching fragments with selection set (`try_optimize_with_fragments`) - // Check if the type condition is a superset of the provided type. - // - The fragment condition must be at least as general as the provided type. - let condition_types = self - .schema - .possible_runtime_types(self.type_condition_position.clone())?; - let ty_types = self.schema.possible_runtime_types(ty.clone())?; - Ok(condition_types.is_superset(&ty_types)) - } +/// The return type for `SelectionSet::try_optimize_with_fragments`. +#[derive(derive_more::From)] +enum SelectionSetOrFragment { + SelectionSet(SelectionSet), + Fragment(Node), } -impl NamedFragments { - /// Returns fragments that can be applied directly at the given type. - fn get_all_may_apply_directly_at_type<'a>( - &'a self, - ty: &'a CompositeTypeDefinitionPosition, - ) -> impl Iterator, FederationError>> + 'a { - self.iter().filter_map(|fragment| { - fragment - .can_apply_directly_at_type(ty) - .map(|can_apply| can_apply.then_some(fragment)) - .transpose() - }) +// Note: `retain_fragments` methods may return a selection or a selection set. 
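`SelectionSetOrFragment` above leans on `derive_more`'s `From` derive, which writes the per-variant conversions mechanically. Approximately what the derive expands to (standard `derive_more` behavior, shown for illustration):

```rust
// Generated-by-derive equivalent: each single-field variant gets a From impl,
// so callers can return either shape with a plain `.into()`.
impl From<SelectionSet> for SelectionSetOrFragment {
    fn from(value: SelectionSet) -> Self {
        Self::SelectionSet(value)
    }
}

impl From<Node<Fragment>> for SelectionSetOrFragment {
    fn from(value: Node<Fragment>) -> Self {
        Self::Fragment(value)
    }
}
```

The hand-written `From<SelectionOrSet> for SelectionMapperReturn` impl that follows exists because that conversion is not a simple variant wrap.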
+impl From for SelectionMapperReturn { + fn from(value: SelectionOrSet) -> Self { + match value { + SelectionOrSet::Selection(selection) => selection.into(), + SelectionOrSet::SelectionSet(selections) => { + // The items in a selection set needs to be cloned here, since it's sub-selections + // are contained in an `Arc`. + Vec::from_iter(selections.selections.values().cloned()).into() + } + } } } //============================================================================= -// Field validation +// `reuse_fragments` methods (putting everything together) -// PORT_NOTE: Not having a validator and having a FieldsConflictValidator with empty -// `by_response_name` map has no difference in behavior. So, we could drop the `Option` from -// `Option`. However, `None` validator makes it clearer that validation is -// unnecessary. -struct FieldsConflictValidator { - by_response_name: IndexMap>>>, +/// Return type for `InlineFragmentSelection::reuse_fragments`. +#[derive(derive_more::From)] +enum FragmentSelection { + // Note: Enum variants are named to match those of `Selection`. + InlineFragment(InlineFragmentSelection), + FragmentSpread(FragmentSpreadSelection), } -impl FieldsConflictValidator { - /// Build a field merging validator for a selection set. - /// - /// # Preconditions - /// The selection set must not contain named fragment spreads. - fn from_selection_set(selection_set: &SelectionSet) -> Self { - Self::for_level(&[selection_set]) - } - - fn for_level<'a>(level: &[&'a SelectionSet]) -> Self { - // Group `level`'s fields by the response-name/field - let mut at_level: IndexMap>> = - IndexMap::default(); - for selection_set in level { - for field_selection in selection_set.field_selections() { - let response_name = field_selection.field.response_name(); - let at_response_name = at_level.entry(response_name.clone()).or_default(); - let entry = at_response_name - .entry(field_selection.field.clone()) - .or_default(); - if let Some(ref field_selection_set) = field_selection.selection_set { - entry.push(field_selection_set); - } - } - } - - // Collect validators per response-name/field - let mut by_response_name = IndexMap::default(); - for (response_name, fields) in at_level { - let mut at_response_name: IndexMap>> = - IndexMap::default(); - for (field, selection_sets) in fields { - if selection_sets.is_empty() { - at_response_name.insert(field, None); - } else { - let validator = Arc::new(Self::for_level(&selection_sets)); - at_response_name.insert(field, Some(validator)); - } - } - by_response_name.insert(response_name, at_response_name); - } - Self { by_response_name } - } - - fn for_field<'v>(&'v self, field: &Field) -> impl Iterator> + 'v { - self.by_response_name - .get(field.response_name()) - .into_iter() - .flat_map(|by_response_name| by_response_name.values()) - .flatten() - .cloned() - } - - fn has_same_response_shape( - &self, - other: &FieldsConflictValidator, - ) -> Result { - for (response_name, self_fields) in self.by_response_name.iter() { - let Some(other_fields) = other.by_response_name.get(response_name) else { - continue; - }; - - for (self_field, self_validator) in self_fields { - for (other_field, other_validator) in other_fields { - if !self_field.types_can_be_merged(other_field)? { - return Ok(false); - } - - if let Some(self_validator) = self_validator { - if let Some(other_validator) = other_validator { - if !self_validator.has_same_response_shape(other_validator)? 
{ - return Ok(false); - } - } - } - } - } - } - Ok(true) - } - - fn do_merge_with(&self, other: &FieldsConflictValidator) -> Result { - for (response_name, self_fields) in self.by_response_name.iter() { - let Some(other_fields) = other.by_response_name.get(response_name) else { - continue; - }; - - // We're basically checking - // [FieldsInSetCanMerge](https://spec.graphql.org/draft/#FieldsInSetCanMerge()), but - // from 2 set of fields (`self_fields` and `other_fields`) of the same response that we - // know individually merge already. - for (self_field, self_validator) in self_fields { - for (other_field, other_validator) in other_fields { - if !self_field.types_can_be_merged(other_field)? { - return Ok(false); - } - - let p1 = self_field.parent_type_position(); - let p2 = other_field.parent_type_position(); - if p1 == p2 || !p1.is_object_type() || !p2.is_object_type() { - // Additional checks of `FieldsInSetCanMerge` when same parent type or one - // isn't object - if self_field.name() != other_field.name() - || self_field.arguments != other_field.arguments - { - return Ok(false); - } - if let (Some(self_validator), Some(other_validator)) = - (self_validator, other_validator) - { - if !self_validator.do_merge_with(other_validator)? { - return Ok(false); - } - } - } else { - // Otherwise, the sub-selection must pass - // [SameResponseShape](https://spec.graphql.org/draft/#SameResponseShape()). - if let (Some(self_validator), Some(other_validator)) = - (self_validator, other_validator) - { - if !self_validator.has_same_response_shape(other_validator)? { - return Ok(false); - } - } - } - } - } +impl From for Selection { + fn from(value: FragmentSelection) -> Self { + match value { + FragmentSelection::InlineFragment(inline_fragment) => inline_fragment.into(), + FragmentSelection::FragmentSpread(fragment_spread) => fragment_spread.into(), } - Ok(true) - } - - fn do_merge_with_all<'a>( - &self, - mut iter: impl Iterator, - ) -> Result { - iter.try_fold(true, |acc, v| Ok(acc && v.do_merge_with(self)?)) } } -struct FieldsConflictMultiBranchValidator { - validators: Vec>, - used_spread_trimmed_part_at_level: Vec>, -} - -impl FieldsConflictMultiBranchValidator { - fn new(validators: Vec>) -> Self { - Self { - validators, - used_spread_trimmed_part_at_level: Vec::new(), - } - } - - fn from_initial_validator(validator: FieldsConflictValidator) -> Self { - Self { - validators: vec![Arc::new(validator)], - used_spread_trimmed_part_at_level: Vec::new(), - } - } - - fn for_field(&self, field: &Field) -> Self { - let for_all_branches = self.validators.iter().flat_map(|v| v.for_field(field)); - Self::new(for_all_branches.collect()) - } - - // When this method is used in the context of `try_optimize_with_fragments`, we know that the - // fragment, restricted to the current parent type, matches a subset of the sub-selection. - // However, there is still one case we we cannot use it that we need to check, and this is if - // using the fragment would create a field "conflict" (in the sense of the graphQL spec - // [`FieldsInSetCanMerge`](https://spec.graphql.org/draft/#FieldsInSetCanMerge())) and thus - // create an invalid selection. To be clear, `at_type.selections` cannot create a conflict, - // since it is a subset of the target selection set and it is valid by itself. *But* there may - // be some part of the fragment that is not `at_type.selections` due to being "dead branches" - // for type `parent_type`. 
And while those branches _are_ "dead" as far as execution goes, the - // `FieldsInSetCanMerge` validation does not take this into account (it's 1st step says - // "including visiting fragments and inline fragments" but has no logic regarding ignoring any - // fragment that may not apply due to the intersection of runtime types between multiple - // fragment being empty). - fn check_can_reuse_fragment_and_track_it( - &mut self, - fragment_restriction: &FragmentRestrictionAtType, - ) -> Result { - // No validator means that everything in the fragment selection was part of the selection - // we're optimizing away (by using the fragment), and we know the original selection was - // ok, so nothing to check. - let Some(validator) = &fragment_restriction.validator else { - return Ok(true); // Nothing to check; Trivially ok. - }; - - if !validator.do_merge_with_all(self.validators.iter().map(Arc::as_ref))? { - return Ok(false); - } - - // We need to make sure the trimmed parts of `fragment` merges with the rest of the - // selection, but also that it merge with any of the trimmed parts of any fragment we have - // added already. - // Note: this last condition means that if 2 fragment conflict on their "trimmed" parts, - // then the choice of which is used can be based on the fragment ordering and selection - // order, which may not be optimal. This feels niche enough that we keep it simple for now, - // but we can revisit this decision if we run into real cases that justify it (but making - // it optimal would be a involved in general, as in theory you could have complex - // dependencies of fragments that conflict, even cycles, and you need to take the size of - // fragments into account to know what's best; and even then, this could even depend on - // overall usage, as it can be better to reuse a fragment that is used in other places, - // than to use one for which it's the only usage. Adding to all that the fact that conflict - // can happen in sibling branches). - if !validator.do_merge_with_all( - self.used_spread_trimmed_part_at_level - .iter() - .map(Arc::as_ref), - )? { - return Ok(false); - } - - // We're good, but track the fragment. - self.used_spread_trimmed_part_at_level - .push(validator.clone()); - Ok(true) +impl Operation { + /// Optimize the parsed size of the operation by generating fragments based on the selections + /// in the operation. + pub(crate) fn generate_fragments(&mut self) -> Result<(), FederationError> { + // Currently, this method simply pulls out every inline fragment into a named fragment. If + // multiple inline fragments are the same, they use the same named fragment. + // + // This method can generate named fragments that are only used once. It's not ideal, but it + // also doesn't seem that bad. Avoiding this is possible but more work, and keeping this + // as simple as possible is a big benefit for now. + // + // When we have more advanced correctness testing, we can add more features to fragment + // generation, like factoring out partial repeated slices of selection sets or only + // introducing named fragments for patterns that occur more than once. + let mut generator = FragmentGenerator::default(); + generator.visit_selection_set(&mut self.selection_set)?; + self.named_fragments = generator.into_inner(); + Ok(()) } } -//============================================================================= -// Matching fragments with selection set (`try_optimize_with_fragments`) - -/// Return type for `expanded_selection_set_at_type` method. 
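To make the `generate_fragments` pass above concrete: `generate_name` (further below) derives fragment names from the type condition and the selection count, with a per-key counter to disambiguate. An illustrative trace of the scheme (not part of the diff):

```rust
// First inline fragment `... on User { id name }` seen by the generator:
// key = ("User", 2), counter starts at 0.
let name = format!("_generated_on{}{}_{}", "User", 2, 0);
assert_eq!(name, "_generated_onUser2_0");
// A second, structurally different fragment with the same type condition and
// selection count gets the bumped index "_generated_onUser2_1", while repeats
// of an identical fragment reuse the already-generated named fragment.
```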
-struct FragmentRestrictionAtType { - /// Selections that are expanded from a given fragment at a given type and then normalized. - /// - This represents the part of the given type's sub-selections that are covered by the fragment. - selections: SelectionSet, - - /// A runtime validator to check the fragment selections against other fields. - /// - `None` means that there is nothing to check. - /// - See `check_can_reuse_fragment_and_track_it` for more details. - validator: Option<Arc<FieldsConflictValidator>>, +#[derive(Debug, Default)] +struct FragmentGenerator { + fragments: NamedFragments, + // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! + names: IndexMap<(String, usize), usize>, } -#[derive(Default)] -struct FragmentRestrictionAtTypeCache { - map: IndexMap<(Name, CompositeTypeDefinitionPosition), Arc<FragmentRestrictionAtType>>, -} +impl FragmentGenerator { + // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! + // In the future, we will just use `.next_name()`. + fn generate_name(&mut self, frag: &InlineFragmentSelection) -> Name { + use std::fmt::Write as _; -impl FragmentRestrictionAtTypeCache { - fn expanded_selection_set_at_type( - &mut self, - fragment: &Fragment, - ty: &CompositeTypeDefinitionPosition, - ) -> Result<Arc<FragmentRestrictionAtType>, FederationError> { - // I would like to avoid the Arc here, it seems unnecessary, but with `.entry()` - // the lifetime does not really want to work out. - // (&'cache mut self) -> Result<&'cache FragmentRestrictionAtType> - match self.map.entry((fragment.name.clone(), ty.clone())) { - indexmap::map::Entry::Occupied(entry) => Ok(Arc::clone(entry.get())), - indexmap::map::Entry::Vacant(entry) => Ok(Arc::clone( - entry.insert(Arc::new(fragment.expanded_selection_set_at_type(ty)?)), - )), - } - } -} + let type_condition = frag + .inline_fragment + .type_condition_position + .as_ref() + .map_or_else( + || "undefined".to_string(), + |condition| condition.to_string(), + ); + let selections = frag.selection_set.selections.len(); + let mut name = format!("_generated_on{type_condition}{selections}"); -impl FragmentRestrictionAtType { - fn new(selections: SelectionSet, validator: Option<FieldsConflictValidator>) -> Self { - Self { - selections, - validator: validator.map(Arc::new), - } + let key = (type_condition, selections); + let index = self + .names + .entry(key) + .and_modify(|index| *index += 1) + .or_default(); + _ = write!(&mut name, "_{index}"); + + Name::new_unchecked(&name) } - // It's possible that while the fragment technically applies at `parent_type`, its "rebasing" on - // `parent_type` is empty, or contains only `__typename`. For instance, suppose we have - // a union `U = A | B | C`, and then a fragment: - // ```graphql - // fragment F on U { - // ... on A { - // x - // } - // ... on B { - // y - // } - // } - // ``` - // It is then possible to apply `F` when the parent type is `C`, but this ends up selecting - // nothing at all. - // - // Using `F` in those cases is, while not 100% incorrect, at least not productive, and so we - // skip it in that case. This is essentially an optimization. - fn is_useless(&self) -> bool { - let mut iter = self.selections.iter(); + /// Is a selection set worth using for a newly generated named fragment? + fn is_worth_using(selection_set: &SelectionSet) -> bool { + let mut iter = selection_set.iter(); let Some(first) = iter.next() else { + // An empty selection is not worth using (and invalid!)
+ return false; + }; + let Selection::Field(field) = first else { return true; }; - iter.next().is_none() && first.is_typename_field() - } -} - -impl Fragment { - /// Computes the expanded selection set of this fragment along with its validator to check - /// against other fragments applied under the same selection set. - fn expanded_selection_set_at_type( - &self, - ty: &CompositeTypeDefinitionPosition, - ) -> Result { - let expanded_selection_set = self.selection_set.expand_all_fragments()?; - let selection_set = expanded_selection_set.flatten_unnecessary_fragments( - ty, - /*named_fragments*/ &Default::default(), - &self.schema, - )?; - - if !self.type_condition_position.is_object_type() { - // When the type condition of the fragment is not an object type, the - // `FieldsInSetCanMerge` rule is more restrictive and any fields can create conflicts. - // Thus, we have to use the full validator in this case. (see - // https://github.com/graphql/graphql-spec/issues/1085 for details.) - return Ok(FragmentRestrictionAtType::new( - selection_set.clone(), - Some(FieldsConflictValidator::from_selection_set( - &expanded_selection_set, - )), - )); - } - - // Use a smaller validator for efficiency. - // Note that `trimmed` is the difference of 2 selections that may not have been normalized - // on the same parent type, so in practice, it is possible that `trimmed` contains some of - // the selections that `selectionSet` contains, but that they have been simplified in - // `selectionSet` in such a way that the `minus` call does not see it. However, it is not - // trivial to deal with this, and it is fine given that we use trimmed to create the - // validator because we know the non-trimmed parts cannot create field conflict issues so - // we're trying to build a smaller validator, but it's ok if trimmed is not as small as it - // theoretically can be. - let trimmed = expanded_selection_set.minus(&selection_set)?; - let validator = - (!trimmed.is_empty()).then(|| FieldsConflictValidator::from_selection_set(&trimmed)); - Ok(FragmentRestrictionAtType::new( - selection_set.clone(), - validator, - )) - } - - /// Checks whether `self` fragment includes the other fragment (`other_fragment_name`). - // - // Note that this is slightly different from `self` "using" `other_fragment` in that this - // essentially checks if the full selection set of `other_fragment` is contained by `self`, so - // this only look at "top-level" usages. - // - // Note that this is guaranteed to return `false` if passed self's name. - // Note: This is a heuristic looking for the other named fragment used directly in the - // selection set. It may not return `true` even though the other fragment's selections - // are actually covered by self's selection set. - // PORT_NOTE: The JS version memoizes the result of this function. But, the current Rust port - // does not. - fn includes(&self, other_fragment_name: &Name) -> bool { - if self.name == *other_fragment_name { - return false; - } - - self.selection_set.selections.values().any(|selection| { - matches!( - selection, - Selection::FragmentSpread(fragment) if fragment.spread.fragment_name == *other_fragment_name - ) - }) + // If there's more than one selection, or one selection with a subselection, + // it's probably worth using + iter.next().is_some() || field.selection_set.is_some() } -} - -enum FullMatchingFragmentCondition<'a> { - ForFieldSelection, - ForInlineFragmentSelection { - // the type condition and directives on an inline fragment selection. 
- type_condition_position: &'a CompositeTypeDefinitionPosition, - directives: &'a DirectiveList, - }, -} -impl<'a> FullMatchingFragmentCondition<'a> { - /// Determines whether the given fragment is allowed to match the whole selection set by itself - /// (without another selection set wrapping it). - fn check(&self, fragment: &Node<Fragment>) -> bool { - match self { - // We can never apply a fragment that has directives on it at the field level. - Self::ForFieldSelection => fragment.directives.is_empty(), + /// Modify the selection set so that eligible inline fragments are moved to named fragment spreads. + fn visit_selection_set( + &mut self, + selection_set: &mut SelectionSet, + ) -> Result<(), FederationError> { + let mut new_selection_set = SelectionSet::empty( + selection_set.schema.clone(), + selection_set.type_position.clone(), + ); - // To be able to use a matching inline fragment, it needs to have either no directives, - // or if it has some, then: - // 1. All its directives should also be on the current element. - // 2. The type condition of this element should be the fragment's condition, because - // if those 2 conditions are true, we can replace the whole current inline fragment - // with the matching spread and directives will still match. - Self::ForInlineFragmentSelection { - type_condition_position, - directives, - } => { - if fragment.directives.is_empty() { - return true; + for selection in Arc::make_mut(&mut selection_set.selections).values_mut() { + match selection { + SelectionValue::Field(mut field) => { + if let Some(selection_set) = field.get_selection_set_mut() { + self.visit_selection_set(selection_set)?; + } + new_selection_set + .add_local_selection(&Selection::Field(Arc::clone(field.get())))?; + } + SelectionValue::FragmentSpread(frag) => { + new_selection_set + .add_local_selection(&Selection::FragmentSpread(Arc::clone(frag.get())))?; + } + SelectionValue::InlineFragment(frag) + if !Self::is_worth_using(&frag.get().selection_set) => + { + new_selection_set + .add_local_selection(&Selection::InlineFragment(Arc::clone(frag.get())))?; + } + SelectionValue::InlineFragment(mut candidate) => { + self.visit_selection_set(candidate.get_selection_set_mut())?; - // PORT_NOTE: The JS version handles `@defer` directive differently. However, Rust - // version can't have `@defer` at this point (see comments on `enum SelectionKey` - // definition) - fragment.type_condition_position == **type_condition_position - && fragment - .directives - .iter() - .all(|d1| directives.iter().any(|d2| d1 == d2)) - } - } - } -} - -/// The return type for `SelectionSet::try_optimize_with_fragments`. -#[derive(derive_more::From)] -enum SelectionSetOrFragment { - SelectionSet(SelectionSet), - Fragment(Node<Fragment>), -} - -impl SelectionSet { - /// Reduce the list of applicable fragments by eliminating fragments that directly include - /// another fragment. - // - // We have found the list of fragments that apply to some subset of the sub-selection. In - // general, we want to now produce the selection set with spreads for those fragments plus - // any selection that is not covered by any of the fragments. For instance, suppose that - // `subselection` is `{ a b c d e }` and we have found that `fragment F1 on X { a b c }` - // and `fragment F2 on X { c d }` apply, then we will generate `{ ...F1 ...F2 e }`. - // - // In that example, `c` is covered by both fragments. And this is fine in this example as - // it is worth using both fragments in general.
A special case of this however is if a - // fragment is entirely included into another. That is, consider that we now have `fragment - // F1 on X { a ...F2 }` and `fragment F2 on X { b c }`. In that case, the code above would - // still match both `F1 and `F2`, but as `F1` includes `F2` already, we really want to only - // use `F1`. So in practice, we filter away any fragment spread that is known to be - // included in another one that applies. - // - // TODO: note that the logic used for this is theoretically a bit sub-optimal. That is, we - // only check if one of the fragment happens to directly include a spread for another - // fragment at top-level as in the example above. We do this because it is cheap to check - // and is likely the most common case of this kind of inclusion. But in theory, we would - // have `fragment F1 on X { a b c }` and `fragment F2 on X { b c }`, in which case `F2` is - // still included in `F1`, but we'd have to work harder to figure this out and it's unclear - // it's a good tradeoff. And while you could argue that it's on the user to define its - // fragments a bit more optimally, it's actually a tad more complex because we're looking - // at fragments in a particular context/parent type. Consider an interface `I` and: - // ```graphql - // fragment F3 on I { - // ... on X { - // a - // } - // ... on Y { - // b - // c - // } - // } - // - // fragment F4 on I { - // ... on Y { - // c - // } - // ... on Z { - // d - // } - // } - // ``` - // In that case, neither fragment include the other per-se. But what if we have - // sub-selection `{ b c }` but where parent type is `Y`. In that case, both `F3` and `F4` - // applies, and in that particular context, `F3` is fully included in `F4`. Long story - // short, we'll currently return `{ ...F3 ...F4 }` in that case, but it would be - // technically better to return only `F4`. However, this feels niche, and it might be - // costly to verify such inclusions, so not doing it for now. - fn reduce_applicable_fragments( - applicable_fragments: &mut Vec<(Node, Arc)>, - ) { - // Note: It's not possible for two fragments to include each other. So, we don't need to - // worry about inclusion cycles. - let included_fragments: IndexSet = applicable_fragments - .iter() - .filter(|(fragment, _)| { - applicable_fragments - .iter() - .any(|(other_fragment, _)| other_fragment.includes(&fragment.name)) - }) - .map(|(fragment, _)| fragment.name.clone()) - .collect(); - - applicable_fragments.retain(|(fragment, _)| !included_fragments.contains(&fragment.name)); - } - - /// Try to reuse existing fragments to optimize this selection set. - /// Returns either - /// - a new selection set partially optimized by re-using given `fragments`, or - /// - a single fragment that covers the full selection set. - // PORT_NOTE: Moved from `Selection` class in JS code to SelectionSet struct in Rust. - // PORT_NOTE: `parent_type` argument seems always to be the same as `self.type_position`. - // PORT_NOTE: In JS, this was called `tryOptimizeWithFragments`. - fn try_apply_fragments( - &self, - parent_type: &CompositeTypeDefinitionPosition, - context: &ReuseContext<'_>, - validator: &mut FieldsConflictMultiBranchValidator, - fragments_at_type: &mut FragmentRestrictionAtTypeCache, - full_match_condition: FullMatchingFragmentCondition, - ) -> Result { - // We limit to fragments whose selection could be applied "directly" at `parent_type`, - // meaning without taking the fragment condition into account. 
The idea being that if the - // fragment condition would be needed inside `parent_type`, then that condition will not - // have been "normalized away" and so we want this very method to be called on the - // fragment whose type _is_ the fragment condition (at which point, this - // `can_apply_directly_at_type` method will apply). Also note that it is because we have - // this restriction that calling `expanded_selection_set_at_type` is ok. - let candidates = context - .fragments - .get_all_may_apply_directly_at_type(parent_type); - - // First, we check which of the candidates do apply inside the selection set, if any. If we - // find a candidate that applies to the whole selection set, then we stop and only return - // that one candidate. Otherwise, we accumulate in `applicable_fragments` the list of fragments - // that apply to a subset. - let mut applicable_fragments = Vec::new(); - for candidate in candidates { - let candidate = candidate?; - let at_type = - fragments_at_type.expanded_selection_set_at_type(candidate, parent_type)?; - if at_type.is_useless() { - continue; - } - - // I don't love this, but fragments may introduce new fields to the operation, including - // fields that use variables that are not declared in the operation. There are two ways - // to work around this: adjusting the fragments so they only list the fields that we - // actually need, or excluding fragments that introduce variable references from reuse. - // The former would be ideal, as we would not execute more fields than required. It's - // also much trickier to do. The latter fixes this particular issue but leaves the - // output in a less than ideal state. - // The consideration here is: `generate_query_fragments` has significant advantages - // over fragment reuse, and so we do not want to invest a lot of time into improving - // fragment reuse. We do the simple, less-than-ideal thing. - if let Some(variable_definitions) = &context.operation_variables { - let fragment_variables = candidate.used_variables(); - if fragment_variables - .difference(variable_definitions) - .next() - .is_some() - { - continue; - } - } - - // As we check inclusion, we ignore the case where the fragment queries __typename - // but `self` does not. The rationale is that querying `__typename` - // unnecessarily is mostly harmless (it always works and it's super cheap) so we - // don't want to not use a fragment just to save querying a `__typename` in a few - // cases. But the underlying context of why this matters is that the query planner - // always requests __typename for abstract types, and will do so in fragments too, - // but we can have a field that _does_ return an abstract type within a fragment, - // but that _does not_ end up returning an abstract type when applied in a "more - // specific" context (think a fragment on an interface I1 where a field inside - // returns another interface I2, but applied in the context of an implementation - // type of I1 where that particular field returns an implementation of I2 rather - // than I2 directly; we would have added __typename to the fragment (because it's - // all interfaces), but the selection itself, which only deals with object types, - // may not have __typename requested; using the fragment might still be a good - // idea, and querying __typename needlessly is a very small price to pay for that).
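As an aside, the variable check in the loop above boils down to a subset test between two sets of variable names. Here is a minimal stand-alone sketch of that idea, simplified to plain string sets rather than the router's internal types (the function name is hypothetical):

```rust
use std::collections::HashSet;

/// A fragment is only eligible for reuse if every variable it references
/// is already declared by the operation (hypothetical, simplified check).
fn all_fragment_variables_declared(
    fragment_variables: &HashSet<String>,
    operation_variables: &HashSet<String>,
) -> bool {
    // Same effect as `fragment_variables.difference(operation_variables).next().is_none()`.
    fragment_variables.is_subset(operation_variables)
}

fn main() {
    let operation: HashSet<String> = HashSet::from(["a".to_string()]);
    let fragment: HashSet<String> = HashSet::from(["a".to_string(), "b".to_string()]);
    // `$b` is not declared by the operation, so this fragment would be skipped.
    assert!(!all_fragment_variables_declared(&fragment, &operation));
}
```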
- let res = self.containment( - &at_type.selections, - ContainmentOptions { - ignore_missing_typename: true, - }, - ); - match res { - Containment::Equal if full_match_condition.check(candidate) => { - if !validator.check_can_reuse_fragment_and_track_it(&at_type)? { - // We cannot use it at all, so no point in adding to `applicable_fragments`. + // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! + // JS federation does not consider fragments without a type condition. + if candidate + .get() + .inline_fragment + .type_condition_position + .is_none() + { + new_selection_set.add_local_selection(&Selection::InlineFragment( + Arc::clone(candidate.get()), + ))?; continue; } - // Special case: Found a fragment that covers the full selection set. - return Ok(candidate.clone().into()); - } - // Note that if a fragment applies to only a subset of the sub-selections, then we - // really only can use it if that fragment is defined _without_ directives. - Containment::Equal | Containment::StrictlyContained - if candidate.directives.is_empty() => - { - applicable_fragments.push((candidate.clone(), at_type)); - } - // Not eligible; Skip it. - _ => (), - } - } - - if applicable_fragments.is_empty() { - return Ok(self.clone().into()); // Not optimizable - } - // Narrow down the list of applicable fragments by removing those that are included in - // another. - Self::reduce_applicable_fragments(&mut applicable_fragments); - - // Build a new optimized selection set. - let mut not_covered_so_far = self.clone(); - let mut optimized = SelectionSet::empty(self.schema.clone(), self.type_position.clone()); - for (fragment, at_type) in applicable_fragments { - if !validator.check_can_reuse_fragment_and_track_it(&at_type)? { - continue; - } - let not_covered = self.minus(&at_type.selections)?; - not_covered_so_far = not_covered_so_far.intersection(¬_covered)?; + let directives = &candidate.get().inline_fragment.directives; + let skip_include = directives + .iter() + .map(|directive| match directive.name.as_str() { + "skip" | "include" => Ok(directive.clone()), + _ => Err(()), + }) + .collect::>(); - // PORT_NOTE: The JS version uses `parent_type` as the "sourceType", which may be - // different from `fragment.type_condition_position`. But, Rust version does - // not have "sourceType" field for `FragmentSpreadSelection`. - let fragment_selection = FragmentSpreadSelection::from_fragment( - &fragment, - /*directives*/ &Default::default(), - ); - optimized.add_local_selection(&fragment_selection.into())?; - } + // If there are any directives *other* than @skip and @include, + // we can't just transfer them to the generated fragment spread, + // so we have to keep this inline fragment. + let Ok(skip_include) = skip_include else { + new_selection_set.add_local_selection(&Selection::InlineFragment( + Arc::clone(candidate.get()), + ))?; + continue; + }; - optimized.add_local_selection_set(¬_covered_so_far)?; - Ok(optimized.into()) - } -} + // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! + // JS does not special-case @skip and @include. It never extracts a fragment if + // there's any directives on it. 
This code duplicates the body from the + // previous condition so it's very easy to remove when we're ready :) + if !skip_include.is_empty() { + new_selection_set.add_local_selection(&Selection::InlineFragment( + Arc::clone(candidate.get()), + ))?; + continue; + } -//============================================================================= -// Retain fragments in selection sets while expanding the rest + let existing = self.fragments.iter().find(|existing| { + existing.type_condition_position + == candidate.get().inline_fragment.casted_type() + && existing.selection_set == candidate.get().selection_set + }); -impl Selection { - /// Expand fragments that are not in the `fragments_to_keep`. - // PORT_NOTE: The JS version's name was `expandFragments`, which was confusing with - // `expand_all_fragments`. So, it was renamed to `retain_fragments`. - fn retain_fragments( - &self, - parent_type: &CompositeTypeDefinitionPosition, - fragments_to_keep: &NamedFragments, - ) -> Result<SelectionOrSet, FederationError> { - match self { - Selection::FragmentSpread(fragment) => { - if fragments_to_keep.contains(&fragment.spread.fragment_name) { - // Keep this spread - Ok(self.clone().into()) - } else { - // Expand the fragment - let expanded_sub_selections = - fragment.selection_set.retain_fragments(fragments_to_keep)?; - if *parent_type == fragment.spread.type_condition_position - && fragment.spread.directives.is_empty() - { - // The fragment is of the same type as the parent, so we can just use - // the expanded sub-selections directly. - Ok(expanded_sub_selections.into()) + let existing = if let Some(existing) = existing { + existing } else { - // Create an inline fragment since type condition is necessary. - let inline = InlineFragmentSelection::from_selection_set( - parent_type.clone(), - expanded_sub_selections, - fragment.spread.directives.clone(), - ); - Ok(Selection::from(inline).into()) - } + // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! + // This should be reverted to `self.next_name();` when we're ready. + let name = self.generate_name(candidate.get()); + self.fragments.insert(Fragment { + schema: selection_set.schema.clone(), + name: name.clone(), + type_condition_position: candidate.get().inline_fragment.casted_type(), + directives: Default::default(), + selection_set: candidate.get().selection_set.clone(), + }); + self.fragments.get(&name).unwrap() + }; + new_selection_set.add_local_selection(&Selection::from( + FragmentSpreadSelection { + spread: FragmentSpread { + schema: selection_set.schema.clone(), + fragment_name: existing.name.clone(), + type_condition_position: existing.type_condition_position.clone(), + directives: skip_include.into(), + fragment_directives: existing.directives.clone(), + selection_id: crate::operation::SelectionId::new(), + }, + selection_set: existing.selection_set.clone(), + }, + ))?; } } - - // Otherwise, expand the sub-selections. - _ => Ok(self - .map_selection_set(|selection_set| { - Ok(Some(selection_set.retain_fragments(fragments_to_keep)?)) - })? - .into()), - } - } -} - -// Note: `retain_fragments` methods may return a selection or a selection set. -impl From<SelectionOrSet> for SelectionMapperReturn { - fn from(value: SelectionOrSet) -> Self { - match value { - SelectionOrSet::Selection(selection) => selection.into(), - SelectionOrSet::SelectionSet(selections) => { - // The items in a selection set need to be cloned here, since its sub-selections - // are contained in an `Arc`.
- Vec::from_iter(selections.selections.values().cloned()).into() - } - } - } -} - -impl SelectionSet { - /// Expand fragments that are not in the `fragments_to_keep`. - // PORT_NOTE: The JS version's name was `expandFragments`, which was confusing with - // `expand_all_fragments`. So, it was renamed to `retain_fragments`. - fn retain_fragments( - &self, - fragments_to_keep: &NamedFragments, - ) -> Result { - self.lazy_map(fragments_to_keep, |selection| { - Ok(selection - .retain_fragments(&self.type_position, fragments_to_keep)? - .into()) - }) - } -} - -//============================================================================= -// Optimize (or reduce) the named fragments in the query -// -// Things to consider: -// - Unused fragment definitions can be dropped without an issue. -// - Dropping low-usage named fragments and expanding them may insert other fragments resulting in -// increased usage of those inserted. -// -// Example: -// ```graphql -// query { -// ...F1 -// } -// -// fragment F1 { -// a { ...F2 } -// b { ...F2 } -// } -// -// fragment F2 { -// // something -// } -// ``` -// then at this point where we've only counted usages in the query selection, `usages` will be -// `{ F1: 1, F2: 0 }`. But we do not want to expand _both_ F1 and F2. Instead, we want to expand -// F1 first, and then realize that this increases F2 usages to 2, which means we stop there and keep F2. - -impl NamedFragments { - /// Updates `self` by computing the reduced set of NamedFragments that are used in the - /// selection set and other fragments at least `min_usage_to_optimize` times. Also, computes - /// the new selection set that uses only the reduced set of fragments by expanding the other - /// ones. - /// - Returned selection set will be normalized. - fn reduce( - &mut self, - selection_set: &SelectionSet, - min_usage_to_optimize: u32, - ) -> Result { - // Call `reduce_inner` repeatedly until we reach a fix-point, since newly computed - // selection set may drop some fragment references due to normalization, which could lead - // to further reduction. - // - It is hard to avoid this chain reaction, since we need to account for the effects of - // normalization. - let mut last_size = self.len(); - let mut last_selection_set = selection_set.clone(); - while last_size > 0 { - let new_selection_set = - self.reduce_inner(&last_selection_set, min_usage_to_optimize)?; - - // Reached a fix-point => stop - if self.len() == last_size { - // Assumes that `new_selection_set` is the same as `last_selection_set` in this - // case. - break; - } - - // If we've expanded some fragments but kept others, then it's not 100% impossible that - // some fragment was used multiple times in some expanded fragment(s), but that - // post-expansion all of it's usages are "dead" branches that are removed by the final - // `flatten_unnecessary_fragments`. In that case though, we need to ensure we don't include the now-unused - // fragment in the final list of fragments. - // TODO: remark that the same reasoning could leave a single instance of a fragment - // usage, so if we really really want to never have less than `minUsagesToOptimize`, we - // could do some loop of `expand then flatten` unless all fragments are provably used - // enough. We don't bother, because leaving this is not a huge deal and it's not worth - // the complexity, but it could be that we can refactor all this later to avoid this - // case without additional complexity. 
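To make the usage-count arithmetic described above concrete, here is a small self-contained simulation of the `F1`/`F2` example from the comment (plain maps, not the router's actual data structures): `F1` is used once at top level, so with a threshold of 2 it gets expanded, and expanding it surfaces two usages of `F2`, which is then kept.

```rust
use std::collections::HashMap;

fn main() {
    let min_usage_to_optimize = 2u32;
    // Fragment name -> fragments spread inside its definition (F1 spreads F2 twice).
    let definitions: HashMap<&str, Vec<&str>> =
        HashMap::from([("F1", vec!["F2", "F2"]), ("F2", vec![])]);
    // Usage counts after scanning only the query selection: { F1: 1, F2: 0 }.
    let mut usages: HashMap<&str, u32> = HashMap::from([("F1", 1), ("F2", 0)]);

    let f1_count = usages["F1"];
    if f1_count < min_usage_to_optimize {
        // Expand F1: transfer its inner usages, multiplied by F1's own usage count.
        for inner in &definitions["F1"] {
            *usages.entry(inner).or_insert(0) += f1_count;
        }
        usages.remove("F1");
    }

    // F2 is now counted twice, so it survives the reduction.
    assert_eq!(usages["F2"], 2);
}
```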
- - // Prepare the next iteration - last_size = self.len(); - last_selection_set = new_selection_set; - } - Ok(last_selection_set) - } - - /// The inner loop body of the `reduce` method. - fn reduce_inner( - &mut self, - selection_set: &SelectionSet, - min_usage_to_optimize: u32, - ) -> Result<SelectionSet, FederationError> { - let mut usages = selection_set.used_fragments(); - - // Short-circuiting: Nothing was used => Drop everything (selection_set is unchanged). - if usages.is_empty() { - *self = Default::default(); - return Ok(selection_set.clone()); - } - - // Determine which ones to retain. - // - Calculate the usage count of each fragment in both the query and other fragment definitions. - // - If a fragment is to be kept, fragments used in it are counted. - // - If a fragment is to be dropped, fragments used in it are counted and multiplied by its usage. - // - Decide in reverse dependency order, so that at each step, the fragment being visited - // has the following properties: - // - It is either indirectly used by a previous fragment, or not used directly by any - // fragment visited & retained before. - // - Its usage count should be correctly calculated as if dropped fragments were expanded. - // - We take advantage of the fact that `NamedFragments` is already sorted in dependency - // order. - // PORT_NOTE: The `computeFragmentsToKeep` function is implemented here. - let original_size = self.len(); - for fragment in self.iter_rev() { - let usage_count = usages.get(&fragment.name).copied().unwrap_or_default(); - if usage_count >= min_usage_to_optimize { - // Count indirect usages within the fragment definition. - fragment.collect_used_fragment_names(&mut usages); - } else { - // Compute the new usage count after expanding the `fragment`. - Self::update_usages(&mut usages, fragment, usage_count); - } } - self.retain(|name, _fragment| { - let usage_count = usages.get(name).copied().unwrap_or_default(); - usage_count >= min_usage_to_optimize - }); - - // Short-circuiting: Nothing was dropped (fully used) => Nothing to change. - if self.len() == original_size { - return Ok(selection_set.clone()); - } - - // Update the fragment definitions in `self` after reduction. - // Note: This is an unfortunate clone, since `self` can't be passed to `retain_fragments`, - // while being mutated. - let fragments_to_keep = self.clone(); - for (_, fragment) in self.iter_mut() { - Node::make_mut(fragment).selection_set = fragment - .selection_set - .retain_fragments(&fragments_to_keep)? - .flatten_unnecessary_fragments( - &fragment.selection_set.type_position, - &fragments_to_keep, - &fragment.schema, - )?; - } - - // Compute the new selection set based on the new reduced set of fragments. - // Note that optimizing all fragments to potentially re-expand some is not entirely - // optimal, but it's unclear how to do otherwise, and it probably doesn't matter too much in - // practice (we only call this optimization on the final computed query plan, so not a very - // hot path; plus in most cases we won't even reach that point either because there is no - // fragment, or none will have been optimized away so we'll exit above). - let reduced_selection_set = selection_set.retain_fragments(self)?; + *selection_set = new_selection_set; - // Expanding fragments could create some "inefficiencies" that we wouldn't have if we - // hadn't re-optimized the fragments to de-optimize it later, so we do a final "flatten" - // pass to remove those.
- reduced_selection_set.flatten_unnecessary_fragments( - &reduced_selection_set.type_position, - self, - &selection_set.schema, - ) + Ok(()) } - fn update_usages( - usages: &mut IndexMap<Name, u32>, - fragment: &Node<Fragment>, - usage_count: u32, - ) { - let mut inner_usages = IndexMap::default(); - fragment.collect_used_fragment_names(&mut inner_usages); - - for (name, inner_count) in inner_usages { - *usages.entry(name).or_insert(0) += inner_count * usage_count; - } + /// Consumes the generator and returns the fragments it generated. + fn into_inner(self) -> NamedFragments { + self.fragments } } //============================================================================= -// `reuse_fragments` methods (putting everything together) - -impl Selection { - fn reuse_fragments_inner( - &self, - context: &ReuseContext<'_>, - validator: &mut FieldsConflictMultiBranchValidator, - fragments_at_type: &mut FragmentRestrictionAtTypeCache, - ) -> Result<Selection, FederationError> { - match self { - Selection::Field(field) => Ok(field - .reuse_fragments_inner(context, validator, fragments_at_type)? - .into()), - Selection::FragmentSpread(_) => Ok(self.clone()), // Do nothing - Selection::InlineFragment(inline_fragment) => Ok(inline_fragment - .reuse_fragments_inner(context, validator, fragments_at_type)? - .into()), - } - } -} - -impl FieldSelection { - fn reuse_fragments_inner( - &self, - context: &ReuseContext<'_>, - validator: &mut FieldsConflictMultiBranchValidator, - fragments_at_type: &mut FragmentRestrictionAtTypeCache, - ) -> Result<FieldSelection, FederationError> { - let Some(base_composite_type): Option<CompositeTypeDefinitionPosition> = - self.field.output_base_type()?.try_into().ok() - else { - return Ok(self.clone()); - }; - let Some(ref selection_set) = self.selection_set else { - return Ok(self.clone()); - }; - - let mut field_validator = validator.for_field(&self.field); - - // First, see if we can reuse fragments for the selection of this field. - let opt = selection_set.try_apply_fragments( - &base_composite_type, - context, - &mut field_validator, - fragments_at_type, - FullMatchingFragmentCondition::ForFieldSelection, - )?; - - let mut optimized = match opt { - SelectionSetOrFragment::Fragment(fragment) => { - let fragment_selection = FragmentSpreadSelection::from_fragment( - &fragment, - /*directives*/ &Default::default(), - ); - SelectionSet::from_selection(base_composite_type, fragment_selection.into()) - } - SelectionSetOrFragment::SelectionSet(selection_set) => selection_set, - }; - optimized = - optimized.reuse_fragments_inner(context, &mut field_validator, fragments_at_type)?; - Ok(self.with_updated_selection_set(Some(optimized))) - } -} - -/// Return type for `InlineFragmentSelection::reuse_fragments`. -#[derive(derive_more::From)] -enum FragmentSelection { - // Note: Enum variants are named to match those of `Selection`.
- InlineFragment(InlineFragmentSelection), - FragmentSpread(FragmentSpreadSelection), -} - -impl From for Selection { - fn from(value: FragmentSelection) -> Self { - match value { - FragmentSelection::InlineFragment(inline_fragment) => inline_fragment.into(), - FragmentSelection::FragmentSpread(fragment_spread) => fragment_spread.into(), - } - } -} - -impl InlineFragmentSelection { - fn reuse_fragments_inner( - &self, - context: &ReuseContext<'_>, - validator: &mut FieldsConflictMultiBranchValidator, - fragments_at_type: &mut FragmentRestrictionAtTypeCache, - ) -> Result { - let optimized; +// Tests - let type_condition_position = &self.inline_fragment.type_condition_position; - if let Some(type_condition_position) = type_condition_position { - let opt = self.selection_set.try_apply_fragments( - type_condition_position, - context, - validator, - fragments_at_type, - FullMatchingFragmentCondition::ForInlineFragmentSelection { - type_condition_position, - directives: &self.inline_fragment.directives, - }, - )?; +#[cfg(test)] +mod tests { + use super::*; + use crate::operation::tests::*; - match opt { - SelectionSetOrFragment::Fragment(fragment) => { - // We're fully matching the sub-selection. If the fragment condition is also - // this element condition, then we can replace the whole element by the spread - // (not just the sub-selection). - if *type_condition_position == fragment.type_condition_position { - // Optimized as `...`, dropping the original inline spread (`self`). + /// Returns a consistent GraphQL name for the given index. + fn fragment_name(mut index: usize) -> Name { + /// https://spec.graphql.org/draft/#NameContinue + const NAME_CHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"; + /// https://spec.graphql.org/draft/#NameStart + const NAME_START_CHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"; - // Note that `FullMatchingFragmentCondition::ForInlineFragmentSelection` - // above guarantees that this element directives are a superset of the - // fragment directives. But there can be additional directives, and in that - // case they should be kept on the spread. - // PORT_NOTE: We are assuming directives on fragment definitions are - // carried over to their spread sites as JS version does, which - // is handled differently in Rust version (see `FragmentSpread`). - let directives: executable::DirectiveList = self - .inline_fragment - .directives - .iter() - .filter(|d1| !fragment.directives.iter().any(|d2| *d1 == d2)) - .cloned() - .collect(); - return Ok( - FragmentSpreadSelection::from_fragment(&fragment, &directives).into(), - ); - } else { - // Otherwise, we keep this element and use a sub-selection with just the spread. - // Optimized as `...on { ... }` - optimized = SelectionSet::from_selection( - type_condition_position.clone(), - FragmentSpreadSelection::from_fragment( - &fragment, - /*directives*/ &Default::default(), - ) - .into(), - ); - } - } - SelectionSetOrFragment::SelectionSet(selection_set) => { - optimized = selection_set; - } - } + if index < NAME_START_CHARS.len() { + Name::new_static_unchecked(&NAME_START_CHARS[index..index + 1]) } else { - optimized = self.selection_set.clone(); - } - - Ok(InlineFragmentSelection::new( - self.inline_fragment.clone(), - // Then, recurse inside the field sub-selection (note that if we matched some fragments - // above, this recursion will "ignore" those as `FragmentSpreadSelection`'s - // `reuse_fragments()` is a no-op). 
- optimized.reuse_fragments_inner(context, validator, fragments_at_type)?, - ) - .into()) - } -} - -impl SelectionSet { - fn reuse_fragments_inner( - &self, - context: &ReuseContext<'_>, - validator: &mut FieldsConflictMultiBranchValidator, - fragments_at_type: &mut FragmentRestrictionAtTypeCache, - ) -> Result { - self.lazy_map(context.fragments, |selection| { - Ok(selection - .reuse_fragments_inner(context, validator, fragments_at_type)? - .into()) - }) - } - - fn contains_fragment_spread(&self) -> bool { - self.iter().any(|selection| { - matches!(selection, Selection::FragmentSpread(_)) - || selection - .selection_set() - .map(|subselection| subselection.contains_fragment_spread()) - .unwrap_or(false) - }) - } - - /// ## Errors - /// Returns an error if the selection set contains a named fragment spread. - fn reuse_fragments(&mut self, context: &ReuseContext<'_>) -> Result<(), FederationError> { - if context.fragments.is_empty() { - return Ok(()); - } - - if self.contains_fragment_spread() { - return Err(FederationError::internal("reuse_fragments() must only be used on selection sets that do not contain named fragment spreads")); - } + let mut s = String::new(); - // Calling reuse_fragments() will not match a fragment that would have expanded at - // top-level. That is, say we have the selection set `{ x y }` for a top-level `Query`, and - // we have a fragment - // ``` - // fragment F on Query { - // x - // y - // } - // ``` - // then calling `self.reuse_fragments(fragments)` would only apply check if F apply to - // `x` and then `y`. - // - // To ensure the fragment match in this case, we "wrap" the selection into a trivial - // fragment of the selection parent, so in the example above, we create selection `... on - // Query { x y }`. With that, `reuse_fragments` will correctly match on the `on Query` - // fragment; after which we can unpack the final result. - let wrapped = InlineFragmentSelection::from_selection_set( - self.type_position.clone(), // parent type - self.clone(), // selection set - Default::default(), // directives - ); - let mut validator = FieldsConflictMultiBranchValidator::from_initial_validator( - FieldsConflictValidator::from_selection_set(self), - ); - let optimized = wrapped.reuse_fragments_inner( - context, - &mut validator, - &mut FragmentRestrictionAtTypeCache::default(), - )?; + let i = index % NAME_START_CHARS.len(); + s.push(NAME_START_CHARS.as_bytes()[i].into()); + index /= NAME_START_CHARS.len(); - // Now, it's possible we matched a full fragment, in which case `optimized` will be just - // the named fragment, and in that case we return a singleton selection with just that. - // Otherwise, it's our wrapping inline fragment with the sub-selections optimized, and we - // just return that subselection. - *self = match optimized { - FragmentSelection::FragmentSpread(spread) => { - SelectionSet::from_selection(self.type_position.clone(), spread.into()) + while index > 0 { + let i = index % NAME_CHARS.len(); + s.push(NAME_CHARS.as_bytes()[i].into()); + index /= NAME_CHARS.len(); } - FragmentSelection::InlineFragment(inline_fragment) => inline_fragment.selection_set, - }; - Ok(()) - } -} - -impl Operation { - // PORT_NOTE: The JS version of `reuse_fragments` takes an optional `minUsagesToOptimize` argument. - // However, it's only used in tests. So, it's removed in the Rust version. 
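For reference, the naming scheme implemented by the new `fragment_name` test helper introduced in this diff is a bijective-base conversion over the two GraphQL name alphabets. A stand-alone replica (returning `String` instead of `Name`, same character tables) illustrates the numbering it produces:

```rust
/// Stand-alone replica of the `fragment_name` test helper, returning `String`
/// instead of `Name`, to show the progression of generated names.
fn fragment_name(mut index: usize) -> String {
    const NAME_CHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
    const NAME_START_CHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_";

    if index < NAME_START_CHARS.len() {
        NAME_START_CHARS[index..index + 1].to_string()
    } else {
        let mut s = String::new();
        // The first character must be a valid NameStart character...
        s.push(NAME_START_CHARS.as_bytes()[index % NAME_START_CHARS.len()].into());
        index /= NAME_START_CHARS.len();
        // ...and any further characters may be NameContinue characters.
        while index > 0 {
            s.push(NAME_CHARS.as_bytes()[index % NAME_CHARS.len()].into());
            index /= NAME_CHARS.len();
        }
        s
    }
}

fn main() {
    assert_eq!(fragment_name(0), "a");   // single NameStart character
    assert_eq!(fragment_name(52), "_");  // last NameStart character
    assert_eq!(fragment_name(53), "ab"); // wraps around into a second character
}
```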
- const DEFAULT_MIN_USAGES_TO_OPTIMIZE: u32 = 2; - // `fragments` - rebased fragment definitions for the operation's subgraph - // - `self.selection_set` must be fragment-spread-free. - fn reuse_fragments_inner( - &mut self, - fragments: &NamedFragments, - min_usages_to_optimize: u32, - ) -> Result<(), FederationError> { - if fragments.is_empty() { - return Ok(()); - } - - // Optimize the operation's selection set by re-using existing fragments. - let before_optimization = self.selection_set.clone(); - self.selection_set - .reuse_fragments(&ReuseContext::for_operation(fragments, &self.variables))?; - if before_optimization == self.selection_set { - return Ok(()); - } - - // Optimize the named fragment definitions by dropping low-usage ones. - let mut final_fragments = fragments.clone(); - let final_selection_set = - final_fragments.reduce(&self.selection_set, min_usages_to_optimize)?; - - self.selection_set = final_selection_set; - self.named_fragments = final_fragments; - Ok(()) - } - - /// Optimize the parsed size of the operation by applying fragment spreads. Fragment spreads - /// are reused from the original user-provided fragments. - /// - /// `fragments` - rebased fragment definitions for the operation's subgraph - /// - // PORT_NOTE: In JS, this function was called "optimize". - pub(crate) fn reuse_fragments( - &mut self, - fragments: &NamedFragments, - ) -> Result<(), FederationError> { - self.reuse_fragments_inner(fragments, Self::DEFAULT_MIN_USAGES_TO_OPTIMIZE) - } - - /// Optimize the parsed size of the operation by generating fragments based on the selections - /// in the operation. - pub(crate) fn generate_fragments(&mut self) -> Result<(), FederationError> { - // Currently, this method simply pulls out every inline fragment into a named fragment. If - // multiple inline fragments are the same, they use the same named fragment. - // - // This method can generate named fragments that are only used once. It's not ideal, but it - // also doesn't seem that bad. Avoiding this is possible but more work, and keeping this - // as simple as possible is a big benefit for now. - // - // When we have more advanced correctness testing, we can add more features to fragment - // generation, like factoring out partial repeated slices of selection sets or only - // introducing named fragments for patterns that occur more than once. - let mut generator = FragmentGenerator::default(); - generator.visit_selection_set(&mut self.selection_set)?; - self.named_fragments = generator.into_inner(); - Ok(()) - } - - /// Used by legacy roundtrip tests. - /// - This lowers `min_usages_to_optimize` to `1` in order to make it easier to write unit tests. - #[cfg(test)] - fn reuse_fragments_for_roundtrip_test( - &mut self, - fragments: &NamedFragments, - ) -> Result<(), FederationError> { - self.reuse_fragments_inner(fragments, /*min_usages_to_optimize*/ 1) - } - - // PORT_NOTE: This mirrors the JS version's `Operation.expandAllFragments`. But this method is - // mainly for unit tests. The actual port of `expandAllFragments` is in `normalize_operation`. - #[cfg(test)] - fn expand_all_fragments_and_normalize(&self) -> Result { - let selection_set = self - .selection_set - .expand_all_fragments()? 
- .flatten_unnecessary_fragments( - &self.selection_set.type_position, - &self.named_fragments, - &self.schema, - )?; - Ok(Self { - named_fragments: Default::default(), - selection_set, - ..self.clone() - }) - } -} - -#[derive(Debug, Default)] -struct FragmentGenerator { - fragments: NamedFragments, - // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! - names: IndexMap<(String, usize), usize>, -} - -impl FragmentGenerator { - // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! - // In the future, we will just use `.next_name()`. - fn generate_name(&mut self, frag: &InlineFragmentSelection) -> Name { - use std::fmt::Write as _; - - let type_condition = frag - .inline_fragment - .type_condition_position - .as_ref() - .map_or_else( - || "undefined".to_string(), - |condition| condition.to_string(), - ); - let selections = frag.selection_set.selections.len(); - let mut name = format!("_generated_on{type_condition}{selections}"); - - let key = (type_condition, selections); - let index = self - .names - .entry(key) - .and_modify(|index| *index += 1) - .or_default(); - _ = write!(&mut name, "_{index}"); - - Name::new_unchecked(&name) - } - - /// Is a selection set worth using for a newly generated named fragment? - fn is_worth_using(selection_set: &SelectionSet) -> bool { - let mut iter = selection_set.iter(); - let Some(first) = iter.next() else { - // An empty selection is not worth using (and invalid!) - return false; - }; - let Selection::Field(field) = first else { - return true; - }; - // If there's more than one selection, or one selection with a subselection, - // it's probably worth using - iter.next().is_some() || field.selection_set.is_some() - } - - /// Modify the selection set so that eligible inline fragments are moved to named fragment spreads. - fn visit_selection_set( - &mut self, - selection_set: &mut SelectionSet, - ) -> Result<(), FederationError> { - let mut new_selection_set = SelectionSet::empty( - selection_set.schema.clone(), - selection_set.type_position.clone(), - ); - - for selection in Arc::make_mut(&mut selection_set.selections).values_mut() { - match selection { - SelectionValue::Field(mut field) => { - if let Some(selection_set) = field.get_selection_set_mut() { - self.visit_selection_set(selection_set)?; - } - new_selection_set - .add_local_selection(&Selection::Field(Arc::clone(field.get())))?; - } - SelectionValue::FragmentSpread(frag) => { - new_selection_set - .add_local_selection(&Selection::FragmentSpread(Arc::clone(frag.get())))?; - } - SelectionValue::InlineFragment(frag) - if !Self::is_worth_using(&frag.get().selection_set) => - { - new_selection_set - .add_local_selection(&Selection::InlineFragment(Arc::clone(frag.get())))?; - } - SelectionValue::InlineFragment(mut candidate) => { - self.visit_selection_set(candidate.get_selection_set_mut())?; - - // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! - // JS federation does not consider fragments without a type condition. 
- if candidate - .get() - .inline_fragment - .type_condition_position - .is_none() - { - new_selection_set.add_local_selection(&Selection::InlineFragment( - Arc::clone(candidate.get()), - ))?; - continue; - } - - let directives = &candidate.get().inline_fragment.directives; - let skip_include = directives - .iter() - .map(|directive| match directive.name.as_str() { - "skip" | "include" => Ok(directive.clone()), - _ => Err(()), - }) - .collect::>(); - - // If there are any directives *other* than @skip and @include, - // we can't just transfer them to the generated fragment spread, - // so we have to keep this inline fragment. - let Ok(skip_include) = skip_include else { - new_selection_set.add_local_selection(&Selection::InlineFragment( - Arc::clone(candidate.get()), - ))?; - continue; - }; - - // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! - // JS does not special-case @skip and @include. It never extracts a fragment if - // there's any directives on it. This code duplicates the body from the - // previous condition so it's very easy to remove when we're ready :) - if !skip_include.is_empty() { - new_selection_set.add_local_selection(&Selection::InlineFragment( - Arc::clone(candidate.get()), - ))?; - continue; - } - - let existing = self.fragments.iter().find(|existing| { - existing.type_condition_position - == candidate.get().inline_fragment.casted_type() - && existing.selection_set == candidate.get().selection_set - }); - - let existing = if let Some(existing) = existing { - existing - } else { - // XXX(@goto-bus-stop): This is temporary to support mismatch testing with JS! - // This should be reverted to `self.next_name();` when we're ready. - let name = self.generate_name(candidate.get()); - self.fragments.insert(Fragment { - schema: selection_set.schema.clone(), - name: name.clone(), - type_condition_position: candidate.get().inline_fragment.casted_type(), - directives: Default::default(), - selection_set: candidate.get().selection_set.clone(), - }); - self.fragments.get(&name).unwrap() - }; - new_selection_set.add_local_selection(&Selection::from( - FragmentSpreadSelection { - spread: FragmentSpread { - schema: selection_set.schema.clone(), - fragment_name: existing.name.clone(), - type_condition_position: existing.type_condition_position.clone(), - directives: skip_include.into(), - fragment_directives: existing.directives.clone(), - selection_id: crate::operation::SelectionId::new(), - }, - selection_set: existing.selection_set.clone(), - }, - ))?; - } - } - } - - *selection_set = new_selection_set; - - Ok(()) - } - - /// Consumes the generator and returns the fragments it generated. - fn into_inner(self) -> NamedFragments { - self.fragments - } -} - -//============================================================================= -// Tests - -#[cfg(test)] -mod tests { - use apollo_compiler::ExecutableDocument; - - use super::*; - use crate::operation::tests::*; - - macro_rules! assert_without_fragments { - ($operation: expr, @$expected: literal) => {{ - let without_fragments = $operation.expand_all_fragments_and_normalize().unwrap(); - insta::assert_snapshot!(without_fragments, @$expected); - without_fragments - }}; - } - - macro_rules! 
assert_optimized { - ($operation: expr, $named_fragments: expr, @$expected: literal) => {{ - let mut optimized = $operation.clone(); - optimized.reuse_fragments(&$named_fragments).unwrap(); - validate_operation(&$operation.schema, &optimized.to_string()); - insta::assert_snapshot!(optimized, @$expected) - }}; - } - - /// Returns a consistent GraphQL name for the given index. - fn fragment_name(mut index: usize) -> Name { - /// https://spec.graphql.org/draft/#NameContinue - const NAME_CHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"; - /// https://spec.graphql.org/draft/#NameStart - const NAME_START_CHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"; - - if index < NAME_START_CHARS.len() { - Name::new_static_unchecked(&NAME_START_CHARS[index..index + 1]) - } else { - let mut s = String::new(); - - let i = index % NAME_START_CHARS.len(); - s.push(NAME_START_CHARS.as_bytes()[i].into()); - index /= NAME_START_CHARS.len(); - - while index > 0 { - let i = index % NAME_CHARS.len(); - s.push(NAME_CHARS.as_bytes()[i].into()); - index /= NAME_CHARS.len(); - } - - Name::new_unchecked(&s) + Name::new_unchecked(&s) } } @@ -1832,1619 +389,6 @@ mod tests { assert_eq!(fragment_name(usize::MAX), "oS5Uz8g3Iqw"); } - #[test] - fn duplicate_fragment_spreads_after_fragment_expansion() { - // This is a regression test for FED-290, making sure `make_select` method can handle - // duplicate fragment spreads. - // During optimization, `make_selection` may merge multiple fragment spreads with the same - // key. This can happen in the case below where `F1` and `F2` are expanded and generating - // two duplicate `F_shared` spreads in the definition of `fragment F_target`. - let schema_doc = r#" - type Query { - t: T - t2: T - } - - type T { - id: ID! - a: Int! - b: Int! - c: Int! - } - "#; - - let query = r#" - fragment F_shared on T { - id - a - } - fragment F1 on T { - ...F_shared - b - } - - fragment F2 on T { - ...F_shared - c - } - - fragment F_target on T { - ...F1 - ...F2 - } - - query { - t { - ...F_target - } - t2 { - ...F_target - } - } - "#; - - let operation = parse_operation(&parse_schema(schema_doc), query); - let expanded = operation.expand_all_fragments_and_normalize().unwrap(); - assert_optimized!(expanded, operation.named_fragments, @r###" - fragment F_target on T { - id - a - b - c - } - - { - t { - ...F_target - } - t2 { - ...F_target - } - } - "###); - } - - #[test] - fn optimize_fragments_using_other_fragments_when_possible() { - let schema = r#" - type Query { - t: I - } - - interface I { - b: Int - u: U - } - - type T1 implements I { - a: Int - b: Int - u: U - } - - type T2 implements I { - x: String - y: String - b: Int - u: U - } - - union U = T1 | T2 - "#; - - let query = r#" - fragment OnT1 on T1 { - a - b - } - - fragment OnT2 on T2 { - x - y - } - - fragment OnI on I { - b - } - - fragment OnU on U { - ...OnI - ...OnT1 - ...OnT2 - } - - query { - t { - ...OnT1 - ...OnT2 - ...OnI - u { - ...OnU - } - } - } - "#; - - let operation = parse_operation(&parse_schema(schema), query); - - let expanded = assert_without_fragments!( - operation, - @r###" - { - t { - ... on T1 { - a - b - } - ... on T2 { - x - y - } - b - u { - ... on I { - b - } - ... on T1 { - a - b - } - ... on T2 { - x - y - } - } - } - } - "### - ); - - assert_optimized!(expanded, operation.named_fragments, @r###" - fragment OnU on U { - ... on I { - b - } - ... on T1 { - a - b - } - ... 
on T2 { - x - y - } - } - - { - t { - ...OnU - u { - ...OnU - } - } - } - "###); - } - - #[test] - fn handles_fragments_using_other_fragments() { - let schema = r#" - type Query { - t: I - } - - interface I { - b: Int - c: Int - u1: U - u2: U - } - - type T1 implements I { - a: Int - b: Int - c: Int - me: T1 - u1: U - u2: U - } - - type T2 implements I { - x: String - y: String - b: Int - c: Int - u1: U - u2: U - } - - union U = T1 | T2 - "#; - - let query = r#" - fragment OnT1 on T1 { - a - b - } - - fragment OnT2 on T2 { - x - y - } - - fragment OnI on I { - b - c - } - - fragment OnU on U { - ...OnI - ...OnT1 - ...OnT2 - } - - query { - t { - ...OnT1 - ...OnT2 - u1 { - ...OnU - } - u2 { - ...OnU - } - ... on T1 { - me { - ...OnI - } - } - } - } - "#; - - let operation = parse_operation(&parse_schema(schema), query); - - let expanded = assert_without_fragments!( - &operation, - @r###" - { - t { - ... on T1 { - a - b - me { - b - c - } - } - ... on T2 { - x - y - } - u1 { - ... on I { - b - c - } - ... on T1 { - a - b - } - ... on T2 { - x - y - } - } - u2 { - ... on I { - b - c - } - ... on T1 { - a - b - } - ... on T2 { - x - y - } - } - } - } - "###); - - // We should reuse and keep all fragments, because 1) onU is used twice and 2) - // all the other ones are used once in the query, and once in onU definition. - assert_optimized!(expanded, operation.named_fragments, @r###" - fragment OnT1 on T1 { - a - b - } - - fragment OnT2 on T2 { - x - y - } - - fragment OnI on I { - b - c - } - - fragment OnU on U { - ...OnI - ...OnT1 - ...OnT2 - } - - { - t { - ... on T1 { - ...OnT1 - me { - ...OnI - } - } - ...OnT2 - u1 { - ...OnU - } - u2 { - ...OnU - } - } - } - "###); - } - - macro_rules! test_fragments_roundtrip { - ($schema_doc: expr, $query: expr, @$expanded: literal) => {{ - let schema = parse_schema($schema_doc); - let operation = parse_operation(&schema, $query); - let without_fragments = operation.expand_all_fragments_and_normalize().unwrap(); - insta::assert_snapshot!(without_fragments, @$expanded); - - let mut optimized = without_fragments; - optimized.reuse_fragments(&operation.named_fragments).unwrap(); - validate_operation(&operation.schema, &optimized.to_string()); - assert_eq!(optimized.to_string(), operation.to_string()); - }}; - } - - /// Tests ported from JS codebase rely on special behavior of - /// `Operation::reuse_fragments_for_roundtrip_test` that is specific for testing, since it makes it - /// easier to write tests. - macro_rules! 
test_fragments_roundtrip_legacy { - ($schema_doc: expr, $query: expr, @$expanded: literal) => {{ - let schema = parse_schema($schema_doc); - let operation = parse_operation(&schema, $query); - let without_fragments = operation.expand_all_fragments_and_normalize().unwrap(); - insta::assert_snapshot!(without_fragments, @$expanded); - - let mut optimized = without_fragments; - optimized.reuse_fragments_for_roundtrip_test(&operation.named_fragments).unwrap(); - validate_operation(&operation.schema, &optimized.to_string()); - assert_eq!(optimized.to_string(), operation.to_string()); - }}; - } - - #[test] - fn handles_fragments_with_nested_selections() { - let schema_doc = r#" - type Query { - t1a: T1 - t2a: T1 - } - - type T1 { - t2: T2 - } - - type T2 { - x: String - y: String - } - "#; - - let query = r#" - fragment OnT1 on T1 { - t2 { - x - } - } - - query { - t1a { - ...OnT1 - t2 { - y - } - } - t2a { - ...OnT1 - } - } - "#; - - test_fragments_roundtrip!(schema_doc, query, @r###" - { - t1a { - t2 { - x - y - } - } - t2a { - t2 { - x - } - } - } - "###); - } - - #[test] - fn handles_nested_fragments_with_field_intersection() { - let schema_doc = r#" - type Query { - t: T - } - - type T { - a: A - b: Int - } - - type A { - x: String - y: String - z: String - } - "#; - - // The subtlety here is that `FA` contains `__typename` and so after we're reused it, the - // selection will look like: - // { - // t { - // a { - // ...FA - // } - // } - // } - // But to recognize that `FT` can be reused from there, we need to be able to see that - // the `__typename` that `FT` wants is inside `FA` (and since FA applies on the parent type `A` - // directly, it is fine to reuse). - let query = r#" - fragment FA on A { - __typename - x - y - } - - fragment FT on T { - a { - __typename - ...FA - } - } - - query { - t { - ...FT - } - } - "#; - - test_fragments_roundtrip_legacy!(schema_doc, query, @r###" - { - t { - a { - __typename - x - y - } - } - } - "###); - } - - #[test] - fn handles_fragment_matching_subset_of_field_selection() { - let schema_doc = r#" - type Query { - t: T - } - - type T { - a: String - b: B - c: Int - d: D - } - - type B { - x: String - y: String - } - - type D { - m: String - n: String - } - "#; - - let query = r#" - fragment FragT on T { - b { - __typename - x - } - c - d { - m - } - } - - { - t { - ...FragT - d { - n - } - a - } - } - "#; - - test_fragments_roundtrip_legacy!(schema_doc, query, @r###" - { - t { - b { - __typename - x - } - c - d { - m - n - } - a - } - } - "###); - } - - #[test] - fn handles_fragment_matching_subset_of_inline_fragment_selection() { - // Pretty much the same test than the previous one, but matching inside a fragment selection inside - // of inside a field selection. - // PORT_NOTE: ` implements I` was added in the definition of `type T`, so that validation can pass. - let schema_doc = r#" - type Query { - i: I - } - - interface I { - a: String - } - - type T implements I { - a: String - b: B - c: Int - d: D - } - - type B { - x: String - y: String - } - - type D { - m: String - n: String - } - "#; - - let query = r#" - fragment FragT on T { - b { - __typename - x - } - c - d { - m - } - } - - { - i { - ... on T { - ...FragT - d { - n - } - a - } - } - } - "#; - - test_fragments_roundtrip_legacy!(schema_doc, query, @r###" - { - i { - ... 
on T { - b { - __typename - x - } - c - d { - m - n - } - a - } - } - } - "###); - } - - #[test] - fn intersecting_fragments() { - let schema_doc = r#" - type Query { - t: T - } - - type T { - a: String - b: B - c: Int - d: D - } - - type B { - x: String - y: String - } - - type D { - m: String - n: String - } - "#; - - // Note: the code that reuse fragments iterates on fragments in the order they are defined - // in the document, but when it reuse a fragment, it puts it at the beginning of the - // selection (somewhat random, it just feel often easier to read), so the net effect on - // this example is that `Frag2`, which will be reused after `Frag1` will appear first in - // the re-optimized selection. So we put it first in the input too so that input and output - // actually match (the `testFragmentsRoundtrip` compares strings, so it is sensible to - // ordering; we could theoretically use `Operation.equals` instead of string equality, - // which wouldn't really on ordering, but `Operation.equals` is not entirely trivial and - // comparing strings make problem a bit more obvious). - let query = r#" - fragment Frag1 on T { - b { - x - } - c - d { - m - } - } - - fragment Frag2 on T { - a - b { - __typename - x - } - d { - m - n - } - } - - { - t { - ...Frag1 - ...Frag2 - } - } - "#; - - // PORT_NOTE: `__typename` and `x`'s placements are switched in Rust. - test_fragments_roundtrip_legacy!(schema_doc, query, @r###" - { - t { - b { - __typename - x - } - c - d { - m - n - } - a - } - } - "###); - } - - #[test] - fn fragments_application_makes_type_condition_trivial() { - let schema_doc = r#" - type Query { - t: T - } - - interface I { - x: String - } - - type T implements I { - x: String - a: String - } - "#; - - let query = r#" - fragment FragI on I { - x - ... on T { - a - } - } - - { - t { - ...FragI - } - } - "#; - - test_fragments_roundtrip_legacy!(schema_doc, query, @r###" - { - t { - x - a - } - } - "###); - } - - #[test] - fn handles_fragment_matching_at_the_top_level_of_another_fragment() { - let schema_doc = r#" - type Query { - t: T - } - - type T { - a: String - u: U - } - - type U { - x: String - y: String - } - "#; - - let query = r#" - fragment Frag1 on T { - a - } - - fragment Frag2 on T { - u { - x - y - } - ...Frag1 - } - - fragment Frag3 on Query { - t { - ...Frag2 - } - } - - { - ...Frag3 - } - "#; - - test_fragments_roundtrip_legacy!(schema_doc, query, @r###" - { - t { - u { - x - y - } - a - } - } - "###); - } - - #[test] - fn handles_fragments_used_in_context_where_they_get_trimmed() { - let schema_doc = r#" - type Query { - t1: T1 - } - - interface I { - x: Int - } - - type T1 implements I { - x: Int - y: Int - } - - type T2 implements I { - x: Int - z: Int - } - "#; - - let query = r#" - fragment FragOnI on I { - ... on T1 { - y - } - ... on T2 { - z - } - } - - { - t1 { - ...FragOnI - } - } - "#; - - test_fragments_roundtrip_legacy!(schema_doc, query, @r###" - { - t1 { - y - } - } - "###); - } - - #[test] - fn handles_fragments_used_in_the_context_of_non_intersecting_abstract_types() { - let schema_doc = r#" - type Query { - i2: I2 - } - - interface I1 { - x: Int - } - - interface I2 { - y: Int - } - - interface I3 { - z: Int - } - - type T1 implements I1 & I2 { - x: Int - y: Int - } - - type T2 implements I1 & I3 { - x: Int - z: Int - } - "#; - - let query = r#" - fragment FragOnI1 on I1 { - ... on I2 { - y - } - ... on I3 { - z - } - } - - { - i2 { - ...FragOnI1 - } - } - "#; - - test_fragments_roundtrip_legacy!(schema_doc, query, @r###" - { - i2 { - ... 
on I1 { - ... on I2 { - y - } - ... on I3 { - z - } - } - } - } - "###); - } - - #[test] - fn handles_fragments_on_union_in_context_with_limited_intersection() { - let schema_doc = r#" - type Query { - t1: T1 - } - - union U = T1 | T2 - - type T1 { - x: Int - } - - type T2 { - y: Int - } - "#; - - let query = r#" - fragment OnU on U { - ... on T1 { - x - } - ... on T2 { - y - } - } - - { - t1 { - ...OnU - } - } - "#; - - test_fragments_roundtrip_legacy!(schema_doc, query, @r###" - { - t1 { - x - } - } - "###); - } - - #[test] - fn off_by_1_error() { - let schema = r#" - type Query { - t: T - } - type T { - id: String! - a: A - v: V - } - type A { - id: String! - } - type V { - t: T! - } - "#; - - let query = r#" - { - t { - ...TFrag - v { - t { - id - a { - __typename - id - } - } - } - } - } - - fragment TFrag on T { - __typename - id - } - "#; - - let operation = parse_operation(&parse_schema(schema), query); - - let expanded = assert_without_fragments!( - operation, - @r###" - { - t { - __typename - id - v { - t { - id - a { - __typename - id - } - } - } - } - } - "### - ); - - assert_optimized!(expanded, operation.named_fragments, @r###" - fragment TFrag on T { - __typename - id - } - - { - t { - ...TFrag - v { - t { - ...TFrag - a { - __typename - id - } - } - } - } - } - "###); - } - - #[test] - fn removes_all_unused_fragments() { - let schema = r#" - type Query { - t1: T1 - } - - union U1 = T1 | T2 | T3 - union U2 = T2 | T3 - - type T1 { - x: Int - } - - type T2 { - y: Int - } - - type T3 { - z: Int - } - "#; - - let query = r#" - query { - t1 { - ...Outer - } - } - - fragment Outer on U1 { - ... on T1 { - x - } - ... on T2 { - ... Inner - } - ... on T3 { - ... Inner - } - } - - fragment Inner on U2 { - ... on T2 { - y - } - } - "#; - - let operation = parse_operation(&parse_schema(schema), query); - - let expanded = assert_without_fragments!( - operation, - @r###" - { - t1 { - x - } - } - "### - ); - - // This is a bit of contrived example, but the reusing code will be able - // to figure out that the `Outer` fragment can be reused and will initially - // do so, but it's only use once, so it will expand it, which yields: - // { - // t1 { - // ... on T1 { - // x - // } - // ... on T2 { - // ... Inner - // } - // ... on T3 { - // ... Inner - // } - // } - // } - // and so `Inner` will not be expanded (it's used twice). Except that - // the `flatten_unnecessary_fragments` code is apply then and will _remove_ both instances - // of `.... Inner`. Which is ok, but we must make sure the fragment - // itself is removed since it is not used now, which this test ensures. - assert_optimized!(expanded, operation.named_fragments, @r###" - { - t1 { - x - } - } - "###); - } - - #[test] - fn removes_fragments_only_used_by_unused_fragments() { - // Similar to the previous test, but we artificially add a - // fragment that is only used by the fragment that is finally - // unused. - let schema = r#" - type Query { - t1: T1 - } - - union U1 = T1 | T2 | T3 - union U2 = T2 | T3 - - type T1 { - x: Int - } - - type T2 { - y1: Y - y2: Y - } - - type T3 { - z: Int - } - - type Y { - v: Int - } - "#; - - let query = r#" - query { - t1 { - ...Outer - } - } - - fragment Outer on U1 { - ... on T1 { - x - } - ... on T2 { - ... Inner - } - ... on T3 { - ... Inner - } - } - - fragment Inner on U2 { - ... 
on T2 { - y1 { - ...WillBeUnused - } - y2 { - ...WillBeUnused - } - } - } - - fragment WillBeUnused on Y { - v - } - "#; - - let operation = parse_operation(&parse_schema(schema), query); - - let expanded = assert_without_fragments!( - operation, - @r###" - { - t1 { - x - } - } - "### - ); - - assert_optimized!(expanded, operation.named_fragments, @r###" - { - t1 { - x - } - } - "###); - } - - #[test] - fn keeps_fragments_used_by_other_fragments() { - let schema = r#" - type Query { - t1: T - t2: T - } - - type T { - a1: Int - a2: Int - b1: B - b2: B - } - - type B { - x: Int - y: Int - } - "#; - - let query = r#" - query { - t1 { - ...TFields - } - t2 { - ...TFields - } - } - - fragment TFields on T { - ...DirectFieldsOfT - b1 { - ...BFields - } - b2 { - ...BFields - } - } - - fragment DirectFieldsOfT on T { - a1 - a2 - } - - fragment BFields on B { - x - y - } - "#; - - let operation = parse_operation(&parse_schema(schema), query); - - let expanded = assert_without_fragments!( - operation, - @r###" - { - t1 { - a1 - a2 - b1 { - x - y - } - b2 { - x - y - } - } - t2 { - a1 - a2 - b1 { - x - y - } - b2 { - x - y - } - } - } - "### - ); - - // The `DirectFieldsOfT` fragments should not be kept as it is used only once within `TFields`, - // but the `BFields` one should be kept. - assert_optimized!(expanded, operation.named_fragments, @r###" - fragment BFields on B { - x - y - } - - fragment TFields on T { - a1 - a2 - b1 { - ...BFields - } - b2 { - ...BFields - } - } - - { - t1 { - ...TFields - } - t2 { - ...TFields - } - } - "###); - } - - /// - /// applied directives - /// - - #[test] - fn reuse_fragments_with_same_directive_in_the_fragment_selection() { - let schema_doc = r#" - type Query { - t1: T - t2: T - t3: T - } - - type T { - a: Int - b: Int - c: Int - d: Int - } - "#; - - let query = r#" - fragment DirectiveInDef on T { - a @include(if: $cond1) - } - - query myQuery($cond1: Boolean!, $cond2: Boolean!) { - t1 { - a - } - t2 { - ...DirectiveInDef - } - t3 { - a @include(if: $cond2) - } - } - "#; - - test_fragments_roundtrip_legacy!(schema_doc, query, @r###" - query myQuery($cond1: Boolean!, $cond2: Boolean!) { - t1 { - a - } - t2 { - a @include(if: $cond1) - } - t3 { - a @include(if: $cond2) - } - } - "###); - } - - #[test] - fn reuse_fragments_with_directives_on_inline_fragments() { - let schema_doc = r#" - type Query { - t1: T - t2: T - t3: T - } - - type T { - a: Int - b: Int - c: Int - d: Int - } - "#; - - let query = r#" - fragment NoDirectiveDef on T { - a - } - - query myQuery($cond1: Boolean!) { - t1 { - ...NoDirectiveDef - } - t2 { - ...NoDirectiveDef @include(if: $cond1) - } - } - "#; - - test_fragments_roundtrip!(schema_doc, query, @r###" - query myQuery($cond1: Boolean!) { - t1 { - a - } - t2 { - ... on T @include(if: $cond1) { - a - } - } - } - "###); - } - - #[test] - fn reuse_fragments_with_directive_on_typename() { - let schema = r#" - type Query { - t1: T - t2: T - t3: T - } - - type T { - a: Int - b: Int - c: Int - d: Int - } - "#; - let query = r#" - query A ($if: Boolean!) { - t1 { b a ...x } - t2 { ...x } - } - query B { - # Because this inline fragment is exactly the same shape as `x`, - # except for a `__typename` field, it may be tempting to reuse it. - # But `x.__typename` has a directive with a variable, and this query - # does not have that variable declared, so it can't be used. - t3 { ... 
on T { a c } } - } - fragment x on T { - __typename @include(if: $if) - a - c - } - "#; - let schema = parse_schema(schema); - let query = ExecutableDocument::parse_and_validate(schema.schema(), query, "query.graphql") - .unwrap(); - - let operation_a = - Operation::from_operation_document(schema.clone(), &query, Some("A")).unwrap(); - let operation_b = - Operation::from_operation_document(schema.clone(), &query, Some("B")).unwrap(); - let expanded_b = operation_b.expand_all_fragments_and_normalize().unwrap(); - - assert_optimized!(expanded_b, operation_a.named_fragments, @r###" - query B { - t3 { - a - c - } - } - "###); - } - - #[test] - fn reuse_fragments_with_non_intersecting_types() { - let schema = r#" - type Query { - t: T - s: S - s2: S - i: I - } - - interface I { - a: Int - b: Int - } - - type T implements I { - a: Int - b: Int - - c: Int - d: Int - } - type S implements I { - a: Int - b: Int - - f: Int - g: Int - } - "#; - let query = r#" - query A ($if: Boolean!) { - t { ...x } - s { ...x } - i { ...x } - } - query B { - s { - # this matches fragment x once it is flattened, - # because the `...on T` condition does not intersect with our - # current type `S` - __typename - a b - } - s2 { - # same snippet to get it to use the fragment - __typename - a b - } - } - fragment x on I { - __typename - a - b - ... on T { c d @include(if: $if) } - } - "#; - let schema = parse_schema(schema); - let query = ExecutableDocument::parse_and_validate(schema.schema(), query, "query.graphql") - .unwrap(); - - let operation_a = - Operation::from_operation_document(schema.clone(), &query, Some("A")).unwrap(); - let operation_b = - Operation::from_operation_document(schema.clone(), &query, Some("B")).unwrap(); - let expanded_b = operation_b.expand_all_fragments_and_normalize().unwrap(); - - assert_optimized!(expanded_b, operation_a.named_fragments, @r###" - query B { - s { - __typename - a - b - } - s2 { - __typename - a - b - } - } - "###); - } - /// /// empty branches removal /// diff --git a/apollo-federation/src/operation/rebase.rs b/apollo-federation/src/operation/rebase.rs index 556a72ca5f..cdf09bbafa 100644 --- a/apollo-federation/src/operation/rebase.rs +++ b/apollo-federation/src/operation/rebase.rs @@ -9,7 +9,6 @@ use itertools::Itertools; use super::runtime_types_intersect; use super::Field; use super::FieldSelection; -use super::Fragment; use super::FragmentSpread; use super::FragmentSpreadSelection; use super::InlineFragment; @@ -45,45 +44,23 @@ fn print_possible_runtimes( ) } -/// Options for handling rebasing errors. -#[derive(Clone, Copy, Default)] -enum OnNonRebaseableSelection { - /// Drop the selection that can't be rebased and continue. - Drop, - /// Propagate the rebasing error. 
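// Aside: the `OnNonRebaseableSelection` option being removed in this hunk
// implements a common shape of error handling, sketched standalone here:
// given per-item fallible results, either propagate the first error or
// silently drop the failing items and keep the rest. (Illustrative, std-only.)
#[derive(Clone, Copy)]
enum OnError {
    Drop,
    Propagate,
}

fn collect_or_drop<T, E>(
    items: impl Iterator<Item = Result<T, E>>,
    on_error: OnError,
) -> Result<Vec<T>, E> {
    match on_error {
        // Stop at the first error and bubble it up.
        OnError::Propagate => items.collect(),
        // Keep only the successful items, discarding errors.
        OnError::Drop => Ok(items.filter_map(Result::ok).collect()),
    }
}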
- #[default] - Error, -} - impl Selection { fn rebase_inner( &self, parent_type: &CompositeTypeDefinitionPosition, named_fragments: &NamedFragments, schema: &ValidFederationSchema, - on_non_rebaseable_selection: OnNonRebaseableSelection, ) -> Result { match self { Selection::Field(field) => field - .rebase_inner( - parent_type, - named_fragments, - schema, - on_non_rebaseable_selection, - ) + .rebase_inner(parent_type, named_fragments, schema) .map(|field| field.into()), - Selection::FragmentSpread(spread) => spread.rebase_inner( - parent_type, - named_fragments, - schema, - on_non_rebaseable_selection, - ), - Selection::InlineFragment(inline) => inline.rebase_inner( - parent_type, - named_fragments, - schema, - on_non_rebaseable_selection, - ), + Selection::FragmentSpread(spread) => { + spread.rebase_inner(parent_type, named_fragments, schema) + } + Selection::InlineFragment(inline) => { + inline.rebase_inner(parent_type, named_fragments, schema) + } } } @@ -93,7 +70,7 @@ impl Selection { named_fragments: &NamedFragments, schema: &ValidFederationSchema, ) -> Result { - self.rebase_inner(parent_type, named_fragments, schema, Default::default()) + self.rebase_inner(parent_type, named_fragments, schema) } fn can_add_to( @@ -145,18 +122,6 @@ pub(crate) enum RebaseError { }, } -impl FederationError { - fn is_rebase_error(&self) -> bool { - matches!( - self, - crate::error::FederationError::SingleFederationError { - inner: crate::error::SingleFederationError::InternalRebaseError(_), - .. - } - ) - } -} - impl From for FederationError { fn from(value: RebaseError) -> Self { crate::error::SingleFederationError::from(value).into() @@ -312,7 +277,6 @@ impl FieldSelection { parent_type: &CompositeTypeDefinitionPosition, named_fragments: &NamedFragments, schema: &ValidFederationSchema, - on_non_rebaseable_selection: OnNonRebaseableSelection, ) -> Result { if &self.field.schema == schema && &self.field.field_position.parent() == parent_type { // we are rebasing field on the same parent within the same schema - we can just return self @@ -345,12 +309,8 @@ impl FieldSelection { }); } - let rebased_selection_set = selection_set.rebase_inner( - &rebased_base_type, - named_fragments, - schema, - on_non_rebaseable_selection, - )?; + let rebased_selection_set = + selection_set.rebase_inner(&rebased_base_type, named_fragments, schema)?; if rebased_selection_set.selections.is_empty() { Err(RebaseError::EmptySelectionSet.into()) } else { @@ -434,7 +394,6 @@ impl FragmentSpreadSelection { parent_type: &CompositeTypeDefinitionPosition, named_fragments: &NamedFragments, schema: &ValidFederationSchema, - on_non_rebaseable_selection: OnNonRebaseableSelection, ) -> Result { // We preserve the parent type here, to make sure we don't lose context, but we actually don't // want to expand the spread as that would compromise the code that optimize subgraph fetches to re-use named @@ -482,12 +441,9 @@ impl FragmentSpreadSelection { // important because the very logic we're hitting here may need to happen inside the rebase on the // fragment selection, but that logic would not be triggered if we used the rebased `named_fragment` since // `rebase_on_same_schema` would then be 'true'. 
- let expanded_selection_set = self.selection_set.rebase_inner( - parent_type, - named_fragments, - schema, - on_non_rebaseable_selection, - )?; + let expanded_selection_set = + self.selection_set + .rebase_inner(parent_type, named_fragments, schema)?; // In theory, we could return the selection set directly, but making `SelectionSet.rebase_on` sometimes // return a `SelectionSet` complicate things quite a bit. So instead, we encapsulate the selection set // in an "empty" inline fragment. This make for non-really-optimal selection sets in the (relatively @@ -524,7 +480,7 @@ impl FragmentSpreadSelection { named_fragments: &NamedFragments, schema: &ValidFederationSchema, ) -> Result { - self.rebase_inner(parent_type, named_fragments, schema, Default::default()) + self.rebase_inner(parent_type, named_fragments, schema) } } @@ -620,7 +576,6 @@ impl InlineFragmentSelection { parent_type: &CompositeTypeDefinitionPosition, named_fragments: &NamedFragments, schema: &ValidFederationSchema, - on_non_rebaseable_selection: OnNonRebaseableSelection, ) -> Result { if &self.inline_fragment.schema == schema && self.inline_fragment.parent_type_position == *parent_type @@ -637,12 +592,9 @@ impl InlineFragmentSelection { // we are within the same schema - selection set does not have to be rebased Ok(InlineFragmentSelection::new(rebased_fragment, self.selection_set.clone()).into()) } else { - let rebased_selection_set = self.selection_set.rebase_inner( - &rebased_casted_type, - named_fragments, - schema, - on_non_rebaseable_selection, - )?; + let rebased_selection_set = + self.selection_set + .rebase_inner(&rebased_casted_type, named_fragments, schema)?; if rebased_selection_set.selections.is_empty() { // empty selection set Err(RebaseError::EmptySelectionSet.into()) @@ -711,24 +663,11 @@ impl SelectionSet { parent_type: &CompositeTypeDefinitionPosition, named_fragments: &NamedFragments, schema: &ValidFederationSchema, - on_non_rebaseable_selection: OnNonRebaseableSelection, ) -> Result { let rebased_results = self .selections .values() - .map(|selection| { - selection.rebase_inner( - parent_type, - named_fragments, - schema, - on_non_rebaseable_selection, - ) - }) - // Remove selections with rebase errors if requested - .filter(|result| { - matches!(on_non_rebaseable_selection, OnNonRebaseableSelection::Error) - || !result.as_ref().is_err_and(|err| err.is_rebase_error()) - }); + .map(|selection| selection.rebase_inner(parent_type, named_fragments, schema)); Ok(SelectionSet { schema: schema.clone(), @@ -748,7 +687,7 @@ impl SelectionSet { named_fragments: &NamedFragments, schema: &ValidFederationSchema, ) -> Result { - self.rebase_inner(parent_type, named_fragments, schema, Default::default()) + self.rebase_inner(parent_type, named_fragments, schema) } /// Returns true if the selection set would select cleanly from the given type in the given @@ -763,641 +702,3 @@ impl SelectionSet { .fallible_all(|selection| selection.can_add_to(parent_type, schema)) } } - -impl NamedFragments { - pub(crate) fn rebase_on( - &self, - schema: &ValidFederationSchema, - ) -> Result { - let mut rebased_fragments = NamedFragments::default(); - for fragment in self.fragments.values() { - if let Some(rebased_type) = schema - .get_type(fragment.type_condition_position.type_name().clone()) - .ok() - .and_then(|ty| CompositeTypeDefinitionPosition::try_from(ty).ok()) - { - if let Ok(mut rebased_selection) = fragment.selection_set.rebase_inner( - &rebased_type, - &rebased_fragments, - schema, - OnNonRebaseableSelection::Drop, - ) { - // 
Rebasing can leave some inefficiencies in some case (particularly when a spread has to be "expanded", see `FragmentSpreadSelection.rebaseOn`), - // so we do a top-level normalization to keep things clean. - rebased_selection = rebased_selection.flatten_unnecessary_fragments( - &rebased_type, - &rebased_fragments, - schema, - )?; - if NamedFragments::is_selection_set_worth_using(&rebased_selection) { - let fragment = Fragment { - schema: schema.clone(), - name: fragment.name.clone(), - type_condition_position: rebased_type.clone(), - directives: fragment.directives.clone(), - selection_set: rebased_selection, - }; - rebased_fragments.insert(fragment); - } - } - } - } - Ok(rebased_fragments) - } -} - -#[cfg(test)] -mod tests { - use apollo_compiler::collections::IndexSet; - use apollo_compiler::name; - - use crate::operation::normalize_operation; - use crate::operation::tests::parse_schema_and_operation; - use crate::operation::tests::parse_subgraph; - use crate::operation::NamedFragments; - use crate::schema::position::InterfaceTypeDefinitionPosition; - - #[test] - fn skips_unknown_fragment_fields() { - let operation_fragments = r#" -query TestQuery { - t { - ...FragOnT - } -} - -fragment FragOnT on T { - v0 - v1 - v2 - u1 { - v3 - v4 - v5 - } - u2 { - v4 - v5 - } -} - -type Query { - t: T -} - -type T { - v0: Int - v1: Int - v2: Int - u1: U - u2: U -} - -type U { - v3: Int - v4: Int - v5: Int -} -"#; - let (schema, mut executable_document) = parse_schema_and_operation(operation_fragments); - assert!( - !executable_document.fragments.is_empty(), - "operation should have some fragments" - ); - - if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { - let normalized_operation = normalize_operation( - operation, - NamedFragments::new(&executable_document.fragments, &schema), - &schema, - &IndexSet::default(), - ) - .unwrap(); - - let subgraph_schema = r#"type Query { - _: Int -} - -type T { - v1: Int - u1: U -} - -type U { - v3: Int - v5: Int -}"#; - let subgraph = parse_subgraph("A", subgraph_schema); - let rebased_fragments = normalized_operation.named_fragments.rebase_on(&subgraph); - assert!(rebased_fragments.is_ok()); - let rebased_fragments = rebased_fragments.unwrap(); - assert!(!rebased_fragments.is_empty()); - assert!(rebased_fragments.contains(&name!("FragOnT"))); - let rebased_fragment = rebased_fragments.fragments.get("FragOnT").unwrap(); - - insta::assert_snapshot!(rebased_fragment, @r###" - fragment FragOnT on T { - v1 - u1 { - v3 - v5 - } - } - "###); - } - } - - #[test] - fn skips_unknown_fragment_on_condition() { - let operation_fragments = r#" -query TestQuery { - t { - ...FragOnT - } - u { - ...FragOnU - } -} - -fragment FragOnT on T { - x - y -} - -fragment FragOnU on U { - x - y -} - -type Query { - t: T - u: U -} - -type T { - x: Int - y: Int -} - -type U { - x: Int - y: Int -} -"#; - let (schema, mut executable_document) = parse_schema_and_operation(operation_fragments); - assert!( - !executable_document.fragments.is_empty(), - "operation should have some fragments" - ); - assert_eq!(2, executable_document.fragments.len()); - - if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { - let normalized_operation = normalize_operation( - operation, - NamedFragments::new(&executable_document.fragments, &schema), - &schema, - &IndexSet::default(), - ) - .unwrap(); - - let subgraph_schema = r#"type Query { - t: T -} - -type T { - x: Int - y: Int -}"#; - let subgraph = parse_subgraph("A", subgraph_schema); - let 
rebased_fragments = normalized_operation.named_fragments.rebase_on(&subgraph); - assert!(rebased_fragments.is_ok()); - let rebased_fragments = rebased_fragments.unwrap(); - assert!(!rebased_fragments.is_empty()); - assert!(rebased_fragments.contains(&name!("FragOnT"))); - assert!(!rebased_fragments.contains(&name!("FragOnU"))); - let rebased_fragment = rebased_fragments.fragments.get("FragOnT").unwrap(); - - let expected = r#"fragment FragOnT on T { - x - y -}"#; - let actual = rebased_fragment.to_string(); - assert_eq!(actual, expected); - } - } - - #[test] - fn skips_unknown_type_within_fragment() { - let operation_fragments = r#" -query TestQuery { - i { - ...FragOnI - } -} - -fragment FragOnI on I { - id - otherId - ... on T1 { - x - } - ... on T2 { - y - } -} - -type Query { - i: I -} - -interface I { - id: ID! - otherId: ID! -} - -type T1 implements I { - id: ID! - otherId: ID! - x: Int -} - -type T2 implements I { - id: ID! - otherId: ID! - y: Int -} -"#; - let (schema, mut executable_document) = parse_schema_and_operation(operation_fragments); - assert!( - !executable_document.fragments.is_empty(), - "operation should have some fragments" - ); - - if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { - let normalized_operation = normalize_operation( - operation, - NamedFragments::new(&executable_document.fragments, &schema), - &schema, - &IndexSet::default(), - ) - .unwrap(); - - let subgraph_schema = r#"type Query { - i: I -} - -interface I { - id: ID! -} - -type T2 implements I { - id: ID! - y: Int -} -"#; - let subgraph = parse_subgraph("A", subgraph_schema); - let rebased_fragments = normalized_operation.named_fragments.rebase_on(&subgraph); - assert!(rebased_fragments.is_ok()); - let rebased_fragments = rebased_fragments.unwrap(); - assert!(!rebased_fragments.is_empty()); - assert!(rebased_fragments.contains(&name!("FragOnI"))); - let rebased_fragment = rebased_fragments.fragments.get("FragOnI").unwrap(); - - let expected = r#"fragment FragOnI on I { - id - ... on T2 { - y - } -}"#; - let actual = rebased_fragment.to_string(); - assert_eq!(actual, expected); - } - } - - #[test] - fn skips_typename_on_possible_interface_objects_within_fragment() { - let operation_fragments = r#" -query TestQuery { - i { - ...FragOnI - } -} - -fragment FragOnI on I { - __typename - id - x -} - -type Query { - i: I -} - -interface I { - id: ID! - x: String! -} - -type T implements I { - id: ID! - x: String! 
-} -"#; - - let (schema, mut executable_document) = parse_schema_and_operation(operation_fragments); - assert!( - !executable_document.fragments.is_empty(), - "operation should have some fragments" - ); - - if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { - let mut interface_objects: IndexSet = - IndexSet::default(); - interface_objects.insert(InterfaceTypeDefinitionPosition { - type_name: name!("I"), - }); - let normalized_operation = normalize_operation( - operation, - NamedFragments::new(&executable_document.fragments, &schema), - &schema, - &interface_objects, - ) - .unwrap(); - - let subgraph_schema = r#"extend schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/federation/v2.5", import: [{ name: "@interfaceObject" }, { name: "@key" }]) - -directive @link(url: String, as: String, import: [link__Import]) repeatable on SCHEMA - -directive @key(fields: federation__FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE - -directive @interfaceObject on OBJECT - -type Query { - i: I -} - -type I @interfaceObject @key(fields: "id") { - id: ID! - x: String! -} - -scalar link__Import - -scalar federation__FieldSet -"#; - let subgraph = parse_subgraph("A", subgraph_schema); - let rebased_fragments = normalized_operation.named_fragments.rebase_on(&subgraph); - assert!(rebased_fragments.is_ok()); - let rebased_fragments = rebased_fragments.unwrap(); - assert!(!rebased_fragments.is_empty()); - assert!(rebased_fragments.contains(&name!("FragOnI"))); - let rebased_fragment = rebased_fragments.fragments.get("FragOnI").unwrap(); - - let expected = r#"fragment FragOnI on I { - id - x -}"#; - let actual = rebased_fragment.to_string(); - assert_eq!(actual, expected); - } - } - - #[test] - fn skips_fragments_with_trivial_selections() { - let operation_fragments = r#" -query TestQuery { - t { - ...F1 - ...F2 - ...F3 - } -} - -fragment F1 on T { - a - b -} - -fragment F2 on T { - __typename - a - b -} - -fragment F3 on T { - __typename - a - b - c - d -} - -type Query { - t: T -} - -type T { - a: Int - b: Int - c: Int - d: Int -} -"#; - let (schema, mut executable_document) = parse_schema_and_operation(operation_fragments); - assert!( - !executable_document.fragments.is_empty(), - "operation should have some fragments" - ); - - if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { - let normalized_operation = normalize_operation( - operation, - NamedFragments::new(&executable_document.fragments, &schema), - &schema, - &IndexSet::default(), - ) - .unwrap(); - - let subgraph_schema = r#"type Query { - t: T -} - -type T { - c: Int - d: Int -} -"#; - let subgraph = parse_subgraph("A", subgraph_schema); - let rebased_fragments = normalized_operation.named_fragments.rebase_on(&subgraph); - assert!(rebased_fragments.is_ok()); - let rebased_fragments = rebased_fragments.unwrap(); - // F1 reduces to nothing, and F2 reduces to just __typename so we shouldn't keep them. - assert_eq!(1, rebased_fragments.len()); - assert!(rebased_fragments.contains(&name!("F3"))); - let rebased_fragment = rebased_fragments.fragments.get("F3").unwrap(); - - let expected = r#"fragment F3 on T { - __typename - c - d -}"#; - let actual = rebased_fragment.to_string(); - assert_eq!(actual, expected); - } - } - - #[test] - fn handles_skipped_fragments_within_fragments() { - let operation_fragments = r#" -query TestQuery { - ...TheQuery -} - -fragment TheQuery on Query { - t { - x - ... 
GetU - } -} - -fragment GetU on T { - u { - y - z - } -} - -type Query { - t: T -} - -type T { - x: Int - u: U -} - -type U { - y: Int - z: Int -} -"#; - let (schema, mut executable_document) = parse_schema_and_operation(operation_fragments); - assert!( - !executable_document.fragments.is_empty(), - "operation should have some fragments" - ); - - if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { - let normalized_operation = normalize_operation( - operation, - NamedFragments::new(&executable_document.fragments, &schema), - &schema, - &IndexSet::default(), - ) - .unwrap(); - - let subgraph_schema = r#"type Query { - t: T -} - -type T { - x: Int -}"#; - let subgraph = parse_subgraph("A", subgraph_schema); - let rebased_fragments = normalized_operation.named_fragments.rebase_on(&subgraph); - assert!(rebased_fragments.is_ok()); - let rebased_fragments = rebased_fragments.unwrap(); - // F1 reduces to nothing, and F2 reduces to just __typename so we shouldn't keep them. - assert_eq!(1, rebased_fragments.len()); - assert!(rebased_fragments.contains(&name!("TheQuery"))); - let rebased_fragment = rebased_fragments.fragments.get("TheQuery").unwrap(); - - let expected = r#"fragment TheQuery on Query { - t { - x - } -}"#; - let actual = rebased_fragment.to_string(); - assert_eq!(actual, expected); - } - } - - #[test] - fn handles_subtypes_within_subgraphs() { - let operation_fragments = r#" -query TestQuery { - ...TQuery -} - -fragment TQuery on Query { - t { - x - y - ... on T { - z - } - } -} - -type Query { - t: I -} - -interface I { - x: Int - y: Int -} - -type T implements I { - x: Int - y: Int - z: Int -} -"#; - let (schema, mut executable_document) = parse_schema_and_operation(operation_fragments); - assert!( - !executable_document.fragments.is_empty(), - "operation should have some fragments" - ); - - if let Some(operation) = executable_document.operations.named.get_mut("TestQuery") { - let normalized_operation = normalize_operation( - operation, - NamedFragments::new(&executable_document.fragments, &schema), - &schema, - &IndexSet::default(), - ) - .unwrap(); - - let subgraph_schema = r#"type Query { - t: T -} - -type T { - x: Int - y: Int - z: Int -} -"#; - - let subgraph = parse_subgraph("A", subgraph_schema); - let rebased_fragments = normalized_operation.named_fragments.rebase_on(&subgraph); - assert!(rebased_fragments.is_ok()); - let rebased_fragments = rebased_fragments.unwrap(); - // F1 reduces to nothing, and F2 reduces to just __typename so we shouldn't keep them. - assert_eq!(1, rebased_fragments.len()); - assert!(rebased_fragments.contains(&name!("TQuery"))); - let rebased_fragment = rebased_fragments.fragments.get("TQuery").unwrap(); - - let expected = r#"fragment TQuery on Query { - t { - x - y - z - } -}"#; - let actual = rebased_fragment.to_string(); - assert_eq!(actual, expected); - } - } -} diff --git a/apollo-federation/src/operation/selection_map.rs b/apollo-federation/src/operation/selection_map.rs index 19857ba274..477e2548ef 100644 --- a/apollo-federation/src/operation/selection_map.rs +++ b/apollo-federation/src/operation/selection_map.rs @@ -34,26 +34,22 @@ pub(crate) enum SelectionKey<'a> { /// The field alias (if specified) or field name in the resulting selection set. response_name: &'a Name, /// directives applied on the field - #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] directives: &'a DirectiveList, }, FragmentSpread { /// The name of the fragment. 
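// Aside on the rebase tests above: the observable behavior is a filter. When a
// fragment is rebased onto a subgraph schema, any selection the subgraph does
// not define gets dropped. A toy, std-only illustration of that idea (not the
// real rebase logic, which also handles type conditions and nested fragments):
use std::collections::HashSet;

fn prune_unknown_fields<'a>(
    fragment_fields: &[&'a str],
    subgraph_fields: &HashSet<&str>,
) -> Vec<&'a str> {
    fragment_fields
        .iter()
        .copied()
        .filter(|field| subgraph_fields.contains(field))
        .collect()
}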
fragment_name: &'a Name, /// Directives applied on the fragment spread (does not contain @defer). - #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] directives: &'a DirectiveList, }, InlineFragment { /// The optional type condition of the fragment. type_condition: Option<&'a Name>, /// Directives applied on the fragment spread (does not contain @defer). - #[serde(serialize_with = "crate::display_helpers::serialize_as_string")] directives: &'a DirectiveList, }, Defer { /// Unique selection ID used to distinguish deferred fragment spreads that cannot be merged. - #[cfg_attr(not(feature = "snapshot_tracing"), serde(skip))] deferred_id: SelectionId, }, } @@ -252,11 +248,6 @@ impl SelectionMap { self.selections.is_empty() } - /// Returns the first selection in the map, or None if the map is empty. - pub(crate) fn first(&self) -> Option<&Selection> { - self.selections.first() - } - /// Computes the hash of a selection key. fn hash(&self, key: SelectionKey<'_>) -> u64 { self.hash_builder.hash_one(key) diff --git a/apollo-federation/src/operation/tests/mod.rs b/apollo-federation/src/operation/tests/mod.rs index 6988b8e659..b0329cbb3d 100644 --- a/apollo-federation/src/operation/tests/mod.rs +++ b/apollo-federation/src/operation/tests/mod.rs @@ -16,10 +16,18 @@ use crate::query_graph::graph_path::OpPathElement; use crate::schema::position::InterfaceTypeDefinitionPosition; use crate::schema::position::ObjectTypeDefinitionPosition; use crate::schema::ValidFederationSchema; -use crate::subgraph::Subgraph; mod defer; +macro_rules! assert_normalized { + ($schema_doc: expr, $query: expr, @$expected: literal) => {{ + let schema = parse_schema($schema_doc); + let without_fragments = parse_and_expand(&schema, $query).unwrap(); + insta::assert_snapshot!(without_fragments, @$expected); + without_fragments + }}; +} + pub(super) fn parse_schema_and_operation( schema_and_operation: &str, ) -> (ValidFederationSchema, ExecutableDocument) { @@ -30,12 +38,6 @@ pub(super) fn parse_schema_and_operation( (schema, executable_document) } -pub(super) fn parse_subgraph(name: &str, schema: &str) -> ValidFederationSchema { - let parsed_schema = - Subgraph::parse_and_expand(name, &format!("https://{name}"), schema).unwrap(); - ValidFederationSchema::new(parsed_schema.schema).unwrap() -} - pub(super) fn parse_schema(schema_doc: &str) -> ValidFederationSchema { let schema = Schema::parse_and_validate(schema_doc, "schema.graphql").unwrap(); ValidFederationSchema::new(schema).unwrap() @@ -65,17 +67,6 @@ pub(super) fn parse_and_expand( normalize_operation(operation, fragments, schema, &Default::default()) } -/// Parse and validate the query similarly to `parse_operation`, but does not construct the -/// `Operation` struct. -pub(super) fn validate_operation(schema: &ValidFederationSchema, query: &str) { - apollo_compiler::ExecutableDocument::parse_and_validate( - schema.schema(), - query, - "query.graphql", - ) - .unwrap(); -} - #[test] fn expands_named_fragments() { let operation_with_named_fragment = r#" @@ -1672,10 +1663,6 @@ fn directive_propagation() { ) .expect("directive applications to be valid"); insta::assert_snapshot!(query, @r###" - fragment DirectiveOnDef on T @fragDefOnly @fragAll { - a - } - { t2 { ... on T @fragInlineOnly @fragAll { @@ -1704,3 +1691,376 @@ fn directive_propagation() { .expect_err("directive @fragSpreadOnly to be rejected"); insta::assert_snapshot!(err, @"Unsupported custom directive @fragSpreadOnly on fragment spread. 
Due to query transformations during planning, the router requires directives on fragment spreads to support both the FRAGMENT_SPREAD and INLINE_FRAGMENT locations."); } + +#[test] +fn handles_fragment_matching_at_the_top_level_of_another_fragment() { + let schema_doc = r#" + type Query { + t: T + } + + type T { + a: String + u: U + } + + type U { + x: String + y: String + } + "#; + + let query = r#" + fragment Frag1 on T { + a + } + + fragment Frag2 on T { + u { + x + y + } + ...Frag1 + } + + fragment Frag3 on Query { + t { + ...Frag2 + } + } + + { + ...Frag3 + } + "#; + + assert_normalized!(schema_doc, query, @r###" + { + t { + u { + x + y + } + a + } + } + "###); +} + +#[test] +fn handles_fragments_used_in_context_where_they_get_trimmed() { + let schema_doc = r#" + type Query { + t1: T1 + } + + interface I { + x: Int + } + + type T1 implements I { + x: Int + y: Int + } + + type T2 implements I { + x: Int + z: Int + } + "#; + + let query = r#" + fragment FragOnI on I { + ... on T1 { + y + } + ... on T2 { + z + } + } + + { + t1 { + ...FragOnI + } + } + "#; + + assert_normalized!(schema_doc, query, @r###" + { + t1 { + y + } + } + "###); +} + +#[test] +fn handles_fragments_on_union_in_context_with_limited_intersection() { + let schema_doc = r#" + type Query { + t1: T1 + } + + union U = T1 | T2 + + type T1 { + x: Int + } + + type T2 { + y: Int + } + "#; + + let query = r#" + fragment OnU on U { + ... on T1 { + x + } + ... on T2 { + y + } + } + + { + t1 { + ...OnU + } + } + "#; + + assert_normalized!(schema_doc, query, @r###" + { + t1 { + x + } + } + "###); +} + +#[test] +fn off_by_1_error() { + let schema = r#" + type Query { + t: T + } + type T { + id: String! + a: A + v: V + } + type A { + id: String! + } + type V { + t: T! + } + "#; + + let query = r#" + { + t { + ...TFrag + v { + t { + id + a { + __typename + id + } + } + } + } + } + + fragment TFrag on T { + __typename + id + } + "#; + + assert_normalized!(schema, query,@r###" + { + t { + id + v { + t { + id + a { + id + } + } + } + } + } + "### + ); +} + +/// +/// applied directives +/// + +#[test] +fn reuse_fragments_with_same_directive_in_the_fragment_selection() { + let schema_doc = r#" + type Query { + t1: T + t2: T + t3: T + } + + type T { + a: Int + b: Int + c: Int + d: Int + } + "#; + + let query = r#" + fragment DirectiveInDef on T { + a @include(if: $cond1) + } + + query ($cond1: Boolean!, $cond2: Boolean!) { + t1 { + a + } + t2 { + ...DirectiveInDef + } + t3 { + a @include(if: $cond2) + } + } + "#; + + assert_normalized!(schema_doc, query, @r###" + query($cond1: Boolean!, $cond2: Boolean!) { + t1 { + a + } + t2 { + a @include(if: $cond1) + } + t3 { + a @include(if: $cond2) + } + } + "###); +} + +#[test] +fn reuse_fragments_with_directive_on_typename() { + let schema = r#" + type Query { + t1: T + t2: T + t3: T + } + + type T { + a: Int + b: Int + c: Int + d: Int + } + "#; + let query = r#" + query ($if: Boolean!) { + t1 { b a ...x } + t2 { ...x } + } + fragment x on T { + __typename @include(if: $if) + a + c + } + "#; + + assert_normalized!(schema, query, @r###" + query($if: Boolean!) 
{
+      t1 {
+        b
+        a
+        __typename @include(if: $if)
+        c
+      }
+      t2 {
+        __typename @include(if: $if)
+        a
+        c
+      }
+    }
+    "###);
+}
+
+#[test]
+fn reuse_fragments_with_non_intersecting_types() {
+    let schema = r#"
+        type Query {
+            t: T
+            s: S
+            s2: S
+            i: I
+        }
+
+        interface I {
+            a: Int
+            b: Int
+        }
+
+        type T implements I {
+            a: Int
+            b: Int
+
+            c: Int
+            d: Int
+        }
+        type S implements I {
+            a: Int
+            b: Int
+
+            f: Int
+            g: Int
+        }
+    "#;
+    let query = r#"
+        query ($if: Boolean!) {
+            t { ...x }
+            s { ...x }
+            i { ...x }
+        }
+        fragment x on I {
+            __typename
+            a
+            b
+            ... on T { c d @include(if: $if) }
+        }
+    "#;
+
+    assert_normalized!(schema, query, @r###"
+    query($if: Boolean!) {
+      t {
+        a
+        b
+        c
+        d @include(if: $if)
+      }
+      s {
+        a
+        b
+      }
+      i {
+        a
+        b
+        ... on T {
+          c
+          d @include(if: $if)
+        }
+      }
+    }
+    "###);
+}
diff --git a/apollo-federation/src/query_graph/graph_path.rs b/apollo-federation/src/query_graph/graph_path.rs
index 99fe6aa60c..df480aba81 100644
--- a/apollo-federation/src/query_graph/graph_path.rs
+++ b/apollo-federation/src/query_graph/graph_path.rs
@@ -252,21 +252,20 @@ pub(crate) struct SubgraphEnteringEdgeInfo {
 /// Wrapper for an override ID, which indicates a relationship between a group of `OpGraphPath`s
 /// where one "overrides" the others in the group.
 ///
-/// Note that we shouldn't add `derive(Serialize, Deserialize)` to this without changing the types
-/// to be something like UUIDs.
-#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
-// NOTE(@TylerBloom): This feature gate can be removed once the condition in the comment above is
-// met.
-#[cfg_attr(feature = "snapshot_tracing", derive(serde::Serialize))]
+/// NOTE: This ID does not ensure that IDs are unique because its internal counter resets on
+/// startup. It currently implements `Serialize` for debugging purposes. It should not implement
+/// `Deserialize`, and, more specifically, it should not be used for caching until uniqueness is
+/// provided (i.e. the inner type is a `Uuid` or the like).
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, serde::Serialize)]
 pub(crate) struct OverrideId(usize);
 
-/// Global storage for the counter used to allocate `OverrideId`s.
-static NEXT_OVERRIDE_ID: atomic::AtomicUsize = atomic::AtomicUsize::new(1);
+// Global storage for the counter used to allocate unique `OverrideId`s.
+static NEXT_ID: atomic::AtomicUsize = atomic::AtomicUsize::new(1);
 
 impl OverrideId {
     fn new() -> Self {
         // atomically increment global counter
-        Self(NEXT_OVERRIDE_ID.fetch_add(1, atomic::Ordering::AcqRel))
+        Self(NEXT_ID.fetch_add(1, atomic::Ordering::AcqRel))
     }
 }
 
diff --git a/apollo-federation/src/query_plan/conditions.rs b/apollo-federation/src/query_plan/conditions.rs
index f202fd4058..9d61ab76ea 100644
--- a/apollo-federation/src/query_plan/conditions.rs
+++ b/apollo-federation/src/query_plan/conditions.rs
@@ -1,3 +1,4 @@
+use std::fmt::Display;
 use std::sync::Arc;
 
 use apollo_compiler::ast::Directive;
@@ -35,17 +36,50 @@ impl ConditionKind {
     }
 }
 
-/// This struct is meant for tracking whether a selection set in a `FetchDependencyGraphNode` needs
+impl Display for ConditionKind {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.as_str().fmt(f)
+    }
+}
+
+/// Represents a combined set of conditions.
+///
+/// This struct is meant for tracking whether a selection set in a [FetchDependencyGraphNode] needs
 /// to be queried, based on the `@skip`/`@include` applications on the selections within.
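// Aside: a self-contained illustration of the ID-allocation scheme used by
// `OverrideId::new()` above (names here are illustrative). A process-wide
// atomic counter yields IDs that are unique within a single run of the process
// but, as the NOTE above says, not across runs, which is why such IDs must not
// end up in cached or persisted data.
use std::sync::atomic::{AtomicUsize, Ordering};

static NEXT_DEMO_ID: AtomicUsize = AtomicUsize::new(1);

fn next_demo_id() -> usize {
    // `fetch_add` returns the previous value, so the first caller observes 1.
    NEXT_DEMO_ID.fetch_add(1, Ordering::AcqRel)
}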
-/// Accordingly, there is much logic around merging and short-circuiting; `OperationConditional` is +/// Accordingly, there is much logic around merging and short-circuiting; [OperationConditional] is /// the more appropriate struct when trying to record the original structure/intent of those /// `@skip`/`@include` applications. +/// +/// [FetchDependencyGraphNode]: crate::query_plan::fetch_dependency_graph::FetchDependencyGraphNode +/// [OperationConditional]: crate::link::graphql_definition::OperationConditional #[derive(Debug, Clone, PartialEq, Serialize)] pub(crate) enum Conditions { Variables(VariableConditions), Boolean(bool), } +impl Display for Conditions { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + // This uses GraphQL directive syntax. + // Add brackets to distinguish it from a real directive list. + write!(f, "[")?; + + match self { + Conditions::Boolean(constant) => write!(f, "{constant:?}")?, + Conditions::Variables(variables) => { + for (index, (name, kind)) in variables.iter().enumerate() { + if index > 0 { + write!(f, " ")?; + } + write!(f, "@{kind}(if: ${name})")?; + } + } + } + + write!(f, "]") + } +} + /// A list of variable conditions, represented as a map from variable names to whether that variable /// is negated in the condition. We maintain the invariant that there's at least one condition (i.e. /// the map is non-empty), and that there's at most one condition per variable name. @@ -96,23 +130,28 @@ impl VariableConditions { } } -#[derive(Debug, Clone, PartialEq)] -pub(crate) struct VariableCondition { - variable: Name, - kind: ConditionKind, -} - impl Conditions { - /// Create conditions from a map of variable conditions. If empty, instead returns a - /// condition that always evaluates to true. + /// Create conditions from a map of variable conditions. + /// + /// If empty, instead returns a condition that always evaluates to true. fn from_variables(map: IndexMap) -> Self { if map.is_empty() { - Self::Boolean(true) + Self::always() } else { Self::Variables(VariableConditions::new_unchecked(map)) } } + /// Create conditions that always evaluate to true. + pub(crate) const fn always() -> Self { + Self::Boolean(true) + } + + /// Create conditions that always evaluate to false. + pub(crate) const fn never() -> Self { + Self::Boolean(false) + } + /// Parse @skip and @include conditions from a directive list. /// /// # Errors @@ -127,7 +166,7 @@ impl Conditions { match value.as_ref() { // Constant @skip(if: true) can never match - Value::Boolean(true) => return Ok(Self::Boolean(false)), + Value::Boolean(true) => return Ok(Self::never()), // Constant @skip(if: false) always matches Value::Boolean(_) => {} Value::Variable(name) => { @@ -146,7 +185,7 @@ impl Conditions { match value.as_ref() { // Constant @include(if: false) can never match - Value::Boolean(false) => return Ok(Self::Boolean(false)), + Value::Boolean(false) => return Ok(Self::never()), // Constant @include(if: true) always matches Value::Boolean(true) => {} // If both @skip(if: $var) and @include(if: $var) exist, the condition can also @@ -155,7 +194,7 @@ impl Conditions { if variables.insert(name.clone(), ConditionKind::Include) == Some(ConditionKind::Skip) { - return Ok(Self::Boolean(false)); + return Ok(Self::never()); } } _ => { @@ -167,10 +206,34 @@ impl Conditions { Ok(Self::from_variables(variables)) } - // TODO(@goto-bus-stop): what exactly is the difference between this and `Self::merge`? 
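// Aside: a std-only sketch of what a combined condition list means at runtime
// (illustrative, not the crate's evaluation code). A selection conditioned on
// `[@skip(if: $a) @include(if: $b)]` is fetched only when `$a` is false and
// `$b` is true. This is also why `from_directives` above collapses
// `@skip(if: $v) @include(if: $v)` to a constant `false`: no assignment of `$v`
// can satisfy both.
use std::collections::HashMap;

enum Kind {
    Include,
    Skip,
}

fn conditions_match(conditions: &[(&str, Kind)], variables: &HashMap<&str, bool>) -> bool {
    conditions.iter().all(|(name, kind)| {
        // Missing variables are treated as `false` here purely to keep the
        // sketch small; real GraphQL validation requires them to be provided.
        let value = variables.get(name).copied().unwrap_or(false);
        match kind {
            Kind::Include => value,
            Kind::Skip => !value,
        }
    })
}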
- pub(crate) fn update_with(&self, new_conditions: &Self) -> Self { - match (new_conditions, self) { - (Conditions::Boolean(_), _) | (_, Conditions::Boolean(_)) => new_conditions.clone(), + /// Returns a new set of conditions that omits those conditions that are already handled by the + /// argument. + /// + /// For example, if we have a selection set like so: + /// ```graphql + /// { + /// a @skip(if: $a) { + /// b @skip(if: $a) @include(if: $b) { + /// c + /// } + /// } + /// } + /// ``` + /// Then we may call `b.conditions().update_with( a.conditions() )`, and get: + /// ```graphql + /// { + /// a @skip(if: $a) { + /// b @include(if: $b) { + /// c + /// } + /// } + /// } + /// ``` + /// because the `@skip(if: $a)` condition in `b` must always match, as implied by + /// being nested inside `a`. + pub(crate) fn update_with(&self, handled_conditions: &Self) -> Self { + match (self, handled_conditions) { + (Conditions::Boolean(_), _) | (_, Conditions::Boolean(_)) => self.clone(), (Conditions::Variables(new_conditions), Conditions::Variables(handled_conditions)) => { let mut filtered = IndexMap::default(); for (cond_name, &cond_kind) in new_conditions.0.iter() { @@ -179,7 +242,7 @@ impl Conditions { // If we've already handled that exact condition, we can skip it. // But if we've already handled the _negation_ of this condition, then this mean the overall conditions // are unreachable and we can just return `false` directly. - return Conditions::Boolean(false); + return Conditions::never(); } Some(_) => {} None => { @@ -198,7 +261,7 @@ impl Conditions { match (self, other) { // Absorbing element (Conditions::Boolean(false), _) | (_, Conditions::Boolean(false)) => { - Conditions::Boolean(false) + Conditions::never() } // Neutral element @@ -207,7 +270,7 @@ impl Conditions { (Conditions::Variables(self_vars), Conditions::Variables(other_vars)) => { match self_vars.merge(other_vars) { Some(vars) => Conditions::Variables(vars), - None => Conditions::Boolean(false), + None => Conditions::never(), } } } @@ -366,3 +429,168 @@ fn matches_condition_for_kind( None => false, } } + +#[cfg(test)] +mod tests { + use apollo_compiler::ExecutableDocument; + use apollo_compiler::Schema; + + use super::*; + + fn parse(directives: &str) -> Conditions { + let schema = + Schema::parse_and_validate("type Query { a: String }", "schema.graphql").unwrap(); + let doc = + ExecutableDocument::parse(&schema, format!("{{ a {directives} }}"), "query.graphql") + .unwrap(); + let operation = doc.operations.get(None).unwrap(); + let directives = operation.selection_set.selections[0].directives(); + Conditions::from_directives(&DirectiveList::from(directives.clone())).unwrap() + } + + #[test] + fn merge_conditions() { + assert_eq!( + parse("@skip(if: $a)") + .merge(parse("@include(if: $b)")) + .to_string(), + "[@skip(if: $a) @include(if: $b)]", + "combine skip/include" + ); + assert_eq!( + parse("@skip(if: $a)") + .merge(parse("@skip(if: $b)")) + .to_string(), + "[@skip(if: $a) @skip(if: $b)]", + "combine multiple skips" + ); + assert_eq!( + parse("@include(if: $a)") + .merge(parse("@include(if: $b)")) + .to_string(), + "[@include(if: $a) @include(if: $b)]", + "combine multiple includes" + ); + assert_eq!( + parse("@skip(if: $a)").merge(parse("@include(if: $a)")), + Conditions::never(), + "skip/include with same variable conflicts" + ); + assert_eq!( + parse("@skip(if: $a)").merge(Conditions::always()), + parse("@skip(if: $a)"), + "merge with `true` returns original" + ); + assert_eq!( + 
Conditions::always().merge(Conditions::always()), + Conditions::always(), + "merge with `true` returns original" + ); + assert_eq!( + parse("@skip(if: $a)").merge(Conditions::never()), + Conditions::never(), + "merge with `false` returns `false`" + ); + assert_eq!( + parse("@include(if: $a)").merge(Conditions::never()), + Conditions::never(), + "merge with `false` returns `false`" + ); + assert_eq!( + Conditions::always().merge(Conditions::never()), + Conditions::never(), + "merge with `false` returns `false`" + ); + assert_eq!( + parse("@skip(if: true)").merge(parse("@include(if: $a)")), + Conditions::never(), + "@skip with hardcoded if: true can never evaluate to true" + ); + assert_eq!( + parse("@skip(if: false)").merge(parse("@include(if: $a)")), + parse("@include(if: $a)"), + "@skip with hardcoded if: false returns other side" + ); + assert_eq!( + parse("@include(if: true)").merge(parse("@include(if: $a)")), + parse("@include(if: $a)"), + "@include with hardcoded if: true returns other side" + ); + assert_eq!( + parse("@include(if: false)").merge(parse("@include(if: $a)")), + Conditions::never(), + "@include with hardcoded if: false can never evaluate to true" + ); + } + + #[test] + fn update_conditions() { + assert_eq!( + parse("@skip(if: $a)") + .merge(parse("@include(if: $b)")) + .update_with(&parse("@include(if: $b)")), + parse("@skip(if: $a)"), + "trim @include(if:) condition" + ); + assert_eq!( + parse("@skip(if: $a)") + .merge(parse("@include(if: $b)")) + .update_with(&parse("@skip(if: $a)")), + parse("@include(if: $b)"), + "trim @skip(if:) condition" + ); + + let list = parse("@skip(if: $a)") + .merge(parse("@skip(if: $b)")) + .merge(parse("@skip(if: $c)")) + .merge(parse("@skip(if: $d)")) + .merge(parse("@skip(if: $e)")); + let handled = parse("@skip(if: $b)").merge(parse("@skip(if: $e)")); + assert_eq!( + list.update_with(&handled), + parse("@skip(if: $a)") + .merge(parse("@skip(if: $c)")) + .merge(parse("@skip(if: $d)")), + "trim multiple conditions" + ); + + let list = parse("@include(if: $a)") + .merge(parse("@include(if: $b)")) + .merge(parse("@include(if: $c)")) + .merge(parse("@include(if: $d)")) + .merge(parse("@include(if: $e)")); + let handled = parse("@include(if: $b)").merge(parse("@include(if: $e)")); + assert_eq!( + list.update_with(&handled), + parse("@include(if: $a)") + .merge(parse("@include(if: $c)")) + .merge(parse("@include(if: $d)")), + "trim multiple conditions" + ); + + let list = parse("@include(if: $a)") + .merge(parse("@include(if: $b)")) + .merge(parse("@include(if: $c)")) + .merge(parse("@include(if: $d)")) + .merge(parse("@include(if: $e)")); + // It may technically be correct to return `never()` here? + // But the result for query planning is the same either way, as these conditions will never + // be reached. 
+ assert_eq!( + list.update_with(&Conditions::never()), + list, + "update with constant does not affect conditions" + ); + + let list = parse("@include(if: $a)") + .merge(parse("@include(if: $b)")) + .merge(parse("@include(if: $c)")) + .merge(parse("@include(if: $d)")) + .merge(parse("@include(if: $e)")); + assert_eq!( + list.update_with(&Conditions::always()), + list, + "update with constant does not affect conditions" + ); + } +} diff --git a/apollo-federation/src/query_plan/display.rs b/apollo-federation/src/query_plan/display.rs index 48e9168684..4a3530d236 100644 --- a/apollo-federation/src/query_plan/display.rs +++ b/apollo-federation/src/query_plan/display.rs @@ -382,8 +382,8 @@ impl fmt::Display for FetchDataPathElement { } } -fn write_conditions(conditions: &[Name], f: &mut fmt::Formatter<'_>) -> fmt::Result { - if !conditions.is_empty() { +fn write_conditions(conditions: &Option>, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(conditions) = conditions { write!(f, "|[{}]", conditions.join(",")) } else { Ok(()) diff --git a/apollo-federation/src/query_plan/fetch_dependency_graph.rs b/apollo-federation/src/query_plan/fetch_dependency_graph.rs index c2d8caff50..7c92af6fde 100644 --- a/apollo-federation/src/query_plan/fetch_dependency_graph.rs +++ b/apollo-federation/src/query_plan/fetch_dependency_graph.rs @@ -32,6 +32,7 @@ use crate::bail; use crate::display_helpers::DisplayOption; use crate::error::FederationError; use crate::error::SingleFederationError; +use crate::internal_error; use crate::link::graphql_definition::DeferDirectiveArguments; use crate::operation::ArgumentList; use crate::operation::ContainmentOptions; @@ -214,9 +215,10 @@ impl FetchIdGenerator { pub(crate) struct FetchSelectionSet { /// The selection set to be fetched from the subgraph. pub(crate) selection_set: Arc, - /// The conditions determining whether the fetch should be executed (which must be recomputed - /// from the selection set when it changes). - pub(crate) conditions: Conditions, + /// The conditions determining whether the fetch should be executed, derived from the selection + /// set. + #[serde(skip)] + conditions: OnceLock, } // PORT_NOTE: The JS codebase additionally has a property `onUpdateCallback`. This was only ever @@ -507,15 +509,14 @@ impl FetchDependencyGraphNodePath { type_conditioned_fetching_enabled: bool, root_type: CompositeTypeDefinitionPosition, ) -> Result { - let root_possible_types = if type_conditioned_fetching_enabled { + let root_possible_types: IndexSet = if type_conditioned_fetching_enabled { schema.possible_runtime_types(root_type)? 
} else { Default::default() } .into_iter() - .map(|pos| Ok(pos.get(schema.schema())?.name.clone())) - .collect::, _>>() - .map_err(|e: PositionLookupError| FederationError::from(e))?; + .map(|pos| Ok::<_, PositionLookupError>(pos.get(schema.schema())?.name.clone())) + .process_results(|c| c.sorted().collect())?; Ok(Self { schema, @@ -575,12 +576,13 @@ impl FetchDependencyGraphNodePath { None => self.possible_types.clone(), Some(tcp) => { let element_possible_types = self.schema.possible_runtime_types(tcp.clone())?; - element_possible_types + self.possible_types .iter() .filter(|&possible_type| { - self.possible_types.contains(&possible_type.type_name) + element_possible_types + .contains(&ObjectTypeDefinitionPosition::new(possible_type.clone())) }) - .map(|possible_type| possible_type.type_name.clone()) + .cloned() .collect() } }, @@ -590,15 +592,11 @@ impl FetchDependencyGraphNodePath { } fn advance_field_type(&self, element: &Field) -> Result, FederationError> { - if !element - .output_base_type() - .map(|base_type| base_type.is_composite_type()) - .unwrap_or_default() - { + if !element.output_base_type()?.is_composite_type() { return Ok(Default::default()); } - let mut res = self + let mut res: IndexSet = self .possible_types .clone() .into_iter() @@ -614,17 +612,13 @@ impl FetchDependencyGraphNodePath { .schema .possible_runtime_types(typ)? .into_iter() - .map(|ctdp| ctdp.type_name) - .collect::>()) + .map(|ctdp| ctdp.type_name)) }) - .collect::>, FederationError>>()? - .into_iter() - .flatten() - .collect::>(); + .process_results::<_, _, FederationError, _>(|c| c.flatten().collect())?; res.sort(); - Ok(res.into_iter().collect()) + Ok(res) } fn updated_response_path( @@ -647,17 +641,22 @@ impl FetchDependencyGraphNodePath { match new_path.pop() { Some(FetchDataPathElement::AnyIndex(_)) => { - new_path.push(FetchDataPathElement::AnyIndex( + new_path.push(FetchDataPathElement::AnyIndex(Some( conditions.iter().cloned().collect(), - )); + ))); } Some(FetchDataPathElement::Key(name, _)) => { new_path.push(FetchDataPathElement::Key( name, - conditions.iter().cloned().collect(), + Some(conditions.iter().cloned().collect()), )); } Some(other) => new_path.push(other), + // TODO: We should be emitting type conditions here on no element like the + // JS code, which requires a new FetchDataPathElement variant in Rust. + // This really has to do with a hack we did to avoid changing fetch + // data paths too much, in which type conditions ought to be their own + // variant entirely. None => {} } } @@ -1708,7 +1707,7 @@ impl FetchDependencyGraph { children.push(child_index); } else { let Some(child_defer_ref) = &child.defer_ref else { - panic!( + bail!( "{} has defer_ref `{}`, so its child {} cannot have a top-level defer_ref.", node.display(node_index), DisplayOption(node.defer_ref.as_ref()), @@ -1770,7 +1769,10 @@ impl FetchDependencyGraph { .graph .node_weight_mut(node_index) .ok_or_else(|| FederationError::internal("Node unexpectedly missing"))?; - let conditions = handled_conditions.update_with(&node.selection_set.conditions); + let conditions = node + .selection_set + .conditions()? + .update_with(&handled_conditions); let new_handled_conditions = conditions.clone().merge(handled_conditions); let processed = processor.on_node( @@ -2660,15 +2662,13 @@ impl FetchDependencyGraphNode { &operation_name, )? 
         };
-        let operation =
-            operation_compression.compress(&self.subgraph_name, subgraph_schema, operation)?;
+        let operation = operation_compression.compress(operation)?;
         let operation_document = operation.try_into().map_err(|err| match err {
-            FederationError::SingleFederationError {
-                inner: SingleFederationError::InvalidGraphQL { diagnostics },
-                ..
-            } => FederationError::internal(format!(
+            FederationError::SingleFederationError(SingleFederationError::InvalidGraphQL {
+                diagnostics,
+            }) => internal_error!(
                 "Query planning produced an invalid subgraph operation.\n{diagnostics}"
-            )),
+            ),
             _ => err,
         })?;
 
@@ -3169,9 +3169,8 @@ impl FetchSelectionSet {
         type_position: CompositeTypeDefinitionPosition,
     ) -> Result<Self, FederationError> {
         let selection_set = Arc::new(SelectionSet::empty(schema, type_position));
-        let conditions = selection_set.conditions()?;
         Ok(Self {
-            conditions,
+            conditions: OnceLock::new(),
             selection_set,
         })
     }
@@ -3182,19 +3181,35 @@
         selection_set: Option<&Arc<SelectionSet>>,
     ) -> Result<(), FederationError> {
         Arc::make_mut(&mut self.selection_set).add_at_path(path_in_node, selection_set)?;
-        // TODO: when calling this multiple times, maybe only re-compute conditions at the end?
-        // Or make it lazily-initialized and computed on demand?
-        self.conditions = self.selection_set.conditions()?;
+        self.conditions.take();
         Ok(())
     }
 
     fn add_selections(&mut self, selection_set: &Arc<SelectionSet>) -> Result<(), FederationError> {
         Arc::make_mut(&mut self.selection_set).add_selection_set(selection_set)?;
-        // TODO: when calling this multiple times, maybe only re-compute conditions at the end?
-        // Or make it lazily-initialized and computed on demand?
-        self.conditions = self.selection_set.conditions()?;
+        self.conditions.take();
        Ok(())
     }
+
+    /// The conditions determining whether the fetch should be executed.
+    fn conditions(&self) -> Result<&Conditions, FederationError> {
+        // This is a bit inefficient, because `get_or_try_init` is unstable.
+        // https://github.com/rust-lang/rust/issues/109737
+        //
+        // Essentially we do `.get()` twice. This is still much better than eagerly recomputing the
+        // selection set all the time, though :)
+        if let Some(conditions) = self.conditions.get() {
+            return Ok(conditions);
+        }
+
+        // Separating this call and the `.get_or_init` call means we could, if called from multiple
+        // threads, do the same work twice.
+        // The query planner does not use multiple threads for a single plan at the moment, and
+        // even if it did, occasionally computing this twice would still be better than eagerly
+        // recomputing it after every change.
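The lazy `conditions` cache introduced above follows a standard `OnceLock` memoization shape: clear the cell on every write, recompute on the next read. Here is a self-contained sketch of the same pattern under that reading; the `Memoized` type and its fields are hypothetical, not from this codebase:

```rust
use std::sync::OnceLock;

struct Memoized {
    items: Vec<i32>,
    // Cached derived value; cleared whenever `items` changes.
    sum: OnceLock<i64>,
}

impl Memoized {
    fn push(&mut self, value: i32) {
        self.items.push(value);
        // Invalidate the cache; the next read recomputes it.
        self.sum.take();
    }

    fn sum(&self) -> i64 {
        // Fast path: a cached value already exists.
        if let Some(sum) = self.sum.get() {
            return *sum;
        }
        // `OnceLock::get_or_try_init` is unstable, so compute first and
        // store afterwards; a racing reader may duplicate the work, but
        // both arrive at the same value, so `get_or_init` stays correct.
        let computed: i64 = self.items.iter().map(|&v| i64::from(v)).sum();
        *self.sum.get_or_init(|| computed)
    }
}
```

The trade-off is deliberate: `take()` makes repeated mutations cheap, and an occasional duplicated recomputation costs far less than eagerly recomputing after every change.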
+        let conditions = self.selection_set.conditions()?;
+        Ok(self.conditions.get_or_init(|| conditions))
+    }
 }
 
 impl FetchInputs {
@@ -3374,7 +3389,7 @@ impl DeferTracking {
         if let Some(parent_ref) = &defer_context.current_defer_ref {
             let Some(parent_info) = self.deferred.get_mut(parent_ref) else {
-                panic!("Cannot find info for parent {parent_ref} or {label}");
+                bail!("Cannot find info for parent {parent_ref} or {label}")
             };
             parent_info.deferred.insert(label.clone());
 
@@ -5267,11 +5282,10 @@ mod tests {
         )
     }
 
-    fn cond_to_string(conditions: &[Name]) -> String {
-        if conditions.is_empty() {
-            return Default::default();
+    fn cond_to_string(conditions: &Option<Vec<Name>>) -> String {
+        if let Some(conditions) = conditions {
+            return format!("|[{}]", conditions.iter().map(|n| n.to_string()).join(","));
         }
-
-        format!("|[{}]", conditions.iter().map(|n| n.to_string()).join(","))
+        Default::default()
     }
 }
diff --git a/apollo-federation/src/query_plan/mod.rs b/apollo-federation/src/query_plan/mod.rs
index 313b9f6322..60e0b396a6 100644
--- a/apollo-federation/src/query_plan/mod.rs
+++ b/apollo-federation/src/query_plan/mod.rs
@@ -18,6 +18,16 @@ pub(crate) mod query_planning_traversal;
 
 pub type QueryPlanCost = f64;
 
+// NOTE: This type implements `Serialize` for debugging purposes; however, it should not implement
+// `Deserialize` until two requirements are met.
+// 1) `SelectionId`s and `OverrideId`s are only unique per lifetime of the application. To avoid
+//    problems when caching, this needs to be changed.
+// 2) There are several types transitively used in the query plan that are from `apollo-compiler`.
+//    They are serialized as strings and use the `serialize` methods provided by that crate. In
+//    order to implement `Deserialize`, care needs to be taken to deserialize these correctly.
+//    Moreover, how we serialize these types should also be revisited to make sure we can and want
+//    to support how they are serialized long term (e.g. how `DirectiveList` is serialized can be
+//    optimized).
 #[derive(Debug, Default, PartialEq, Serialize)]
 pub struct QueryPlan {
     pub node: Option<TopLevelPlanNode>,
@@ -68,15 +78,17 @@ pub struct FetchNode {
     /// `FragmentSpread`.
     // PORT_NOTE: This was its own type in the JS codebase, but it's likely simpler to just have the
     // constraint be implicit for router instead of creating a new type.
-    #[serde(serialize_with = "crate::display_helpers::serialize_optional_vec_as_string")]
+    #[serde(
+        serialize_with = "crate::utils::serde_bridge::serialize_optional_vec_of_exe_selection"
+    )]
     pub requires: Option<Vec<executable::Selection>>,
     // PORT_NOTE: We don't serialize the "operation" string in this struct, as these query plan
     // nodes are meant for direct consumption by router (without any serdes), so we leave the
     // question of whether it needs to be serialized to router.
-    #[serde(serialize_with = "crate::display_helpers::serialize_as_string")]
+    #[serde(serialize_with = "crate::utils::serde_bridge::serialize_valid_executable_document")]
     pub operation_document: Valid<ExecutableDocument>,
     pub operation_name: Option<Name>,
-    #[serde(serialize_with = "crate::display_helpers::serialize_as_string")]
+    #[serde(serialize_with = "crate::utils::serde_bridge::serialize_exe_operation_type")]
     pub operation_kind: executable::OperationType,
     /// Optionally describe a number of "rewrites" that query plan executors should apply to the
     /// data that is sent as the input of this fetch.
     /// Note that such rewrites should only impact the
@@ -166,7 +178,7 @@ pub struct DeferredDeferBlock {
     pub query_path: Vec<QueryPathElement>,
     /// The part of the original query that "selects" the data to send in the deferred response
     /// (once the plan in `node` completes). Will be set _unless_ `node` is a `DeferNode` itself.
-    #[serde(serialize_with = "crate::display_helpers::serialize_as_debug_string")]
+    #[serde(serialize_with = "crate::utils::serde_bridge::serialize_optional_exe_selection_set")]
     pub sub_selection: Option<executable::SelectionSet>,
     /// The plan to get all the data for this deferred block. Usually set, but can be `None` for a
     /// `@defer` application where everything has been fetched in the "primary block" (i.e. when
@@ -237,8 +249,8 @@ pub struct FetchDataKeyRenamer {
 /// elements.
 #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)]
 pub enum FetchDataPathElement {
-    Key(Name, Conditions),
-    AnyIndex(Conditions),
+    Key(Name, Option<Conditions>),
+    AnyIndex(Option<Conditions>),
     TypenameEquals(Name),
     Parent,
 }
@@ -249,17 +261,8 @@
 pub type Conditions = Vec<Name>;
 
 /// an inline fragment in a query.
 #[derive(Debug, Clone, PartialEq, serde::Serialize)]
 pub enum QueryPathElement {
-    #[serde(serialize_with = "crate::display_helpers::serialize_as_string")]
+    #[serde(serialize_with = "crate::utils::serde_bridge::serialize_exe_field")]
     Field(executable::Field),
-    #[serde(serialize_with = "crate::display_helpers::serialize_as_string")]
+    #[serde(serialize_with = "crate::utils::serde_bridge::serialize_exe_inline_fragment")]
     InlineFragment(executable::InlineFragment),
 }
-
-impl QueryPlan {
-    fn new(node: impl Into<TopLevelPlanNode>, statistics: QueryPlanningStatistics) -> Self {
-        Self {
-            node: Some(node.into()),
-            statistics,
-        }
-    }
-}
diff --git a/apollo-federation/src/query_plan/query_planner.rs b/apollo-federation/src/query_plan/query_planner.rs
index ed2a4b2589..6398c7e738 100644
--- a/apollo-federation/src/query_plan/query_planner.rs
+++ b/apollo-federation/src/query_plan/query_planner.rs
@@ -14,6 +14,7 @@ use tracing::trace;
 
 use super::fetch_dependency_graph::FetchIdGenerator;
 use super::ConditionNode;
+use crate::bail;
 use crate::error::FederationError;
 use crate::error::SingleFederationError;
 use crate::operation::normalize_operation;
@@ -31,10 +32,10 @@ use crate::query_plan::fetch_dependency_graph::FetchDependencyGraphNodePath;
 use crate::query_plan::fetch_dependency_graph_processor::FetchDependencyGraphProcessor;
 use crate::query_plan::fetch_dependency_graph_processor::FetchDependencyGraphToCostProcessor;
 use crate::query_plan::fetch_dependency_graph_processor::FetchDependencyGraphToQueryPlanProcessor;
+use crate::query_plan::query_planning_traversal::convert_type_from_subgraph;
 use crate::query_plan::query_planning_traversal::BestQueryPlanInfo;
 use crate::query_plan::query_planning_traversal::QueryPlanningParameters;
 use crate::query_plan::query_planning_traversal::QueryPlanningTraversal;
-use crate::query_plan::FetchNode;
 use crate::query_plan::PlanNode;
 use crate::query_plan::QueryPlan;
 use crate::query_plan::SequenceNode;
@@ -53,16 +54,6 @@ use crate::Supergraph;
 
 #[derive(Debug, Clone, Hash, Serialize)]
 pub struct QueryPlannerConfig {
-    /// Whether the query planner should try to reuse the named fragments of the planned query in
-    /// subgraph fetches.
-    ///
-    /// Reusing fragments requires complicated validations, so it can take a long time on large
-    /// queries with many fragments. This option may be removed in the future in favour of
-    /// [`generate_query_fragments`][QueryPlannerConfig::generate_query_fragments].
-    ///
-    /// Defaults to false.
-    pub reuse_query_fragments: bool,
-
     /// If enabled, the query planner will extract inline fragments into fragment
     /// definitions before sending queries to subgraphs. This can significantly
     /// reduce the size of the query sent to subgraphs.
@@ -104,7 +95,6 @@ pub struct QueryPlannerConfig {
 impl Default for QueryPlannerConfig {
     fn default() -> Self {
         Self {
-            reuse_query_fragments: false,
             generate_query_fragments: false,
             subgraph_graphql_validation: false,
             incremental_delivery: Default::default(),
@@ -131,11 +121,6 @@ pub struct QueryPlanIncrementalDeliveryConfig {
 
 #[derive(Debug, Clone, Hash, Serialize)]
 pub struct QueryPlannerDebugConfig {
-    /// If used and the supergraph is built from a single subgraph, then user queries do not go
-    /// through the normal query planning and instead a fetch to the one subgraph is built directly
-    /// from the input query.
-    pub bypass_planner_for_single_subgraph: bool,
-
     /// Query planning is an exploratory process. Depending on the specificities and features used by
     /// subgraphs, there could exist many different theoretically valid (if not always efficient) plans
     /// for a given query, and at a high level, the query planner generates those possible choices,
@@ -173,7 +158,6 @@ pub struct QueryPlannerDebugConfig {
 impl Default for QueryPlannerDebugConfig {
     fn default() -> Self {
         Self {
-            bypass_planner_for_single_subgraph: false,
             max_evaluated_plans: NonZeroU32::new(10_000).unwrap(),
             paths_limit: None,
         }
     }
 }
@@ -186,15 +170,6 @@ pub struct QueryPlanningStatistics {
     pub evaluated_plan_count: Cell<usize>,
 }
 
-impl QueryPlannerConfig {
-    /// Panics if options are used together in unsupported ways.
-    fn assert_valid(&self) {
-        if self.incremental_delivery.enable_defer {
-            assert!(!self.debug.bypass_planner_for_single_subgraph, "Cannot use the `debug.bypass_planner_for_single_subgraph` query planner option when @defer support is enabled");
-        }
-    }
-}
-
 #[derive(Debug, Default, Clone)]
 pub struct QueryPlanOptions {
     /// A set of labels which will be used _during query planning_ to
@@ -241,8 +216,6 @@ impl QueryPlanner {
         supergraph: &Supergraph,
         config: QueryPlannerConfig,
     ) -> Result<Self, FederationError> {
-        config.assert_valid();
-
         let supergraph_schema = supergraph.schema.clone();
         let api_schema = supergraph.to_api_schema(ApiSchemaOptions {
             include_defer: config.incremental_delivery.enable_defer,
@@ -353,46 +326,23 @@ impl QueryPlanner {
     ) -> Result<QueryPlan, FederationError> {
         let operation = document
             .operations
-            .get(operation_name.as_ref().map(|name| name.as_str()))?;
-
+            .get(operation_name.as_ref().map(|name| name.as_str()))
+            .map_err(|_| {
+                if operation_name.is_some() {
+                    SingleFederationError::UnknownOperation
+                } else {
+                    SingleFederationError::OperationNameNotProvided
+                }
+            })?;
         if operation.selection_set.is_empty() {
             // This should never happen because `operation` comes from a known-valid document.
-            // We could panic here but we are returning a `Result` already anyways, so shrug!
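For context on the `panic!`-to-`bail!` swaps throughout this diff: `bail!`, `ensure!`, and `internal_error!` are apollo-federation's internal error macros, which turn violated invariants into an internal error `Result` instead of aborting the process. A rough, runnable sketch of that early-return shape, with an illustrative `InternalError` standing in for the crate's real `FederationError`:

```rust
// Illustrative stand-ins: the real macros live in apollo-federation's
// error module and produce a `FederationError`; this sketch only shows
// the early-return shape that replaces the old panics.
#[derive(Debug)]
pub struct InternalError(pub String);

macro_rules! bail {
    ($($arg:tt)*) => {
        return Err(InternalError(format!($($arg)*)))
    };
}

macro_rules! ensure {
    ($cond:expr, $($arg:tt)*) => {
        if !$cond {
            bail!($($arg)*);
        }
    };
}

fn root_node_for(kind: &str, has_root: bool) -> Result<(), InternalError> {
    // With `ensure!`, a violated invariant becomes an `Err` for the
    // caller to surface, instead of crashing the whole process.
    ensure!(
        has_root,
        "Shouldn't have a {0} operation if the subgraphs don't have a {0} root",
        kind
    );
    Ok(())
}
```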
- return Err(FederationError::internal( - "Invalid operation: empty selection set", - )); + crate::bail!("Invalid operation: empty selection set") } let is_subscription = operation.is_subscription(); let statistics = QueryPlanningStatistics::default(); - if self.config.debug.bypass_planner_for_single_subgraph { - let mut subgraphs = self.federated_query_graph.subgraphs(); - if let (Some((subgraph_name, _subgraph_schema)), None) = - (subgraphs.next(), subgraphs.next()) - { - let node = FetchNode { - subgraph_name: subgraph_name.clone(), - operation_document: document.clone(), - operation_name: operation.name.clone(), - operation_kind: operation.operation_type, - id: None, - variable_usages: operation - .variables - .iter() - .map(|var| var.name.clone()) - .collect(), - requires: Default::default(), - input_rewrites: Default::default(), - output_rewrites: Default::default(), - context_rewrites: Default::default(), - }; - - return Ok(QueryPlan::new(node, statistics)); - } - } - let normalized_operation = normalize_operation( operation, NamedFragments::new(&document.fragments, &self.api_schema), @@ -443,24 +393,14 @@ impl QueryPlanner { .root_kinds_to_nodes()? .get(&normalized_operation.root_kind) else { - panic!( + bail!( "Shouldn't have a {0} operation if the subgraphs don't have a {0} root", normalized_operation.root_kind - ); + ) }; let operation_compression = if self.config.generate_query_fragments { SubgraphOperationCompression::GenerateFragments - } else if self.config.reuse_query_fragments { - // For all subgraph fetches we query `__typename` on every abstract types (see - // `FetchDependencyGraphNode::to_plan_node`) so if we want to have a chance to reuse - // fragments, we should make sure those fragments also query `__typename` for every - // abstract type. - SubgraphOperationCompression::ReuseFragments(RebasedFragments::new( - normalized_operation - .named_fragments - .add_typename_field_for_abstract_types_in_named_fragments()?, - )) } else { SubgraphOperationCompression::Disabled }; @@ -624,6 +564,7 @@ fn compute_root_serial_dependency_graph( ); compute_root_fetch_groups( operation.root_kind, + federated_query_graph, &mut fetch_dependency_graph, &prev_path, parameters.config.type_conditioned_fetching, @@ -661,6 +602,7 @@ fn only_root_subgraph(graph: &FetchDependencyGraph) -> Result, Federati )] pub(crate) fn compute_root_fetch_groups( root_kind: SchemaRootDefinitionKind, + federated_query_graph: &QueryGraph, dependency_graph: &mut FetchDependencyGraph, path: &OpPathTree, type_conditioned_fetching_enabled: bool, @@ -696,6 +638,12 @@ pub(crate) fn compute_root_fetch_groups( dependency_graph.to_dot(), "tree_with_root_node" ); + let subgraph_schema = federated_query_graph.schema_by_source(subgraph_name)?; + let supergraph_root_type = convert_type_from_subgraph( + root_type, + subgraph_schema, + &dependency_graph.supergraph_schema, + )?; compute_nodes_for_tree( dependency_graph, &child.tree, @@ -703,7 +651,7 @@ pub(crate) fn compute_root_fetch_groups( FetchDependencyGraphNodePath::new( dependency_graph.supergraph_schema.clone(), type_conditioned_fetching_enabled, - root_type, + supergraph_root_type, )?, Default::default(), &Default::default(), @@ -844,57 +792,15 @@ fn generate_condition_nodes<'a>( } } -/// Tracks fragments from the original operation, along with versions rebased on other subgraphs. 
-pub(crate) struct RebasedFragments {
-    original_fragments: NamedFragments,
-    /// Map key: subgraph name
-    rebased_fragments: IndexMap<Arc<str>, NamedFragments>,
-}
-
-impl RebasedFragments {
-    fn new(fragments: NamedFragments) -> Self {
-        Self {
-            original_fragments: fragments,
-            rebased_fragments: Default::default(),
-        }
-    }
-
-    fn for_subgraph(
-        &mut self,
-        subgraph_name: impl Into<Arc<str>>,
-        subgraph_schema: &ValidFederationSchema,
-    ) -> &NamedFragments {
-        self.rebased_fragments
-            .entry(subgraph_name.into())
-            .or_insert_with(|| {
-                self.original_fragments
-                    .rebase_on(subgraph_schema)
-                    .unwrap_or_default()
-            })
-    }
-}
-
 pub(crate) enum SubgraphOperationCompression {
-    ReuseFragments(RebasedFragments),
     GenerateFragments,
     Disabled,
 }
 
 impl SubgraphOperationCompression {
     /// Compress a subgraph operation.
-    pub(crate) fn compress(
-        &mut self,
-        subgraph_name: &Arc<str>,
-        subgraph_schema: &ValidFederationSchema,
-        operation: Operation,
-    ) -> Result<Operation, FederationError> {
+    pub(crate) fn compress(&mut self, operation: Operation) -> Result<Operation, FederationError> {
         match self {
-            Self::ReuseFragments(fragments) => {
-                let rebased = fragments.for_subgraph(Arc::clone(subgraph_name), subgraph_schema);
-                let mut operation = operation;
-                operation.reuse_fragments(rebased)?;
-                Ok(operation)
-            }
             Self::GenerateFragments => {
                 let mut operation = operation;
                 operation.generate_fragments()?;
@@ -1263,68 +1169,7 @@ type User
     }
 
     #[test]
-    fn bypass_planner_for_single_subgraph() {
-        let a = Subgraph::parse_and_expand(
-            "A",
-            "https://A",
-            r#"
-        type Query {
-            a: A
-        }
-        type A {
-            b: B
-        }
-        type B {
-            x: Int
-            y: String
-        }
-        "#,
-        )
-        .unwrap();
-        let subgraphs = vec![&a];
-        let supergraph = Supergraph::compose(subgraphs).unwrap();
-        let api_schema = supergraph.to_api_schema(Default::default()).unwrap();
-
-        let document = ExecutableDocument::parse_and_validate(
-            api_schema.schema(),
-            r#"
-        {
-            a {
-                b {
-                    x
-                    y
-                }
-            }
-        }
-        "#,
-            "",
-        )
-        .unwrap();
-
-        let mut config = QueryPlannerConfig::default();
-        config.debug.bypass_planner_for_single_subgraph = true;
-        let planner = QueryPlanner::new(&supergraph, config).unwrap();
-        let plan = planner
-            .build_query_plan(&document, None, Default::default())
-            .unwrap();
-        insta::assert_snapshot!(plan, @r###"
-        QueryPlan {
-          Fetch(service: "A") {
-            {
-              a {
-                b {
-                  x
-                  y
-                }
-              }
-            }
-          },
-        }
-        "###);
-    }
-
-    #[test]
-    fn test_optimize_basic() {
+    fn test_optimize_no_fragments_generated() {
         let supergraph = Supergraph::new(TEST_SUPERGRAPH).unwrap();
         let api_schema = supergraph.to_api_schema(Default::default()).unwrap();
         let document = ExecutableDocument::parse_and_validate(
@@ -1350,7 +1195,7 @@ type User
             .unwrap();
 
         let config = QueryPlannerConfig {
-            reuse_query_fragments: true,
+            generate_query_fragments: true,
             ..Default::default()
         };
         let planner = QueryPlanner::new(&supergraph, config).unwrap();
@@ -1362,150 +1207,15 @@ type User
                 Fetch(service: "accounts") {
                     {
                         userById(id: 1) {
-                            ...userFields
                             id
-                        }
-                        another_user: userById(id: 2) {
-                            ...userFields
-                        }
-                    }
-
-                    fragment userFields on User {
-                        name
-                        email
-                    }
-                },
-            }
-            "###);
-    }
-
-    #[test]
-    fn test_optimize_inline_fragment() {
-        let supergraph = Supergraph::new(TEST_SUPERGRAPH).unwrap();
-        let api_schema = supergraph.to_api_schema(Default::default()).unwrap();
-        let document = ExecutableDocument::parse_and_validate(
-            api_schema.schema(),
-            r#"
-            {
-                userById(id: 1) {
-                    id
-                    ...userFields
-                },
-                partial_optimize: userById(id: 2) {
-                    ... on User {
-                        id
-                        name
-                        email
-                    }
-                },
-                full_optimize: userById(id: 3) {
-                    ... on User {
-                        name
-                        email
-                    }
-                }
-            }
-            fragment userFields on User {
                 name
                 email
-            }
-            "#,
-            "operation.graphql",
-        )
-        .unwrap();
-
-        let config = QueryPlannerConfig {
-            reuse_query_fragments: true,
-            ..Default::default()
-        };
-        let planner = QueryPlanner::new(&supergraph, config).unwrap();
-        let plan = planner
-            .build_query_plan(&document, None, Default::default())
-            .unwrap();
-        insta::assert_snapshot!(plan, @r###"
-        QueryPlan {
-          Fetch(service: "accounts") {
-            {
-              userById(id: 1) {
-                ...userFields
-                id
-              }
-              partial_optimize: userById(id: 2) {
-                ...userFields
-                id
-              }
-              full_optimize: userById(id: 3) {
-                ...userFields
              }
-            }
-
-            fragment userFields on User {
-              name
-              email
-            }
-          },
-        }
-        "###);
-    }
-
-    #[test]
-    fn test_optimize_fragment_definition() {
-        let supergraph = Supergraph::new(TEST_SUPERGRAPH).unwrap();
-        let api_schema = supergraph.to_api_schema(Default::default()).unwrap();
-        let document = ExecutableDocument::parse_and_validate(
-            api_schema.schema(),
-            r#"
-            {
-                userById(id: 1) {
-                    ...F1
-                    ...F2
-                },
-                case2: userById(id: 2) {
-                    id
-                    name
-                    email
-                },
-            }
-            fragment F1 on User {
-                name
-                email
-            }
-            fragment F2 on User {
-                id
                 name
                 email
-            }
-            "#,
-            "operation.graphql",
-        )
-        .unwrap();
-
-        let config = QueryPlannerConfig {
-            reuse_query_fragments: true,
-            ..Default::default()
-        };
-        let planner = QueryPlanner::new(&supergraph, config).unwrap();
-        let plan = planner
-            .build_query_plan(&document, None, Default::default())
-            .unwrap();
-        // Make sure `fragment F2` contains `...F1`.
-        insta::assert_snapshot!(plan, @r###"
-        QueryPlan {
-          Fetch(service: "accounts") {
-            {
-              userById(id: 1) {
-                ...F2
-              }
-              case2: userById(id: 2) {
-                ...F2
             }
         }
-
-            fragment F2 on User {
-                name
-                email
-                id
-            }
         },
     }
     "###);
diff --git a/apollo-federation/src/query_plan/query_planning_traversal.rs b/apollo-federation/src/query_plan/query_planning_traversal.rs
index 21585e1b73..1739fc21cc 100644
--- a/apollo-federation/src/query_plan/query_planning_traversal.rs
+++ b/apollo-federation/src/query_plan/query_planning_traversal.rs
@@ -7,6 +7,7 @@ use serde::Serialize;
 use tracing::trace;
 
 use super::fetch_dependency_graph::FetchIdGenerator;
+use crate::ensure;
 use crate::error::FederationError;
 use crate::operation::Operation;
 use crate::operation::Selection;
@@ -187,6 +188,29 @@ impl BestQueryPlanInfo {
     }
 }
 
+pub(crate) fn convert_type_from_subgraph(
+    ty: CompositeTypeDefinitionPosition,
+    subgraph_schema: &ValidFederationSchema,
+    supergraph_schema: &ValidFederationSchema,
+) -> Result<CompositeTypeDefinitionPosition, FederationError> {
+    if subgraph_schema.is_interface_object_type(ty.clone().into())? {
+        let type_in_supergraph_pos: CompositeTypeDefinitionPosition = supergraph_schema
+            .get_type(ty.type_name().clone())?
+ .try_into()?; + ensure!( + matches!( + type_in_supergraph_pos, + CompositeTypeDefinitionPosition::Interface(_) + ), + "Type {} should be an interface in the supergraph", + ty.type_name() + ); + Ok(type_in_supergraph_pos) + } else { + Ok(ty) + } +} + impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { #[cfg_attr( feature = "snapshot_tracing", @@ -1004,6 +1028,7 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { if is_root_path_tree { compute_root_fetch_groups( self.root_kind, + &self.parameters.federated_query_graph, dependency_graph, path_tree, type_conditioned_fetching_enabled, @@ -1024,6 +1049,15 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { self.root_kind, root_type.clone(), )?; + let subgraph_schema = self + .parameters + .federated_query_graph + .schema_by_source(&query_graph_node.source)?; + let supergraph_root_type = convert_type_from_subgraph( + root_type, + subgraph_schema, + &dependency_graph.supergraph_schema, + )?; compute_nodes_for_tree( dependency_graph, path_tree, @@ -1031,7 +1065,7 @@ impl<'a: 'b, 'b> QueryPlanningTraversal<'a, 'b> { FetchDependencyGraphNodePath::new( dependency_graph.supergraph_schema.clone(), self.parameters.config.type_conditioned_fetching, - root_type, + supergraph_root_type, )?, Default::default(), &Default::default(), diff --git a/apollo-federation/src/sources/connect/json_selection/apply_to.rs b/apollo-federation/src/sources/connect/json_selection/apply_to.rs index 08cb5d0fcb..5c76c91414 100644 --- a/apollo-federation/src/sources/connect/json_selection/apply_to.rs +++ b/apollo-federation/src/sources/connect/json_selection/apply_to.rs @@ -92,6 +92,8 @@ impl ApplyToError { })) } + // This macro is useful for tests, but it absolutely should never be used with + // dynamic input at runtime, since it panics for any input that's not JSON. #[cfg(test)] fn from_json(json: &JSON) -> Self { if let JSON::Object(error) = json { diff --git a/apollo-federation/src/subgraph/spec.rs b/apollo-federation/src/subgraph/spec.rs index aa37af70e0..24a96ba2b4 100644 --- a/apollo-federation/src/subgraph/spec.rs +++ b/apollo-federation/src/subgraph/spec.rs @@ -85,6 +85,8 @@ pub const FEDERATION_V2_DIRECTIVE_NAMES: [Name; 13] = [ TAG_DIRECTIVE_NAME, ]; +pub(crate) const FEDERATION_V2_ELEMENT_NAMES: [Name; 1] = [FIELDSET_SCALAR_NAME]; + // This type and the subsequent IndexMap exist purely so we can use match with Names; see comment // in FederationSpecDefinitions.directive_definition() for more information. 
 enum FederationDirectiveName {
diff --git a/apollo-federation/src/supergraph/mod.rs b/apollo-federation/src/supergraph/mod.rs
index a5547c27e6..d6d05a5d68 100644
--- a/apollo-federation/src/supergraph/mod.rs
+++ b/apollo-federation/src/supergraph/mod.rs
@@ -38,7 +38,6 @@ use itertools::Itertools;
 use lazy_static::lazy_static;
 use time::OffsetDateTime;
 
-use self::schema::get_apollo_directive_names;
 pub(crate) use self::schema::new_empty_fed_2_subgraph_schema;
 use self::subgraph::FederationSubgraph;
 use self::subgraph::FederationSubgraphs;
@@ -265,8 +264,6 @@ fn extract_subgraphs_from_fed_2_supergraph(
     context_spec_definition: Option<&'static ContextSpecDefinition>,
     filtered_types: &Vec<TypeDefinitionPosition>,
 ) -> Result<(), FederationError> {
-    let original_directive_names = get_apollo_directive_names(supergraph_schema)?;
-
     let TypeInfos {
         object_types,
         interface_types,
@@ -281,7 +278,6 @@ fn extract_subgraphs_from_fed_2_supergraph(
         join_spec_definition,
         context_spec_definition,
         filtered_types,
-        &original_directive_names,
     )?;
 
     extract_object_type_content(
@@ -291,7 +287,6 @@ fn extract_subgraphs_from_fed_2_supergraph(
         federation_spec_definitions,
         join_spec_definition,
         &object_types,
-        &original_directive_names,
     )?;
     extract_interface_type_content(
         supergraph_schema,
@@ -300,7 +295,6 @@ fn extract_subgraphs_from_fed_2_supergraph(
         federation_spec_definitions,
         join_spec_definition,
         &interface_types,
-        &original_directive_names,
     )?;
     extract_union_type_content(
         supergraph_schema,
@@ -313,19 +307,15 @@ fn extract_subgraphs_from_fed_2_supergraph(
         supergraph_schema,
         subgraphs,
         graph_enum_value_name_to_subgraph_name,
-        federation_spec_definitions,
         join_spec_definition,
         &enum_types,
-        &original_directive_names,
     )?;
     extract_input_object_type_content(
         supergraph_schema,
         subgraphs,
         graph_enum_value_name_to_subgraph_name,
-        federation_spec_definitions,
         join_spec_definition,
         &input_object_types,
-        &original_directive_names,
     )?;
 
     extract_join_directives(
@@ -404,7 +394,6 @@ fn add_all_empty_subgraph_types(
     join_spec_definition: &'static JoinSpecDefinition,
     context_spec_definition: Option<&'static ContextSpecDefinition>,
     filtered_types: &Vec<TypeDefinitionPosition>,
-    original_directive_names: &IndexMap<Name, Name>,
 ) -> Result<TypeInfos, FederationError> {
     let type_directive_definition =
         join_spec_definition.type_directive_definition(supergraph_schema)?;
@@ -434,12 +423,6 @@ fn add_all_empty_subgraph_types(
                         graph_enum_value_name_to_subgraph_name,
                         &type_directive_application.graph,
                     )?;
-                    let federation_spec_definition = federation_spec_definitions
-                        .get(&type_directive_application.graph)
-                        .ok_or_else(|| SingleFederationError::InvalidFederationSupergraph {
-                            message: "Subgraph unexpectedly does not use federation spec"
-                                .to_owned(),
-                        })?;
 
                     pos.pre_insert(&mut subgraph.schema)?;
                     pos.insert(
@@ -451,16 +434,11 @@
                         }),
                     )?;
 
-                    if let Some(cost_spec_definition) =
-                        federation_spec_definition.get_cost_spec_definition(&subgraph.schema)
-                    {
-                        cost_spec_definition.propagate_demand_control_directives_for_scalar(
-                            &mut subgraph.schema,
-                            pos.get(supergraph_schema.schema())?,
-                            pos,
-                            original_directive_names,
-                        )?;
-                    }
+                    CostSpecDefinition::propagate_demand_control_directives_for_scalar(
+                        supergraph_schema,
+                        &mut subgraph.schema,
+                        pos,
+                    )?;
                 }
                 None
             }
@@ -740,7 +718,6 @@ fn extract_object_type_content(
     federation_spec_definitions: &IndexMap<Name, &'static FederationSpecDefinition>,
     join_spec_definition: &JoinSpecDefinition,
     info: &[TypeInfo],
-    original_directive_names: &IndexMap<Name, Name>,
 ) -> Result<(), FederationError> {
     let field_directive_definition =
         join_spec_definition.field_directive_definition(supergraph_schema)?;
@@ -796,21 +773,12 @@ fn extract_object_type_content(
                 graph_enum_value_name_to_subgraph_name,
                 graph_enum_value,
             )?;
-            let federation_spec_definition = federation_spec_definitions
-                .get(graph_enum_value)
-                .ok_or_else(|| SingleFederationError::InvalidFederationSupergraph {
-                    message: "Subgraph unexpectedly does not use federation spec".to_owned(),
-                })?;
-            if let Some(cost_spec_definition) =
-                federation_spec_definition.get_cost_spec_definition(&subgraph.schema)
-            {
-                cost_spec_definition.propagate_demand_control_directives_for_object(
-                    &mut subgraph.schema,
-                    type_,
-                    &pos,
-                    original_directive_names,
-                )?;
-            }
+
+            CostSpecDefinition::propagate_demand_control_directives_for_object(
+                supergraph_schema,
+                &mut subgraph.schema,
+                &pos,
+            )?;
         }
 
         for (field_name, field) in type_.fields.iter() {
@@ -836,17 +804,14 @@
                             message: "Subgraph unexpectedly does not use federation spec"
                                 .to_owned(),
                         })?;
-                    let cost_spec_definition =
-                        federation_spec_definition.get_cost_spec_definition(&subgraph.schema);
                     add_subgraph_field(
                         field_pos.clone().into(),
                         field,
+                        supergraph_schema,
                         subgraph,
                         federation_spec_definition,
                         is_shareable,
                         None,
-                        cost_spec_definition,
-                        original_directive_names,
                     )?;
                 }
             } else {
@@ -877,8 +842,6 @@
                             message: "Subgraph unexpectedly does not use federation spec"
                                 .to_owned(),
                         })?;
-                    let cost_spec_definition =
-                        federation_spec_definition.get_cost_spec_definition(&subgraph.schema);
                     if !subgraph_info.contains_key(graph_enum_value) {
                         return Err(
                             SingleFederationError::InvalidFederationSupergraph {
@@ -894,12 +857,11 @@
                     add_subgraph_field(
                         field_pos.clone().into(),
                         field,
+                        supergraph_schema,
                         subgraph,
                         federation_spec_definition,
                         is_shareable,
                         Some(field_directive_application),
-                        cost_spec_definition,
-                        original_directive_names,
                     )?;
                 }
             }
@@ -916,7 +878,6 @@ fn extract_interface_type_content(
     supergraph_schema: &FederationSchema,
     subgraphs: &mut FederationSubgraphs,
     graph_enum_value_name_to_subgraph_name: &IndexMap<Name, Arc<str>>,
     federation_spec_definitions: &IndexMap<Name, &'static FederationSpecDefinition>,
     join_spec_definition: &JoinSpecDefinition,
     info: &[TypeInfo],
-    original_directive_names: &IndexMap<Name, Name>,
 ) -> Result<(), FederationError> {
     let field_directive_definition =
         join_spec_definition.field_directive_definition(supergraph_schema)?;
@@ -1039,17 +1000,14 @@
                             message: "Subgraph unexpectedly does not use federation spec"
                                 .to_owned(),
                         })?;
-                    let cost_spec_definition =
-                        federation_spec_definition.get_cost_spec_definition(&subgraph.schema);
                     add_subgraph_field(
                         pos.field(field_name.clone()),
                         field,
+                        supergraph_schema,
                         subgraph,
                         federation_spec_definition,
                         false,
                         None,
-                        cost_spec_definition,
-                        original_directive_names,
                     )?;
                 }
             } else {
@@ -1073,8 +1031,6 @@
                             message: "Subgraph unexpectedly does not use federation spec"
                                 .to_owned(),
                         })?;
-                    let cost_spec_definition =
-                        federation_spec_definition.get_cost_spec_definition(&subgraph.schema);
                     if !subgraph_info.contains_key(graph_enum_value) {
                         return Err(
                             SingleFederationError::InvalidFederationSupergraph {
@@ -1090,12 +1046,11 @@
                     add_subgraph_field(
                         pos.field(field_name.clone()),
                         field,
+                        supergraph_schema,
                         subgraph,
                         federation_spec_definition,
                         false,
                         Some(field_directive_application),
-                        cost_spec_definition,
-                        original_directive_names,
                     )?;
                 }
             }
@@ -1201,10 +1156,8 @@ fn extract_enum_type_content(
     supergraph_schema: &FederationSchema,
     subgraphs: &mut FederationSubgraphs,
     graph_enum_value_name_to_subgraph_name: &IndexMap<Name, Arc<str>>,
-    federation_spec_definitions: &IndexMap<Name, &'static FederationSpecDefinition>,
     join_spec_definition: &JoinSpecDefinition,
     info: &[TypeInfo],
-    original_directive_names: &IndexMap<Name, Name>,
 ) -> Result<(), FederationError> {
     // This was added in join 0.3, so it can genuinely be None.
     let enum_value_directive_definition =
@@ -1226,21 +1179,12 @@ fn extract_enum_type_content(
                 graph_enum_value_name_to_subgraph_name,
                 graph_enum_value,
             )?;
-            let federation_spec_definition = federation_spec_definitions
-                .get(graph_enum_value)
-                .ok_or_else(|| SingleFederationError::InvalidFederationSupergraph {
-                    message: "Subgraph unexpectedly does not use federation spec".to_owned(),
-                })?;
-            if let Some(cost_spec_definition) =
-                federation_spec_definition.get_cost_spec_definition(&subgraph.schema)
-            {
-                cost_spec_definition.propagate_demand_control_directives_for_enum(
-                    &mut subgraph.schema,
-                    type_,
-                    &pos,
-                    original_directive_names,
-                )?;
-            }
+
+            CostSpecDefinition::propagate_demand_control_directives_for_enum(
+                supergraph_schema,
+                &mut subgraph.schema,
+                &pos,
+            )?;
         }
 
         for (value_name, value) in type_.values.iter() {
@@ -1310,10 +1254,8 @@ fn extract_input_object_type_content(
     supergraph_schema: &FederationSchema,
     subgraphs: &mut FederationSubgraphs,
     graph_enum_value_name_to_subgraph_name: &IndexMap<Name, Arc<str>>,
-    federation_spec_definitions: &IndexMap<Name, &'static FederationSpecDefinition>,
     join_spec_definition: &JoinSpecDefinition,
     info: &[TypeInfo],
-    original_directive_names: &IndexMap<Name, Name>,
 ) -> Result<(), FederationError> {
     let field_directive_definition =
         join_spec_definition.field_directive_definition(supergraph_schema)?;
@@ -1345,21 +1287,12 @@ fn extract_input_object_type_content(
                         graph_enum_value_name_to_subgraph_name,
                         graph_enum_value,
                     )?;
-                    let federation_spec_definition = federation_spec_definitions
-                        .get(graph_enum_value)
-                        .ok_or_else(|| SingleFederationError::InvalidFederationSupergraph {
-                            message: "Subgraph unexpectedly does not use federation spec"
-                                .to_owned(),
-                        })?;
-                    let cost_spec_definition =
-                        federation_spec_definition.get_cost_spec_definition(&subgraph.schema);
                     add_subgraph_input_field(
                         input_field_pos.clone(),
                         input_field,
+                        supergraph_schema,
                         subgraph,
                         None,
-                        cost_spec_definition,
-                        original_directive_names,
                     )?;
                 }
             } else {
@@ -1375,14 +1308,6 @@ fn extract_input_object_type_content(
                         graph_enum_value_name_to_subgraph_name,
                         graph_enum_value,
                     )?;
-                    let federation_spec_definition = federation_spec_definitions
-                        .get(graph_enum_value)
-                        .ok_or_else(|| SingleFederationError::InvalidFederationSupergraph {
-                            message: "Subgraph unexpectedly does not use federation spec"
-                                .to_owned(),
-                        })?;
-                    let cost_spec_definition =
-                        federation_spec_definition.get_cost_spec_definition(&subgraph.schema);
                     if !subgraph_info.contains_key(graph_enum_value) {
                         return Err(
                             SingleFederationError::InvalidFederationSupergraph {
@@ -1398,10 +1323,9 @@
                     add_subgraph_input_field(
                         input_field_pos.clone(),
                         input_field,
+                        supergraph_schema,
                         subgraph,
                         Some(field_directive_application),
-                        cost_spec_definition,
-                        original_directive_names,
                     )?;
                 }
             }
@@ -1415,12 +1339,11 @@
 fn add_subgraph_field(
     object_or_interface_field_definition_position: ObjectOrInterfaceFieldDefinitionPosition,
     field: &FieldDefinition,
+    supergraph_schema: &FederationSchema,
     subgraph: &mut FederationSubgraph,
     federation_spec_definition: &'static FederationSpecDefinition,
     is_shareable: bool,
     field_directive_application: Option<&FieldDirectiveArguments>,
-    cost_spec_definition: Option<&'static CostSpecDefinition>,
-    original_directive_names: &IndexMap<Name, Name>,
 ) -> Result<(), FederationError> {
     let field_directive_application =
         field_directive_application.unwrap_or_else(|| &FieldDirectiveArguments {
@@ -1456,14 +1379,13 @@ fn add_subgraph_field(
             default_value: argument.default_value.clone(),
             directives: Default::default(),
         };
-        if let Some(cost_spec_definition) = cost_spec_definition {
-            cost_spec_definition.propagate_demand_control_directives(
-                &subgraph.schema,
-                &argument.directives,
-                &mut destination_argument.directives,
-                original_directive_names,
-            )?;
-        }
+
+        CostSpecDefinition::propagate_demand_control_directives(
+            supergraph_schema,
+            &argument.directives,
+            &subgraph.schema,
+            &mut destination_argument.directives,
+        )?;
 
         subgraph_field
             .arguments
@@ -1509,14 +1431,12 @@ fn add_subgraph_field(
         ));
     }
 
-    if let Some(cost_spec_definition) = cost_spec_definition {
-        cost_spec_definition.propagate_demand_control_directives(
-            &subgraph.schema,
-            &field.directives,
-            &mut subgraph_field.directives,
-            original_directive_names,
-        )?;
-    }
+    CostSpecDefinition::propagate_demand_control_directives(
+        supergraph_schema,
+        &field.directives,
+        &subgraph.schema,
+        &mut subgraph_field.directives,
+    )?;
 
     if let Some(context_arguments) = &field_directive_application.context_arguments {
         for args in context_arguments {
@@ -1563,10 +1483,9 @@
 fn add_subgraph_input_field(
     input_object_field_definition_position: InputObjectFieldDefinitionPosition,
     input_field: &InputValueDefinition,
+    supergraph_schema: &FederationSchema,
     subgraph: &mut FederationSubgraph,
     field_directive_application: Option<&FieldDirectiveArguments>,
-    cost_spec_definition: Option<&'static CostSpecDefinition>,
-    original_directive_names: &IndexMap<Name, Name>,
 ) -> Result<(), FederationError> {
     let field_directive_application =
         field_directive_application.unwrap_or_else(|| &FieldDirectiveArguments {
@@ -1592,14 +1511,12 @@ fn add_subgraph_input_field(
         directives: Default::default(),
     };
 
-    if let Some(cost_spec_definition) = cost_spec_definition {
-        cost_spec_definition.propagate_demand_control_directives(
-            &subgraph.schema,
-            &input_field.directives,
-            &mut subgraph_input_field.directives,
-            original_directive_names,
-        )?;
-    }
+    CostSpecDefinition::propagate_demand_control_directives(
+        supergraph_schema,
+        &input_field.directives,
+        &subgraph.schema,
+        &mut subgraph_input_field.directives,
+    )?;
 
     input_object_field_definition_position
         .insert(&mut subgraph.schema, Component::from(subgraph_input_field))?;
diff --git a/apollo-federation/src/supergraph/schema.rs b/apollo-federation/src/supergraph/schema.rs
index 46aa19618e..700a52e0e4 100644
--- a/apollo-federation/src/supergraph/schema.rs
+++ b/apollo-federation/src/supergraph/schema.rs
@@ -1,49 +1,8 @@
-use apollo_compiler::collections::IndexMap;
 use apollo_compiler::schema::SchemaBuilder;
-use apollo_compiler::Name;
 
 use crate::error::FederationError;
-use crate::link::spec::APOLLO_SPEC_DOMAIN;
-use crate::link::Link;
 use crate::schema::FederationSchema;
 
-/// Builds a map of original name to new name for Apollo feature directives. This is
-/// used to handle cases where a directive is renamed via an import statement. For
-/// example, importing a directive with a custom name like
-/// ```graphql
-/// @link(url: "https://specs.apollo.dev/cost/v0.1", import: [{ name: "@cost", as: "@renamedCost" }])
-/// ```
-/// results in a map entry of `cost -> renamedCost` with the `@` prefix removed.
-///
-/// If the directive is imported under its default name, that also results in an entry. So,
-/// ```graphql
-/// @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@cost"])
-/// ```
-/// results in a map entry of `cost -> cost`. This doubles as a way to check if a directive
-/// is included in the supergraph schema.
-///
-/// **Important:** This map does _not_ include directives imported from identities other
-/// than `specs.apollo.dev`. This helps us avoid extracting directives to subgraphs
-/// when a custom directive's name conflicts with that of a default one.
-pub(super) fn get_apollo_directive_names(
-    supergraph_schema: &FederationSchema,
-) -> Result<IndexMap<Name, Name>, FederationError> {
-    let mut hm: IndexMap<Name, Name> = IndexMap::default();
-    for directive in &supergraph_schema.schema().schema_definition.directives {
-        if directive.name.as_str() == "link" {
-            if let Ok(link) = Link::from_directive_application(directive) {
-                if link.url.identity.domain != APOLLO_SPEC_DOMAIN {
-                    continue;
-                }
-                for import in link.imports {
-                    hm.insert(import.element.clone(), import.imported_name().clone());
-                }
-            }
-        }
-    }
-    Ok(hm)
-}
-
 /// TODO: Use the JS/programmatic approach instead of hard-coding definitions.
 pub(crate) fn new_empty_fed_2_subgraph_schema() -> Result<FederationSchema, FederationError> {
     let builder = SchemaBuilder::new().adopt_orphan_extensions();
diff --git a/apollo-federation/src/utils/mod.rs b/apollo-federation/src/utils/mod.rs
index e7132cf36d..75ee5b0891 100644
--- a/apollo-federation/src/utils/mod.rs
+++ b/apollo-federation/src/utils/mod.rs
@@ -2,6 +2,7 @@
 mod fallible_iterator;
 
 pub(crate) mod logging;
+pub(crate) mod serde_bridge;
 
 pub(crate) use fallible_iterator::*;
diff --git a/apollo-federation/src/utils/serde_bridge.rs b/apollo-federation/src/utils/serde_bridge.rs
new file mode 100644
index 0000000000..2cbc958809
--- /dev/null
+++ b/apollo-federation/src/utils/serde_bridge.rs
@@ -0,0 +1,89 @@
+/// This module contains functions used to bridge the apollo-compiler serialization methods
+/// with serde serialization.
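To make the new module's intent concrete: each function below matches the `fn(&T, S) -> Result<S::Ok, S::Error>` shape that serde's `serialize_with` attribute expects, letting a field reuse apollo-compiler's own text serialization. A minimal sketch of that wiring, assuming `serde` (with the `derive` feature) and `apollo-compiler` as dependencies; the `OperationInfo` type and `serialize_operation_type` helper here are hypothetical:

```rust
use apollo_compiler::executable;
use serde::Serialize;
use serde::Serializer;

// serde's `serialize_with` attribute expects a free function with the
// signature `fn(&T, S) -> Result<S::Ok, S::Error>`, which is exactly
// what the bridge functions in this module provide.
fn serialize_operation_type<S: Serializer>(
    ty: &executable::OperationType,
    ser: S,
) -> Result<S::Ok, S::Error> {
    // Reuse apollo-compiler's own textual form ("query" / "mutation" /
    // "subscription") instead of deriving a separate serde representation.
    ser.serialize_str(&ty.to_string())
}

#[derive(Serialize)]
struct OperationInfo {
    #[serde(serialize_with = "serialize_operation_type")]
    kind: executable::OperationType,
}
```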
+use apollo_compiler::executable;
+use apollo_compiler::validation::Valid;
+use apollo_compiler::ExecutableDocument;
+use apollo_compiler::Node;
+use serde::ser::SerializeSeq;
+use serde::Serializer;
+
+pub(crate) fn serialize_exe_field<S: Serializer>(
+    field: &executable::Field,
+    ser: S,
+) -> Result<S::Ok, S::Error> {
+    ser.serialize_str(&field.serialize().no_indent().to_string())
+}
+
+pub(crate) fn serialize_exe_inline_fragment<S: Serializer>(
+    fragment: &executable::InlineFragment,
+    ser: S,
+) -> Result<S::Ok, S::Error> {
+    ser.serialize_str(&fragment.serialize().no_indent().to_string())
+}
+
+pub(crate) fn serialize_optional_exe_selection_set<S: Serializer>(
+    set: &Option<executable::SelectionSet>,
+    ser: S,
+) -> Result<S::Ok, S::Error> {
+    match set {
+        Some(set) => ser.serialize_str(&set.serialize().no_indent().to_string()),
+        None => ser.serialize_none(),
+    }
+}
+
+pub(crate) fn serialize_optional_slice_of_exe_argument_nodes<
+    S: Serializer,
+    Args: AsRef<[Node<executable::Argument>]>,
+>(
+    args: &Option<Args>,
+    ser: S,
+) -> Result<S::Ok, S::Error> {
+    let Some(args) = args else {
+        return ser.serialize_none();
+    };
+    let args = args.as_ref();
+    let mut ser = ser.serialize_seq(Some(args.len()))?;
+    args.iter().try_for_each(|arg| {
+        ser.serialize_element(&format!(
+            "{}: {}",
+            arg.name,
+            arg.value.serialize().no_indent()
+        ))
+    })?;
+    ser.end()
+}
+
+pub(crate) fn serialize_exe_directive_list<S: Serializer>(
+    list: &executable::DirectiveList,
+    ser: S,
+) -> Result<S::Ok, S::Error> {
+    ser.serialize_str(&list.serialize().no_indent().to_string())
+}
+
+pub(crate) fn serialize_optional_vec_of_exe_selection<S: Serializer>(
+    selection: &Option<Vec<executable::Selection>>,
+    ser: S,
+) -> Result<S::Ok, S::Error> {
+    let Some(selections) = selection else {
+        return ser.serialize_none();
+    };
+    let mut ser = ser.serialize_seq(Some(selections.len()))?;
+    selections.iter().try_for_each(|selection| {
+        ser.serialize_element(&selection.serialize().no_indent().to_string())
+    })?;
+    ser.end()
+}
+
+pub(crate) fn serialize_valid_executable_document<S: Serializer>(
+    doc: &Valid<ExecutableDocument>,
+    ser: S,
+) -> Result<S::Ok, S::Error> {
+    ser.serialize_str(&doc.serialize().no_indent().to_string())
+}
+
+pub(crate) fn serialize_exe_operation_type<S: Serializer>(
+    ty: &executable::OperationType,
+    ser: S,
+) -> Result<S::Ok, S::Error> {
+    ser.serialize_str(&ty.to_string())
+}
diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests.rs b/apollo-federation/tests/query_plan/build_query_plan_tests.rs
index c0d85c7b62..4a063565e1 100644
--- a/apollo-federation/tests/query_plan/build_query_plan_tests.rs
+++ b/apollo-federation/tests/query_plan/build_query_plan_tests.rs
@@ -34,6 +34,7 @@ fn some_name() {
 mod context;
 mod debug_max_evaluated_plans_configuration;
 mod defer;
+mod entities;
 mod fetch_operation_names;
 mod field_merging_with_skip_and_include;
 mod fragment_autogeneration;
@@ -44,8 +45,7 @@
 mod interface_type_explosion;
 mod introspection_typename_handling;
 mod merged_abstract_types_handling;
 mod mutations;
-mod named_fragments;
-mod named_fragments_preservation;
+mod named_fragments_expansion;
 mod overrides;
 mod provides;
 mod requires;
diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/context.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/context.rs
index 2a2c03bb3b..e487e44ab9 100644
--- a/apollo-federation/tests/query_plan/build_query_plan_tests/context.rs
+++ b/apollo-federation/tests/query_plan/build_query_plan_tests/context.rs
@@ -36,6 +36,48 @@ use apollo_federation::query_plan::FetchDataRewrite;
 use apollo_federation::query_plan::PlanNode;
 use apollo_federation::query_plan::TopLevelPlanNode;
 
+fn parse_fetch_data_path_element(value: &str) -> FetchDataPathElement {
+    if value == ".." {
+        FetchDataPathElement::Parent
+    } else if let Some(("", ty)) = value.split_once("... on ") {
+        FetchDataPathElement::TypenameEquals(Name::new(ty).unwrap())
+    } else {
+        FetchDataPathElement::Key(Name::new(value).unwrap(), Default::default())
+    }
+}
+
+macro_rules! node_assert {
+    ($plan: ident, $index: literal, $($rename_key_to: literal, $path: expr),+$(,)?) => {
+        let Some(TopLevelPlanNode::Sequence(node)) = $plan.node else {
+            panic!("failed to get sequence node");
+        };
+        let Some(PlanNode::Flatten(node)) = node.nodes.get($index) else {
+            panic!("failed to get flatten node");
+        };
+        let PlanNode::Fetch(node) = &*node.node else {
+            panic!("failed to get fetch node");
+        };
+        let expected_rewrites = &[ $( $rename_key_to ),+ ];
+        let expected_paths = &[ $( $path.into_iter().map(parse_fetch_data_path_element).collect::<Vec<_>>() ),+ ];
+        assert_eq!(expected_rewrites.len(), expected_paths.len());
+        assert_eq!(node.context_rewrites.len(), expected_rewrites.len());
+        node
+            .context_rewrites
+            .iter()
+            .map(|rewriter| {
+                let FetchDataRewrite::KeyRenamer(renamer) = &**rewriter else {
+                    panic!("Expected KeyRenamer");
+                };
+                renamer
+            })
+            .zip(expected_rewrites.iter().zip(expected_paths))
+            .for_each(|(actual, (rename_key_to, path))| {
+                assert_eq!(&actual.rename_key_to.as_str(), rename_key_to);
+                assert_eq!(&actual.path, path);
+            });
+    };
+}
+
 #[test]
 fn set_context_test_variable_is_from_same_subgraph() {
     let planner = planner!(
@@ -110,33 +152,12 @@ fn set_context_test_variable_is_from_same_subgraph() {
     }
     "###
     );
-    match plan.node {
-        Some(TopLevelPlanNode::Sequence(node)) => match node.nodes.get(1) {
-            Some(PlanNode::Flatten(node)) => match &*node.node {
-                PlanNode::Fetch(node) => {
-                    assert_eq!(
-                        node.context_rewrites,
-                        vec![Arc::new(FetchDataRewrite::KeyRenamer(
-                            FetchDataKeyRenamer {
-                                rename_key_to: Name::new("contextualArgument_1_0").unwrap(),
-                                path: vec![
-                                    FetchDataPathElement::Parent,
-                                    FetchDataPathElement::TypenameEquals(Name::new("T").unwrap()),
-                                    FetchDataPathElement::Key(
-                                        Name::new("prop").unwrap(),
-                                        Default::default()
-                                    ),
-                                ],
-                            }
-                        )),]
-                    );
-                }
-                _ => panic!("failed to get fetch node"),
-            },
-            _ => panic!("failed to get flatten node"),
-        },
-        _ => panic!("failed to get sequence node"),
-    }
+    node_assert!(
+        plan,
+        1,
+        "contextualArgument_1_0",
+        ["..", "... 
on T", "prop"] + ); } #[test] @@ -540,33 +540,13 @@ fn set_context_test_fetched_as_a_list() { } "### ); - match plan.node { - Some(TopLevelPlanNode::Sequence(node)) => match node.nodes.get(1) { - Some(PlanNode::Flatten(node)) => match &*node.node { - PlanNode::Fetch(node) => { - assert_eq!( - node.context_rewrites, - vec![Arc::new(FetchDataRewrite::KeyRenamer( - FetchDataKeyRenamer { - rename_key_to: Name::new("contextualArgument_1_0").unwrap(), - path: vec![ - FetchDataPathElement::Parent, - FetchDataPathElement::TypenameEquals(Name::new("T").unwrap()), - FetchDataPathElement::Key( - Name::new("prop").unwrap(), - Default::default() - ), - ], - } - )),] - ); - } - _ => panic!("failed to get fetch node"), - }, - _ => panic!("failed to get flatten node"), - }, - _ => panic!("failed to get sequence node"), - } + + node_assert!( + plan, + 1, + "contextualArgument_1_0", + ["..", "... on T", "prop"] + ); } #[test] @@ -657,44 +637,15 @@ fn set_context_test_impacts_on_query_planning() { } "### ); - match plan.node { - Some(TopLevelPlanNode::Sequence(node)) => match node.nodes.get(1) { - Some(PlanNode::Flatten(node)) => match &*node.node { - PlanNode::Fetch(node) => { - assert_eq!( - node.context_rewrites, - vec![ - Arc::new(FetchDataRewrite::KeyRenamer(FetchDataKeyRenamer { - rename_key_to: Name::new("contextualArgument_1_0").unwrap(), - path: vec![ - FetchDataPathElement::Parent, - FetchDataPathElement::TypenameEquals(Name::new("A").unwrap()), - FetchDataPathElement::Key( - Name::new("prop").unwrap(), - Default::default() - ), - ], - })), - Arc::new(FetchDataRewrite::KeyRenamer(FetchDataKeyRenamer { - rename_key_to: Name::new("contextualArgument_1_0").unwrap(), - path: vec![ - FetchDataPathElement::Parent, - FetchDataPathElement::TypenameEquals(Name::new("B").unwrap()), - FetchDataPathElement::Key( - Name::new("prop").unwrap(), - Default::default() - ), - ], - })), - ] - ); - } - _ => panic!("failed to get fetch node"), - }, - _ => panic!("failed to get flatten node"), - }, - _ => panic!("failed to get sequence node"), - } + + node_assert!( + plan, + 1, + "contextualArgument_1_0", + ["..", "... on A", "prop"], + "contextualArgument_1_0", + ["..", "... on B", "prop"] + ); } #[test] @@ -806,44 +757,15 @@ fn set_context_test_with_type_conditions_for_union() { } "### ); - match plan.node { - Some(TopLevelPlanNode::Sequence(node)) => match node.nodes.get(1) { - Some(PlanNode::Flatten(node)) => match &*node.node { - PlanNode::Fetch(node) => { - assert_eq!( - node.context_rewrites, - vec![ - Arc::new(FetchDataRewrite::KeyRenamer(FetchDataKeyRenamer { - rename_key_to: Name::new("contextualArgument_1_0").unwrap(), - path: vec![ - FetchDataPathElement::Parent, - FetchDataPathElement::TypenameEquals(Name::new("A").unwrap()), - FetchDataPathElement::Key( - Name::new("prop").unwrap(), - Default::default() - ), - ], - })), - Arc::new(FetchDataRewrite::KeyRenamer(FetchDataKeyRenamer { - rename_key_to: Name::new("contextualArgument_1_0").unwrap(), - path: vec![ - FetchDataPathElement::Parent, - FetchDataPathElement::TypenameEquals(Name::new("B").unwrap()), - FetchDataPathElement::Key( - Name::new("prop").unwrap(), - Default::default() - ), - ], - })), - ] - ); - } - _ => panic!("failed to get fetch node"), - }, - _ => panic!("failed to get flatten node"), - }, - _ => panic!("failed to get sequence node"), - } + + node_assert!( + plan, + 1, + "contextualArgument_1_0", + ["..", "... on A", "prop"], + "contextualArgument_1_0", + ["..", "... 
on B", "prop"] + ); } #[test] @@ -921,36 +843,8 @@ fn set_context_test_accesses_a_different_top_level_query() { } "### ); - match plan.node { - Some(TopLevelPlanNode::Sequence(node)) => match node.nodes.get(1) { - Some(PlanNode::Flatten(node)) => match &*node.node { - PlanNode::Fetch(node) => { - assert_eq!( - node.context_rewrites, - vec![Arc::new(FetchDataRewrite::KeyRenamer( - FetchDataKeyRenamer { - rename_key_to: Name::new("contextualArgument_1_0").unwrap(), - path: vec![ - FetchDataPathElement::Parent, - FetchDataPathElement::Key( - Name::new("me").unwrap(), - Default::default() - ), - FetchDataPathElement::Key( - Name::new("locale").unwrap(), - Default::default() - ), - ], - } - )),] - ); - } - _ => panic!("failed to get fetch node"), - }, - _ => panic!("failed to get flatten node"), - }, - _ => panic!("failed to get sequence node"), - } + + node_assert!(plan, 1, "contextualArgument_1_0", ["..", "me", "locale"]); } #[test] @@ -1022,33 +916,13 @@ fn set_context_one_subgraph() { } "### ); - match plan.node { - Some(TopLevelPlanNode::Sequence(node)) => match node.nodes.get(1) { - Some(PlanNode::Flatten(node)) => match &*node.node { - PlanNode::Fetch(node) => { - assert_eq!( - node.context_rewrites, - vec![Arc::new(FetchDataRewrite::KeyRenamer( - FetchDataKeyRenamer { - rename_key_to: Name::new("contextualArgument_1_0").unwrap(), - path: vec![ - FetchDataPathElement::Parent, - FetchDataPathElement::TypenameEquals(Name::new("T").unwrap()), - FetchDataPathElement::Key( - Name::new("prop").unwrap(), - Default::default() - ), - ], - } - )),] - ); - } - _ => panic!("failed to get fetch node"), - }, - _ => panic!("failed to get flatten node"), - }, - _ => panic!("failed to get sequence node"), - } + + node_assert!( + plan, + 1, + "contextualArgument_1_0", + ["..", "... on T", "prop"] + ); } #[test] @@ -1185,45 +1059,13 @@ fn set_context_required_field_is_several_levels_deep_going_back_and_forth_betwee } "### ); - match plan.node { - Some(TopLevelPlanNode::Sequence(node)) => match node.nodes.get(3) { - Some(PlanNode::Flatten(node)) => match &*node.node { - PlanNode::Fetch(node) => { - assert_eq!( - node.context_rewrites, - vec![Arc::new(FetchDataRewrite::KeyRenamer( - FetchDataKeyRenamer { - rename_key_to: Name::new("contextualArgument_1_0").unwrap(), - path: vec![ - FetchDataPathElement::Parent, - FetchDataPathElement::TypenameEquals(Name::new("T").unwrap()), - FetchDataPathElement::Key( - Name::new("a").unwrap(), - Default::default() - ), - FetchDataPathElement::Key( - Name::new("b").unwrap(), - Default::default() - ), - FetchDataPathElement::Key( - Name::new("c").unwrap(), - Default::default() - ), - FetchDataPathElement::Key( - Name::new("prop").unwrap(), - Default::default() - ), - ], - } - )),] - ); - } - _ => panic!("failed to get fetch node"), - }, - _ => panic!("failed to get flatten node"), - }, - _ => panic!("failed to get sequence node"), - } + + node_assert!( + plan, + 3, + "contextualArgument_1_0", + ["..", "... 
on T", "a", "b", "c", "prop"] + ); } #[test] @@ -1452,40 +1294,13 @@ fn set_context_test_efficiently_merge_fetch_groups() { } "### ); - match plan.node { - Some(TopLevelPlanNode::Sequence(node)) => match node.nodes.get(1) { - Some(PlanNode::Flatten(node)) => match &*node.node { - PlanNode::Fetch(node) => { - assert_eq!( - node.context_rewrites, - vec![ - Arc::new(FetchDataRewrite::KeyRenamer(FetchDataKeyRenamer { - rename_key_to: Name::new("contextualArgument_3_0").unwrap(), - path: vec![ - FetchDataPathElement::Key( - Name::new_unchecked("identifiers"), - Default::default() - ), - FetchDataPathElement::Key( - Name::new_unchecked("id5"), - Default::default() - ), - ], - })), - Arc::new(FetchDataRewrite::KeyRenamer(FetchDataKeyRenamer { - rename_key_to: Name::new("contextualArgument_3_1").unwrap(), - path: vec![FetchDataPathElement::Key( - Name::new_unchecked("mid"), - Default::default() - ),], - })), - ] - ); - } - _ => panic!("failed to get fetch node"), - }, - _ => panic!("failed to get flatten node"), - }, - _ => panic!("failed to get sequence node"), - } + + node_assert!( + plan, + 1, + "contextualArgument_3_0", + ["identifiers", "id5"], + "contextualArgument_3_1", + ["mid"] + ); } diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/entities.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/entities.rs new file mode 100644 index 0000000000..53f50aad38 --- /dev/null +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/entities.rs @@ -0,0 +1,142 @@ +// TODO this test shows inefficient QP where we make multiple parallel +// fetches of the same entity from the same subgraph but for different paths +#[test] +fn inefficient_entity_fetches_to_same_subgraph() { + let planner = planner!( + Subgraph1: r#" + type V @shareable { + x: Int + } + + interface I { + v: V + } + + type Outer implements I @key(fields: "id") { + id: ID! + v: V + } + "#, + Subgraph2: r#" + type Query { + outer1: Outer + outer2: Outer + } + + type V @shareable { + x: Int + } + + interface I { + v: V + w: Int + } + + type Inner implements I { + v: V + w: Int + } + + type Outer @key(fields: "id") { + id: ID! + inner: Inner + w: Int + } + "#, + ); + assert_plan!( + &planner, + r#" + query { + outer1 { + ...OuterFrag + } + outer2 { + ...OuterFrag + } + } + + fragment OuterFrag on Outer { + ...IFrag + inner { + ...IFrag + } + } + + fragment IFrag on I { + v { + x + } + w + } + "#, + @r#" + QueryPlan { + Sequence { + Fetch(service: "Subgraph2") { + { + outer1 { + __typename + id + w + inner { + v { + x + } + w + } + } + outer2 { + __typename + id + w + inner { + v { + x + } + w + } + } + } + }, + Parallel { + Flatten(path: "outer2") { + Fetch(service: "Subgraph1") { + { + ... on Outer { + __typename + id + } + } => + { + ... on Outer { + v { + x + } + } + } + }, + }, + Flatten(path: "outer1") { + Fetch(service: "Subgraph1") { + { + ... on Outer { + __typename + id + } + } => + { + ... 
on Outer { + v { + x + } + } + } + }, + }, + }, + }, + } + "# + ); +} diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs index 03b43c6245..e80f57953f 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/fragment_autogeneration.rs @@ -1,5 +1,12 @@ use apollo_federation::query_plan::query_planner::QueryPlannerConfig; +fn generate_fragments_config() -> QueryPlannerConfig { + QueryPlannerConfig { + generate_query_fragments: true, + ..Default::default() + } +} + const SUBGRAPH: &str = r#" directive @custom on INLINE_FRAGMENT | FRAGMENT_SPREAD @@ -25,7 +32,7 @@ const SUBGRAPH: &str = r#" #[test] fn it_respects_generate_query_fragments_option() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, + config = generate_fragments_config(), Subgraph1: SUBGRAPH, ); assert_plan!( @@ -73,7 +80,7 @@ fn it_respects_generate_query_fragments_option() { #[test] fn it_handles_nested_fragment_generation() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, + config = generate_fragments_config(), Subgraph1: SUBGRAPH, ); assert_plan!( @@ -131,10 +138,250 @@ fn it_handles_nested_fragment_generation() { ); } +// TODO this test shows a clearly worse plan than reused fragments when fragments +// target concrete types +#[test] +fn it_handles_nested_fragment_generation_from_operation_with_fragments() { + let planner = planner!( + config = generate_fragments_config(), + Subgraph1: r#" + type Query { + a: Anything + } + + union Anything = A1 | A2 | A3 + + interface Foo { + foo: String + child: Foo + child2: Foo + } + + type A1 implements Foo { + foo: String + child: Foo + child2: Foo + } + + type A2 implements Foo { + foo: String + child: Foo + child2: Foo + } + + type A3 implements Foo { + foo: String + child: Foo + child2: Foo + } + "#, + ); + assert_plan!( + &planner, + r#" + query { + a { + ... on A1 { + ...FooSelect + } + ... on A2 { + ...FooSelect + } + ... on A3 { + ...FooSelect + } + } + } + + fragment FooSelect on Foo { + __typename + foo + child { + ...FooChildSelect + } + child2 { + ...FooChildSelect + } + } + + fragment FooChildSelect on Foo { + __typename + foo + child { + child { + child { + foo + } + } + } + } + "#, + + // This is a test case that shows worse result + // QueryPlan { + // Fetch(service: "Subgraph1") { + // { + // a { + // __typename + // ... on A1 { + // ...FooSelect + // } + // ... on A2 { + // ...FooSelect + // } + // ... 
on A3 { + // ...FooSelect + // } + // } + // } + // + // fragment FooChildSelect on Foo { + // __typename + // foo + // child { + // __typename + // child { + // __typename + // child { + // __typename + // foo + // } + // } + // } + // } + // + // fragment FooSelect on Foo { + // __typename + // foo + // child { + // ...FooChildSelect + // } + // child2 { + // ...FooChildSelect + // } + // } + // }, + // } + @r###" + QueryPlan { + Fetch(service: "Subgraph1") { + { + a { + __typename + ..._generated_onA14_0 + ..._generated_onA24_0 + ..._generated_onA34_0 + } + } + + fragment _generated_onA14_0 on A1 { + __typename + foo + child { + __typename + foo + child { + __typename + child { + __typename + child { + __typename + foo + } + } + } + } + child2 { + __typename + foo + child { + __typename + child { + __typename + child { + __typename + foo + } + } + } + } + } + + fragment _generated_onA24_0 on A2 { + __typename + foo + child { + __typename + foo + child { + __typename + child { + __typename + child { + __typename + foo + } + } + } + } + child2 { + __typename + foo + child { + __typename + child { + __typename + child { + __typename + foo + } + } + } + } + } + + fragment _generated_onA34_0 on A3 { + __typename + foo + child { + __typename + foo + child { + __typename + child { + __typename + child { + __typename + foo + } + } + } + } + child2 { + __typename + foo + child { + __typename + child { + __typename + child { + __typename + foo + } + } + } + } + } + }, + } + "### + ); +} + #[test] fn it_handles_fragments_with_one_non_leaf_field() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, + config = generate_fragments_config(), Subgraph1: SUBGRAPH, ); @@ -183,7 +430,7 @@ fn it_handles_fragments_with_one_non_leaf_field() { #[test] fn it_migrates_skip_include() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, + config = generate_fragments_config(), Subgraph1: SUBGRAPH, ); assert_plan!( @@ -250,10 +497,11 @@ fn it_migrates_skip_include() { "### ); } + #[test] fn it_identifies_and_reuses_equivalent_fragments_that_arent_identical() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, + config = generate_fragments_config(), Subgraph1: SUBGRAPH, ); assert_plan!( @@ -301,7 +549,7 @@ fn it_identifies_and_reuses_equivalent_fragments_that_arent_identical() { #[test] fn fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment_definitions() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, + config = generate_fragments_config(), Subgraph1: SUBGRAPH, ); assert_plan!( @@ -354,7 +602,7 @@ fn fragments_that_share_a_hash_but_are_not_identical_generate_their_own_fragment #[test] fn same_as_js_router798() { let planner = planner!( - config = QueryPlannerConfig { generate_query_fragments: true, reuse_query_fragments: false, ..Default::default() }, + config = generate_fragments_config(), Subgraph1: r#" interface Interface { a: Int } type Y implements Interface { a: Int b: Int } @@ -398,6 +646,7 @@ fn same_as_js_router798() { #[test] fn works_with_key_chains() { let planner = planner!( + config = generate_fragments_config(), Subgraph1: r#" type Query { t: T @@ -471,10 +720,12 @@ fn works_with_key_chains() { } } => { - ... 
on T { - x - y - } + ..._generated_onT2_0 + } + + fragment _generated_onT2_0 on T { + x + y } }, }, @@ -483,3 +734,192 @@ fn works_with_key_chains() { "### ); } + +#[test] +fn another_mix_of_fragments_indirection_and_unions() { + // This tests that the issue reported on https://github.com/apollographql/router/issues/3172 is resolved. + let planner = planner!( + config = generate_fragments_config(), + Subgraph1: r#" + type Query { + owner: Owner! + } + + interface OItf { + id: ID! + v0: String! + } + + type Owner implements OItf { + id: ID! + v0: String! + u: [U] + } + + union U = T1 | T2 + + interface I { + id1: ID! + id2: ID! + } + + type T1 implements I { + id1: ID! + id2: ID! + owner: Owner! + } + + type T2 implements I { + id1: ID! + id2: ID! + } + "#, + ); + assert_plan!( + &planner, + r#" + { + owner { + u { + ... on I { + id1 + id2 + } + ...Fragment1 + ...Fragment2 + } + } + } + + fragment Fragment1 on T1 { + owner { + ... on Owner { + ...Fragment3 + } + } + } + + fragment Fragment2 on T2 { + ...Fragment4 + id1 + } + + fragment Fragment3 on OItf { + v0 + } + + fragment Fragment4 on I { + id1 + id2 + __typename + } + "#, + @r###" + QueryPlan { + Fetch(service: "Subgraph1") { + { + owner { + u { + __typename + ..._generated_onI3_0 + ..._generated_onT11_0 + ..._generated_onT23_0 + } + } + } + + fragment _generated_onI3_0 on I { + __typename + id1 + id2 + } + + fragment _generated_onT11_0 on T1 { + owner { + v0 + } + } + + fragment _generated_onT23_0 on T2 { + __typename + id1 + id2 + } + }, + } + "### + ); + + assert_plan!( + &planner, + r#" + { + owner { + u { + ... on I { + id1 + id2 + } + ...Fragment1 + ...Fragment2 + } + } + } + + fragment Fragment1 on T1 { + owner { + ... on Owner { + ...Fragment3 + } + } + } + + fragment Fragment2 on T2 { + ...Fragment4 + id1 + } + + fragment Fragment3 on OItf { + v0 + } + + fragment Fragment4 on I { + id1 + id2 + } + "#, + @r###" + QueryPlan { + Fetch(service: "Subgraph1") { + { + owner { + u { + __typename + ..._generated_onI3_0 + ..._generated_onT11_0 + ..._generated_onT22_0 + } + } + } + + fragment _generated_onI3_0 on I { + __typename + id1 + id2 + } + + fragment _generated_onT11_0 on T1 { + owner { + v0 + } + } + + fragment _generated_onT22_0 on T2 { + id1 + id2 + } + }, + } + "### + ); +} diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/interface_object.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/interface_object.rs index 2f8ec2a798..6331175baa 100644 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/interface_object.rs +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/interface_object.rs @@ -1,5 +1,6 @@ use std::ops::Deref; +use apollo_federation::query_plan::query_planner::QueryPlannerConfig; use apollo_federation::query_plan::FetchDataPathElement; use apollo_federation::query_plan::FetchDataRewrite; @@ -956,3 +957,103 @@ fn test_interface_object_advance_with_non_collecting_and_type_preserving_transit "### ); } + +#[test] +fn test_type_conditioned_fetching_with_interface_object_does_not_crash() { + let planner = planner!( + config = QueryPlannerConfig { + type_conditioned_fetching: true, + ..Default::default() + }, + S1: r#" + type I @interfaceObject @key(fields: "id") { + id: ID! + t: T + } + + type T { + relatedIs: [I] + } + "#, + S2: r#" + type Query { + i: I + } + + interface I @key(fields: "id") { + id: ID! + a: Int + } + + type A implements I @key(fields: "id") { + id: ID! 
+ a: Int + } + "#, + ); + assert_plan!( + &planner, + r#" + { + i { + t { + relatedIs { + a + } + } + } + } + "#, + + @r###" + QueryPlan { + Sequence { + Fetch(service: "S2") { + { + i { + __typename + id + } + } + }, + Flatten(path: "i") { + Fetch(service: "S1") { + { + ... on I { + __typename + id + } + } => + { + ... on I { + t { + relatedIs { + __typename + id + } + } + } + } + }, + }, + Flatten(path: "i.t.relatedIs.@") { + Fetch(service: "S2") { + { + ... on I { + __typename + id + } + } => + { + ... on I { + __typename + a + } + } + }, + }, + }, + } + "### + ); +} diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/named_fragments.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/named_fragments.rs deleted file mode 100644 index 959069588c..0000000000 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/named_fragments.rs +++ /dev/null @@ -1,563 +0,0 @@ -use apollo_federation::query_plan::query_planner::QueryPlannerConfig; - -fn reuse_fragments_config() -> QueryPlannerConfig { - QueryPlannerConfig { - reuse_query_fragments: true, - ..Default::default() - } -} - -#[test] -fn handles_mix_of_fragments_indirection_and_unions() { - let planner = planner!( - config = reuse_fragments_config(), - Subgraph1: r#" - type Query { - parent: Parent - } - - union CatOrPerson = Cat | Parent | Child - - type Parent { - childs: [Child] - } - - type Child { - id: ID! - } - - type Cat { - name: String - } - "#, - ); - assert_plan!( - &planner, - r#" - query { - parent { - ...F_indirection1_parent - } - } - - fragment F_indirection1_parent on Parent { - ...F_indirection2_catOrPerson - } - - fragment F_indirection2_catOrPerson on CatOrPerson { - ...F_catOrPerson - } - - fragment F_catOrPerson on CatOrPerson { - __typename - ... on Cat { - name - } - ... on Parent { - childs { - __typename - id - } - } - } - "#, - @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - parent { - __typename - childs { - __typename - id - } - } - } - }, - } - "### - ); -} - -#[test] -fn another_mix_of_fragments_indirection_and_unions() { - // This tests that the issue reported on https://github.com/apollographql/router/issues/3172 is resolved. - - let planner = planner!( - config = reuse_fragments_config(), - Subgraph1: r#" - type Query { - owner: Owner! - } - - interface OItf { - id: ID! - v0: String! - } - - type Owner implements OItf { - id: ID! - v0: String! - u: [U] - } - - union U = T1 | T2 - - interface I { - id1: ID! - id2: ID! - } - - type T1 implements I { - id1: ID! - id2: ID! - owner: Owner! - } - - type T2 implements I { - id1: ID! - id2: ID! - } - "#, - ); - assert_plan!( - &planner, - r#" - { - owner { - u { - ... on I { - id1 - id2 - } - ...Fragment1 - ...Fragment2 - } - } - } - - fragment Fragment1 on T1 { - owner { - ... on Owner { - ...Fragment3 - } - } - } - - fragment Fragment2 on T2 { - ...Fragment4 - id1 - } - - fragment Fragment3 on OItf { - v0 - } - - fragment Fragment4 on I { - id1 - id2 - __typename - } - "#, - @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - owner { - u { - __typename - ...Fragment4 - ... on T1 { - owner { - v0 - } - } - ... on T2 { - ...Fragment4 - } - } - } - } - - fragment Fragment4 on I { - __typename - id1 - id2 - } - }, - } - "### - ); - - assert_plan!( - &planner, - r#" - { - owner { - u { - ... on I { - id1 - id2 - } - ...Fragment1 - ...Fragment2 - } - } - } - - fragment Fragment1 on T1 { - owner { - ... 
on Owner { - ...Fragment3 - } - } - } - - fragment Fragment2 on T2 { - ...Fragment4 - id1 - } - - fragment Fragment3 on OItf { - v0 - } - - fragment Fragment4 on I { - id1 - id2 - } - "#, - @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - owner { - u { - __typename - ... on I { - __typename - ...Fragment4 - } - ... on T1 { - owner { - v0 - } - } - ... on T2 { - ...Fragment4 - } - } - } - } - - fragment Fragment4 on I { - id1 - id2 - } - }, - } - "### - ); -} - -#[test] -fn handles_fragments_with_interface_field_subtyping() { - let planner = planner!( - config = reuse_fragments_config(), - Subgraph1: r#" - type Query { - t1: T1! - } - - interface I { - id: ID! - other: I! - } - - type T1 implements I { - id: ID! - other: T1! - } - - type T2 implements I { - id: ID! - other: T2! - } - "#, - ); - assert_plan!( - &planner, - r#" - { - t1 { - ...Fragment1 - } - } - - fragment Fragment1 on I { - other { - ... on T1 { - id - } - ... on T2 { - id - } - } - } - "#, - @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t1 { - other { - __typename - id - } - } - } - }, - } - "### - ); -} - -#[test] -fn can_reuse_fragments_in_subgraph_where_they_only_partially_apply_in_root_fetch() { - let planner = planner!( - config = reuse_fragments_config(), - Subgraph1: r#" - type Query { - t1: T - t2: T - } - - type T @key(fields: "id") { - id: ID! - v0: Int - v1: Int - v2: Int - } - "#, - Subgraph2: r#" - type T @key(fields: "id") { - id: ID! - v3: Int - } - "#, - ); - assert_plan!( - &planner, - r#" - { - t1 { - ...allTFields - } - t2 { - ...allTFields - } - } - - fragment allTFields on T { - v0 - v1 - v2 - v3 - } - "#, - @r###" - QueryPlan { - Sequence { - Fetch(service: "Subgraph1") { - { - t1 { - __typename - ...allTFields - id - } - t2 { - __typename - ...allTFields - id - } - } - - fragment allTFields on T { - v0 - v1 - v2 - } - }, - Parallel { - Flatten(path: "t2") { - Fetch(service: "Subgraph2") { - { - ... on T { - __typename - id - } - } => - { - ... on T { - v3 - } - } - }, - }, - Flatten(path: "t1") { - Fetch(service: "Subgraph2") { - { - ... on T { - __typename - id - } - } => - { - ... on T { - v3 - } - } - }, - }, - }, - }, - } - "### - ); -} - -#[test] -fn can_reuse_fragments_in_subgraph_where_they_only_partially_apply_in_entity_fetch() { - let planner = planner!( - config = reuse_fragments_config(), - Subgraph1: r#" - type Query { - t: T - } - - type T @key(fields: "id") { - id: ID! - } - "#, - Subgraph2: r#" - type T @key(fields: "id") { - id: ID! - u1: U - u2: U - } - - type U @key(fields: "id") { - id: ID! - v0: Int - v1: Int - } - "#, - Subgraph3: r#" - type U @key(fields: "id") { - id: ID! - v2: Int - v3: Int - } - "#, - ); - - assert_plan!( - &planner, - r#" - { - t { - u1 { - ...allUFields - } - u2 { - ...allUFields - } - } - } - - fragment allUFields on U { - v0 - v1 - v2 - v3 - } - "#, - @r###" - QueryPlan { - Sequence { - Fetch(service: "Subgraph1") { - { - t { - __typename - id - } - } - }, - Flatten(path: "t") { - Fetch(service: "Subgraph2") { - { - ... on T { - __typename - id - } - } => - { - ... on T { - u1 { - __typename - ...allUFields - id - } - u2 { - __typename - ...allUFields - id - } - } - } - - fragment allUFields on U { - v0 - v1 - } - }, - }, - Parallel { - Flatten(path: "t.u2") { - Fetch(service: "Subgraph3") { - { - ... on U { - __typename - id - } - } => - { - ... on U { - v2 - v3 - } - } - }, - }, - Flatten(path: "t.u1") { - Fetch(service: "Subgraph3") { - { - ... on U { - __typename - id - } - } => - { - ... 
on U { - v2 - v3 - } - } - }, - }, - }, - }, - } - "### - ); -} diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/named_fragments_expansion.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/named_fragments_expansion.rs new file mode 100644 index 0000000000..5b68d3e059 --- /dev/null +++ b/apollo-federation/tests/query_plan/build_query_plan_tests/named_fragments_expansion.rs @@ -0,0 +1,369 @@ +#[test] +fn handles_mix_of_fragments_indirection_and_unions() { + let planner = planner!( + Subgraph1: r#" + type Query { + parent: Parent + } + + union CatOrPerson = Cat | Parent | Child + + type Parent { + childs: [Child] + } + + type Child { + id: ID! + } + + type Cat { + name: String + } + "#, + ); + assert_plan!( + &planner, + r#" + query { + parent { + ...F_indirection1_parent + } + } + + fragment F_indirection1_parent on Parent { + ...F_indirection2_catOrPerson + } + + fragment F_indirection2_catOrPerson on CatOrPerson { + ...F_catOrPerson + } + + fragment F_catOrPerson on CatOrPerson { + __typename + ... on Cat { + name + } + ... on Parent { + childs { + __typename + id + } + } + } + "#, + @r###" + QueryPlan { + Fetch(service: "Subgraph1") { + { + parent { + __typename + childs { + __typename + id + } + } + } + }, + } + "### + ); +} + +#[test] +fn handles_fragments_with_interface_field_subtyping() { + let planner = planner!( + Subgraph1: r#" + type Query { + t1: T1! + } + + interface I { + id: ID! + other: I! + } + + type T1 implements I { + id: ID! + other: T1! + } + + type T2 implements I { + id: ID! + other: T2! + } + "#, + ); + + assert_plan!( + &planner, + r#" + { + t1 { + ...Fragment1 + } + } + + fragment Fragment1 on I { + other { + ... on T1 { + id + } + ... on T2 { + id + } + } + } + "#, + @r###" + QueryPlan { + Fetch(service: "Subgraph1") { + { + t1 { + other { + id + } + } + } + }, + } + "### + ); +} + +#[test] +fn it_preserves_directives() { + // (because used only once) + let planner = planner!( + Subgraph1: r#" + type Query { + t: T + } + + type T @key(fields: "id") { + id: ID! + a: Int + b: Int + } + "#, + ); + assert_plan!( + &planner, + r#" + query test($if: Boolean!) { + t { + id + ...OnT @include(if: $if) + } + } + + fragment OnT on T { + a + b + } + "#, + @r###" + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + id + ... on T @include(if: $if) { + a + b + } + } + } + }, + } + "### + ); +} + +#[test] +fn it_preserves_directives_when_fragment_is_reused() { + let planner = planner!( + Subgraph1: r#" + type Query { + t: T + } + + type T @key(fields: "id") { + id: ID! + a: Int + b: Int + } + "#, + ); + assert_plan!( + &planner, + r#" + query test($test1: Boolean!, $test2: Boolean!) { + t { + id + ...OnT @include(if: $test1) + ...OnT @include(if: $test2) + } + } + + fragment OnT on T { + a + b + } + "#, + @r###" + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + id + ... on T @include(if: $test1) { + a + b + } + ... on T @include(if: $test2) { + a + b + } + } + } + }, + } + "### + ); +} + +#[test] +fn it_preserves_directives_on_collapsed_fragments() { + let planner = planner!( + Subgraph1: r#" + type Query { + t: T + } + + type T { + id: ID! + t1: V + t2: V + } + + type V { + v1: Int + v2: Int + } + "#, + ); + assert_plan!( + &planner, + r#" + query($test: Boolean!) 
{ + t { + ...OnT + } + } + + fragment OnT on T { + id + ...OnTInner @include(if: $test) + } + + fragment OnTInner on T { + t1 { + ...OnV + } + t2 { + ...OnV + } + } + + fragment OnV on V { + v1 + v2 + } + "#, + @r###" + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + id + ... on T @include(if: $test) { + t1 { + v1 + v2 + } + t2 { + v1 + v2 + } + } + } + } + }, + } + "### + ); +} + +#[test] +fn it_expands_nested_fragments() { + let planner = planner!( + Subgraph1: r#" + type Query { + t: T + } + + type T @key(fields: "id") { + id: ID! + a: V + b: V + } + + type V { + v1: Int + v2: Int + } + "#, + ); + assert_plan!( + &planner, + r#" + { + t { + ...OnT + } + } + + fragment OnT on T { + a { + ...OnV + } + b { + ...OnV + } + } + + fragment OnV on V { + v1 + v2 + } + "#, + @r###" + QueryPlan { + Fetch(service: "Subgraph1") { + { + t { + a { + v1 + v2 + } + b { + v1 + v2 + } + } + } + }, + } + "### + ); +} diff --git a/apollo-federation/tests/query_plan/build_query_plan_tests/named_fragments_preservation.rs b/apollo-federation/tests/query_plan/build_query_plan_tests/named_fragments_preservation.rs deleted file mode 100644 index da90c3edb8..0000000000 --- a/apollo-federation/tests/query_plan/build_query_plan_tests/named_fragments_preservation.rs +++ /dev/null @@ -1,1384 +0,0 @@ -use apollo_federation::query_plan::query_planner::QueryPlannerConfig; - -fn reuse_fragments_config() -> QueryPlannerConfig { - QueryPlannerConfig { - reuse_query_fragments: true, - ..Default::default() - } -} - -#[test] -fn it_works_with_nested_fragments_1() { - let planner = planner!( - config = reuse_fragments_config(), - Subgraph1: r#" - type Query { - a: Anything - } - - union Anything = A1 | A2 | A3 - - interface Foo { - foo: String - child: Foo - child2: Foo - } - - type A1 implements Foo { - foo: String - child: Foo - child2: Foo - } - - type A2 implements Foo { - foo: String - child: Foo - child2: Foo - } - - type A3 implements Foo { - foo: String - child: Foo - child2: Foo - } - "#, - ); - assert_plan!( - &planner, - r#" - query { - a { - ... on A1 { - ...FooSelect - } - ... on A2 { - ...FooSelect - } - ... on A3 { - ...FooSelect - } - } - } - - fragment FooSelect on Foo { - __typename - foo - child { - ...FooChildSelect - } - child2 { - ...FooChildSelect - } - } - - fragment FooChildSelect on Foo { - __typename - foo - child { - child { - child { - foo - } - } - } - } - "#, - @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - a { - __typename - ... on A1 { - ...FooSelect - } - ... on A2 { - ...FooSelect - } - ... on A3 { - ...FooSelect - } - } - } - - fragment FooChildSelect on Foo { - __typename - foo - child { - __typename - child { - __typename - child { - __typename - foo - } - } - } - } - - fragment FooSelect on Foo { - __typename - foo - child { - ...FooChildSelect - } - child2 { - ...FooChildSelect - } - } - }, - } - "### - ); -} - -#[test] -fn it_avoid_fragments_usable_only_once() { - let planner = planner!( - config = reuse_fragments_config(), - Subgraph1: r#" - type Query { - t: T - } - - type T @key(fields: "id") { - id: ID! - v1: V - } - - type V @shareable { - a: Int - b: Int - c: Int - } - "#, - Subgraph2: r#" - type T @key(fields: "id") { - id: ID! - v2: V - v3: V - } - - type V @shareable { - a: Int - b: Int - c: Int - } - "#, - ); - - // We use a fragment which does save some on the original query, but as each - // field gets to a different subgraph, the fragment would only be used one - // on each sub-fetch and we make sure the fragment is not used in that case. 
- assert_plan!( - &planner, - r#" - query { - t { - v1 { - ...OnV - } - v2 { - ...OnV - } - } - } - - fragment OnV on V { - a - b - c - } - "#, - @r###" - QueryPlan { - Sequence { - Fetch(service: "Subgraph1") { - { - t { - __typename - id - v1 { - a - b - c - } - } - } - }, - Flatten(path: "t") { - Fetch(service: "Subgraph2") { - { - ... on T { - __typename - id - } - } => - { - ... on T { - v2 { - a - b - c - } - } - } - }, - }, - }, - } - "### - ); - - // But double-check that if we query 2 fields from the same subgraph, then - // the fragment gets used now. - assert_plan!( - &planner, - r#" - query { - t { - v2 { - ...OnV - } - v3 { - ...OnV - } - } - } - - fragment OnV on V { - a - b - c - } - "#, - @r###" - QueryPlan { - Sequence { - Fetch(service: "Subgraph1") { - { - t { - __typename - id - } - } - }, - Flatten(path: "t") { - Fetch(service: "Subgraph2") { - { - ... on T { - __typename - id - } - } => - { - ... on T { - v2 { - ...OnV - } - v3 { - ...OnV - } - } - } - - fragment OnV on V { - a - b - c - } - }, - }, - }, - } - "### - ); -} - -mod respects_query_planner_option_reuse_query_fragments { - use super::*; - - const SUBGRAPH1: &str = r#" - type Query { - t: T - } - - type T { - a1: A - a2: A - } - - type A { - x: Int - y: Int - } - "#; - const QUERY: &str = r#" - query { - t { - a1 { - ...Selection - } - a2 { - ...Selection - } - } - } - - fragment Selection on A { - x - y - } - "#; - - #[test] - fn respects_query_planner_option_reuse_query_fragments_true() { - let planner = planner!( - config = reuse_fragments_config(), - Subgraph1: SUBGRAPH1, - ); - let query = QUERY; - - assert_plan!( - &planner, - query, - @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - a1 { - ...Selection - } - a2 { - ...Selection - } - } - } - - fragment Selection on A { - x - y - } - }, - } - "### - ); - } - - #[test] - fn respects_query_planner_option_reuse_query_fragments_false() { - let reuse_query_fragments = false; - let planner = planner!( - config = QueryPlannerConfig {reuse_query_fragments, ..Default::default()}, - Subgraph1: SUBGRAPH1, - ); - let query = QUERY; - - assert_plan!( - &planner, - query, - @r#" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - a1 { - x - y - } - a2 { - x - y - } - } - } - }, - } - "# - ); - } -} - -#[test] -fn it_works_with_nested_fragments_when_only_the_nested_fragment_gets_preserved() { - let planner = planner!( - config = reuse_fragments_config(), - Subgraph1: r#" - type Query { - t: T - } - - type T @key(fields: "id") { - id: ID! - a: V - b: V - } - - type V { - v1: Int - v2: Int - } - "#, - ); - assert_plan!( - &planner, - r#" - { - t { - ...OnT - } - } - - fragment OnT on T { - a { - ...OnV - } - b { - ...OnV - } - } - - fragment OnV on V { - v1 - v2 - } - "#, - @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - a { - ...OnV - } - b { - ...OnV - } - } - } - - fragment OnV on V { - v1 - v2 - } - }, - } - "### - ); -} - -#[test] -fn it_preserves_directives_when_fragment_not_used() { - // (because used only once) - let planner = planner!( - config = reuse_fragments_config(), - Subgraph1: r#" - type Query { - t: T - } - - type T @key(fields: "id") { - id: ID! - a: Int - b: Int - } - "#, - ); - assert_plan!( - &planner, - r#" - query test($if: Boolean!) { - t { - id - ...OnT @include(if: $if) - } - } - - fragment OnT on T { - a - b - } - "#, - @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - id - ... 
on T @include(if: $if) { - a - b - } - } - } - }, - } - "### - ); -} - -#[test] -fn it_preserves_directives_when_fragment_is_reused() { - let planner = planner!( - config = reuse_fragments_config(), - Subgraph1: r#" - type Query { - t: T - } - - type T @key(fields: "id") { - id: ID! - a: Int - b: Int - } - "#, - ); - assert_plan!( - &planner, - r#" - query test($test1: Boolean!, $test2: Boolean!) { - t { - id - ...OnT @include(if: $test1) - ...OnT @include(if: $test2) - } - } - - fragment OnT on T { - a - b - } - "#, - @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - id - ...OnT @include(if: $test1) - ...OnT @include(if: $test2) - } - } - - fragment OnT on T { - a - b - } - }, - } - "### - ); -} - -#[test] -fn it_does_not_try_to_apply_fragments_that_are_not_valid_for_the_subgraph() { - // Slightly artificial example for simplicity, but this highlight the problem. - // In that example, the only queried subgraph is the first one (there is in fact - // no way to ever reach the 2nd one), so the plan should mostly simply forward - // the query to the 1st subgraph, but a subtlety is that the named fragment used - // in the query is *not* valid for Subgraph1, because it queries `b` on `I`, but - // there is no `I.b` in Subgraph1. - // So including the named fragment in the fetch would be erroneous: the subgraph - // server would reject it when validating the query, and we must make sure it - // is not reused. - let planner = planner!( - config = reuse_fragments_config(), - Subgraph1: r#" - type Query { - i1: I - i2: I - } - - interface I { - a: Int - } - - type T implements I { - a: Int - b: Int - } - "#, - Subgraph2: r#" - interface I { - a: Int - b: Int - } - "#, - ); - assert_plan!( - &planner, - r#" - query { - i1 { - ... on T { - ...Frag - } - } - i2 { - ... on T { - ...Frag - } - } - } - - fragment Frag on I { - b - } - "#, - @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - i1 { - __typename - ... on T { - b - } - } - i2 { - __typename - ... on T { - b - } - } - } - }, - } - "### - ); -} - -#[test] -fn it_handles_fragment_rebasing_in_a_subgraph_where_some_subtyping_relation_differs() { - // This test is designed such that type `Outer` implements the interface `I` in `Subgraph1` - // but not in `Subgraph2`, yet `I` exists in `Subgraph2` (but only `Inner` implements it - // there). Further, the operations we test have a fragment on I (`IFrag` below) that is - // used "in the context of `Outer`" (at the top-level of fragment `OuterFrag`). - // - // What this all means is that `IFrag` can be rebased in `Subgraph2` "as is" because `I` - // exists there with all its fields, but as we rebase `OuterFrag` on `Subgraph2`, we - // cannot use `...IFrag` inside it (at the top-level), because `I` and `Outer` do - // no intersect in `Subgraph2` and this would be an invalid selection. - // - // Previous versions of the code were not handling this case and were error out by - // creating the invalid selection (#2721), and this test ensures this is fixed. - let planner = planner!( - config = reuse_fragments_config(), - Subgraph1: r#" - type V @shareable { - x: Int - } - - interface I { - v: V - } - - type Outer implements I @key(fields: "id") { - id: ID! - v: V - } - "#, - Subgraph2: r#" - type Query { - outer1: Outer - outer2: Outer - } - - type V @shareable { - x: Int - } - - interface I { - v: V - w: Int - } - - type Inner implements I { - v: V - w: Int - } - - type Outer @key(fields: "id") { - id: ID! 
- inner: Inner - w: Int - } - "#, - ); - assert_plan!( - &planner, - r#" - query { - outer1 { - ...OuterFrag - } - outer2 { - ...OuterFrag - } - } - - fragment OuterFrag on Outer { - ...IFrag - inner { - ...IFrag - } - } - - fragment IFrag on I { - v { - x - } - } - "#, - @r#" - QueryPlan { - Sequence { - Fetch(service: "Subgraph2") { - { - outer1 { - __typename - ...OuterFrag - id - } - outer2 { - __typename - ...OuterFrag - id - } - } - - fragment OuterFrag on Outer { - inner { - v { - x - } - } - } - }, - Parallel { - Flatten(path: "outer2") { - Fetch(service: "Subgraph1") { - { - ... on Outer { - __typename - id - } - } => - { - ... on Outer { - v { - x - } - } - } - }, - }, - Flatten(path: "outer1") { - Fetch(service: "Subgraph1") { - { - ... on Outer { - __typename - id - } - } => - { - ... on Outer { - v { - x - } - } - } - }, - }, - }, - }, - } - "# - ); - - // We very slighly modify the operation to add an artificial indirection within `IFrag`. - // This does not really change the query, and should result in the same plan, but - // ensure the code handle correctly such indirection. - assert_plan!( - &planner, - r#" - query { - outer1 { - ...OuterFrag - } - outer2 { - ...OuterFrag - } - } - - fragment OuterFrag on Outer { - ...IFrag - inner { - ...IFrag - } - } - - fragment IFrag on I { - ...IFragDelegate - } - - fragment IFragDelegate on I { - v { - x - } - } - "#, - @r#" - QueryPlan { - Sequence { - Fetch(service: "Subgraph2") { - { - outer1 { - __typename - ...OuterFrag - id - } - outer2 { - __typename - ...OuterFrag - id - } - } - - fragment OuterFrag on Outer { - inner { - v { - x - } - } - } - }, - Parallel { - Flatten(path: "outer2") { - Fetch(service: "Subgraph1") { - { - ... on Outer { - __typename - id - } - } => - { - ... on Outer { - v { - x - } - } - } - }, - }, - Flatten(path: "outer1") { - Fetch(service: "Subgraph1") { - { - ... on Outer { - __typename - id - } - } => - { - ... on Outer { - v { - x - } - } - } - }, - }, - }, - }, - } - "# - ); - - // The previous cases tests the cases where nothing in the `...IFrag` spread at the - // top-level of `OuterFrag` applied at all: it all gets eliminated in the plan. But - // in the schema of `Subgraph2`, while `Outer` does not implement `I` (and does not - // have `v` in particular), it does contains field `w` that `I` also have, so we - // add that field to `IFrag` and make sure we still correctly query that field. - - assert_plan!( - &planner, - r#" - query { - outer1 { - ...OuterFrag - } - outer2 { - ...OuterFrag - } - } - - fragment OuterFrag on Outer { - ...IFrag - inner { - ...IFrag - } - } - - fragment IFrag on I { - v { - x - } - w - } - "#, - @r#" - QueryPlan { - Sequence { - Fetch(service: "Subgraph2") { - { - outer1 { - __typename - ...OuterFrag - id - } - outer2 { - __typename - ...OuterFrag - id - } - } - - fragment OuterFrag on Outer { - w - inner { - v { - x - } - w - } - } - }, - Parallel { - Flatten(path: "outer2") { - Fetch(service: "Subgraph1") { - { - ... on Outer { - __typename - id - } - } => - { - ... on Outer { - v { - x - } - } - } - }, - }, - Flatten(path: "outer1") { - Fetch(service: "Subgraph1") { - { - ... on Outer { - __typename - id - } - } => - { - ... on Outer { - v { - x - } - } - } - }, - }, - }, - }, - } - "# - ); -} - -#[test] -fn it_handles_fragment_rebasing_in_a_subgraph_where_some_union_membership_relation_differs() { - // This test is similar to the subtyping case (it tests the same problems), but test the case - // of unions instead of interfaces. 
- let planner = planner!( - config = reuse_fragments_config(), - Subgraph1: r#" - type V @shareable { - x: Int - } - - union U = Outer - - type Outer @key(fields: "id") { - id: ID! - v: Int - } - "#, - Subgraph2: r#" - type Query { - outer1: Outer - outer2: Outer - } - - union U = Inner - - type Inner { - v: Int - w: Int - } - - type Outer @key(fields: "id") { - id: ID! - inner: Inner - w: Int - } - "#, - ); - assert_plan!( - &planner, - r#" - query { - outer1 { - ...OuterFrag - } - outer2 { - ...OuterFrag - } - } - - fragment OuterFrag on Outer { - ...UFrag - inner { - ...UFrag - } - } - - fragment UFrag on U { - ... on Outer { - v - } - ... on Inner { - v - } - } - "#, - @r#" - QueryPlan { - Sequence { - Fetch(service: "Subgraph2") { - { - outer1 { - __typename - ...OuterFrag - id - } - outer2 { - __typename - ...OuterFrag - id - } - } - - fragment OuterFrag on Outer { - inner { - v - } - } - }, - Parallel { - Flatten(path: "outer2") { - Fetch(service: "Subgraph1") { - { - ... on Outer { - __typename - id - } - } => - { - ... on Outer { - v - } - } - }, - }, - Flatten(path: "outer1") { - Fetch(service: "Subgraph1") { - { - ... on Outer { - __typename - id - } - } => - { - ... on Outer { - v - } - } - }, - }, - }, - }, - } - "# - ); - - // We very slighly modify the operation to add an artificial indirection within `IFrag`. - // This does not really change the query, and should result in the same plan, but - // ensure the code handle correctly such indirection. - assert_plan!( - &planner, - r#" - query { - outer1 { - ...OuterFrag - } - outer2 { - ...OuterFrag - } - } - - fragment OuterFrag on Outer { - ...UFrag - inner { - ...UFrag - } - } - - fragment UFrag on U { - ...UFragDelegate - } - - fragment UFragDelegate on U { - ... on Outer { - v - } - ... on Inner { - v - } - } - "#, - @r#" - QueryPlan { - Sequence { - Fetch(service: "Subgraph2") { - { - outer1 { - __typename - ...OuterFrag - id - } - outer2 { - __typename - ...OuterFrag - id - } - } - - fragment OuterFrag on Outer { - inner { - v - } - } - }, - Parallel { - Flatten(path: "outer2") { - Fetch(service: "Subgraph1") { - { - ... on Outer { - __typename - id - } - } => - { - ... on Outer { - v - } - } - }, - }, - Flatten(path: "outer1") { - Fetch(service: "Subgraph1") { - { - ... on Outer { - __typename - id - } - } => - { - ... on Outer { - v - } - } - }, - }, - }, - }, - } - "# - ); - - // The previous cases tests the cases where nothing in the `...IFrag` spread at the - // top-level of `OuterFrag` applied at all: it all gets eliminated in the plan. But - // in the schema of `Subgraph2`, while `Outer` does not implement `I` (and does not - // have `v` in particular), it does contains field `w` that `I` also have, so we - // add that field to `IFrag` and make sure we still correctly query that field. - assert_plan!( - &planner, - r#" - query { - outer1 { - ...OuterFrag - } - outer2 { - ...OuterFrag - } - } - - fragment OuterFrag on Outer { - ...UFrag - inner { - ...UFrag - } - } - - fragment UFrag on U { - ... on Outer { - v - w - } - ... on Inner { - v - } - } - "#, - @r#" - QueryPlan { - Sequence { - Fetch(service: "Subgraph2") { - { - outer1 { - __typename - ...OuterFrag - id - } - outer2 { - __typename - ...OuterFrag - id - } - } - - fragment OuterFrag on Outer { - w - inner { - v - } - } - }, - Parallel { - Flatten(path: "outer2") { - Fetch(service: "Subgraph1") { - { - ... on Outer { - __typename - id - } - } => - { - ... on Outer { - v - } - } - }, - }, - Flatten(path: "outer1") { - Fetch(service: "Subgraph1") { - { - ... 
on Outer { - __typename - id - } - } => - { - ... on Outer { - v - } - } - }, - }, - }, - }, - } - "# - ); -} - -#[test] -fn it_preserves_nested_fragments_when_outer_one_has_directives_and_is_eliminated() { - let planner = planner!( - config = reuse_fragments_config(), - Subgraph1: r#" - type Query { - t: T - } - - type T { - id: ID! - t1: V - t2: V - } - - type V { - v1: Int - v2: Int - } - "#, - ); - assert_plan!( - &planner, - r#" - query($test: Boolean!) { - t { - ...OnT @include(if: $test) - } - } - - fragment OnT on T { - t1 { - ...OnV - } - t2 { - ...OnV - } - } - - fragment OnV on V { - v1 - v2 - } - "#, - @r###" - QueryPlan { - Fetch(service: "Subgraph1") { - { - t { - ... on T @include(if: $test) { - t1 { - ...OnV - } - t2 { - ...OnV - } - } - } - } - - fragment OnV on V { - v1 - v2 - } - }, - } - "### - ); -} diff --git a/apollo-federation/tests/query_plan/supergraphs/inefficient_entity_fetches_to_same_subgraph.graphql b/apollo-federation/tests/query_plan/supergraphs/inefficient_entity_fetches_to_same_subgraph.graphql new file mode 100644 index 0000000000..8aaa3f274f --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/inefficient_entity_fetches_to_same_subgraph.graphql @@ -0,0 +1,97 @@ +# Composed from subgraphs with hash: b2221050efb89f6e4df71823675d2ea1fbe66a31 +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) +{ + query: Query +} + +directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +interface I + @join__type(graph: SUBGRAPH1) + @join__type(graph: SUBGRAPH2) +{ + v: V + w: Int @join__field(graph: SUBGRAPH2) +} + +type Inner implements I + @join__implements(graph: SUBGRAPH2, interface: "I") + @join__type(graph: SUBGRAPH2) +{ + v: V + w: Int +} + +input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! +} + +scalar join__DirectiveArguments + +scalar join__FieldSet + +scalar join__FieldValue + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "Subgraph1", url: "none") + SUBGRAPH2 @join__graph(name: "Subgraph2", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. 
+ """ + EXECUTION +} + +type Outer implements I + @join__implements(graph: SUBGRAPH1, interface: "I") + @join__type(graph: SUBGRAPH1, key: "id") + @join__type(graph: SUBGRAPH2, key: "id") +{ + id: ID! + v: V @join__field(graph: SUBGRAPH1) + inner: Inner @join__field(graph: SUBGRAPH2) + w: Int @join__field(graph: SUBGRAPH2) +} + +type Query + @join__type(graph: SUBGRAPH1) + @join__type(graph: SUBGRAPH2) +{ + outer1: Outer @join__field(graph: SUBGRAPH2) + outer2: Outer @join__field(graph: SUBGRAPH2) +} + +type V + @join__type(graph: SUBGRAPH1) + @join__type(graph: SUBGRAPH2) +{ + x: Int +} diff --git a/apollo-federation/tests/query_plan/supergraphs/it_expands_nested_fragments.graphql b/apollo-federation/tests/query_plan/supergraphs/it_expands_nested_fragments.graphql new file mode 100644 index 0000000000..0d1594dcca --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/it_expands_nested_fragments.graphql @@ -0,0 +1,75 @@ +# Composed from subgraphs with hash: af8642bd2cc335a2823e7c95f48ce005d3c809f0 +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) +{ + query: Query +} + +directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! +} + +scalar join__DirectiveArguments + +scalar join__FieldSet + +scalar join__FieldValue + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "Subgraph1", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: SUBGRAPH1) +{ + t: T +} + +type T + @join__type(graph: SUBGRAPH1, key: "id") +{ + id: ID! 
+ a: V + b: V +} + +type V + @join__type(graph: SUBGRAPH1) +{ + v1: Int + v2: Int +} diff --git a/apollo-federation/tests/query_plan/supergraphs/it_handles_nested_fragment_generation_from_operation_with_fragments.graphql b/apollo-federation/tests/query_plan/supergraphs/it_handles_nested_fragment_generation_from_operation_with_fragments.graphql new file mode 100644 index 0000000000..bf45161fb0 --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/it_handles_nested_fragment_generation_from_operation_with_fragments.graphql @@ -0,0 +1,102 @@ +# Composed from subgraphs with hash: 7cb80bbad99a03ca0bb30082bd6f9eb6f7c1beff +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) +{ + query: Query +} + +directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +type A1 implements Foo + @join__implements(graph: SUBGRAPH1, interface: "Foo") + @join__type(graph: SUBGRAPH1) +{ + foo: String + child: Foo + child2: Foo +} + +type A2 implements Foo + @join__implements(graph: SUBGRAPH1, interface: "Foo") + @join__type(graph: SUBGRAPH1) +{ + foo: String + child: Foo + child2: Foo +} + +type A3 implements Foo + @join__implements(graph: SUBGRAPH1, interface: "Foo") + @join__type(graph: SUBGRAPH1) +{ + foo: String + child: Foo + child2: Foo +} + +union Anything + @join__type(graph: SUBGRAPH1) + @join__unionMember(graph: SUBGRAPH1, member: "A1") + @join__unionMember(graph: SUBGRAPH1, member: "A2") + @join__unionMember(graph: SUBGRAPH1, member: "A3") + = A1 | A2 | A3 + +interface Foo + @join__type(graph: SUBGRAPH1) +{ + foo: String + child: Foo + child2: Foo +} + +input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! +} + +scalar join__DirectiveArguments + +scalar join__FieldSet + +scalar join__FieldValue + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "Subgraph1", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. 
+ """ + EXECUTION +} + +type Query + @join__type(graph: SUBGRAPH1) +{ + a: Anything +} diff --git a/apollo-federation/tests/query_plan/supergraphs/it_preserves_directives.graphql b/apollo-federation/tests/query_plan/supergraphs/it_preserves_directives.graphql new file mode 100644 index 0000000000..95316d4353 --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/it_preserves_directives.graphql @@ -0,0 +1,68 @@ +# Composed from subgraphs with hash: 136ac120ab3c0a9b8ea4cb22cb440886a1b4a961 +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) +{ + query: Query +} + +directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! +} + +scalar join__DirectiveArguments + +scalar join__FieldSet + +scalar join__FieldValue + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "Subgraph1", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: SUBGRAPH1) +{ + t: T +} + +type T + @join__type(graph: SUBGRAPH1, key: "id") +{ + id: ID! + a: Int + b: Int +} diff --git a/apollo-federation/tests/query_plan/supergraphs/it_preserves_directives_on_collapsed_fragments.graphql b/apollo-federation/tests/query_plan/supergraphs/it_preserves_directives_on_collapsed_fragments.graphql new file mode 100644 index 0000000000..7b9af26713 --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/it_preserves_directives_on_collapsed_fragments.graphql @@ -0,0 +1,75 @@ +# Composed from subgraphs with hash: fd162a5fc982fc2cd0a8d33e271831822b681137 +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) +{ + query: Query +} + +directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + +directive @join__enumValue(graph: join__Graph!) 
repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! +} + +scalar join__DirectiveArguments + +scalar join__FieldSet + +scalar join__FieldValue + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "Subgraph1", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: SUBGRAPH1) +{ + t: T +} + +type T + @join__type(graph: SUBGRAPH1) +{ + id: ID! + t1: V + t2: V +} + +type V + @join__type(graph: SUBGRAPH1) +{ + v1: Int + v2: Int +} diff --git a/apollo-federation/tests/query_plan/supergraphs/test_type_conditioned_fetching_with_interface_object_does_not_crash.graphql b/apollo-federation/tests/query_plan/supergraphs/test_type_conditioned_fetching_with_interface_object_does_not_crash.graphql new file mode 100644 index 0000000000..4a21fe7ba1 --- /dev/null +++ b/apollo-federation/tests/query_plan/supergraphs/test_type_conditioned_fetching_with_interface_object_does_not_crash.graphql @@ -0,0 +1,86 @@ +# Composed from subgraphs with hash: 161c48cab8f2c97bc5fef235b557994f82dc7e51 +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) +{ + query: Query +} + +directive @join__directive(graphs: [join__Graph!], name: String!, args: join__DirectiveArguments) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean, overrideLabel: String, contextArguments: [join__ContextArgument!]) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + +directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember(graph: join__Graph!, member: String!) 
repeatable on UNION + +directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + +type A implements I + @join__implements(graph: S2, interface: "I") + @join__type(graph: S2, key: "id") +{ + id: ID! + a: Int + t: T @join__field +} + +interface I + @join__type(graph: S1, key: "id", isInterfaceObject: true) + @join__type(graph: S2, key: "id") +{ + id: ID! + t: T @join__field(graph: S1) + a: Int @join__field(graph: S2) +} + +input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! +} + +scalar join__DirectiveArguments + +scalar join__FieldSet + +scalar join__FieldValue + +enum join__Graph { + S1 @join__graph(name: "S1", url: "none") + S2 @join__graph(name: "S2", url: "none") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query + @join__type(graph: S1) + @join__type(graph: S2) +{ + i: I @join__field(graph: S2) +} + +type T + @join__type(graph: S1) +{ + relatedIs: [I] +} diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index 0a0f627142..53757506a1 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.58.1" +version = "1.59.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index a4d9414020..3ce55f07d0 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.58.1" +version = "1.59.0" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.template.toml b/apollo-router-scaffold/templates/base/Cargo.template.toml index 3c6cbb58e9..7e188815b2 100644 --- a/apollo-router-scaffold/templates/base/Cargo.template.toml +++ b/apollo-router-scaffold/templates/base/Cargo.template.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.58.1" +apollo-router = "1.59.0" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml index 2d695c9daf..59e99a9f2e 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.template.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.58.1" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.59.0" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 2cdb3fa8f6..061e24593e 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.58.1" +version = "1.59.0" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" @@ -66,7 +66,7 @@ features = ["docs_rs"] access-json = "0.1.0" anyhow = "1.0.86" apollo-compiler.workspace = true -apollo-federation = { path = "../apollo-federation", version = "=1.58.1" } +apollo-federation = { path = "../apollo-federation", version = "=1.59.0" } arc-swap = "1.6.0" async-channel = "1.9.0" async-compression = { version = "0.4.6", features = [ @@ -212,6 +212,7 @@ serde_yaml = "0.8.26" static_assertions = "1.1.0" strum_macros = "0.26.0" sys-info = "0.9.1" +sysinfo = { version = "0.32.0", features = ["windows"] } thiserror = "1.0.61" tokio.workspace = true tokio-stream = { version = "0.1.15", features = ["sync", "net"] } @@ -240,7 +241,7 @@ tracing = "0.1.40" tracing-core = "0.1.32" tracing-futures = { version = "0.2.5", features = ["futures-03"] } tracing-subscriber = { version = "0.3.18", features = ["env-filter", "json"] } -url = { version = "2.5.2", features = ["serde"] } +url = { version = "2.5.4", features = ["serde"] } urlencoding = "2.1.3" uuid = { version = "1.9.1", features = ["serde", "v4"] } yaml-rust = "0.4.5" @@ -276,6 +277,7 @@ bytesize = { version = "1.3.0", features = ["serde"] } ahash = "0.8.11" itoa = "1.0.9" ryu = "1.0.15" +apollo-environment-detector = "0.1.0" [target.'cfg(macos)'.dependencies] uname = "0.1.1" diff --git a/apollo-router/build/main.rs b/apollo-router/build/main.rs index 763d894df0..b323c668bb 100644 --- a/apollo-router/build/main.rs +++ b/apollo-router/build/main.rs @@ -1,37 +1,5 @@ -use std::fs; -use std::path::PathBuf; - mod studio; fn main() -> Result<(), Box> { - let cargo_manifest: serde_json::Value = basic_toml::from_str( - &fs::read_to_string(PathBuf::from(&env!("CARGO_MANIFEST_DIR")).join("Cargo.toml")) - .expect("could 
not read Cargo.toml"), - ) - .expect("could not parse Cargo.toml"); - - let router_bridge = cargo_manifest - .get("dependencies") - .expect("Cargo.toml does not contain dependencies") - .as_object() - .expect("Cargo.toml dependencies key is not an object") - .get("router-bridge") - .expect("Cargo.toml dependencies does not have an entry for router-bridge"); - let router_bridge_version = router_bridge - .as_str() - .or_else(|| { - router_bridge - .as_object() - .and_then(|o| o.get("version")) - .and_then(|version| version.as_str()) - }) - .expect("router-bridge does not have a version"); - - let mut it = router_bridge_version.split('+'); - let _ = it.next(); - let fed_version = it.next().expect("invalid router-bridge version format"); - - println!("cargo:rustc-env=FEDERATION_VERSION={fed_version}"); - studio::main() } diff --git a/apollo-router/src/batching.rs b/apollo-router/src/batching.rs index a66aca8d87..daa7884d99 100644 --- a/apollo-router/src/batching.rs +++ b/apollo-router/src/batching.rs @@ -741,7 +741,7 @@ mod tests { // Extract info about this operation let (subgraph, count): (String, usize) = { - let re = regex::Regex::new(r"entry([AB])\(count:([0-9]+)\)").unwrap(); + let re = regex::Regex::new(r"entry([AB])\(count: ?([0-9]+)\)").unwrap(); let captures = re.captures(requests[0].query.as_ref().unwrap()).unwrap(); (captures[1].to_string(), captures[2].parse().unwrap()) @@ -757,7 +757,7 @@ mod tests { assert_eq!( request.query, Some(format!( - "query op{index}__{}__0{{entry{}(count:{count}){{index}}}}", + "query op{index}__{}__0 {{ entry{}(count: {count}) {{ index }} }}", subgraph.to_lowercase(), subgraph )) diff --git a/apollo-router/src/cache/storage.rs b/apollo-router/src/cache/storage.rs index 15f452ed28..4408d24c5c 100644 --- a/apollo-router/src/cache/storage.rs +++ b/apollo-router/src/cache/storage.rs @@ -170,10 +170,12 @@ where match res { Some(v) => { - tracing::info!( - monotonic_counter.apollo_router_cache_hit_count = 1u64, - kind = %self.caller, - storage = &tracing::field::display(CacheStorageName::Memory), + u64_counter!( + "apollo_router_cache_hit_count", + "Number of cache hits", + 1, + kind = self.caller, + storage = CacheStorageName::Memory.to_string() ); let duration = instant_memory.elapsed().as_secs_f64(); tracing::info!( @@ -190,10 +192,12 @@ where kind = %self.caller, storage = &tracing::field::display(CacheStorageName::Memory), ); - tracing::info!( - monotonic_counter.apollo_router_cache_miss_count = 1u64, - kind = %self.caller, - storage = &tracing::field::display(CacheStorageName::Memory), + u64_counter!( + "apollo_router_cache_miss_count", + "Number of cache misses", + 1, + kind = self.caller, + storage = CacheStorageName::Memory.to_string() ); let instant_redis = Instant::now(); @@ -214,10 +218,12 @@ where Some(v) => { self.insert_in_memory(key.clone(), v.0.clone()).await; - tracing::info!( - monotonic_counter.apollo_router_cache_hit_count = 1u64, - kind = %self.caller, - storage = &tracing::field::display(CacheStorageName::Redis), + u64_counter!( + "apollo_router_cache_hit_count", + "Number of cache hits", + 1, + kind = self.caller, + storage = CacheStorageName::Redis.to_string() ); let duration = instant_redis.elapsed().as_secs_f64(); tracing::info!( @@ -228,10 +234,12 @@ where Some(v.0) } None => { - tracing::info!( - monotonic_counter.apollo_router_cache_miss_count = 1u64, - kind = %self.caller, - storage = &tracing::field::display(CacheStorageName::Redis), + u64_counter!( + "apollo_router_cache_miss_count", + "Number of cache misses", + 1, + kind = 
self.caller, + storage = CacheStorageName::Redis.to_string() ); let duration = instant_redis.elapsed().as_secs_f64(); tracing::info!( diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs index 6ea8121a69..7241950fd7 100644 --- a/apollo-router/src/configuration/metrics.rs +++ b/apollo-router/src/configuration/metrics.rs @@ -565,6 +565,7 @@ impl InstrumentData { super::QueryPlannerMode::Both => "both", super::QueryPlannerMode::BothBestEffort => "both_best_effort", super::QueryPlannerMode::New => "new", + super::QueryPlannerMode::NewBestEffort => "new_best_effort", }; self.data.insert( diff --git a/apollo-router/src/configuration/migrations/0031-reuse-query-fragments.yaml b/apollo-router/src/configuration/migrations/0031-reuse-query-fragments.yaml new file mode 100644 index 0000000000..97ea457053 --- /dev/null +++ b/apollo-router/src/configuration/migrations/0031-reuse-query-fragments.yaml @@ -0,0 +1,6 @@ +description: supergraph.experimental_reuse_query_fragments is deprecated +actions: + - type: log + level: warn + path: supergraph.experimental_reuse_query_fragments + log: "'supergraph.experimental_reuse_query_fragments' is not supported by the native query planner and this configuration option will be removed in the next release. Use 'supergraph.generate_query_fragments' instead." \ No newline at end of file diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index 50371fd5dd..8bdc97b52a 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -224,8 +224,11 @@ pub(crate) enum QueryPlannerMode { /// Falls back to `legacy` with a warning /// if the new planner does not support the schema /// (such as using legacy Apollo Federation 1) - #[default] BothBestEffort, + /// Use the new Rust-based implementation but fall back to the legacy one + /// for supergraph schemas composed with legacy Apollo Federation 1. + #[default] + NewBestEffort, } impl<'de> serde::Deserialize<'de> for Configuration { @@ -430,7 +433,6 @@ impl Configuration { .unwrap_or(NonZeroU32::new(10_000).expect("it is not zero")); QueryPlannerConfig { - reuse_query_fragments: self.supergraph.reuse_query_fragments.unwrap_or(true), subgraph_graphql_validation: false, generate_query_fragments: self.supergraph.generate_query_fragments, incremental_delivery: QueryPlanIncrementalDeliveryConfig { }, type_conditioned_fetching: self.experimental_type_conditioned_fetching, debug: QueryPlannerDebugConfig { - bypass_planner_for_single_subgraph: false, max_evaluated_plans, paths_limit: self.supergraph.query_planning.experimental_paths_limit, }, @@ -700,7 +701,8 @@ pub(crate) struct Supergraph { pub(crate) introspection: bool, /// Enable reuse of query fragments - /// Default: depends on the federation version + /// This feature is deprecated and will be removed in the next release. + /// The config can only be set when the legacy query planner is explicitly enabled.
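+ /// A minimal sketch of the replacement (per the 0031 migration above, which points to `supergraph.generate_query_fragments`): + /// ```yaml + /// supergraph: + ///   generate_query_fragments: true + /// ```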
#[serde(rename = "experimental_reuse_query_fragments")] pub(crate) reuse_query_fragments: Option<bool>, @@ -781,7 +783,8 @@ impl Supergraph { Some(false) } else { reuse_query_fragments } ), - generate_query_fragments: generate_query_fragments.unwrap_or_else(default_generate_query_fragments), + generate_query_fragments: generate_query_fragments + .unwrap_or_else(default_generate_query_fragments), early_cancel: early_cancel.unwrap_or_default(), experimental_log_on_broken_pipe: experimental_log_on_broken_pipe.unwrap_or_default(), } @@ -818,7 +821,8 @@ impl Supergraph { Some(false) } else { reuse_query_fragments } ), - generate_query_fragments: generate_query_fragments.unwrap_or_else(default_generate_query_fragments), + generate_query_fragments: generate_query_fragments + .unwrap_or_else(default_generate_query_fragments), early_cancel: early_cancel.unwrap_or_default(), experimental_log_on_broken_pipe: experimental_log_on_broken_pipe.unwrap_or_default(), } diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__experimental_mode_metrics_2.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__experimental_mode_metrics_2.snap index 43cb1b8568..e976e8391d 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__experimental_mode_metrics_2.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__experimental_mode_metrics_2.snap @@ -7,4 +7,4 @@ expression: "&metrics.non_zero()" datapoints: - value: 1 attributes: - mode: both_best_effort + mode: new_best_effort diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 0dc3c12eab..4ae8457f17 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -1,7 +1,6 @@ --- source: apollo-router/src/configuration/tests.rs expression: "&schema" -snapshot_kind: text --- { "$schema": "http://json-schema.org/draft-07/schema#", @@ -1267,6 +1266,9 @@ snapshot_kind: text "Conf5": { + "type": "object" + }, + "Conf6": { "anyOf": [ { "additionalProperties": { @@ -1278,7 +1280,7 @@ snapshot_kind: text ], "description": "Subgraph URL mappings" }, - "Conf6": { + "Conf7": { "additionalProperties": false, "description": "Configuration for the Rhai Plugin", "properties": { @@ -1295,7 +1297,7 @@ snapshot_kind: text }, "type": "object" }, - "Conf7": { + "Conf8": { "additionalProperties": false, "description": "Telemetry configuration", "properties": { @@ -1745,7 +1747,7 @@ snapshot_kind: text }, "client_name_header": { "default": "apollographql-client-name", - "description": "The name of the header to extract from requests when populating 'client nane' for traces and metrics in Apollo Studio.", + "description": "The name of the header to extract from requests when populating 'client name' for traces and metrics in Apollo Studio.", "nullable": true, "type": "string" }, @@ -4481,6 +4483,13 @@ snapshot_kind: text "both_best_effort" ], "type": "string" + }, + { + "description": "Use the new Rust-based implementation but fall back to the legacy one for supergraph schemas composed with legacy Apollo Federation 1.", + "enum": [ + "new_best_effort" + ], + "type": "string" } ] },
@@ -5741,6 +5750,11 @@ snapshot_kind: text "SubgraphAttributes": { "additionalProperties": false, "properties": { + "http.request.resend_count": { + "$ref": "#/definitions/StandardAttribute", + "description": "#/definitions/StandardAttribute", + "nullable": true + }, "subgraph.graphql.document": { "$ref": "#/definitions/StandardAttribute", "description": "#/definitions/StandardAttribute", @@ -6235,6 +6249,24 @@ snapshot_kind: text ], "type": "object" }, + { + "additionalProperties": false, + "properties": { + "default": { + "$ref": "#/definitions/AttributeValue", + "description": "#/definitions/AttributeValue", + "nullable": true + }, + "subgraph_resend_count": { + "description": "The subgraph http resend count", + "type": "boolean" + } + }, + "required": [ + "subgraph_resend_count" + ], + "type": "object" + }, { "additionalProperties": false, "properties": { @@ -6619,7 +6651,7 @@ snapshot_kind: text }, "experimental_reuse_query_fragments": { "default": null, - "description": "Enable reuse of query fragments Default: depends on the federation version", + "description": "Enable reuse of query fragments This feature is deprecated and will be removed in the next release. The config can only be set when the legacy query planner is explicitly enabled.", "nullable": true, "type": "boolean" }, @@ -7753,6 +7785,11 @@ snapshot_kind: text "description": "#/definitions/conditional_attribute_apollo_router::plugins::telemetry::config_new::selectors::SubgraphSelector" }, "properties": { + "http.request.resend_count": { + "$ref": "#/definitions/StandardAttribute", + "description": "#/definitions/StandardAttribute", + "nullable": true + }, "subgraph.graphql.document": { "$ref": "#/definitions/StandardAttribute", "description": "#/definitions/StandardAttribute", @@ -7782,6 +7819,11 @@ snapshot_kind: text "description": "#/definitions/SubgraphSelector" }, "properties": { + "http.request.resend_count": { + "$ref": "#/definitions/StandardAttribute", + "description": "#/definitions/StandardAttribute", + "nullable": true + }, "subgraph.graphql.document": { "$ref": "#/definitions/StandardAttribute", "description": "#/definitions/StandardAttribute", @@ -8343,6 +8385,10 @@ snapshot_kind: text "description": "Type conditioned fetching configuration.", "type": "boolean" }, + "fleet_detector": { + "$ref": "#/definitions/Conf5", + "description": "#/definitions/Conf5" + }, "forbid_mutations": { "$ref": "#/definitions/ForbidMutationsConfig", "description": "#/definitions/ForbidMutationsConfig" @@ -8368,8 +8414,8 @@ snapshot_kind: text "description": "#/definitions/Config" }, "override_subgraph_url": { - "$ref": "#/definitions/Conf5", - "description": "#/definitions/Conf5" + "$ref": "#/definitions/Conf6", + "description": "#/definitions/Conf6" }, "persisted_queries": { "$ref": "#/definitions/PersistedQueries", @@ -8392,8 +8438,8 @@ snapshot_kind: text "description": "#/definitions/Config8" }, "rhai": { - "$ref": "#/definitions/Conf6", - "description": "#/definitions/Conf6" + "$ref": "#/definitions/Conf7", + "description": "#/definitions/Conf7" }, "sandbox": { "$ref": "#/definitions/Sandbox", @@ -8408,8 +8454,8 @@ snapshot_kind: text "description": "#/definitions/Supergraph" }, "telemetry": { - "$ref": "#/definitions/Conf7", - "description": "#/definitions/Conf7" + "$ref": "#/definitions/Conf8", + "description": "#/definitions/Conf8" }, "tls": { "$ref": "#/definitions/Tls", diff --git a/apollo-router/src/configuration/tests.rs b/apollo-router/src/configuration/tests.rs index 21cb5fdb50..4a93e496cc 100644 ---
a/apollo-router/src/configuration/tests.rs +++ b/apollo-router/src/configuration/tests.rs @@ -1096,19 +1096,3 @@ fn find_struct_name(lines: &[&str], line_number: usize) -> Option { }) .next() } - -#[test] -fn it_prevents_reuse_and_generate_query_fragments_simultaneously() { - let conf = Configuration::builder() - .supergraph( - Supergraph::builder() - .generate_query_fragments(true) - .reuse_query_fragments(true) - .build(), - ) - .build() - .unwrap(); - - assert!(conf.supergraph.generate_query_fragments); - assert_eq!(conf.supergraph.reuse_query_fragments, Some(false)); -} diff --git a/apollo-router/src/error.rs b/apollo-router/src/error.rs index d78cd86728..850634be2b 100644 --- a/apollo-router/src/error.rs +++ b/apollo-router/src/error.rs @@ -316,8 +316,60 @@ pub(crate) enum QueryPlannerError { PoolProcessing(String), /// Federation error: {0} - // TODO: make `FederationError` serializable and store it as-is? - FederationError(String), + FederationError(FederationErrorBridge), +} + +impl From<FederationErrorBridge> for QueryPlannerError { + fn from(value: FederationErrorBridge) -> Self { + Self::FederationError(value) + } +} + +/// A temporary error type used to extract a few variants from `apollo-federation`'s +/// `FederationError`. For backwards compatibility, these other variants need to be extracted so +/// that the correct status code (GRAPHQL_VALIDATION_FAILED) can be added to the response. For +/// router 2.0, apollo-federation should split its error type into internal and external types. +/// When this happens, this temp type should be replaced with that type. +// TODO(@TylerBloom): See the comment above +#[derive(Error, Debug, Display, Clone, Serialize, Deserialize)] +pub(crate) enum FederationErrorBridge { +/// {0} + UnknownOperation(String), + /// {0} + OperationNameNotProvided(String), + /// {0} + Other(String), +} + +impl From<FederationError> for FederationErrorBridge { + fn from(value: FederationError) -> Self { + match &value { + err @ FederationError::SingleFederationError( + apollo_federation::error::SingleFederationError::UnknownOperation, + ) => Self::UnknownOperation(err.to_string()), + err @ FederationError::SingleFederationError( + apollo_federation::error::SingleFederationError::OperationNameNotProvided, + ) => Self::OperationNameNotProvided(err.to_string()), + err => Self::Other(err.to_string()), + } + } +} + +impl IntoGraphQLErrors for FederationErrorBridge { + fn into_graphql_errors(self) -> Result<Vec<Error>, Self> { + match self { + FederationErrorBridge::UnknownOperation(msg) => Ok(vec![Error::builder() + .message(msg) + .extension_code("GRAPHQL_VALIDATION_FAILED") + .build()]), + FederationErrorBridge::OperationNameNotProvided(msg) => Ok(vec![Error::builder() + .message(msg) + .extension_code("GRAPHQL_VALIDATION_FAILED") + .build()]), + // All other errors will be passed on and treated as internal server errors + err => Err(err), + } + } } impl IntoGraphQLErrors for Vec { @@ -408,6 +460,9 @@ impl IntoGraphQLErrors for QueryPlannerError { ); Ok(errors) } + QueryPlannerError::FederationError(err) => err + .into_graphql_errors() + .map_err(QueryPlannerError::FederationError), err => Err(err), } } @@ -574,7 +629,7 @@ pub(crate) enum SchemaError { /// GraphQL validation error: {0} Validate(ValidationErrors), /// Federation error: {0} - FederationError(apollo_federation::error::FederationError), + FederationError(FederationError), /// Api error(s): {0} #[from(ignore)] Api(String), diff --git a/apollo-router/src/executable.rs b/apollo-router/src/executable.rs index 86bdee162f..afe70ff552 100644 ---
a/apollo-router/src/executable.rs +++ b/apollo-router/src/executable.rs @@ -1,6 +1,5 @@ //! Main entry point for CLI command to start server. -use std::cell::Cell; use std::env; use std::fmt::Debug; use std::net::SocketAddr; @@ -62,6 +61,7 @@ pub(crate) static mut DHAT_HEAP_PROFILER: OnceCell = OnceCell::n pub(crate) static mut DHAT_AD_HOC_PROFILER: OnceCell = OnceCell::new(); pub(crate) const APOLLO_ROUTER_DEV_ENV: &str = "APOLLO_ROUTER_DEV"; +pub(crate) const APOLLO_TELEMETRY_DISABLED: &str = "APOLLO_TELEMETRY_DISABLED"; // Note: Constructor/Destructor functions may not play nicely with tracing, since they run after // main completes, so don't use tracing, use println!() and eprintln!().. @@ -240,7 +240,7 @@ pub struct Opt { apollo_uplink_poll_interval: Duration, /// Disable sending anonymous usage information to Apollo. - #[clap(long, env = "APOLLO_TELEMETRY_DISABLED", value_parser = FalseyValueParser::new())] + #[clap(long, env = APOLLO_TELEMETRY_DISABLED, value_parser = FalseyValueParser::new())] anonymous_telemetry_disabled: bool, /// The timeout for an http call to Apollo uplink. Defaults to 30s. @@ -750,18 +750,11 @@ fn setup_panic_handler() { } else { tracing::error!("{}", e) } - if !USING_CATCH_UNWIND.get() { - // Once we've panic'ed the behaviour of the router is non-deterministic - // We've logged out the panic details. Terminate with an error code - std::process::exit(1); - } - })); -} -// TODO: once the Rust query planner does not use `todo!()` anymore, -// remove this and the use of `catch_unwind` to call it. -thread_local! { - pub(crate) static USING_CATCH_UNWIND: Cell<bool> = const { Cell::new(false) }; + // Once we've panicked, the behaviour of the router is non-deterministic + // We've logged out the panic details. Terminate with an error code + std::process::exit(1); + })); } static COPIED: AtomicBool = AtomicBool::new(false); diff --git a/apollo-router/src/graphql/request.rs b/apollo-router/src/graphql/request.rs index 1e51262dbf..fc572f70cb 100644 --- a/apollo-router/src/graphql/request.rs +++ b/apollo-router/src/graphql/request.rs @@ -202,9 +202,11 @@ impl Request { mode = %BatchingMode::BatchHttpLink // Only supported mode right now ); - tracing::info!( - monotonic_counter.apollo.router.operations.batching = 1u64, - mode = %BatchingMode::BatchHttpLink // Only supported mode right now + u64_counter!( + "apollo.router.operations.batching", + "Total requests with batched operations", + 1, + mode = BatchingMode::BatchHttpLink.to_string() // Only supported mode right now ); for entry in value .as_array() @@ -229,9 +231,11 @@ impl Request { mode = "batch_http_link" // Only supported mode right now ); - tracing::info!( - monotonic_counter.apollo.router.operations.batching = 1u64, - mode = "batch_http_link" // Only supported mode right now + u64_counter!( + "apollo.router.operations.batching", + "Total requests with batched operations", + 1, + mode = BatchingMode::BatchHttpLink.to_string() // Only supported mode right now ); for entry in value .as_array() diff --git a/apollo-router/src/introspection.rs b/apollo-router/src/introspection.rs index b69b03c68b..20098af96b 100644 --- a/apollo-router/src/introspection.rs +++ b/apollo-router/src/introspection.rs @@ -55,10 +55,16 @@ impl IntrospectionCache { ) -> ControlFlow { Self::maybe_lone_root_typename(schema, doc)?; if doc.operation.is_query() { - if doc.has_explicit_root_fields && doc.has_schema_introspection { - ControlFlow::Break(Self::mixed_fields_error())?; + if doc.has_schema_introspection { + if doc.has_explicit_root_fields {
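+ // Mixing schema-introspection fields with explicit root fields in one operation is rejected: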
+ ControlFlow::Break(Self::mixed_fields_error())?; + } else { + ControlFlow::Break(self.cached_introspection(schema, key, doc).await)? + } } else if !doc.has_explicit_root_fields { - ControlFlow::Break(self.cached_introspection(schema, key, doc).await)? + // root __typename only, probably a small query + // Execute it without caching: + ControlFlow::Break(Self::execute_introspection(schema, doc))? } } ControlFlow::Continue(()) diff --git a/apollo-router/src/json_ext.rs b/apollo-router/src/json_ext.rs index 81423deb8b..7fe1f5e3f5 100644 --- a/apollo-router/src/json_ext.rs +++ b/apollo-router/src/json_ext.rs @@ -26,10 +26,26 @@ pub(crate) type Object = Map; const FRAGMENT_PREFIX: &str = "... on "; static TYPE_CONDITIONS_REGEX: Lazy<Regex> = Lazy::new(|| { - Regex::new(r"(?:\|\[)(?<condition>.+?)(?:,\s*|)(?:\])") + Regex::new(r"\|\[(?<condition>.+?)?\]") .expect("this regex to check for type conditions is valid") }); +/// Extract the condition list from the regex captures. +fn extract_matched_conditions(caps: &Captures) -> TypeConditions { + caps.name("condition") + .map(|c| c.as_str().split(',').map(|s| s.to_string()).collect()) + .unwrap_or_default() +} + +fn split_path_element_and_type_conditions(s: &str) -> (String, Option<TypeConditions>) { + let mut type_conditions = None; + let path_element = TYPE_CONDITIONS_REGEX.replace(s, |caps: &Captures| { + type_conditions = Some(extract_matched_conditions(caps)); + "" + }); + (path_element.to_string(), type_conditions) +} + macro_rules! extract_key_value_from_object { ($object:expr, $key:literal, $pattern:pat => $var:ident) => {{ match $object.remove($key) { @@ -842,7 +858,7 @@ impl<'de> serde::de::Visitor<'de> for FlattenVisitor { fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { write!( formatter, - "a string that is '@', potentially preceded of followed by type conditions" + "a string that is '@', potentially followed by type conditions" ) } @@ -850,23 +866,9 @@ impl<'de> serde::de::Visitor<'de> for FlattenVisitor { where E: serde::de::Error, { - let mut type_conditions: Vec<String> = Vec::new(); - let path = TYPE_CONDITIONS_REGEX.replace(s, |caps: &Captures| { - type_conditions.extend( - caps.name("condition") - .map(|c| { - c.as_str() - .split(',') - .map(|s| s.to_string()) - .collect::<Vec<String>>() - }) - .unwrap_or_default(), - ); - "" - }); - - if path == "@" { - Ok((!type_conditions.is_empty()).then_some(type_conditions)) + let (path_element, type_conditions) = split_path_element_and_type_conditions(s); + if path_element == "@" { + Ok(type_conditions) } else { Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str(s), @@ -884,11 +886,7 @@ where S: serde::Serializer, { let tc_string = if let Some(c) = type_conditions { - if !c.is_empty() { - format!("|[{}]", c.join(",")) - } else { - "".to_string() - } + format!("|[{}]", c.join(",")) } else { "".to_string() }; @@ -911,7 +909,7 @@ impl<'de> serde::de::Visitor<'de> for KeyVisitor { fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { write!( formatter, - "a string, potentially preceded of followed by type conditions" + "a string, potentially followed by type conditions" ) } @@ -919,21 +917,7 @@ impl<'de> serde::de::Visitor<'de> for KeyVisitor { where E: serde::de::Error, { - let mut type_conditions = Vec::new(); - let key = TYPE_CONDITIONS_REGEX.replace(s, |caps: &Captures| { - type_conditions.extend( - caps.extract::<1>() - .1 - .map(|s| s.split(',').map(|s| s.to_string())) - .into_iter() - .flatten(), - ); - "" - }); - Ok(( - key.to_string(), - (!type_conditions.is_empty()).then_some(type_conditions), - ))
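+ // Delegate to the shared helper above, which strips any `|[...]` suffix and returns the type conditions it contained.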
+ Ok(split_path_element_and_type_conditions(s)) } } @@ -946,11 +930,7 @@ where S: serde::Serializer, { let tc_string = if let Some(c) = type_conditions { - if !c.is_empty() { - format!("|[{}]", c.join(",")) - } else { - "".to_string() - } + format!("|[{}]", c.join(",")) } else { "".to_string() }; @@ -992,43 +972,16 @@ where } fn flatten_from_str(s: &str) -> Result<PathElement, String> { - let mut type_conditions = Vec::new(); - let path = TYPE_CONDITIONS_REGEX.replace(s, |caps: &Captures| { - type_conditions.extend( - caps.extract::<1>() - .1 - .map(|s| s.split(',').map(|s| s.to_string())) - .into_iter() - .flatten(), - ); - "" - }); - - if path != "@" { + let (path_element, type_conditions) = split_path_element_and_type_conditions(s); + if path_element != "@" { return Err("invalid flatten".to_string()); } - Ok(PathElement::Flatten( - (!type_conditions.is_empty()).then_some(type_conditions), - )) + Ok(PathElement::Flatten(type_conditions)) } fn key_from_str(s: &str) -> Result<PathElement, String> { - let mut type_conditions = Vec::new(); - let key = TYPE_CONDITIONS_REGEX.replace(s, |caps: &Captures| { - type_conditions.extend( - caps.extract::<1>() - .1 - .map(|s| s.split(',').map(|s| s.to_string())) - .into_iter() - .flatten(), - ); - "" - }); - - Ok(PathElement::Key( - key.to_string(), - (!type_conditions.is_empty()).then_some(type_conditions), - )) + let (key, type_conditions) = split_path_element_and_type_conditions(s); + Ok(PathElement::Key(key, type_conditions)) } /// A path into the result document. @@ -1119,9 +1072,7 @@ impl Path { PathElement::Key(key, type_conditions) => { let mut tc = String::new(); if let Some(c) = type_conditions { - if !c.is_empty() { - tc = format!("|[{}]", c.join(",")); - } + tc = format!("|[{}]", c.join(",")); }; Some(format!("{}{}", key, tc)) } @@ -1191,17 +1142,13 @@ impl fmt::Display for Path { PathElement::Key(key, type_conditions) => { write!(f, "{key}")?; if let Some(c) = type_conditions { - if !c.is_empty() { - write!(f, "|[{}]", c.join(","))?; - } + write!(f, "|[{}]", c.join(","))?; }; } PathElement::Flatten(type_conditions) => { write!(f, "@")?; if let Some(c) = type_conditions { - if !c.is_empty() { - write!(f, "|[{}]", c.join(","))?; - } + write!(f, "|[{}]", c.join(","))?; }; } PathElement::Fragment(name) => { diff --git a/apollo-router/src/lib.rs b/apollo-router/src/lib.rs index 6f30ab6239..cc39b79aea 100644 --- a/apollo-router/src/lib.rs +++ b/apollo-router/src/lib.rs @@ -116,10 +116,10 @@ pub mod _private { pub use crate::plugin::PluginFactory; pub use crate::plugin::PLUGINS; // For comparison/fuzzing - pub use crate::query_planner::bridge_query_planner::render_diff; pub use crate::query_planner::bridge_query_planner::QueryPlanResult; - pub use crate::query_planner::dual_query_planner::diff_plan; - pub use crate::query_planner::dual_query_planner::plan_matches; + pub use crate::query_planner::plan_compare::diff_plan; + pub use crate::query_planner::plan_compare::plan_matches; + pub use crate::query_planner::plan_compare::render_diff; // For tests pub use crate::router_factory::create_test_service_factory_from_yaml; } diff --git a/apollo-router/src/metrics/filter.rs b/apollo-router/src/metrics/filter.rs index fa6dad38df..6f30335794 100644 --- a/apollo-router/src/metrics/filter.rs +++ b/apollo-router/src/metrics/filter.rs @@ -94,7 +94,7 @@ impl FilterMeterProvider { .delegate(delegate) .allow( Regex::new( -
r"apollo\.(graphos\.cloud|router\.(operations?|lifecycle|config|schema|query|query_planning|telemetry))(\..*|$)|apollo_router_uplink_fetch_count_total|apollo_router_uplink_fetch_duration_seconds", + r"apollo\.(graphos\.cloud|router\.(operations?|lifecycle|config|schema|query|query_planning|telemetry|instance))(\..*|$)|apollo_router_uplink_fetch_count_total|apollo_router_uplink_fetch_duration_seconds", ) .expect("regex should have been valid"), ) @@ -105,7 +105,7 @@ impl FilterMeterProvider { FilterMeterProvider::builder() .delegate(delegate) .deny( - Regex::new(r"apollo\.router\.(config|entities)(\..*|$)") + Regex::new(r"apollo\.router\.(config|entities|instance|operations\.(fetch|request_size|response_size))(\..*|$)") .expect("regex should have been valid"), ) .build() @@ -244,7 +244,6 @@ impl opentelemetry::metrics::MeterProvider for FilterMeterProvider { #[cfg(test)] mod test { - use opentelemetry::metrics::MeterProvider; use opentelemetry::metrics::Unit; use opentelemetry::runtime; diff --git a/apollo-router/src/metrics/mod.rs b/apollo-router/src/metrics/mod.rs index e24317cd06..1cb90bc2ae 100644 --- a/apollo-router/src/metrics/mod.rs +++ b/apollo-router/src/metrics/mod.rs @@ -496,7 +496,9 @@ pub(crate) fn meter_provider() -> AggregateMeterProvider { } #[macro_export] -/// Get or create a u64 monotonic counter metric and add a value to it +/// Get or create a `u64` monotonic counter metric and add a value to it. +/// +/// Each metric needs a description. /// /// This macro is a replacement for the telemetry crate's MetricsLayer. We will eventually convert all metrics to use these macros and deprecate the MetricsLayer. /// The reason for this is that the MetricsLayer has: @@ -506,6 +508,34 @@ pub(crate) fn meter_provider() -> AggregateMeterProvider { /// * Imperfect mapping to metrics API that can only be checked at runtime. /// /// New metrics should be added using these macros. +/// +/// # Examples +/// ```ignore +/// // Count a thing: +/// u64_counter!( +/// "apollo.router.operations.frobbles", +/// "The amount of frobbles we've operated on", +/// 1 +/// ); +/// // Count a thing with attributes: +/// u64_counter!( +/// "apollo.router.operations.frobbles", +/// "The amount of frobbles we've operated on", +/// 1, +/// frobbles.color = "blue" +/// ); +/// // Count a thing with dynamic attributes: +/// let attributes = vec![]; +/// if (frobbled) { +/// attributes.push(opentelemetry::KeyValue::new("frobbles.color".to_string(), "blue".into())); +/// } +/// u64_counter!( +/// "apollo.router.operations.frobbles", +/// "The amount of frobbles we've operated on", +/// 1, +/// attributes +/// ); +/// ``` #[allow(unused_macros)] macro_rules! u64_counter { ($($name:ident).+, $description:literal, $value: expr, $($attr_key:literal = $attr_value:expr),+) => { @@ -910,6 +940,11 @@ macro_rules! 
assert_counter { assert_metric!(result, $name, Some($value.into()), None, &attributes); }; + ($name:literal, $value: expr, $attributes: expr) => { + let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Counter, $value, $attributes); + assert_metric!(result, $name, Some($value.into()), None, &$attributes); + }; + ($name:literal, $value: expr) => { let result = crate::metrics::collect_metrics().assert($name, crate::metrics::test_utils::MetricType::Counter, $value, &[]); assert_metric!(result, $name, Some($value.into()), None, &[]); @@ -1180,6 +1215,7 @@ mod test { let attributes = vec![KeyValue::new("attr", "val")]; u64_counter!("test", "test description", 1, attributes); assert_counter!("test", 1, "attr" = "val"); + assert_counter!("test", 1, &attributes); } #[test] diff --git a/apollo-router/src/notification.rs b/apollo-router/src/notification.rs index 7cfba87e7a..ac1f33645d 100644 --- a/apollo-router/src/notification.rs +++ b/apollo-router/src/notification.rs @@ -510,7 +510,11 @@ where match Pin::new(&mut this.msg_receiver).poll_next(cx) { Poll::Ready(Some(Err(BroadcastStreamRecvError::Lagged(_)))) => { - tracing::info!(monotonic_counter.apollo_router_skipped_event_count = 1u64,); + u64_counter!( + "apollo_router_skipped_event_count", + "Number of events dropped from the internal message queue", + 1u64 + ); self.poll_next(cx) } Poll::Ready(None) => Poll::Ready(None), diff --git a/apollo-router/src/plugin/mod.rs b/apollo-router/src/plugin/mod.rs index 3354868491..9479e7f91a 100644 --- a/apollo-router/src/plugin/mod.rs +++ b/apollo-router/src/plugin/mod.rs @@ -77,6 +77,9 @@ pub struct PluginInit { /// The parsed subgraph schemas from the query planner, keyed by subgraph name pub(crate) subgraph_schemas: Arc, + /// Launch ID + pub(crate) launch_id: Option<Arc<String>>, + pub(crate) notify: Notify, } @@ -137,6 +140,7 @@ where .supergraph_schema_id(crate::spec::Schema::schema_id(&supergraph_sdl).into()) .supergraph_sdl(supergraph_sdl) .supergraph_schema(supergraph_schema) + .launch_id(Arc::new("launch_id".to_string())) .notify(Notify::for_tests()) .build() } @@ -173,6 +177,7 @@ where supergraph_schema_id: Arc, supergraph_schema: Arc>, subgraph_schemas: Option>, + launch_id: Option<Option<Arc<String>>>, notify: Notify, ) -> Self { PluginInit { @@ -181,6 +186,7 @@ where supergraph_schema_id, supergraph_schema, subgraph_schemas: subgraph_schemas.unwrap_or_default(), + launch_id: launch_id.flatten(), notify, } } @@ -196,6 +202,7 @@ where supergraph_schema_id: Arc, supergraph_schema: Arc>, subgraph_schemas: Option>, + launch_id: Option<Arc<String>>, notify: Notify, ) -> Result { let config: T = serde_json::from_value(config)?; @@ -205,6 +212,7 @@ where supergraph_schema, supergraph_schema_id, subgraph_schemas: subgraph_schemas.unwrap_or_default(), + launch_id, notify, }) } @@ -217,6 +225,7 @@ where supergraph_schema_id: Option>, supergraph_schema: Option>>, subgraph_schemas: Option>, + launch_id: Option<Arc<String>>, notify: Option>, ) -> Self { PluginInit { @@ -226,6 +235,7 @@ where supergraph_schema: supergraph_schema .unwrap_or_else(|| Arc::new(Valid::assume_valid(Schema::new()))), subgraph_schemas: subgraph_schemas.unwrap_or_default(), + launch_id, notify: notify.unwrap_or_else(Notify::for_tests), } } @@ -630,6 +640,9 @@ pub(crate) trait PluginPrivate: Send + Sync + 'static { fn web_endpoints(&self) -> MultiMap { MultiMap::new() } + + /// The point of no return: this plugin is about to go live + fn activate(&self) {} } #[async_trait] @@ -677,6 +690,8 @@ where fn web_endpoints(&self) -> MultiMap {
PluginUnstable::web_endpoints(self) } + + fn activate(&self) {} } fn get_type_of<T>(_: &T) -> &'static str { @@ -733,6 +748,9 @@ pub(crate) trait DynPlugin: Send + Sync + 'static { /// Support downcasting #[cfg(test)] fn as_any_mut(&mut self) -> &mut dyn std::any::Any; + + /// The point of no return: this plugin is about to go live + fn activate(&self) {} } #[async_trait] @@ -783,6 +801,19 @@ where fn as_any_mut(&mut self) -> &mut dyn std::any::Any { self } + + fn activate(&self) { + self.activate() + } +} + +impl<T> From<T> for Box<dyn DynPlugin> +where + T: PluginPrivate, +{ + fn from(value: T) -> Self { + Box::new(value) + } } /// Register a plugin with a group and a name diff --git a/apollo-router/src/plugins/authentication/mod.rs b/apollo-router/src/plugins/authentication/mod.rs index 9f9abc8040..0239e1f005 100644 --- a/apollo-router/src/plugins/authentication/mod.rs +++ b/apollo-router/src/plugins/authentication/mod.rs @@ -539,8 +539,6 @@ fn authenticate( jwks_manager: &JwksManager, request: router::Request, ) -> ControlFlow { - const AUTHENTICATION_KIND: &str = "JWT"; - // We are going to do a lot of similar checking so let's define a local function // to help reduce repetition fn failure_message( status: StatusCode, ) -> ControlFlow { // This is a metric and will not appear in the logs - tracing::info!( - monotonic_counter.apollo_authentication_failure_count = 1u64, - kind = %AUTHENTICATION_KIND + u64_counter!( + "apollo_authentication_failure_count", + "Number of requests with failed JWT authentication (deprecated)", + 1, + kind = "JWT" ); - tracing::info!( - monotonic_counter - .apollo - .router - .operations - .authentication - .jwt = 1, + u64_counter!( + "apollo.router.operations.authentication.jwt", + "Number of requests with JWT authentication", + 1, authentication.jwt.failed = true ); tracing::info!(message = %error, "jwt authentication failure"); @@ -662,11 +659,17 @@ fn authenticate( ); } // This is a metric and will not appear in the logs - tracing::info!( - monotonic_counter.apollo_authentication_success_count = 1u64, - kind = %AUTHENTICATION_KIND + u64_counter!( + "apollo_authentication_success_count", + "Number of requests with successful JWT authentication (deprecated)", + 1, + kind = "JWT" + ); + u64_counter!( + "apollo.router.operations.jwt", + "Number of requests with JWT authentication", + 1 ); - tracing::info!(monotonic_counter.apollo.router.operations.jwt = 1u64); return ControlFlow::Continue(request); } diff --git a/apollo-router/src/plugins/authentication/subgraph.rs b/apollo-router/src/plugins/authentication/subgraph.rs index 568aa9c8ac..4a0bcd4d65 100644 --- a/apollo-router/src/plugins/authentication/subgraph.rs +++ b/apollo-router/src/plugins/authentication/subgraph.rs @@ -409,17 +409,21 @@ impl SigningParamsConfig { } fn increment_success_counter(subgraph_name: &str) { - tracing::info!( - monotonic_counter.apollo.router.operations.authentication.aws.sigv4 = 1u64, + u64_counter!( + "apollo.router.operations.authentication.aws.sigv4", + "Number of subgraph requests signed with AWS SigV4", + 1, authentication.aws.sigv4.failed = false, - subgraph.service.name = %subgraph_name, + subgraph.service.name = subgraph_name.to_string() ); } fn increment_failure_counter(subgraph_name: &str) { - tracing::info!( - monotonic_counter.apollo.router.operations.authentication.aws.sigv4 = 1u64, + u64_counter!( + "apollo.router.operations.authentication.aws.sigv4", + "Number of subgraph requests signed with AWS SigV4", + 1, authentication.aws.sigv4.failed = true,
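+ // `failed = true` distinguishes rejected signing attempts from the success counter recorded in increment_success_counter above.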
- subgraph.service.name = %subgraph_name, + subgraph.service.name = subgraph_name.to_string() ); } diff --git a/apollo-router/src/plugins/authorization/mod.rs b/apollo-router/src/plugins/authorization/mod.rs index 331641a726..63b2062a05 100644 --- a/apollo-router/src/plugins/authorization/mod.rs +++ b/apollo-router/src/plugins/authorization/mod.rs @@ -556,8 +556,10 @@ impl Plugin for AuthorizationPlugin { Ok(ControlFlow::Continue(request)) } else { // This is a metric and will not appear in the logs - tracing::info!( - monotonic_counter.apollo_require_authentication_failure_count = 1u64, + u64_counter!( + "apollo_require_authentication_failure_count", + "Number of unauthenticated requests (deprecated)", + 1 ); tracing::error!("rejecting unauthenticated request"); let response = supergraph::Response::error_builder() @@ -588,11 +590,13 @@ impl Plugin for AuthorizationPlugin { let needs_requires_scopes = request.context.contains_key(REQUIRED_SCOPES_KEY); if needs_authenticated || needs_requires_scopes { - tracing::info!( - monotonic_counter.apollo.router.operations.authorization = 1u64, + u64_counter!( + "apollo.router.operations.authorization", + "Number of subgraph requests requiring authorization", + 1, authorization.filtered = filtered, authorization.needs_authenticated = needs_authenticated, - authorization.needs_requires_scopes = needs_requires_scopes, + authorization.needs_requires_scopes = needs_requires_scopes ); } diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__insert-5.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__insert-5.snap index dd1ee738c2..ed078770d9 100644 --- a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__insert-5.snap +++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__insert-5.snap @@ -4,12 +4,12 @@ expression: cache_keys --- [ { - "key": "version:1.0:subgraph:orga:type:Organization:entity:5811967f540d300d249ab30ae681359a7815fdb5d3dc71a94be1d491006a6b27:hash:d0b09a1a50750b5e95f73a196acf6ef5a8d60bf19599854b0dbee5dec6ee7ed6:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", + "key": "version:1.0:subgraph:orga:type:Organization:entity:5811967f540d300d249ab30ae681359a7815fdb5d3dc71a94be1d491006a6b27:hash:ab9056ba140750aa8fe58360172b450fa717e7ea177e4a3c9426fe1291a88da2:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", "status": "cached", "cache_control": "public" }, { - "key": "version:1.0:subgraph:user:type:Query:hash:a3b7f56680be04e3ae646cf8a025aed165e8dd0f6c3dc7c95d745f8cb1348083:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", + "key": "version:1.0:subgraph:user:type:Query:hash:0d4d253b049bbea514a54a892902fa4b9b658aedc9b8f2a1308323cdeef3c0ca:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", "status": "cached", "cache_control": "public" } diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__insert.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__insert.snap index 1375808b78..a49a81580f 100644 --- a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__insert.snap +++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__insert.snap @@ -4,12 +4,12 @@ expression: cache_keys --- [ { - "key": 
"version:1.0:subgraph:orga:type:Organization:entity:5811967f540d300d249ab30ae681359a7815fdb5d3dc71a94be1d491006a6b27:hash:d0b09a1a50750b5e95f73a196acf6ef5a8d60bf19599854b0dbee5dec6ee7ed6:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", + "key": "version:1.0:subgraph:orga:type:Organization:entity:5811967f540d300d249ab30ae681359a7815fdb5d3dc71a94be1d491006a6b27:hash:ab9056ba140750aa8fe58360172b450fa717e7ea177e4a3c9426fe1291a88da2:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", "status": "new", "cache_control": "public" }, { - "key": "version:1.0:subgraph:user:type:Query:hash:a3b7f56680be04e3ae646cf8a025aed165e8dd0f6c3dc7c95d745f8cb1348083:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", + "key": "version:1.0:subgraph:user:type:Query:hash:0d4d253b049bbea514a54a892902fa4b9b658aedc9b8f2a1308323cdeef3c0ca:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", "status": "new", "cache_control": "public" } diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data-3.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data-3.snap index 1322b59275..fbe291783c 100644 --- a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data-3.snap +++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data-3.snap @@ -4,12 +4,12 @@ expression: cache_keys --- [ { - "key": "version:1.0:subgraph:orga:type:Organization:entity:5221ff42b311b757445c096c023cee4fefab5de49735e421c494f1119326317b:hash:cffb47a84aff0aea6a447e33caf3b275bdc7f71689d75f56647242b3b9f5e13b:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", + "key": "version:1.0:subgraph:orga:type:Organization:entity:5221ff42b311b757445c096c023cee4fefab5de49735e421c494f1119326317b:hash:4913f52405bb614177e7c718d43da695c2f0e7411707c2f77f1c62380153c8d8:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", "status": "cached", "cache_control": "[REDACTED]" }, { - "key": "version:1.0:subgraph:orga:type:Organization:entity:5811967f540d300d249ab30ae681359a7815fdb5d3dc71a94be1d491006a6b27:hash:cffb47a84aff0aea6a447e33caf3b275bdc7f71689d75f56647242b3b9f5e13b:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", + "key": "version:1.0:subgraph:orga:type:Organization:entity:5811967f540d300d249ab30ae681359a7815fdb5d3dc71a94be1d491006a6b27:hash:4913f52405bb614177e7c718d43da695c2f0e7411707c2f77f1c62380153c8d8:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", "status": "cached", "cache_control": "[REDACTED]" } diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data.snap index 87c750131f..d32bd8453c 100644 --- a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data.snap +++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__no_data.snap @@ -4,12 +4,12 @@ expression: cache_keys --- [ { - "key": "version:1.0:subgraph:orga:type:Organization:entity:5221ff42b311b757445c096c023cee4fefab5de49735e421c494f1119326317b:hash:cffb47a84aff0aea6a447e33caf3b275bdc7f71689d75f56647242b3b9f5e13b:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", + "key": 
"version:1.0:subgraph:orga:type:Organization:entity:5221ff42b311b757445c096c023cee4fefab5de49735e421c494f1119326317b:hash:4913f52405bb614177e7c718d43da695c2f0e7411707c2f77f1c62380153c8d8:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", "status": "new", "cache_control": "[REDACTED]" }, { - "key": "version:1.0:subgraph:orga:type:Organization:entity:5811967f540d300d249ab30ae681359a7815fdb5d3dc71a94be1d491006a6b27:hash:cffb47a84aff0aea6a447e33caf3b275bdc7f71689d75f56647242b3b9f5e13b:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", + "key": "version:1.0:subgraph:orga:type:Organization:entity:5811967f540d300d249ab30ae681359a7815fdb5d3dc71a94be1d491006a6b27:hash:4913f52405bb614177e7c718d43da695c2f0e7411707c2f77f1c62380153c8d8:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", "status": "new", "cache_control": "[REDACTED]" } diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__private-3.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__private-3.snap index c9839a8823..76fd27f7fa 100644 --- a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__private-3.snap +++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__private-3.snap @@ -4,12 +4,12 @@ expression: cache_keys --- [ { - "key": "version:1.0:subgraph:orga:type:Organization:entity:5811967f540d300d249ab30ae681359a7815fdb5d3dc71a94be1d491006a6b27:hash:d0b09a1a50750b5e95f73a196acf6ef5a8d60bf19599854b0dbee5dec6ee7ed6:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c:03ac674216f3e15c761ee1a5e255f067953623c8b388b4459e13f978d7c846f4", + "key": "version:1.0:subgraph:orga:type:Organization:entity:5811967f540d300d249ab30ae681359a7815fdb5d3dc71a94be1d491006a6b27:hash:ab9056ba140750aa8fe58360172b450fa717e7ea177e4a3c9426fe1291a88da2:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c:03ac674216f3e15c761ee1a5e255f067953623c8b388b4459e13f978d7c846f4", "status": "cached", "cache_control": "private" }, { - "key": "version:1.0:subgraph:user:type:Query:hash:a3b7f56680be04e3ae646cf8a025aed165e8dd0f6c3dc7c95d745f8cb1348083:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c:03ac674216f3e15c761ee1a5e255f067953623c8b388b4459e13f978d7c846f4", + "key": "version:1.0:subgraph:user:type:Query:hash:0d4d253b049bbea514a54a892902fa4b9b658aedc9b8f2a1308323cdeef3c0ca:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c:03ac674216f3e15c761ee1a5e255f067953623c8b388b4459e13f978d7c846f4", "status": "cached", "cache_control": "private" } diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__private-5.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__private-5.snap index c9839a8823..76fd27f7fa 100644 --- a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__private-5.snap +++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__private-5.snap @@ -4,12 +4,12 @@ expression: cache_keys --- [ { - "key": "version:1.0:subgraph:orga:type:Organization:entity:5811967f540d300d249ab30ae681359a7815fdb5d3dc71a94be1d491006a6b27:hash:d0b09a1a50750b5e95f73a196acf6ef5a8d60bf19599854b0dbee5dec6ee7ed6:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c:03ac674216f3e15c761ee1a5e255f067953623c8b388b4459e13f978d7c846f4", + "key": 
"version:1.0:subgraph:orga:type:Organization:entity:5811967f540d300d249ab30ae681359a7815fdb5d3dc71a94be1d491006a6b27:hash:ab9056ba140750aa8fe58360172b450fa717e7ea177e4a3c9426fe1291a88da2:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c:03ac674216f3e15c761ee1a5e255f067953623c8b388b4459e13f978d7c846f4", "status": "cached", "cache_control": "private" }, { - "key": "version:1.0:subgraph:user:type:Query:hash:a3b7f56680be04e3ae646cf8a025aed165e8dd0f6c3dc7c95d745f8cb1348083:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c:03ac674216f3e15c761ee1a5e255f067953623c8b388b4459e13f978d7c846f4", + "key": "version:1.0:subgraph:user:type:Query:hash:0d4d253b049bbea514a54a892902fa4b9b658aedc9b8f2a1308323cdeef3c0ca:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c:03ac674216f3e15c761ee1a5e255f067953623c8b388b4459e13f978d7c846f4", "status": "cached", "cache_control": "private" } diff --git a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__private.snap b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__private.snap index 69027e4644..65da6044f9 100644 --- a/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__private.snap +++ b/apollo-router/src/plugins/cache/snapshots/apollo_router__plugins__cache__tests__private.snap @@ -4,12 +4,12 @@ expression: cache_keys --- [ { - "key": "version:1.0:subgraph:orga:type:Organization:entity:5811967f540d300d249ab30ae681359a7815fdb5d3dc71a94be1d491006a6b27:hash:d0b09a1a50750b5e95f73a196acf6ef5a8d60bf19599854b0dbee5dec6ee7ed6:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", + "key": "version:1.0:subgraph:orga:type:Organization:entity:5811967f540d300d249ab30ae681359a7815fdb5d3dc71a94be1d491006a6b27:hash:ab9056ba140750aa8fe58360172b450fa717e7ea177e4a3c9426fe1291a88da2:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c", "status": "new", "cache_control": "private" }, { - "key": "version:1.0:subgraph:user:type:Query:hash:a3b7f56680be04e3ae646cf8a025aed165e8dd0f6c3dc7c95d745f8cb1348083:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c:03ac674216f3e15c761ee1a5e255f067953623c8b388b4459e13f978d7c846f4", + "key": "version:1.0:subgraph:user:type:Query:hash:0d4d253b049bbea514a54a892902fa4b9b658aedc9b8f2a1308323cdeef3c0ca:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c:03ac674216f3e15c761ee1a5e255f067953623c8b388b4459e13f978d7c846f4", "status": "new", "cache_control": "private" } diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs b/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs index cf819478e1..0b3c6738ae 100644 --- a/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs +++ b/apollo-router/src/plugins/demand_control/cost_calculator/directives.rs @@ -1,114 +1,20 @@ use ahash::HashMap; use ahash::HashMapExt; use ahash::HashSet; -use apollo_compiler::ast::DirectiveList; use apollo_compiler::ast::FieldDefinition; -use apollo_compiler::ast::InputValueDefinition; use apollo_compiler::ast::NamedType; use apollo_compiler::executable::Field; use apollo_compiler::executable::SelectionSet; -use apollo_compiler::name; use apollo_compiler::parser::Parser; -use apollo_compiler::schema::ExtendedType; use apollo_compiler::validation::Valid; -use apollo_compiler::Name; use apollo_compiler::Schema; -use apollo_federation::link::spec::APOLLO_SPEC_DOMAIN; -use apollo_federation::link::Link; +use 
apollo_federation::link::cost_spec_definition::ListSizeDirective as ParsedListSizeDirective; use tower::BoxError; use crate::json_ext::Object; use crate::json_ext::ValueExt; use crate::plugins::demand_control::DemandControlError; -const COST_DIRECTIVE_NAME: Name = name!("cost"); -const COST_DIRECTIVE_DEFAULT_NAME: Name = name!("federation__cost"); -const COST_DIRECTIVE_WEIGHT_ARGUMENT_NAME: Name = name!("weight"); - -const LIST_SIZE_DIRECTIVE_NAME: Name = name!("listSize"); -const LIST_SIZE_DIRECTIVE_DEFAULT_NAME: Name = name!("federation__listSize"); -const LIST_SIZE_DIRECTIVE_ASSUMED_SIZE_ARGUMENT_NAME: Name = name!("assumedSize"); -const LIST_SIZE_DIRECTIVE_SLICING_ARGUMENTS_ARGUMENT_NAME: Name = name!("slicingArguments"); -const LIST_SIZE_DIRECTIVE_SIZED_FIELDS_ARGUMENT_NAME: Name = name!("sizedFields"); -const LIST_SIZE_DIRECTIVE_REQUIRE_ONE_SLICING_ARGUMENT_ARGUMENT_NAME: Name = - name!("requireOneSlicingArgument"); - -pub(in crate::plugins::demand_control) fn get_apollo_directive_names( - schema: &Schema, -) -> HashMap<Name, Name> { - let mut hm: HashMap<Name, Name> = HashMap::new(); - for directive in &schema.schema_definition.directives { - if directive.name.as_str() == "link" { - if let Ok(link) = Link::from_directive_application(directive) { - if link.url.identity.domain != APOLLO_SPEC_DOMAIN { - continue; - } - for import in link.imports { - hm.insert(import.element.clone(), import.imported_name().clone()); - } - } - } - } - hm -} - -pub(in crate::plugins::demand_control) struct CostDirective { - weight: i32, -} - -impl CostDirective { - pub(in crate::plugins::demand_control) fn weight(&self) -> f64 { - self.weight as f64 - } - - pub(in crate::plugins::demand_control) fn from_argument( - directive_name_map: &HashMap<Name, Name>, - argument: &InputValueDefinition, - ) -> Option<Self> { - Self::from_directives(directive_name_map, &argument.directives) - } - - pub(in crate::plugins::demand_control) fn from_field( - directive_name_map: &HashMap<Name, Name>, - field: &FieldDefinition, - ) -> Option<Self> { - Self::from_directives(directive_name_map, &field.directives) - } - - pub(in crate::plugins::demand_control) fn from_type( - directive_name_map: &HashMap<Name, Name>, - ty: &ExtendedType, - ) -> Option<Self> { - Self::from_schema_directives(directive_name_map, ty.directives()) - } - - fn from_directives( - directive_name_map: &HashMap<Name, Name>, - directives: &DirectiveList, - ) -> Option<Self> { - directive_name_map - .get(&COST_DIRECTIVE_NAME) - .and_then(|name| directives.get(name)) - .or(directives.get(&COST_DIRECTIVE_DEFAULT_NAME)) - .and_then(|cost| cost.specified_argument_by_name(&COST_DIRECTIVE_WEIGHT_ARGUMENT_NAME)) - .and_then(|weight| weight.to_i32()) - .map(|weight| Self { weight }) - } - - pub(in crate::plugins::demand_control) fn from_schema_directives( - directive_name_map: &HashMap<Name, Name>, - directives: &apollo_compiler::schema::DirectiveList, - ) -> Option<Self> { - directive_name_map - .get(&COST_DIRECTIVE_NAME) - .and_then(|name| directives.get(name)) - .or(directives.get(&COST_DIRECTIVE_DEFAULT_NAME)) - .and_then(|cost| cost.specified_argument_by_name(&COST_DIRECTIVE_WEIGHT_ARGUMENT_NAME)) - .and_then(|weight| weight.to_i32()) - .map(|weight| Self { weight }) - } -} - pub(in crate::plugins::demand_control) struct IncludeDirective { pub(in crate::plugins::demand_control) is_included: bool, } @@ -134,86 +40,13 @@ pub(in crate::plugins::demand_control) struct ListSizeDirective<'schema> { } impl<'schema> ListSizeDirective<'schema> { - pub(in crate::plugins::demand_control) fn size_of(&self, field: &Field) -> Option<i32> { - if self - .sized_fields - .as_ref() -
.is_some_and(|sf| sf.contains(field.name.as_str())) - { - self.expected_size - } else { - None - } - } -} - -/// The `@listSize` directive from a field definition, which can be converted to -/// `ListSizeDirective` with a concrete field from a request. -pub(in crate::plugins::demand_control) struct DefinitionListSizeDirective { - assumed_size: Option<i32>, - slicing_argument_names: Option<Vec<String>>, - sized_fields: Option<HashSet<String>>, - require_one_slicing_argument: bool, -} - -impl DefinitionListSizeDirective { - pub(in crate::plugins::demand_control) fn from_field_definition( - directive_name_map: &HashMap<Name, Name>, - definition: &FieldDefinition, - ) -> Result<Option<Self>, DemandControlError> { - let directive = directive_name_map - .get(&LIST_SIZE_DIRECTIVE_NAME) - .and_then(|name| definition.directives.get(name)) - .or(definition.directives.get(&LIST_SIZE_DIRECTIVE_DEFAULT_NAME)); - if let Some(directive) = directive { - let assumed_size = directive - .specified_argument_by_name(&LIST_SIZE_DIRECTIVE_ASSUMED_SIZE_ARGUMENT_NAME) - .and_then(|arg| arg.to_i32()); - let slicing_argument_names = directive - .specified_argument_by_name(&LIST_SIZE_DIRECTIVE_SLICING_ARGUMENTS_ARGUMENT_NAME) - .and_then(|arg| arg.as_list()) - .map(|arg_list| { - arg_list - .iter() - .flat_map(|arg| arg.as_str()) - .map(String::from) - .collect() - }); - let sized_fields = directive - .specified_argument_by_name(&LIST_SIZE_DIRECTIVE_SIZED_FIELDS_ARGUMENT_NAME) - .and_then(|arg| arg.as_list()) - .map(|arg_list| { - arg_list - .iter() - .flat_map(|arg| arg.as_str()) - .map(String::from) - .collect() - }); - let require_one_slicing_argument = directive - .specified_argument_by_name( - &LIST_SIZE_DIRECTIVE_REQUIRE_ONE_SLICING_ARGUMENT_ARGUMENT_NAME, - ) - .and_then(|arg| arg.to_bool()) - .unwrap_or(true); - - Ok(Some(Self { - assumed_size, - slicing_argument_names, - sized_fields, - require_one_slicing_argument, - })) - } else { - Ok(None) - } - } - - pub(in crate::plugins::demand_control) fn with_field_and_variables( - &self, + pub(in crate::plugins::demand_control) fn new( + parsed: &'schema ParsedListSizeDirective, field: &Field, variables: &Object, - ) -> Result<ListSizeDirective, DemandControlError> { + ) -> Result<Self, DemandControlError> { let mut slicing_arguments: HashMap<&str, i32> = HashMap::new(); - if let Some(slicing_argument_names) = self.slicing_argument_names.as_ref() { + if let Some(slicing_argument_names) = parsed.slicing_argument_names.as_ref() { // First, collect the default values for each argument for argument in &field.definition.arguments { if slicing_argument_names.contains(argument.name.as_str()) { @@ -240,7 +73,7 @@ impl DefinitionListSizeDirective { } } - if self.require_one_slicing_argument && slicing_arguments.len() != 1 { + if parsed.require_one_slicing_argument && slicing_arguments.len() != 1 { return Err(DemandControlError::QueryParseFailure(format!( "Exactly one slicing argument is required, but found {}", slicing_arguments.len() @@ -252,16 +85,28 @@ impl DefinitionListSizeDirective { .values() .max() .cloned() - .or(self.assumed_size); + .or(parsed.assumed_size); - Ok(ListSizeDirective { + Ok(Self { expected_size, - sized_fields: self + sized_fields: parsed .sized_fields .as_ref() .map(|set| set.iter().map(|s| s.as_str()).collect()), }) } + + pub(in crate::plugins::demand_control) fn size_of(&self, field: &Field) -> Option<i32> { + if self + .sized_fields + .as_ref() + .is_some_and(|sf| sf.contains(field.name.as_str())) + { + self.expected_size + } else { + None + } + } } pub(in crate::plugins::demand_control) struct RequiresDirective { diff --git
a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema.graphql b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema.graphql index d966512be1..02184164a9 100644 --- a/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema.graphql +++ b/apollo-router/src/plugins/demand_control/cost_calculator/fixtures/custom_cost_schema.graphql @@ -1,10 +1,7 @@ schema @link(url: "https://specs.apollo.dev/link/v1.0") @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) - @link( - url: "https://specs.apollo.dev/cost/v0.1" - import: ["@cost", "@listSize"] - ) { + @link(url: "https://specs.apollo.dev/cost/v0.1", import: ["@listSize"]) { query: Query } @@ -12,13 +9,6 @@ directive @cost( weight: Int! ) on ARGUMENT_DEFINITION | ENUM | FIELD_DEFINITION | INPUT_FIELD_DEFINITION | OBJECT | SCALAR -directive @cost__listSize( - assumedSize: Int - slicingArguments: [String!] - sizedFields: [String!] - requireOneSlicingArgument: Boolean = true -) on FIELD_DEFINITION - directive @join__directive( graphs: [join__Graph!] name: String! diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/schema.rs b/apollo-router/src/plugins/demand_control/cost_calculator/schema.rs index 6a46ee9fe9..d59243f4d5 100644 --- a/apollo-router/src/plugins/demand_control/cost_calculator/schema.rs +++ b/apollo-router/src/plugins/demand_control/cost_calculator/schema.rs @@ -3,20 +3,21 @@ use std::sync::Arc; use ahash::HashMap; use ahash::HashMapExt; +use apollo_compiler::ast::InputValueDefinition; use apollo_compiler::schema::ExtendedType; use apollo_compiler::validation::Valid; use apollo_compiler::Name; use apollo_compiler::Schema; +use apollo_federation::link::cost_spec_definition::CostDirective; +use apollo_federation::link::cost_spec_definition::CostSpecDefinition; +use apollo_federation::link::cost_spec_definition::ListSizeDirective; +use apollo_federation::schema::ValidFederationSchema; -use super::directives::get_apollo_directive_names; -use super::directives::CostDirective; -use super::directives::DefinitionListSizeDirective as ListSizeDirective; use super::directives::RequiresDirective; use crate::plugins::demand_control::DemandControlError; pub(crate) struct DemandControlledSchema { - directive_name_map: HashMap<Name, Name>, - inner: Arc<Valid<Schema>>, + inner: ValidFederationSchema, type_field_cost_directives: HashMap<Name, HashMap<Name, CostDirective>>, type_field_list_size_directives: HashMap<Name, HashMap<Name, ListSizeDirective>>, type_field_requires_directives: HashMap<Name, HashMap<Name, RequiresDirective>>, } impl DemandControlledSchema { pub(crate) fn new(schema: Arc<Valid<Schema>>) -> Result<Self, DemandControlError> { - let directive_name_map = get_apollo_directive_names(&schema); - + let fed_schema = ValidFederationSchema::new((*schema).clone())?; let mut type_field_cost_directives: HashMap<Name, HashMap<Name, CostDirective>> = HashMap::new(); let mut type_field_list_size_directives: HashMap<Name, HashMap<Name, ListSizeDirective>> = @@ -55,17 +55,20 @@ )) })?; - if let Some(cost_directive) = - CostDirective::from_field(&directive_name_map, field_definition) - .or(CostDirective::from_type(&directive_name_map, field_type)) - { + if let Some(cost_directive) = CostSpecDefinition::cost_directive_from_field( + &fed_schema, + field_definition, + field_type, + )? { field_cost_directives.insert(field_name.clone(), cost_directive); } - if let Some(list_size_directive) = ListSizeDirective::from_field_definition( - &directive_name_map, - field_definition, - )?
+            if let Some(list_size_directive) =
+                CostSpecDefinition::list_size_directive_from_field_definition(
+                    &fed_schema,
+                    field_definition,
+                )?
+            {
                 field_list_size_directives
                     .insert(field_name.clone(), list_size_directive);
             }
@@ -90,17 +93,20 @@ impl DemandControlledSchema {
                 ))
             })?;
 
-            if let Some(cost_directive) =
-                CostDirective::from_field(&directive_name_map, field_definition)
-                    .or(CostDirective::from_type(&directive_name_map, field_type))
-            {
+            if let Some(cost_directive) = CostSpecDefinition::cost_directive_from_field(
+                &fed_schema,
+                field_definition,
+                field_type,
+            )? {
                 field_cost_directives.insert(field_name.clone(), cost_directive);
             }
 
-            if let Some(list_size_directive) = ListSizeDirective::from_field_definition(
-                &directive_name_map,
-                field_definition,
-            )? {
+            if let Some(list_size_directive) =
+                CostSpecDefinition::list_size_directive_from_field_definition(
+                    &fed_schema,
+                    field_definition,
+                )?
+            {
                 field_list_size_directives
                     .insert(field_name.clone(), list_size_directive);
             }
@@ -122,18 +128,13 @@ impl DemandControlledSchema {
         }
 
         Ok(Self {
-            directive_name_map,
-            inner: schema,
+            inner: fed_schema,
             type_field_cost_directives,
             type_field_list_size_directives,
             type_field_requires_directives,
         })
     }
 
-    pub(in crate::plugins::demand_control) fn directive_name_map(&self) -> &HashMap<Name, Name> {
-        &self.directive_name_map
-    }
-
     pub(in crate::plugins::demand_control) fn type_field_cost_directive(
         &self,
         type_name: &str,
@@ -163,11 +164,24 @@ impl DemandControlledSchema {
             .get(type_name)?
             .get(field_name)
     }
+
+    pub(in crate::plugins::demand_control) fn argument_cost_directive(
+        &self,
+        definition: &InputValueDefinition,
+        ty: &ExtendedType,
+    ) -> Option<CostDirective> {
+        // For now, we ignore FederationError and return None because this should not block the whole scoring
+        // process at runtime. Later, this should be pushed into the constructor and propagate any federation
+        // errors encountered when parsing.
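+        // `.ok().flatten()` below collapses the `Result<Option<_>, _>` from the federation
+        // helper into a plain `Option`, so a federation error scores the same as an
+        // argument with no cost directive at all.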
+        CostSpecDefinition::cost_directive_from_argument(&self.inner, definition, ty)
+            .ok()
+            .flatten()
+    }
 }
 
 impl AsRef<Valid<Schema>> for DemandControlledSchema {
     fn as_ref(&self) -> &Valid<Schema> {
-        &self.inner
+        self.inner.schema()
     }
 }
 
@@ -175,6 +189,6 @@ impl Deref for DemandControlledSchema {
     type Target = Schema;
 
     fn deref(&self) -> &Self::Target {
-        &self.inner
+        self.inner.schema()
     }
 }
diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs
index 397f33e9b6..ca7217ba7f 100644
--- a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs
+++ b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs
@@ -22,7 +22,6 @@ use super::DemandControlError;
 use crate::graphql::Response;
 use crate::graphql::ResponseVisitor;
 use crate::json_ext::Object;
-use crate::plugins::demand_control::cost_calculator::directives::CostDirective;
 use crate::plugins::demand_control::cost_calculator::directives::ListSizeDirective;
 use crate::query_planner::fetch::SubgraphOperation;
 use crate::query_planner::DeferredNode;
@@ -59,9 +58,7 @@ fn score_argument(
             argument_definition.ty.inner_named_type()
         ))
     })?;
-    let cost_directive =
-        CostDirective::from_argument(schema.directive_name_map(), argument_definition)
-            .or(CostDirective::from_type(schema.directive_name_map(), ty));
+    let cost_directive = schema.argument_cost_directive(argument_definition, ty);
 
     match (argument, ty) {
         (_, ExtendedType::Interface(_))
@@ -124,9 +121,7 @@ fn score_variable(
             argument_definition.ty.inner_named_type()
         ))
     })?;
-    let cost_directive =
-        CostDirective::from_argument(schema.directive_name_map(), argument_definition)
-            .or(CostDirective::from_type(schema.directive_name_map(), ty));
+    let cost_directive = schema.argument_cost_directive(argument_definition, ty);
 
     match (variable, ty) {
         (_, ExtendedType::Interface(_))
@@ -221,7 +216,7 @@ impl StaticCostCalculator {
             .schema
             .type_field_list_size_directive(parent_type, &field.name)
         {
-            Some(dir) => dir.with_field_and_variables(field, ctx.variables).map(Some),
+            Some(dir) => ListSizeDirective::new(dir, field, ctx.variables).map(Some),
             None => Ok(None),
         }?;
         let instance_count = if !field.ty().is_list() {
diff --git a/apollo-router/src/plugins/demand_control/mod.rs b/apollo-router/src/plugins/demand_control/mod.rs
index 5e3ba587f8..8e8d419ab8 100644
--- a/apollo-router/src/plugins/demand_control/mod.rs
+++ b/apollo-router/src/plugins/demand_control/mod.rs
@@ -11,6 +11,7 @@ use apollo_compiler::schema::FieldLookupError;
 use apollo_compiler::validation::Valid;
 use apollo_compiler::validation::WithErrors;
 use apollo_compiler::ExecutableDocument;
+use apollo_federation::error::FederationError;
 use displaydoc::Display;
 use futures::future::Either;
 use futures::stream;
@@ -123,6 +124,8 @@ pub(crate) enum DemandControlError {
     SubgraphOperationNotInitialized(crate::query_planner::fetch::SubgraphOperationNotInitialized),
     /// {0}
     ContextSerializationError(String),
+    /// {0}
+    FederationError(FederationError),
 }
 
 impl IntoGraphQLErrors for DemandControlError {
@@ -163,6 +166,10 @@
                 .extension_code(self.code())
                 .message(self.to_string())
                 .build()]),
+            DemandControlError::FederationError(_) => Ok(vec![graphql::Error::builder()
+                .extension_code(self.code())
+                .message(self.to_string())
+                .build()]),
         }
     }
 }
@@ -175,6 +182,7 @@ impl DemandControlError {
             DemandControlError::QueryParseFailure(_) => "COST_QUERY_PARSE_FAILURE",
DemandControlError::SubgraphOperationNotInitialized(e) => e.code(), DemandControlError::ContextSerializationError(_) => "COST_CONTEXT_SERIALIZATION_ERROR", + DemandControlError::FederationError(_) => "FEDERATION_ERROR", } } } @@ -201,6 +209,12 @@ impl<'a> From> for DemandControlError { } } +impl From for DemandControlError { + fn from(value: FederationError) -> Self { + DemandControlError::FederationError(value) + } +} + #[derive(Clone)] pub(crate) struct DemandControlContext { pub(crate) strategy: Strategy, diff --git a/apollo-router/src/plugins/fleet_detector.rs b/apollo-router/src/plugins/fleet_detector.rs new file mode 100644 index 0000000000..46fadedd55 --- /dev/null +++ b/apollo-router/src/plugins/fleet_detector.rs @@ -0,0 +1,738 @@ +use std::env; +use std::env::consts::ARCH; +use std::env::consts::OS; +use std::sync::Arc; +use std::sync::Mutex; +use std::time::Duration; +use std::time::Instant; + +use futures::StreamExt; +use opentelemetry::metrics::MeterProvider; +use opentelemetry_api::metrics::ObservableGauge; +use opentelemetry_api::metrics::Unit; +use opentelemetry_api::KeyValue; +use schemars::JsonSchema; +use serde::Deserialize; +use sysinfo::System; +use tower::util::BoxService; +use tower::BoxError; +use tower::ServiceExt as _; +use tracing::debug; + +use crate::executable::APOLLO_TELEMETRY_DISABLED; +use crate::metrics::meter_provider; +use crate::plugin::PluginInit; +use crate::plugin::PluginPrivate; +use crate::services::http::HttpRequest; +use crate::services::http::HttpResponse; +use crate::services::router; +use crate::services::router::body::RouterBody; + +const REFRESH_INTERVAL: Duration = Duration::from_secs(60); +const COMPUTE_DETECTOR_THRESHOLD: u16 = 24576; +const OFFICIAL_HELM_CHART_VAR: &str = "APOLLO_ROUTER_OFFICIAL_HELM_CHART"; + +#[derive(Debug, Default, Deserialize, JsonSchema)] +struct Conf {} + +#[derive(Debug)] +struct SystemGetter { + system: System, + start: Instant, +} + +impl SystemGetter { + fn new() -> Self { + let mut system = System::new(); + system.refresh_all(); + Self { + system, + start: Instant::now(), + } + } + + fn get_system(&mut self) -> &System { + if self.start.elapsed() >= REFRESH_INTERVAL { + self.start = Instant::now(); + self.system.refresh_cpu_all(); + self.system.refresh_memory(); + } + &self.system + } +} + +#[derive(Default)] +enum GaugeStore { + #[default] + Disabled, + Pending, + Active(Vec>), +} + +impl GaugeStore { + fn active(opts: &GaugeOptions) -> GaugeStore { + let system_getter = Arc::new(Mutex::new(SystemGetter::new())); + let meter = meter_provider().meter("apollo/router"); + + let mut gauges = Vec::new(); + // apollo.router.instance + { + let mut attributes = Vec::new(); + // CPU architecture + attributes.push(KeyValue::new("host.arch", get_otel_arch())); + // Operating System + attributes.push(KeyValue::new("os.type", get_otel_os())); + if OS == "linux" { + attributes.push(KeyValue::new( + "linux.distribution", + System::distribution_id(), + )); + } + // Compute Environment + if let Some(env) = apollo_environment_detector::detect_one(COMPUTE_DETECTOR_THRESHOLD) { + attributes.push(KeyValue::new("cloud.platform", env.platform_code())); + if let Some(cloud_provider) = env.cloud_provider() { + attributes.push(KeyValue::new("cloud.provider", cloud_provider.code())); + } + } + // Deployment type + attributes.push(KeyValue::new("deployment.type", get_deployment_type())); + gauges.push( + meter + .u64_observable_gauge("apollo.router.instance") + .with_description("The number of instances the router is running on") + 
.with_callback(move |i| { + i.observe(1, &attributes); + }) + .init(), + ); + } + // apollo.router.instance.cpu_freq + { + let system_getter = system_getter.clone(); + gauges.push( + meter + .u64_observable_gauge("apollo.router.instance.cpu_freq") + .with_description( + "The CPU frequency of the underlying instance the router is deployed to", + ) + .with_unit(Unit::new("Mhz")) + .with_callback(move |gauge| { + let local_system_getter = system_getter.clone(); + let mut system_getter = local_system_getter.lock().unwrap(); + let system = system_getter.get_system(); + let cpus = system.cpus(); + let cpu_freq = + cpus.iter().map(|cpu| cpu.frequency()).sum::() / cpus.len() as u64; + gauge.observe(cpu_freq, &[]) + }) + .init(), + ); + } + // apollo.router.instance.cpu_count + { + let system_getter = system_getter.clone(); + gauges.push( + meter + .u64_observable_gauge("apollo.router.instance.cpu_count") + .with_description( + "The number of CPUs reported by the instance the router is running on", + ) + .with_callback(move |gauge| { + let local_system_getter = system_getter.clone(); + let mut system_getter = local_system_getter.lock().unwrap(); + let system = system_getter.get_system(); + let cpu_count = detect_cpu_count(system); + gauge.observe(cpu_count, &[KeyValue::new("host.arch", get_otel_arch())]) + }) + .init(), + ); + } + // apollo.router.instance.total_memory + { + let system_getter = system_getter.clone(); + gauges.push( + meter + .u64_observable_gauge("apollo.router.instance.total_memory") + .with_description( + "The amount of memory reported by the instance the router is running on", + ) + .with_callback(move |gauge| { + let local_system_getter = system_getter.clone(); + let mut system_getter = local_system_getter.lock().unwrap(); + let system = system_getter.get_system(); + gauge.observe( + system.total_memory(), + &[KeyValue::new("host.arch", get_otel_arch())], + ) + }) + .with_unit(Unit::new("bytes")) + .init(), + ); + } + { + let opts = opts.clone(); + gauges.push( + meter + .u64_observable_gauge("apollo.router.instance.schema") + .with_description("Details about the current in-use schema") + .with_callback(move |gauge| { + // NOTE: this is a fixed gauge. We only care about observing the included + // attributes. + let mut attributes: Vec = vec![KeyValue::new( + "schema_hash", + opts.supergraph_schema_hash.clone(), + )]; + if let Some(launch_id) = opts.launch_id.as_ref() { + attributes.push(KeyValue::new("launch_id", launch_id.to_string())); + } + gauge.observe(1, attributes.as_slice()) + }) + .init(), + ) + } + GaugeStore::Active(gauges) + } +} + +#[derive(Clone, Default)] +struct GaugeOptions { + supergraph_schema_hash: String, + launch_id: Option, +} + +#[derive(Default)] +struct FleetDetector { + enabled: bool, + gauge_store: Mutex, + + // Options passed to the gauge_store during activation. 
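+    // These options are captured at construction time and only consumed in `activate()`,
+    // when the Pending gauge store is swapped for an Active one registered on the meter.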
+ gauge_options: GaugeOptions, +} + +#[async_trait::async_trait] +impl PluginPrivate for FleetDetector { + type Config = Conf; + + async fn new(plugin: PluginInit) -> Result { + debug!("initialising fleet detection plugin"); + if let Ok(val) = env::var(APOLLO_TELEMETRY_DISABLED) { + if val == "true" { + debug!("fleet detection disabled, no telemetry will be sent"); + return Ok(FleetDetector::default()); + } + } + + let gauge_options = GaugeOptions { + supergraph_schema_hash: plugin.supergraph_schema_id.to_string(), + launch_id: plugin.launch_id.map(|s| s.to_string()), + }; + + Ok(FleetDetector { + enabled: true, + gauge_store: Mutex::new(GaugeStore::Pending), + gauge_options, + }) + } + + fn activate(&self) { + let mut store = self.gauge_store.lock().expect("lock poisoned"); + if matches!(*store, GaugeStore::Pending) { + *store = GaugeStore::active(&self.gauge_options); + } + } + + fn router_service(&self, service: router::BoxService) -> router::BoxService { + if !self.enabled { + return service; + } + + service + // Count the number of request bytes from clients to the router + .map_request(move |req: router::Request| router::Request { + router_request: req.router_request.map(move |body| { + router::Body::wrap_stream(body.inspect(|res| { + if let Ok(bytes) = res { + u64_counter!( + "apollo.router.operations.request_size", + "Total number of request bytes from clients", + bytes.len() as u64 + ); + } + })) + }), + context: req.context, + }) + // Count the number of response bytes from the router to clients + .map_response(move |res: router::Response| router::Response { + response: res.response.map(move |body| { + router::Body::wrap_stream(body.inspect(|res| { + if let Ok(bytes) = res { + u64_counter!( + "apollo.router.operations.response_size", + "Total number of response bytes to clients", + bytes.len() as u64 + ); + } + })) + }), + context: res.context, + }) + .boxed() + } + + fn http_client_service( + &self, + subgraph_name: &str, + service: BoxService, + ) -> BoxService { + if !self.enabled { + return service; + } + let sn_req = Arc::new(subgraph_name.to_string()); + let sn_res = sn_req.clone(); + service + // Count the number of bytes per subgraph fetch request + .map_request(move |req: HttpRequest| { + let sn = sn_req.clone(); + HttpRequest { + http_request: req.http_request.map(move |body| { + let sn = sn.clone(); + RouterBody::wrap_stream(body.inspect(move |res| { + if let Ok(bytes) = res { + let sn = sn.clone(); + u64_counter!( + "apollo.router.operations.fetch.request_size", + "Total number of request bytes for subgraph fetches", + bytes.len() as u64, + subgraph.name = sn.to_string() + ); + } + })) + }), + context: req.context, + } + }) + // Count the number of fetches, and the number of bytes per subgraph fetch response + .map_result(move |res| { + let sn = sn_res.clone(); + match res { + Ok(res) => { + u64_counter!( + "apollo.router.operations.fetch", + "Number of subgraph fetches", + 1u64, + subgraph.name = sn.to_string(), + client_error = false, + http.response.status_code = res.http_response.status().as_u16() as i64 + ); + let sn = sn_res.clone(); + Ok(HttpResponse { + http_response: res.http_response.map(move |body| { + let sn = sn.clone(); + RouterBody::wrap_stream(body.inspect(move |res| { + if let Ok(bytes) = res { + let sn = sn.clone(); + u64_counter!( + "apollo.router.operations.fetch.response_size", + "Total number of response bytes for subgraph fetches", + bytes.len() as u64, + subgraph.name = sn.to_string() + ); + } + })) + }), + context: res.context, + }) + } + 
Err(err) => { + u64_counter!( + "apollo.router.operations.fetch", + "Number of subgraph fetches", + 1u64, + subgraph.name = sn.to_string(), + client_error = true + ); + Err(err) + } + } + }) + .boxed() + } +} + +#[cfg(not(target_os = "linux"))] +fn detect_cpu_count(system: &System) -> u64 { + system.cpus().len() as u64 +} + +// Because Linux provides CGroups as a way of controlling the proportion of CPU time each +// process gets we can perform slightly more introspection here than simply appealing to the +// raw number of processors. Hence, the extra logic including below. +#[cfg(target_os = "linux")] +fn detect_cpu_count(system: &System) -> u64 { + use std::collections::HashSet; + use std::fs; + + let system_cpus = system.cpus().len() as u64; + // Grab the contents of /proc/filesystems + let fses: HashSet = match fs::read_to_string("/proc/filesystems") { + Ok(content) => content + .lines() + .map(|x| x.split_whitespace().next().unwrap_or("").to_string()) + .filter(|x| x.contains("cgroup")) + .collect(), + Err(_) => return system_cpus, + }; + + if fses.contains("cgroup2") { + // If we're looking at cgroup2 then we need to look in `cpu.max` + match fs::read_to_string("/sys/fs/cgroup/cpu.max") { + Ok(readings) => { + // The format of the file lists the quota first, followed by the period, + // but the quota could also be max which would mean there are no restrictions. + if readings.starts_with("max") { + system_cpus + } else { + // If it's not max then divide the two to get an integer answer + match readings.split_once(' ') { + None => system_cpus, + Some((quota, period)) => { + calculate_cpu_count_with_default(system_cpus, quota, period) + } + } + } + } + Err(_) => system_cpus, + } + } else if fses.contains("cgroup") { + // If we're in cgroup v1 then we need to read from two separate files + let quota = fs::read_to_string("/sys/fs/cgroup/cpu/cpu.cfs_quota_us") + .map(|s| String::from(s.trim())) + .ok(); + let period = fs::read_to_string("/sys/fs/cgroup/cpu/cpu.cfs_period_us") + .map(|s| String::from(s.trim())) + .ok(); + match (quota, period) { + (Some(quota), Some(period)) => { + // In v1 quota being -1 indicates no restrictions so return the maximum (all + // system CPUs) otherwise divide the two. 
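+                // e.g. a quota of "200000" with a period of "100000" yields
+                // 200000 / 100000 = 2 CPUs; unparsable readings fall back to the
+                // raw system CPU count.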
+ if quota == "-1" { + system_cpus + } else { + calculate_cpu_count_with_default(system_cpus, "a, &period) + } + } + _ => system_cpus, + } + } else { + system_cpus + } +} + +#[cfg(target_os = "linux")] +fn calculate_cpu_count_with_default(default: u64, quota: &str, period: &str) -> u64 { + if let (Ok(q), Ok(p)) = (quota.parse::(), period.parse::()) { + q / p + } else { + default + } +} + +fn get_otel_arch() -> &'static str { + match ARCH { + "x86_64" => "amd64", + "aarch64" => "arm64", + "arm" => "arm32", + "powerpc" => "ppc32", + "powerpc64" => "ppc64", + a => a, + } +} + +fn get_otel_os() -> &'static str { + match OS { + "apple" => "darwin", + "dragonfly" => "dragonflybsd", + "macos" => "darwin", + "ios" => "darwin", + a => a, + } +} + +fn get_deployment_type() -> &'static str { + // Official Apollo helm chart + if std::env::var_os(OFFICIAL_HELM_CHART_VAR).is_some() { + return "official_helm_chart"; + } + "unknown" +} + +register_private_plugin!("apollo", "fleet_detector", FleetDetector); + +#[cfg(test)] +mod tests { + use http::StatusCode; + use tower::Service as _; + + use super::*; + use crate::metrics::collect_metrics; + use crate::metrics::test_utils::MetricType; + use crate::metrics::FutureMetricsExt as _; + use crate::plugin::test::MockHttpClientService; + use crate::plugin::test::MockRouterService; + use crate::services::Body; + + #[tokio::test] + async fn test_disabled_router_service() { + async { + // WHEN the plugin is disabled + let plugin = FleetDetector::default(); + + // GIVEN a router service request + let mut mock_bad_request_service = MockRouterService::new(); + mock_bad_request_service + .expect_call() + .times(1) + .returning(|req: router::Request| { + Ok(router::Response { + context: req.context, + response: http::Response::builder() + .status(StatusCode::BAD_REQUEST) + .header("content-type", "application/json") + // making sure the request body is consumed + .body(req.router_request.into_body()) + .unwrap(), + }) + }); + let mut bad_request_router_service = + plugin.router_service(mock_bad_request_service.boxed()); + let router_req = router::Request::fake_builder() + .body("request") + .build() + .unwrap(); + let _router_response = bad_request_router_service + .ready() + .await + .unwrap() + .call(router_req) + .await + .unwrap() + .next_response() + .await + .unwrap(); + + // THEN operation size metrics shouldn't exist + assert!(!collect_metrics().metric_exists::( + "apollo.router.operations.request_size", + MetricType::Counter, + &[], + )); + assert!(!collect_metrics().metric_exists::( + "apollo.router.operations.response_size", + MetricType::Counter, + &[], + )); + } + .with_metrics() + .await; + } + + #[tokio::test] + async fn test_enabled_router_service() { + async { + // WHEN the plugin is enabled + let plugin = FleetDetector { + enabled: true, + ..Default::default() + }; + + // GIVEN a router service request + let mut mock_bad_request_service = MockRouterService::new(); + mock_bad_request_service + .expect_call() + .times(1) + .returning(|req: router::Request| { + Ok(router::Response { + context: req.context, + response: http::Response::builder() + .status(StatusCode::BAD_REQUEST) + .header("content-type", "application/json") + // making sure the request body is consumed + .body(req.router_request.into_body()) + .unwrap(), + }) + }); + let mut bad_request_router_service = + plugin.router_service(mock_bad_request_service.boxed()); + let router_req = router::Request::fake_builder() + .body(Body::wrap_stream(Body::from("request"))) + .build() + .unwrap(); + let 
_router_response = bad_request_router_service + .ready() + .await + .unwrap() + .call(router_req) + .await + .unwrap() + .next_response() + .await + .unwrap(); + + // THEN operation size metrics should exist + assert_counter!("apollo.router.operations.request_size", 7, &[]); + assert_counter!("apollo.router.operations.response_size", 7, &[]); + } + .with_metrics() + .await; + } + + #[tokio::test] + async fn test_disabled_http_client_service() { + async { + // WHEN the plugin is disabled + let plugin = FleetDetector::default(); + + // GIVEN an http client service request + let mut mock_bad_request_service = MockHttpClientService::new(); + mock_bad_request_service.expect_call().times(1).returning( + |req: http::Request| { + Box::pin(async { + let data = hyper::body::to_bytes(req.into_body()).await?; + Ok(http::Response::builder() + .status(StatusCode::BAD_REQUEST) + .header("content-type", "application/json") + // making sure the request body is consumed + .body(Body::from(data)) + .unwrap()) + }) + }, + ); + let mut bad_request_http_client_service = plugin.http_client_service( + "subgraph", + mock_bad_request_service + .map_request(|req: HttpRequest| req.http_request.map(|body| body.into_inner())) + .map_response(|res: http::Response| HttpResponse { + http_response: res.map(RouterBody::from), + context: Default::default(), + }) + .boxed(), + ); + let http_client_req = HttpRequest { + http_request: http::Request::builder() + .body(RouterBody::from("request")) + .unwrap(), + context: Default::default(), + }; + let http_client_response = bad_request_http_client_service + .ready() + .await + .unwrap() + .call(http_client_req) + .await + .unwrap(); + // making sure the response body is consumed + let _data = hyper::body::to_bytes(http_client_response.http_response.into_body()) + .await + .unwrap(); + + // THEN fetch metrics shouldn't exist + assert!(!collect_metrics().metric_exists::( + "apollo.router.operations.fetch", + MetricType::Counter, + &[KeyValue::new("subgraph.name", "subgraph"),], + )); + assert!(!collect_metrics().metric_exists::( + "apollo.router.operations.fetch.request_size", + MetricType::Counter, + &[KeyValue::new("subgraph.name", "subgraph"),], + )); + assert!(!collect_metrics().metric_exists::( + "apollo.router.operations.fetch.response_size", + MetricType::Counter, + &[KeyValue::new("subgraph.name", "subgraph"),], + )); + } + .with_metrics() + .await; + } + + #[tokio::test] + async fn test_enabled_http_client_service() { + async { + // WHEN the plugin is enabled + let plugin = FleetDetector { + enabled: true, + ..Default::default() + }; + + // GIVEN an http client service request + let mut mock_bad_request_service = MockHttpClientService::new(); + mock_bad_request_service.expect_call().times(1).returning( + |req: http::Request| { + Box::pin(async { + let data = hyper::body::to_bytes(req.into_body()).await?; + Ok(http::Response::builder() + .status(StatusCode::BAD_REQUEST) + .header("content-type", "application/json") + // making sure the request body is consumed + .body(Body::from(data)) + .unwrap()) + }) + }, + ); + let mut bad_request_http_client_service = plugin.http_client_service( + "subgraph", + mock_bad_request_service + .map_request(|req: HttpRequest| req.http_request.map(|body| body.into_inner())) + .map_response(|res: http::Response| HttpResponse { + http_response: res.map(RouterBody::from), + context: Default::default(), + }) + .boxed(), + ); + let http_client_req = HttpRequest { + http_request: http::Request::builder() + .body(RouterBody::from("request")) + 
.unwrap(), + context: Default::default(), + }; + let http_client_response = bad_request_http_client_service + .ready() + .await + .unwrap() + .call(http_client_req) + .await + .unwrap(); + + // making sure the response body is consumed + let _data = hyper::body::to_bytes(http_client_response.http_response.into_body()) + .await + .unwrap(); + + // THEN fetch metrics should exist + assert_counter!( + "apollo.router.operations.fetch", + 1, + &[ + KeyValue::new("subgraph.name", "subgraph"), + KeyValue::new("http.response.status_code", 400), + KeyValue::new("client_error", false) + ] + ); + assert_counter!( + "apollo.router.operations.fetch.request_size", + 7, + &[KeyValue::new("subgraph.name", "subgraph"),] + ); + assert_counter!( + "apollo.router.operations.fetch.response_size", + 7, + &[KeyValue::new("subgraph.name", "subgraph"),] + ); + } + .with_metrics() + .await; + } +} diff --git a/apollo-router/src/plugins/mod.rs b/apollo-router/src/plugins/mod.rs index beac8037b9..5684d0c3d3 100644 --- a/apollo-router/src/plugins/mod.rs +++ b/apollo-router/src/plugins/mod.rs @@ -28,6 +28,7 @@ pub(crate) mod csrf; mod demand_control; mod expose_query_plan; pub(crate) mod file_uploads; +mod fleet_detector; mod forbid_mutations; mod headers; mod include_subgraph_errors; diff --git a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap index 01cca77a5b..de68830bfe 100644 --- a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap +++ b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap @@ -12,14 +12,14 @@ expression: query_plan "kind": "Fetch", "serviceName": "Subgraph2", "variableUsages": [], - "operation": "{percent0{foo}}", + "operation": "{ percent0 { foo } }", "operationName": null, "operationKind": "query", "id": null, "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "23605b350473485e40bc8b1245f0c5c226a2997a96291bf3ad3412570a5172bb", + "schemaAwareHash": "343157a7d5b7929ebdc0c17cbf0f23c8d3cf0c93a820856d3a189521cc2f24a2", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap index 455898049f..2967a7d6f7 100644 --- a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap +++ b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap @@ -17,14 +17,14 @@ expression: query_plan "kind": "Fetch", "serviceName": "Subgraph1", "variableUsages": [], - "operation": "{percent100{__typename id}}", + "operation": "{ percent100 { __typename id } }", "operationName": null, "operationKind": "query", "id": null, "inputRewrites": null, "outputRewrites": 
null, "contextRewrites": null, - "schemaAwareHash": "d14f50b039a3b961385f4d2a878c5800dd01141cddd3f8f1874a5499bbe397a9", + "schemaAwareHash": "df2a0633d70ab97805722bae920647da51b7eb821b06d8a2499683c5c7024316", "authorization": { "is_authenticated": false, "scopes": [], @@ -56,14 +56,14 @@ expression: query_plan } ], "variableUsages": [], - "operation": "query($representations:[_Any!]!){_entities(representations:$representations){...on T{foo}}}", + "operation": "query($representations: [_Any!]!) { _entities(representations: $representations) { ... on T { foo } } }", "operationName": null, "operationKind": "query", "id": null, "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "caa182daf66e4ffe9b1af8c386092ba830887bbae0d58395066fa480525080ec", + "schemaAwareHash": "56ac7a7cc11b7f293acbdaf0327cb2b676415eab8343e9259322a1609c90455e", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap index b8130ecf59..b093e7be08 100644 --- a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap +++ b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap @@ -62,14 +62,14 @@ expression: "serde_json::to_value(response).unwrap()" "variableUsages": [ "first" ], - "operation": "query TopProducts__products__0($first:Int){topProducts(first:$first){__typename upc name}}", + "operation": "query TopProducts__products__0($first: Int) { topProducts(first: $first) { __typename upc name } }", "operationName": "TopProducts__products__0", "operationKind": "query", "id": null, "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "c595a39efeab9494c75a29de44ec4748c1741ddb96e1833e99139b058aa9da84", + "schemaAwareHash": "45b4beebcbf1df72ab950db7bd278417712b1aa39119317f44ad5b425bdb6997", "authorization": { "is_authenticated": false, "scopes": [], @@ -102,14 +102,14 @@ expression: "serde_json::to_value(response).unwrap()" } ], "variableUsages": [], - "operation": "query TopProducts__reviews__1($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{id product{__typename upc}author{__typename id}}}}}", + "operation": "query TopProducts__reviews__1($representations: [_Any!]!) { _entities(representations: $representations) { ... 
on Product { reviews { id product { __typename upc } author { __typename id } } } } }", "operationName": "TopProducts__reviews__1", "operationKind": "query", "id": null, "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "7054d7662e20905b01d6f937e6b588ed422e0e79de737c98e3d51b6dc610179f", + "schemaAwareHash": "645f3f8763133d2376e33ab3d1145be7ded0ccc8e94e20aba1fbaa34a51633da", "authorization": { "is_authenticated": false, "scopes": [], @@ -127,15 +127,15 @@ expression: "serde_json::to_value(response).unwrap()" "@", "reviews", "@", - "product" + "author" ], "node": { "kind": "Fetch", - "serviceName": "products", + "serviceName": "accounts", "requires": [ { "kind": "InlineFragment", - "typeCondition": "Product", + "typeCondition": "User", "selections": [ { "kind": "Field", @@ -143,20 +143,20 @@ expression: "serde_json::to_value(response).unwrap()" }, { "kind": "Field", - "name": "upc" + "name": "id" } ] } ], "variableUsages": [], - "operation": "query TopProducts__products__2($representations:[_Any!]!){_entities(representations:$representations){...on Product{name}}}", - "operationName": "TopProducts__products__2", + "operation": "query TopProducts__accounts__2($representations: [_Any!]!) { _entities(representations: $representations) { ... on User { name } } }", + "operationName": "TopProducts__accounts__2", "operationKind": "query", "id": null, "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "76d400fc6a494cbe05a44751923e570ee31928f0fb035ea36c14d4d6f4545482", + "schemaAwareHash": "a79f69245d777abc4afbd7d0a8fc434137fa4fd1079ef082edf4c7746b5a0fcd", "authorization": { "is_authenticated": false, "scopes": [], @@ -171,15 +171,15 @@ expression: "serde_json::to_value(response).unwrap()" "@", "reviews", "@", - "author" + "product" ], "node": { "kind": "Fetch", - "serviceName": "accounts", + "serviceName": "products", "requires": [ { "kind": "InlineFragment", - "typeCondition": "User", + "typeCondition": "Product", "selections": [ { "kind": "Field", @@ -187,20 +187,20 @@ expression: "serde_json::to_value(response).unwrap()" }, { "kind": "Field", - "name": "id" + "name": "upc" } ] } ], "variableUsages": [], - "operation": "query TopProducts__accounts__3($representations:[_Any!]!){_entities(representations:$representations){...on User{name}}}", - "operationName": "TopProducts__accounts__3", + "operation": "query TopProducts__products__3($representations: [_Any!]!) { _entities(representations: $representations) { ... on Product { name } } }", + "operationName": "TopProducts__products__3", "operationKind": "query", "id": null, "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "bff0ce0cfd6e2830949c59ae26f350d06d76150d6041b08c3d0c4384bc20b271", + "schemaAwareHash": "5ad94764f288a41312e07745510bf5dade2b63fb82c3d896f7d00408dbbe5cce", "authorization": { "is_authenticated": false, "scopes": [], @@ -213,7 +213,7 @@ expression: "serde_json::to_value(response).unwrap()" ] } }, - "text": "QueryPlan {\n Sequence {\n Fetch(service: \"products\") {\n {\n topProducts(first: $first) {\n __typename\n upc\n name\n }\n }\n },\n Flatten(path: \"topProducts.@\") {\n Fetch(service: \"reviews\") {\n {\n ... on Product {\n __typename\n upc\n }\n } =>\n {\n ... on Product {\n reviews {\n id\n product {\n __typename\n upc\n }\n author {\n __typename\n id\n }\n }\n }\n }\n },\n },\n Parallel {\n Flatten(path: \"topProducts.@.reviews.@.product\") {\n Fetch(service: \"products\") {\n {\n ... 
on Product {\n __typename\n upc\n }\n } =>\n {\n ... on Product {\n name\n }\n }\n },\n },\n Flatten(path: \"topProducts.@.reviews.@.author\") {\n Fetch(service: \"accounts\") {\n {\n ... on User {\n __typename\n id\n }\n } =>\n {\n ... on User {\n name\n }\n }\n },\n },\n },\n },\n}" + "text": "QueryPlan {\n Sequence {\n Fetch(service: \"products\") {\n {\n topProducts(first: $first) {\n __typename\n upc\n name\n }\n }\n },\n Flatten(path: \"topProducts.@\") {\n Fetch(service: \"reviews\") {\n {\n ... on Product {\n __typename\n upc\n }\n } =>\n {\n ... on Product {\n reviews {\n id\n product {\n __typename\n upc\n }\n author {\n __typename\n id\n }\n }\n }\n }\n },\n },\n Parallel {\n Flatten(path: \"topProducts.@.reviews.@.author\") {\n Fetch(service: \"accounts\") {\n {\n ... on User {\n __typename\n id\n }\n } =>\n {\n ... on User {\n name\n }\n }\n },\n },\n Flatten(path: \"topProducts.@.reviews.@.product\") {\n Fetch(service: \"products\") {\n {\n ... on Product {\n __typename\n upc\n }\n } =>\n {\n ... on Product {\n name\n }\n }\n },\n },\n },\n },\n}" } } } diff --git a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap index b8130ecf59..b093e7be08 100644 --- a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap +++ b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap @@ -62,14 +62,14 @@ expression: "serde_json::to_value(response).unwrap()" "variableUsages": [ "first" ], - "operation": "query TopProducts__products__0($first:Int){topProducts(first:$first){__typename upc name}}", + "operation": "query TopProducts__products__0($first: Int) { topProducts(first: $first) { __typename upc name } }", "operationName": "TopProducts__products__0", "operationKind": "query", "id": null, "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "c595a39efeab9494c75a29de44ec4748c1741ddb96e1833e99139b058aa9da84", + "schemaAwareHash": "45b4beebcbf1df72ab950db7bd278417712b1aa39119317f44ad5b425bdb6997", "authorization": { "is_authenticated": false, "scopes": [], @@ -102,14 +102,14 @@ expression: "serde_json::to_value(response).unwrap()" } ], "variableUsages": [], - "operation": "query TopProducts__reviews__1($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{id product{__typename upc}author{__typename id}}}}}", + "operation": "query TopProducts__reviews__1($representations: [_Any!]!) { _entities(representations: $representations) { ... 
on Product { reviews { id product { __typename upc } author { __typename id } } } } }", "operationName": "TopProducts__reviews__1", "operationKind": "query", "id": null, "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "7054d7662e20905b01d6f937e6b588ed422e0e79de737c98e3d51b6dc610179f", + "schemaAwareHash": "645f3f8763133d2376e33ab3d1145be7ded0ccc8e94e20aba1fbaa34a51633da", "authorization": { "is_authenticated": false, "scopes": [], @@ -127,15 +127,15 @@ expression: "serde_json::to_value(response).unwrap()" "@", "reviews", "@", - "product" + "author" ], "node": { "kind": "Fetch", - "serviceName": "products", + "serviceName": "accounts", "requires": [ { "kind": "InlineFragment", - "typeCondition": "Product", + "typeCondition": "User", "selections": [ { "kind": "Field", @@ -143,20 +143,20 @@ expression: "serde_json::to_value(response).unwrap()" }, { "kind": "Field", - "name": "upc" + "name": "id" } ] } ], "variableUsages": [], - "operation": "query TopProducts__products__2($representations:[_Any!]!){_entities(representations:$representations){...on Product{name}}}", - "operationName": "TopProducts__products__2", + "operation": "query TopProducts__accounts__2($representations: [_Any!]!) { _entities(representations: $representations) { ... on User { name } } }", + "operationName": "TopProducts__accounts__2", "operationKind": "query", "id": null, "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "76d400fc6a494cbe05a44751923e570ee31928f0fb035ea36c14d4d6f4545482", + "schemaAwareHash": "a79f69245d777abc4afbd7d0a8fc434137fa4fd1079ef082edf4c7746b5a0fcd", "authorization": { "is_authenticated": false, "scopes": [], @@ -171,15 +171,15 @@ expression: "serde_json::to_value(response).unwrap()" "@", "reviews", "@", - "author" + "product" ], "node": { "kind": "Fetch", - "serviceName": "accounts", + "serviceName": "products", "requires": [ { "kind": "InlineFragment", - "typeCondition": "User", + "typeCondition": "Product", "selections": [ { "kind": "Field", @@ -187,20 +187,20 @@ expression: "serde_json::to_value(response).unwrap()" }, { "kind": "Field", - "name": "id" + "name": "upc" } ] } ], "variableUsages": [], - "operation": "query TopProducts__accounts__3($representations:[_Any!]!){_entities(representations:$representations){...on User{name}}}", - "operationName": "TopProducts__accounts__3", + "operation": "query TopProducts__products__3($representations: [_Any!]!) { _entities(representations: $representations) { ... on Product { name } } }", + "operationName": "TopProducts__products__3", "operationKind": "query", "id": null, "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "bff0ce0cfd6e2830949c59ae26f350d06d76150d6041b08c3d0c4384bc20b271", + "schemaAwareHash": "5ad94764f288a41312e07745510bf5dade2b63fb82c3d896f7d00408dbbe5cce", "authorization": { "is_authenticated": false, "scopes": [], @@ -213,7 +213,7 @@ expression: "serde_json::to_value(response).unwrap()" ] } }, - "text": "QueryPlan {\n Sequence {\n Fetch(service: \"products\") {\n {\n topProducts(first: $first) {\n __typename\n upc\n name\n }\n }\n },\n Flatten(path: \"topProducts.@\") {\n Fetch(service: \"reviews\") {\n {\n ... on Product {\n __typename\n upc\n }\n } =>\n {\n ... on Product {\n reviews {\n id\n product {\n __typename\n upc\n }\n author {\n __typename\n id\n }\n }\n }\n }\n },\n },\n Parallel {\n Flatten(path: \"topProducts.@.reviews.@.product\") {\n Fetch(service: \"products\") {\n {\n ... 
on Product {\n __typename\n upc\n }\n } =>\n {\n ... on Product {\n name\n }\n }\n },\n },\n Flatten(path: \"topProducts.@.reviews.@.author\") {\n Fetch(service: \"accounts\") {\n {\n ... on User {\n __typename\n id\n }\n } =>\n {\n ... on User {\n name\n }\n }\n },\n },\n },\n },\n}" + "text": "QueryPlan {\n Sequence {\n Fetch(service: \"products\") {\n {\n topProducts(first: $first) {\n __typename\n upc\n name\n }\n }\n },\n Flatten(path: \"topProducts.@\") {\n Fetch(service: \"reviews\") {\n {\n ... on Product {\n __typename\n upc\n }\n } =>\n {\n ... on Product {\n reviews {\n id\n product {\n __typename\n upc\n }\n author {\n __typename\n id\n }\n }\n }\n }\n },\n },\n Parallel {\n Flatten(path: \"topProducts.@.reviews.@.author\") {\n Fetch(service: \"accounts\") {\n {\n ... on User {\n __typename\n id\n }\n } =>\n {\n ... on User {\n name\n }\n }\n },\n },\n Flatten(path: \"topProducts.@.reviews.@.product\") {\n Fetch(service: \"products\") {\n {\n ... on Product {\n __typename\n upc\n }\n } =>\n {\n ... on Product {\n name\n }\n }\n },\n },\n },\n },\n}" } } } diff --git a/apollo-router/src/plugins/subscription.rs b/apollo-router/src/plugins/subscription.rs index 50d5e78ead..1d342f586f 100644 --- a/apollo-router/src/plugins/subscription.rs +++ b/apollo-router/src/plugins/subscription.rs @@ -503,10 +503,12 @@ impl Service for CallbackService { }; // Keep the subscription to the client opened payload.subscribed = Some(true); - tracing::info!( - monotonic_counter.apollo.router.operations.subscriptions.events = 1u64, - subscriptions.mode="callback" - ); + u64_counter!( + "apollo.router.operations.subscriptions.events", + "Number of subscription events", + 1, + subscriptions.mode = "callback" + ); handle.send_sync(payload)?; Ok(router::Response { @@ -626,10 +628,12 @@ impl Service for CallbackService { }); } }; - tracing::info!( - monotonic_counter.apollo.router.operations.subscriptions.events = 1u64, - subscriptions.mode="callback", - subscriptions.complete=true + u64_counter!( + "apollo.router.operations.subscriptions.events", + "Number of subscription events", + 1, + subscriptions.mode = "callback", + subscriptions.complete = true ); if let Err(_err) = handle.send_sync( graphql::Response::builder().errors(errors).build(), diff --git a/apollo-router/src/plugins/telemetry/apollo.rs b/apollo-router/src/plugins/telemetry/apollo.rs index 69780cbc9e..89c1ecd8e8 100644 --- a/apollo-router/src/plugins/telemetry/apollo.rs +++ b/apollo-router/src/plugins/telemetry/apollo.rs @@ -67,7 +67,7 @@ pub(crate) struct Config { #[schemars(skip)] pub(crate) apollo_graph_ref: Option, - /// The name of the header to extract from requests when populating 'client nane' for traces and metrics in Apollo Studio. + /// The name of the header to extract from requests when populating 'client name' for traces and metrics in Apollo Studio. 
#[schemars(with = "Option", default = "client_name_header_default_str")] #[serde(deserialize_with = "deserialize_header_name")] pub(crate) client_name_header: HeaderName, diff --git a/apollo-router/src/plugins/telemetry/config_new/attributes.rs b/apollo-router/src/plugins/telemetry/config_new/attributes.rs index 53640cd87b..e21626200f 100644 --- a/apollo-router/src/plugins/telemetry/config_new/attributes.rs +++ b/apollo-router/src/plugins/telemetry/config_new/attributes.rs @@ -47,10 +47,12 @@ use crate::plugins::telemetry::otlp::TelemetryDataKind; use crate::services::router; use crate::services::router::Request; use crate::services::subgraph; +use crate::services::subgraph::SubgraphRequestId; use crate::services::supergraph; use crate::Context; pub(crate) const SUBGRAPH_NAME: Key = Key::from_static_str("subgraph.name"); +pub(crate) const HTTP_REQUEST_RESEND_COUNT: Key = Key::from_static_str("http.request.resend_count"); pub(crate) const SUBGRAPH_GRAPHQL_DOCUMENT: Key = Key::from_static_str("subgraph.graphql.document"); pub(crate) const SUBGRAPH_GRAPHQL_OPERATION_NAME: Key = Key::from_static_str("subgraph.graphql.operation.name"); @@ -231,6 +233,10 @@ pub(crate) struct SubgraphAttributes { /// Requirement level: Recommended #[serde(rename = "subgraph.graphql.operation.type")] graphql_operation_type: Option, + + /// The number of times the request has been resent + #[serde(rename = "http.request.resend_count")] + http_request_resend_count: Option, } impl DefaultForLevel for SubgraphAttributes { @@ -258,6 +264,9 @@ impl DefaultForLevel for SubgraphAttributes { if self.graphql_operation_type.is_none() { self.graphql_operation_type = Some(StandardAttribute::Bool(true)); } + if self.http_request_resend_count.is_none() { + self.http_request_resend_count = Some(StandardAttribute::Bool(true)); + } } DefaultAttributeRequirementLevel::None => {} } @@ -596,11 +605,7 @@ impl DefaultForLevel for HttpServerAttributes { } } -impl Selectors for RouterAttributes { - type Request = router::Request; - type Response = router::Response; - type EventResponse = (); - +impl Selectors for RouterAttributes { fn on_request(&self, request: &router::Request) -> Vec { let mut attrs = self.common.on_request(request); attrs.extend(self.server.on_request(request)); @@ -647,11 +652,7 @@ impl Selectors for RouterAttributes { } } -impl Selectors for HttpCommonAttributes { - type Request = router::Request; - type Response = router::Response; - type EventResponse = (); - +impl Selectors for HttpCommonAttributes { fn on_request(&self, request: &router::Request) -> Vec { let mut attrs = Vec::new(); if let Some(key) = self @@ -801,11 +802,7 @@ impl Selectors for HttpCommonAttributes { } } -impl Selectors for HttpServerAttributes { - type Request = router::Request; - type Response = router::Response; - type EventResponse = (); - +impl Selectors for HttpServerAttributes { fn on_request(&self, request: &router::Request) -> Vec { let mut attrs = Vec::new(); if let Some(key) = self.http_route.as_ref().and_then(|a| a.key(HTTP_ROUTE)) { @@ -1003,11 +1000,9 @@ impl HttpServerAttributes { } } -impl Selectors for SupergraphAttributes { - type Request = supergraph::Request; - type Response = supergraph::Response; - type EventResponse = crate::graphql::Response; - +impl Selectors + for SupergraphAttributes +{ fn on_request(&self, request: &supergraph::Request) -> Vec { let mut attrs = Vec::new(); if let Some(key) = self @@ -1057,7 +1052,7 @@ impl Selectors for SupergraphAttributes { fn on_response_event( &self, - response: 
&Self::EventResponse, + response: &crate::graphql::Response, context: &Context, ) -> Vec { let mut attrs = Vec::new(); @@ -1070,11 +1065,7 @@ impl Selectors for SupergraphAttributes { } } -impl Selectors for SubgraphAttributes { - type Request = subgraph::Request; - type Response = subgraph::Response; - type EventResponse = (); - +impl Selectors for SubgraphAttributes { fn on_request(&self, request: &subgraph::Request) -> Vec { let mut attrs = Vec::new(); if let Some(key) = self @@ -1122,8 +1113,24 @@ impl Selectors for SubgraphAttributes { attrs } - fn on_response(&self, _response: &subgraph::Response) -> Vec { - Vec::default() + fn on_response(&self, response: &subgraph::Response) -> Vec { + let mut attrs = Vec::new(); + if let Some(key) = self + .http_request_resend_count + .as_ref() + .and_then(|a| a.key(HTTP_REQUEST_RESEND_COUNT)) + { + if let Some(resend_count) = response + .context + .get::<_, usize>(SubgraphRequestResendCountKey::new(&response.id)) + .ok() + .flatten() + { + attrs.push(KeyValue::new(key, resend_count as i64)); + } + } + + attrs } fn on_error(&self, _error: &BoxError, _ctx: &Context) -> Vec { @@ -1131,6 +1138,26 @@ impl Selectors for SubgraphAttributes { } } +/// Key used in context to save number of retries for a subgraph http request +pub(crate) struct SubgraphRequestResendCountKey<'a> { + subgraph_req_id: &'a SubgraphRequestId, +} + +impl<'a> SubgraphRequestResendCountKey<'a> { + pub(crate) fn new(subgraph_req_id: &'a SubgraphRequestId) -> Self { + Self { subgraph_req_id } + } +} + +impl<'a> From> for String { + fn from(value: SubgraphRequestResendCountKey) -> Self { + format!( + "apollo::telemetry::http_request_resend_count_{}", + value.subgraph_req_id + ) + } +} + #[cfg(test)] mod test { use std::net::SocketAddr; diff --git a/apollo-router/src/plugins/telemetry/config_new/cache/attributes.rs b/apollo-router/src/plugins/telemetry/config_new/cache/attributes.rs index 00d0b4b240..1d3085ffd1 100644 --- a/apollo-router/src/plugins/telemetry/config_new/cache/attributes.rs +++ b/apollo-router/src/plugins/telemetry/config_new/cache/attributes.rs @@ -36,16 +36,12 @@ impl DefaultForLevel for CacheAttributes { // Nothing to do here because we're using a trick because entity_type is related to CacheControl data we put in the context and for one request we have several entity types // and so several metrics to generate it can't be done here -impl Selectors for CacheAttributes { - type Request = subgraph::Request; - type Response = subgraph::Response; - type EventResponse = (); - - fn on_request(&self, _request: &Self::Request) -> Vec { +impl Selectors for CacheAttributes { + fn on_request(&self, _request: &subgraph::Request) -> Vec { Vec::default() } - fn on_response(&self, _response: &Self::Response) -> Vec { + fn on_response(&self, _response: &subgraph::Response) -> Vec { Vec::default() } diff --git a/apollo-router/src/plugins/telemetry/config_new/cache/mod.rs b/apollo-router/src/plugins/telemetry/config_new/cache/mod.rs index 7000278edc..894d938c1f 100644 --- a/apollo-router/src/plugins/telemetry/config_new/cache/mod.rs +++ b/apollo-router/src/plugins/telemetry/config_new/cache/mod.rs @@ -49,7 +49,7 @@ impl DefaultForLevel for CacheInstrumentsConfig { pub(crate) struct CacheInstruments { pub(crate) cache_hit: Option< - CustomCounter, + CustomCounter, >, } diff --git a/apollo-router/src/plugins/telemetry/config_new/cost/mod.rs b/apollo-router/src/plugins/telemetry/config_new/cost/mod.rs index 8341790eac..a3fad1f93e 100644 --- 
a/apollo-router/src/plugins/telemetry/config_new/cost/mod.rs +++ b/apollo-router/src/plugins/telemetry/config_new/cost/mod.rs @@ -1,4 +1,5 @@ use std::collections::HashMap; +use std::marker::PhantomData; use std::sync::Arc; use opentelemetry::metrics::MeterProvider; @@ -12,6 +13,7 @@ use tower::BoxError; use super::attributes::StandardAttribute; use super::instruments::Increment; use super::instruments::StaticInstrument; +use crate::graphql; use crate::metrics; use crate::plugins::demand_control::COST_ACTUAL_KEY; use crate::plugins::demand_control::COST_DELTA_KEY; @@ -59,16 +61,14 @@ pub(crate) struct SupergraphCostAttributes { cost_result: Option, } -impl Selectors for SupergraphCostAttributes { - type Request = supergraph::Request; - type Response = supergraph::Response; - type EventResponse = crate::graphql::Response; - - fn on_request(&self, _request: &Self::Request) -> Vec { +impl Selectors + for SupergraphCostAttributes +{ + fn on_request(&self, _request: &supergraph::Request) -> Vec { Vec::default() } - fn on_response(&self, _response: &Self::Response) -> Vec { + fn on_response(&self, _response: &supergraph::Response) -> Vec { Vec::default() } @@ -76,7 +76,11 @@ impl Selectors for SupergraphCostAttributes { Vec::default() } - fn on_response_event(&self, _response: &Self::EventResponse, ctx: &Context) -> Vec { + fn on_response_event( + &self, + _response: &crate::graphql::Response, + ctx: &Context, + ) -> Vec { let mut attrs = Vec::with_capacity(4); if let Some(estimated_cost) = self.estimated_cost_if_configured(ctx) { attrs.push(estimated_cost); @@ -216,7 +220,13 @@ impl CostInstrumentsConfig { config: &DefaultedStandardInstrument>, selector: SupergraphSelector, static_instruments: &Arc>, - ) -> CustomHistogram { + ) -> CustomHistogram< + Request, + Response, + graphql::Response, + SupergraphAttributes, + SupergraphSelector, + > { let mut nb_attributes = 0; let selectors = match config { DefaultedStandardInstrument::Bool(_) | DefaultedStandardInstrument::Unset => None, @@ -241,6 +251,7 @@ impl CostInstrumentsConfig { selector: Some(Arc::new(selector)), selectors, updated: false, + _phantom: PhantomData, }), } } @@ -254,6 +265,7 @@ pub(crate) struct CostInstruments { CustomHistogram< supergraph::Request, supergraph::Response, + crate::graphql::Response, SupergraphAttributes, SupergraphSelector, >, @@ -264,6 +276,7 @@ pub(crate) struct CostInstruments { CustomHistogram< supergraph::Request, supergraph::Response, + crate::graphql::Response, SupergraphAttributes, SupergraphSelector, >, @@ -273,6 +286,7 @@ pub(crate) struct CostInstruments { CustomHistogram< supergraph::Request, supergraph::Response, + crate::graphql::Response, SupergraphAttributes, SupergraphSelector, >, diff --git a/apollo-router/src/plugins/telemetry/config_new/events.rs b/apollo-router/src/plugins/telemetry/config_new/events.rs index c5fac133a2..763686d807 100644 --- a/apollo-router/src/plugins/telemetry/config_new/events.rs +++ b/apollo-router/src/plugins/telemetry/config_new/events.rs @@ -1,4 +1,5 @@ use std::fmt::Debug; +use std::marker::PhantomData; use std::sync::Arc; #[cfg(test)] @@ -16,6 +17,7 @@ use super::instruments::Instrumented; use super::Selector; use super::Selectors; use super::Stage; +use crate::graphql; use crate::plugins::telemetry::config_new::attributes::RouterAttributes; use crate::plugins::telemetry::config_new::attributes::SubgraphAttributes; use crate::plugins::telemetry::config_new::attributes::SupergraphAttributes; @@ -59,6 +61,7 @@ impl Events { selectors: 
event_cfg.attributes.clone().into(),
                     condition: event_cfg.condition.clone(),
                     attributes: Vec::new(),
+                    _phantom: PhantomData,
                 }),
             }),
         })
@@ -88,6 +91,7 @@ impl Events {
                     selectors: event_cfg.attributes.clone().into(),
                     condition: event_cfg.condition.clone(),
                     attributes: Vec::new(),
+                    _phantom: PhantomData,
                 }),
             }),
         })
@@ -117,6 +121,7 @@ impl Events {
                     selectors: event_cfg.attributes.clone().into(),
                     condition: event_cfg.condition.clone(),
                     attributes: Vec::new(),
+                    _phantom: PhantomData,
                 }),
             }),
         })
@@ -180,31 +185,32 @@ impl Events {
 }
 
 pub(crate) type RouterEvents =
-    CustomEvents<router::Request, router::Response, RouterAttributes, RouterSelector>;
+    CustomEvents<router::Request, router::Response, (), RouterAttributes, RouterSelector>;
 
 pub(crate) type SupergraphEvents = CustomEvents<
     supergraph::Request,
     supergraph::Response,
+    graphql::Response,
     SupergraphAttributes,
     SupergraphSelector,
 >;
 
 pub(crate) type SubgraphEvents =
-    CustomEvents<subgraph::Request, subgraph::Response, SubgraphAttributes, SubgraphSelector>;
+    CustomEvents<subgraph::Request, subgraph::Response, (), SubgraphAttributes, SubgraphSelector>;
 
-pub(crate) struct CustomEvents<Request, Response, Attributes, Sel>
+pub(crate) struct CustomEvents<Request, Response, EventResponse, Attributes, Sel>
 where
-    Attributes: Selectors<Request = Request, Response = Response> + Default,
+    Attributes: Selectors<Request, Response, EventResponse> + Default,
     Sel: Selector<Request = Request, Response = Response> + Debug,
 {
     request: StandardEvent<Sel>,
     response: StandardEvent<Sel>,
     error: StandardEvent<Sel>,
-    custom: Vec<CustomEvent<Request, Response, Attributes, Sel>>,
+    custom: Vec<CustomEvent<Request, Response, EventResponse, Attributes, Sel>>,
 }
 
 impl Instrumented
-    for CustomEvents<router::Request, router::Response, RouterAttributes, RouterSelector>
+    for CustomEvents<router::Request, router::Response, (), RouterAttributes, RouterSelector>
 {
     type Request = router::Request;
     type Response = router::Response;
@@ -331,6 +337,7 @@ impl Instrumented
     for CustomEvents<
         supergraph::Request,
         supergraph::Response,
+        crate::graphql::Response,
         SupergraphAttributes,
         SupergraphSelector,
     >
@@ -438,7 +445,13 @@ impl Instrumented
 }
 
 impl Instrumented
-    for CustomEvents<subgraph::Request, subgraph::Response, SubgraphAttributes, SubgraphSelector>
+    for CustomEvents<
+        subgraph::Request,
+        subgraph::Response,
+        (),
+        SubgraphAttributes,
+        SubgraphSelector,
+    >
 {
     type Request = subgraph::Request;
     type Response = subgraph::Response;
@@ -628,9 +641,7 @@ where
 impl<A, E, Request, Response, EventResponse> Event<A, E>
 where
-    A: Selectors<Request = Request, Response = Response, EventResponse = EventResponse>
-        + Default
-        + Debug,
+    A: Selectors<Request, Response, EventResponse> + Default + Debug,
     E: Selector<Request = Request, Response = Response> + Debug,
 {
     pub(crate) fn validate(&self) -> Result<(), String> {
@@ -655,17 +666,17 @@ pub(crate) enum EventOn {
     Error,
 }
 
-pub(crate) struct CustomEvent<Request, Response, A, T>
+pub(crate) struct CustomEvent<Request, Response, EventResponse, A, T>
 where
-    A: Selectors<Request = Request, Response = Response> + Default,
+    A: Selectors<Request, Response, EventResponse> + Default,
     T: Selector<Request = Request, Response = Response> + Debug,
 {
-    inner: Mutex<CustomEventInner<Request, Response, A, T>>,
+    inner: Mutex<CustomEventInner<Request, Response, EventResponse, A, T>>,
 }
 
-struct CustomEventInner<Request, Response, A, T>
+struct CustomEventInner<Request, Response, EventResponse, A, T>
 where
-    A: Selectors<Request = Request, Response = Response> + Default,
+    A: Selectors<Request, Response, EventResponse> + Default,
     T: Selector<Request = Request, Response = Response> + Debug,
 {
     name: String,
@@ -675,11 +686,13 @@ where
     selectors: Option<Arc<Extendable<A, T>>>,
     condition: Condition<T>,
     attributes: Vec<KeyValue>,
+    _phantom: PhantomData<EventResponse>,
 }
 
-impl<A, T, Request, Response, EventResponse> Instrumented for CustomEvent<Request, Response, A, T>
+impl<A, T, Request, Response, EventResponse> Instrumented
+    for CustomEvent<Request, Response, EventResponse, A, T>
 where
-    A: Selectors<Request = Request, Response = Response> + Default,
+    A: Selectors<Request, Response, EventResponse> + Default,
     T: Selector<Request = Request, Response = Response, EventResponse = EventResponse>
         + Debug
         + Debug,
@@ -765,9 +778,10 @@ where
     }
 }
 
-impl<A, T, Request, Response, EventResponse> CustomEventInner<Request, Response, A, T>
+impl<A, T, Request, Response, EventResponse>
+    CustomEventInner<Request, Response, EventResponse, A, T>
 where
-    A: Selectors<Request = Request, Response = Response> + Default,
+    A: Selectors<Request, Response, EventResponse> + Default,
     T: Selector<Request = Request, Response = Response, EventResponse = EventResponse>
         + Debug
         + Debug,
 {
     #[inline]
diff --git a/apollo-router/src/plugins/telemetry/config_new/extendable.rs b/apollo-router/src/plugins/telemetry/config_new/extendable.rs
index c515a352bd..7f4db376d8 100644
--- a/apollo-router/src/plugins/telemetry/config_new/extendable.rs
+++ b/apollo-router/src/plugins/telemetry/config_new/extendable.rs
@@ -181,16 +181,13 @@ where
     }
 }
 
-impl<A, E, Request, Response, EventResponse> Selectors for Extendable<A, E>
+impl<A, E, Request, Response, EventResponse> Selectors<Request, Response, EventResponse>
+    for Extendable<A, E>
 where
-    A: Default + Selectors<Request = Request, Response = Response, EventResponse = EventResponse>,
+    A: Default + Selectors<Request, Response, EventResponse>,
     E: Selector<Request = Request, Response = Response, EventResponse = EventResponse>,
 {
-    type Request = Request;
-    type Response = Response;
-    type EventResponse = EventResponse;
-
-    fn on_request(&self, request: &Self::Request) -> Vec<KeyValue> {
+    fn on_request(&self, request: &Request) -> Vec<KeyValue> {
         let mut attrs = self.attributes.on_request(request);
         let custom_attributes = self.custom.iter().filter_map(|(key, value)| {
             value
@@ -202,7 +199,7 @@ where
         attrs
     }
 
-    fn on_response(&self, response: &Self::Response) -> Vec<KeyValue> {
+    fn on_response(&self, response: &Response) -> Vec<KeyValue> {
         let mut attrs = self.attributes.on_response(response);
         let custom_attributes = self.custom.iter().filter_map(|(key, value)| {
             value
@@ -226,7 +223,7 @@ where
         attrs
     }
 
-    fn on_response_event(&self, response: &Self::EventResponse, ctx: &Context) -> Vec<KeyValue> {
+    fn on_response_event(&self, response: &EventResponse, ctx: &Context) -> Vec<KeyValue> {
         let mut attrs = self.attributes.on_response_event(response, ctx);
         let custom_attributes = self.custom.iter().filter_map(|(key, value)| {
             value
@@ -258,7 +255,7 @@ where
 
 impl<A, E, Request, Response, EventResponse> Extendable<A, E>
 where
-    A: Default + Selectors<Request = Request, Response = Response, EventResponse = EventResponse>,
+    A: Default + Selectors<Request, Response, EventResponse>,
     E: Selector,
 {
     pub(crate) fn validate(&self, restricted_stage: Option<Stage>) -> Result<(), String> {
diff --git a/apollo-router/src/plugins/telemetry/config_new/graphql/attributes.rs b/apollo-router/src/plugins/telemetry/config_new/graphql/attributes.rs
index 8765348d78..60d9e168c6 100644
--- a/apollo-router/src/plugins/telemetry/config_new/graphql/attributes.rs
+++ b/apollo-router/src/plugins/telemetry/config_new/graphql/attributes.rs
@@ -58,16 +58,14 @@ impl DefaultForLevel for GraphQLAttributes {
     }
 }
 
-impl Selectors for GraphQLAttributes {
-    type Request = supergraph::Request;
-    type Response = supergraph::Response;
-    type EventResponse = crate::graphql::Response;
-
-    fn on_request(&self, _request: &Self::Request) -> Vec<KeyValue> {
+impl Selectors<supergraph::Request, supergraph::Response, crate::graphql::Response>
+    for GraphQLAttributes
+{
+    fn on_request(&self, _request: &supergraph::Request) -> Vec<KeyValue> {
         Vec::default()
     }
 
-    fn on_response(&self, _response: &Self::Response) -> Vec<KeyValue> {
+    fn on_response(&self, _response: &supergraph::Response) -> Vec<KeyValue> {
         Vec::default()
     }
 
diff --git a/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs b/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs
index 3cabed05ff..c73d9ec099 100644
--- a/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs
+++ b/apollo-router/src/plugins/telemetry/config_new/graphql/mod.rs
@@ -62,6 +62,7 @@ impl DefaultForLevel for GraphQLInstrumentsConfig {
 pub(crate) type GraphQLCustomInstruments = CustomInstruments<
     supergraph::Request,
     supergraph::Response,
+    crate::graphql::Response,
     GraphQLAttributes,
     GraphQLSelector,
     GraphQLValue,
@@ -72,6 +73,7 @@ pub(crate) struct GraphQLInstruments {
         CustomHistogram<
             supergraph::Request,
             supergraph::Response,
+            crate::graphql::Response,
             GraphQLAttributes,
             GraphQLSelector,
         >,
@@ -80,6 +82,7 @@ pub(crate) struct GraphQLInstruments {
         CustomCounter<
             supergraph::Request,
             supergraph::Response,
+            crate::graphql::Response,
             GraphQLAttributes,
             GraphQLSelector,
         >,
diff --git a/apollo-router/src/plugins/telemetry/config_new/instruments.rs b/apollo-router/src/plugins/telemetry/config_new/instruments.rs
index 88760f1f1c..6dad942576 100644
--- a/apollo-router/src/plugins/telemetry/config_new/instruments.rs
+++ b/apollo-router/src/plugins/telemetry/config_new/instruments.rs
@@ -289,6 +289,7 @@ impl InstrumentsConfig {
                     }
                 },
                 updated: false,
+                _phantom: PhantomData,
             }),
         });
         let http_server_request_body_size =
@@ -329,6 +330,7 @@ impl InstrumentsConfig {
                     })),
                     selectors,
                     updated: false,
+                    _phantom: PhantomData,
                 }),
             }
         });
@@ -372,6 +374,7 @@ impl InstrumentsConfig {
                     })),
                     selectors,
                     updated: false,
+                    _phantom: PhantomData,
                 }),
             }
         });
@@ -592,6 +595,7 @@ impl InstrumentsConfig {
                     selector: None,
                     selectors,
                     updated: false,
+                    _phantom: PhantomData,
                 }),
             }
         });
@@ -633,6 +637,7 @@ impl InstrumentsConfig {
                     })),
                     selectors,
                     updated: false,
+                    _phantom: PhantomData,
                 }),
             }
         });
@@ -674,6 +679,7 @@ impl InstrumentsConfig {
                     })),
                     selectors,
                     updated: false,
+                    _phantom: PhantomData,
                 }),
             }
         });
@@ -781,6 +787,7 @@ impl InstrumentsConfig {
                     })),
                     selectors,
                     updated: false,
+                    _phantom: PhantomData,
                 }),
             }
         }),
@@ -818,6 +825,7 @@ impl InstrumentsConfig {
                     selector: None,
                     selectors,
                     incremented: false,
+                    _phantom: PhantomData,
                 }),
             }
         }),
@@ -882,6 +890,7 @@ impl InstrumentsConfig {
                     })),
                     selectors,
                     incremented: false,
+                    _phantom: PhantomData,
                 }),
             }
         }),
@@ -1057,22 +1066,19 @@ where
     }
 }
 
-impl<T, Request, Response, EventResponse> Selectors for DefaultedStandardInstrument<T>
+impl<T, Request, Response, EventResponse> Selectors<Request, Response, EventResponse>
+    for DefaultedStandardInstrument<T>
 where
-    T: Selectors<Request = Request, Response = Response, EventResponse = EventResponse>,
+    T: Selectors<Request, Response, EventResponse>,
 {
-    type Request = Request;
-    type Response = Response;
-    type EventResponse = EventResponse;
-
-    fn on_request(&self, request: &Self::Request) -> Vec<KeyValue> {
+    fn on_request(&self, request: &Request) -> Vec<KeyValue> {
         match self {
             Self::Bool(_) | Self::Unset => Vec::with_capacity(0),
             Self::Extendable { attributes } => attributes.on_request(request),
         }
     }
 
-    fn on_response(&self, response: &Self::Response) -> Vec<KeyValue> {
+    fn on_response(&self, response: &Response) -> Vec<KeyValue> {
         match self {
             Self::Bool(_) | Self::Unset => Vec::with_capacity(0),
             Self::Extendable { attributes } => attributes.on_response(response),
@@ -1086,7 +1092,7 @@ where
         }
     }
 
-    fn on_response_event(&self, response: &Self::EventResponse, ctx: &Context) -> Vec<KeyValue> {
+    fn on_response_event(&self, response: &EventResponse, ctx: &Context) -> Vec<KeyValue> {
         match self {
             Self::Bool(_) | Self::Unset => Vec::with_capacity(0),
             Self::Extendable { attributes } => attributes.on_response_event(response, ctx),
@@ -1174,30 +1180,24 @@ where
     condition: Condition<E>,
 }
 
-impl<A, E, Request, Response, EventResponse, SelectorValue> Selectors
-    for Instrument<A, E, SelectorValue>
+impl<A, E, Request, Response, EventResponse, SelectorValue>
+    Selectors<Request, Response, EventResponse> for Instrument<A, E, SelectorValue>
 where
-    A: Debug
-        + Default
-        + Selectors<Request = Request, Response = Response, EventResponse = EventResponse>,
+    A: Debug + Default + Selectors<Request, Response, EventResponse>,
     E: Debug + Selector<Request = Request, Response = Response>,
     for<'a> &'a SelectorValue: Into<InstrumentValue<E>>,
 {
-    type Request = Request;
-    type Response = Response;
-    type EventResponse = EventResponse;
-
-    fn on_request(&self, request: &Self::Request) -> Vec<KeyValue> {
+    fn on_request(&self, request: &Request) -> Vec<KeyValue> {
         self.attributes.on_request(request)
     }
 
-    fn on_response(&self, response: &Self::Response) -> Vec<KeyValue> {
+    fn on_response(&self, response: &Response) -> Vec<KeyValue> {
         self.attributes.on_response(response)
     }
 
     fn on_response_event(
         &self,
-        response: &Self::EventResponse,
+        response: &EventResponse,
         ctx: &Context,
     ) -> Vec<KeyValue> {
         self.attributes.on_response_event(response, ctx)
@@ -1293,9 +1293,7 @@ impl Instrumented
 where
     A: Default + Instrumented,
-    B: Default
-        + Debug
-        + Selectors<Request = Request, Response = Response, EventResponse = EventResponse>,
+    B: Default + Debug + Selectors<Request, Response, EventResponse>,
     E: Debug + Selector<Request = Request, Response = Response>,
     for<'a> InstrumentValue<E>: From<&'a SelectorValue>,
 {
@@ -1330,12 +1328,8 @@ where
     }
 }
 
-impl Selectors for SubgraphInstrumentsConfig {
-    type Request = subgraph::Request;
-    type Response = subgraph::Response;
-    type EventResponse = ();
-
-    fn on_request(&self, request: &Self::Request) -> Vec<KeyValue> {
+impl Selectors<subgraph::Request, subgraph::Response, ()> for SubgraphInstrumentsConfig {
+    fn on_request(&self, request: &subgraph::Request) -> Vec<KeyValue> {
         let mut attrs = self.http_client_request_body_size.on_request(request);
         attrs.extend(self.http_client_request_duration.on_request(request));
         attrs.extend(self.http_client_response_body_size.on_request(request));
@@ -1343,7 +1337,7 @@ impl Selectors for SubgraphInstrumentsConfig {
         attrs
     }
 
-    fn on_response(&self, response: &Self::Response) -> Vec<KeyValue> {
+    fn on_response(&self, response: &subgraph::Response) -> Vec<KeyValue> {
         let mut attrs = self.http_client_request_body_size.on_response(response);
         attrs.extend(self.http_client_request_duration.on_response(response));
         attrs.extend(self.http_client_response_body_size.on_response(response));
@@ -1360,20 +1354,26 @@ impl Selectors for SubgraphInstrumentsConfig {
     }
 }
 
-pub(crate) struct CustomInstruments<Request, Response, Attributes, Select, SelectorValue>
-where
-    Attributes: Selectors<Request = Request, Response = Response> + Default,
+pub(crate) struct CustomInstruments<
+    Request,
+    Response,
+    EventResponse,
+    Attributes,
+    Select,
+    SelectorValue,
+> where
+    Attributes: Selectors<Request, Response, EventResponse> + Default,
     Select: Selector<Request = Request, Response = Response> + Debug,
 {
     _phantom: PhantomData<SelectorValue>,
-    counters: Vec<CustomCounter<Request, Response, Attributes, Select>>,
-    histograms: Vec<CustomHistogram<Request, Response, Attributes, Select>>,
+    counters: Vec<CustomCounter<Request, Response, EventResponse, Attributes, Select>>,
+    histograms: Vec<CustomHistogram<Request, Response, EventResponse, Attributes, Select>>,
 }
 
-impl<Request, Response, Attributes, Select, SelectorValue>
-    CustomInstruments<Request, Response, Attributes, Select, SelectorValue>
+impl<Request, Response, EventResponse, Attributes, Select, SelectorValue>
+    CustomInstruments<Request, Response, EventResponse, Attributes, Select, SelectorValue>
 where
-    Attributes: Selectors<Request = Request, Response = Response> + Default,
+    Attributes: Selectors<Request, Response, EventResponse> + Default,
     Select: Selector<Request = Request, Response = Response> + Debug,
 {
     pub(crate) fn is_empty(&self) -> bool {
@@ -1381,10 +1381,10 @@ where
     }
 }
 
-impl<Request, Response, Attributes, Select, SelectorValue>
-    CustomInstruments<Request, Response, Attributes, Select, SelectorValue>
+impl<Request, Response, EventResponse, Attributes, Select, SelectorValue>
+    CustomInstruments<Request, Response, EventResponse, Attributes, Select, SelectorValue>
 where
-    Attributes: Selectors<Request = Request, Response = Response> + Default + Debug + Clone,
+    Attributes: Selectors<Request, Response, EventResponse> + Default + Debug + Clone,
     Select: Selector<Request = Request, Response = Response> + Debug + Clone,
     for<'a> &'a SelectorValue: Into<InstrumentValue<Select>>,
 {
@@ -1440,6 +1440,7 @@ where
                             selector,
                             selectors: Some(instrument.attributes.clone()),
                             incremented: false,
+                            _phantom: PhantomData,
                         };
                         counters.push(CustomCounter {
                             inner: Mutex::new(counter),
@@ -1494,6 +1495,7 @@ where
                             selector,
                             selectors: Some(instrument.attributes.clone()),
                             updated: false,
+                            _phantom: PhantomData,
                         };
 
                         histograms.push(CustomHistogram {
@@ -1517,10 +1519,9 @@ where
 }
 
 impl<Request, Response, EventResponse, Attributes, Select, SelectorValue> Instrumented
-    for CustomInstruments<Request, Response, Attributes, Select, SelectorValue>
+    for CustomInstruments<Request, Response, EventResponse, Attributes, Select, SelectorValue>
 where
-    Attributes:
-        Selectors<Request = Request, Response = Response, EventResponse = EventResponse> + Default,
+    Attributes: Selectors<Request, Response, EventResponse> + Default,
     Select: Selector<Request = Request, Response = Response> + Debug,
 {
     type Request = Request;
@@ -1581,14 +1582,14 @@ where
 
 pub(crate) struct RouterInstruments {
     http_server_request_duration: Option<
-        CustomHistogram<router::Request, router::Response, RouterAttributes, RouterSelector>,
+        CustomHistogram<router::Request, router::Response, (), RouterAttributes, RouterSelector>,
     >,
     http_server_active_requests: Option<ActiveRequestsCounter>,
     http_server_request_body_size: Option<
-        CustomHistogram<router::Request, router::Response, RouterAttributes, RouterSelector>,
+        CustomHistogram<router::Request, router::Response, (), RouterAttributes, RouterSelector>,
    >,
     http_server_response_body_size: Option<
-        CustomHistogram<router::Request, router::Response, RouterAttributes, RouterSelector>,
+        CustomHistogram<router::Request, router::Response, (), RouterAttributes, RouterSelector>,
     >,
     custom: RouterCustomInstruments,
 }
@@ -1683,6 +1684,7 @@ pub(crate) struct SubgraphInstruments {
         CustomHistogram<
             subgraph::Request,
             subgraph::Response,
+            (),
             SubgraphAttributes,
             SubgraphSelector,
         >,
@@ -1691,6 +1693,7 @@ pub(crate) struct SubgraphInstruments {
         CustomHistogram<
             subgraph::Request,
             subgraph::Response,
+            (),
             SubgraphAttributes,
             SubgraphSelector,
         >,
@@ -1699,6 +1702,7 @@ pub(crate) struct SubgraphInstruments {
         CustomHistogram<
             subgraph::Request,
             subgraph::Response,
+            (),
             SubgraphAttributes,
             SubgraphSelector,
         >,
@@ -1754,6 +1758,7 @@ impl Instrumented for SubgraphInstruments {
 pub(crate) type RouterCustomInstruments = CustomInstruments<
     router::Request,
     router::Response,
+    (),
     RouterAttributes,
     RouterSelector,
     RouterValue,
@@ -1762,6 +1767,7 @@ pub(crate) type RouterCustomInstruments = CustomInstruments<
 pub(crate) type SupergraphCustomInstruments = CustomInstruments<
     supergraph::Request,
     supergraph::Response,
+    crate::graphql::Response,
     SupergraphAttributes,
     SupergraphSelector,
     SupergraphValue,
@@ -1770,6 +1776,7 @@ pub(crate) type SupergraphCustomInstruments = CustomInstruments<
 pub(crate) type SubgraphCustomInstruments = CustomInstruments<
     subgraph::Request,
     subgraph::Response,
+    (),
     SubgraphAttributes,
     SubgraphSelector,
     SubgraphValue,
@@ -1798,17 +1805,18 @@ fn to_i64(value: opentelemetry::Value) -> Option<i64> {
     }
 }
 
-pub(crate) struct CustomCounter<Request, Response, A, T>
+pub(crate) struct CustomCounter<Request, Response, EventResponse, A, T>
 where
-    A: Selectors<Request = Request, Response = Response> + Default,
+    A: Selectors<Request, Response, EventResponse> + Default,
     T: Selector<Request = Request, Response = Response> + Debug,
 {
-    pub(crate) inner: Mutex<CustomCounterInner<Request, Response, A, T>>,
+    pub(crate) inner: Mutex<CustomCounterInner<Request, Response, EventResponse, A, T>>,
 }
 
-impl<Request, Response, A, T> Clone for CustomCounter<Request, Response, A, T>
+impl<Request, Response, EventResponse, A, T> Clone
+    for CustomCounter<Request, Response, EventResponse, A, T>
 where
-    A: Selectors<Request = Request, Response = Response> + Default,
+    A: Selectors<Request, Response, EventResponse> + Default,
     T: Selector<Request = Request, Response = Response> + Debug + Clone,
 {
     fn clone(&self) -> Self {
@@ -1818,9 +1826,9 @@ where
     }
 }
 
-pub(crate) struct CustomCounterInner<Request, Response, A, T>
+pub(crate) struct CustomCounterInner<Request, Response, EventResponse, A, T>
 where
-    A: Selectors<Request = Request, Response = Response> + Default,
+    A: Selectors<Request, Response, EventResponse> + Default,
     T: Selector<Request = Request, Response = Response> + Debug,
 {
     pub(crate) increment: Increment,
@@ -1831,11 +1839,13 @@ where
     pub(crate) attributes: Vec<KeyValue>,
     // Useful when it's a counter on events to know if we have to count for an event or not
     pub(crate) incremented: bool,
+    pub(crate) _phantom: PhantomData<EventResponse>,
 }
 
-impl<Request, Response, A, T> Clone for CustomCounterInner<Request, Response, A, T>
+impl<Request, Response, EventResponse, A, T> Clone
+    for CustomCounterInner<Request, Response, EventResponse, A, T>
 where
-    A: Selectors<Request = Request, Response = Response> + Default,
+    A: Selectors<Request, Response, EventResponse> + Default,
     T: Selector<Request = Request, Response = Response> + Debug + Clone,
 {
     fn clone(&self) -> Self {
@@ -1847,13 +1857,15 @@ where
             condition: self.condition.clone(),
             attributes: self.attributes.clone(),
             incremented: self.incremented,
+            _phantom: PhantomData,
         }
     }
 }
 
-impl<Request, Response, A, T> Instrumented for CustomCounter<Request, Response, A, T>
+impl<Request, Response, EventResponse, A, T> Instrumented
+    for CustomCounter<Request, Response, EventResponse, A, T>
 where
-    A: Selectors<Request = Request, Response = Response> + Default,
+    A: Selectors<Request, Response, EventResponse> + Default,
     T: Selector<Request = Request, Response = Response, EventResponse = EventResponse>
         + Debug
         + Debug,
@@ -2109,9 +2121,10 @@ where
     }
 }
 
-impl<Request, Response, A, T> Drop for CustomCounter<Request, Response, A, T>
+impl<Request, Response, EventResponse, A, T> Drop
+    for CustomCounter<Request, Response, EventResponse, A, T>
 where
-    A: Selectors<Request = Request, Response = Response> + Default,
+    A: Selectors<Request, Response, EventResponse> + Default,
     T: Selector<Request = Request, Response = Response> + Debug,
 {
     fn drop(&mut self) {
@@ -2230,17 +2243,17 @@ impl Drop for ActiveRequestsCounter {
 
 // ---------------- Histogram -----------------------
 
-pub(crate) struct CustomHistogram<Request, Response, A, T>
+pub(crate) struct CustomHistogram<Request, Response, EventResponse, A, T>
 where
-    A: Selectors<Request = Request, Response = Response> + Default,
+    A: Selectors<Request, Response, EventResponse> + Default,
     T: Selector,
 {
-    pub(crate) inner: Mutex<CustomHistogramInner<Request, Response, A, T>>,
+    pub(crate) inner: Mutex<CustomHistogramInner<Request, Response, EventResponse, A, T>>,
 }
 
-pub(crate) struct CustomHistogramInner<Request, Response, A, T>
+pub(crate) struct CustomHistogramInner<Request, Response, EventResponse, A, T>
 where
-    A: Selectors<Request = Request, Response = Response> + Default,
+    A: Selectors<Request, Response, EventResponse> + Default,
     T: Selector,
 {
     pub(crate) increment: Increment,
@@ -2251,12 +2264,13 @@ where
     pub(crate) attributes: Vec<KeyValue>,
     // Useful when it's an histogram on events to know if we have to count for an event or not
     pub(crate) updated: bool,
+    pub(crate) _phantom: PhantomData<EventResponse>,
 }
 
 impl<Request, Response, EventResponse, A, T> Instrumented
-    for CustomHistogram<Request, Response, A, T>
+    for CustomHistogram<Request, Response, EventResponse, A, T>
 where
-    A: Selectors<Request = Request, Response = Response> + Default,
+    A: Selectors<Request, Response, EventResponse> + Default,
     T: Selector,
 {
     type Request = Request;
@@ -2501,9 +2515,10 @@ where
     }
 }
 
-impl<Request, Response, A, T> Drop for CustomHistogram<Request, Response, A, T>
+impl<Request, Response, EventResponse, A, T> Drop
+    for CustomHistogram<Request, Response, EventResponse, A, T>
 where
-    A: Selectors<Request = Request, Response = Response> + Default,
+    A: Selectors<Request, Response, EventResponse> + Default,
     T: Selector,
 {
     fn drop(&mut self) {
diff --git a/apollo-router/src/plugins/telemetry/config_new/mod.rs b/apollo-router/src/plugins/telemetry/config_new/mod.rs
index 082d0a438e..d619dafcaf 100644
--- a/apollo-router/src/plugins/telemetry/config_new/mod.rs
+++ b/apollo-router/src/plugins/telemetry/config_new/mod.rs
@@ -30,14 +30,10 @@ pub(crate) mod logging;
 pub(crate) mod selectors;
 pub(crate) mod spans;
 
-pub(crate) trait Selectors {
-    type Request;
-    type Response;
-    type EventResponse;
-
-    fn on_request(&self, request: &Self::Request) -> Vec<KeyValue>;
-    fn on_response(&self, response: &Self::Response) -> Vec<KeyValue>;
-    fn on_response_event(&self, _response: &Self::EventResponse, _ctx: &Context) -> Vec<KeyValue> {
+pub(crate) trait Selectors<Request, Response, EventResponse> {
+    fn on_request(&self, request: &Request) -> Vec<KeyValue>;
+    fn on_response(&self, response: &Response) -> Vec<KeyValue>;
+    fn on_response_event(&self, _response: &EventResponse, _ctx: &Context) -> Vec<KeyValue> {
         Vec::with_capacity(0)
     }
     fn on_error(&self, error: &BoxError, ctx: &Context) -> Vec<KeyValue>;
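
The refactor above turns `Selectors` from a trait with associated types into one that is generic over `Request`, `Response`, and `EventResponse`, and threads a `_phantom: PhantomData` field through every carrier of the new parameter. A minimal, self-contained sketch of the pattern (all types here are stand-ins, not router internals):

```
use std::marker::PhantomData;

struct KeyValue; // stand-in for opentelemetry::KeyValue

// Generic parameters instead of associated types: one config type can now
// implement the trait for several request/response/event-response combinations.
trait Selectors<Request, Response, EventResponse> {
    fn on_request(&self, request: &Request) -> Vec<KeyValue>;
    fn on_response(&self, response: &Response) -> Vec<KeyValue>;
    fn on_response_event(&self, _response: &EventResponse) -> Vec<KeyValue> {
        Vec::with_capacity(0)
    }
}

// `EventResponse` may not appear in any field, so carriers of the generic
// parameter hold a `PhantomData`, mirroring the `_phantom` fields added
// throughout this diff.
struct CustomEvent<Request, Response, EventResponse, A>
where
    A: Selectors<Request, Response, EventResponse>,
{
    attributes: A,
    _phantom: PhantomData<(Request, Response, EventResponse)>,
}
```
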
diff --git a/apollo-router/src/plugins/telemetry/config_new/selectors.rs b/apollo-router/src/plugins/telemetry/config_new/selectors.rs
index 3324047b2f..d894a543e0 100644
--- a/apollo-router/src/plugins/telemetry/config_new/selectors.rs
+++ b/apollo-router/src/plugins/telemetry/config_new/selectors.rs
@@ -7,6 +7,7 @@
 use serde_json_bytes::path::JsonPathInst;
 use serde_json_bytes::ByteString;
 use sha2::Digest;
+use super::attributes::SubgraphRequestResendCountKey;
 use crate::context::CONTAINS_GRAPHQL_ERROR;
 use crate::context::OPERATION_KIND;
 use crate::context::OPERATION_NAME;
@@ -526,6 +527,12 @@ pub(crate) enum SubgraphSelector {
         /// The subgraph http response status code.
         subgraph_response_status: ResponseStatus,
     },
+    SubgraphResendCount {
+        /// The subgraph http resend count
+        subgraph_resend_count: bool,
+        /// Optional default value.
+        default: Option<AttributeValue>,
+    },
     SupergraphOperationName {
         /// The supergraph query operation name.
         supergraph_operation_name: OperationName,
@@ -1577,6 +1584,18 @@ impl Selector for SubgraphSelector {
             SubgraphSelector::OnGraphQLError {
                 subgraph_on_graphql_error: on_graphql_error,
             } if *on_graphql_error => Some((!response.response.body().errors.is_empty()).into()),
+            SubgraphSelector::SubgraphResendCount {
+                subgraph_resend_count,
+                default,
+            } if *subgraph_resend_count => {
+                response
+                    .context
+                    .get::<_, usize>(SubgraphRequestResendCountKey::new(&response.id))
+                    .ok()
+                    .flatten()
+                    .map(|v| opentelemetry::Value::from(v as i64))
+            }
+            .or_else(|| default.maybe_to_otel_value()),
             SubgraphSelector::Static(val) => Some(val.clone().into()),
             SubgraphSelector::StaticField { r#static } => Some(r#static.clone().into()),
             SubgraphSelector::Cache { cache, entity_type } => {
@@ -1771,12 +1790,14 @@ mod test {
     use crate::plugins::telemetry::config_new::selectors::ResponseStatus;
     use crate::plugins::telemetry::config_new::selectors::RouterSelector;
     use crate::plugins::telemetry::config_new::selectors::SubgraphQuery;
+    use crate::plugins::telemetry::config_new::selectors::SubgraphRequestResendCountKey;
     use crate::plugins::telemetry::config_new::selectors::SubgraphSelector;
     use crate::plugins::telemetry::config_new::selectors::SupergraphSelector;
     use crate::plugins::telemetry::config_new::selectors::TraceIdFormat;
     use crate::plugins::telemetry::config_new::Selector;
     use crate::plugins::telemetry::otel;
     use crate::query_planner::APOLLO_OPERATION_ID;
+    use crate::services::subgraph::SubgraphRequestId;
     use crate::services::FIRST_EVENT_CONTEXT_KEY;
     use crate::spec::operation_limits::OperationLimits;
@@ -2477,6 +2498,41 @@ mod test {
         );
     }
 
+    #[test]
+    fn subgraph_resend_count() {
+        let selector = SubgraphSelector::SubgraphResendCount {
+            subgraph_resend_count: true,
+            default: Some("defaulted".into()),
+        };
+        let context = crate::context::Context::new();
+        assert_eq!(
+            selector
+                .on_response(
+                    &crate::services::SubgraphResponse::fake2_builder()
+                        .context(context.clone())
+                        .build()
+                        .unwrap()
+                )
+                .unwrap(),
+            "defaulted".into()
+        );
+        let subgraph_req_id = SubgraphRequestId(String::from("test"));
+        let _ = context.insert(SubgraphRequestResendCountKey::new(&subgraph_req_id), 2usize);
+
+        assert_eq!(
+            selector
+                .on_response(
+                    &crate::services::SubgraphResponse::fake2_builder()
+                        .context(context.clone())
+                        .id(subgraph_req_id)
+                        .build()
+                        .unwrap()
+                )
+                .unwrap(),
+            2i64.into()
+        );
+    }
+
     #[test]
     fn router_baggage() {
         let subscriber = tracing_subscriber::registry().with(otel::layer());
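
The new `subgraph_resend_count` selector reads a counter that the retry layer stores in the request context under a key scoped to the subgraph request id (see the `retry.rs` changes later in this diff). A rough sketch of that context-key pattern, with a plain map standing in for the router's `Context` and all names hypothetical:

```
use std::collections::HashMap;

// Hypothetical stand-in for the router's `Context`: a string-keyed map.
#[derive(Default)]
struct Context(HashMap<String, usize>);

struct SubgraphRequestResendCountKey<'a>(&'a str);

impl<'a> From<SubgraphRequestResendCountKey<'a>> for String {
    fn from(key: SubgraphRequestResendCountKey<'a>) -> Self {
        // Scope the counter to one subgraph request id.
        format!("apollo::telemetry::resend_count::{}", key.0)
    }
}

impl Context {
    fn upsert(&mut self, key: String, f: impl FnOnce(usize) -> usize) {
        let entry = self.0.entry(key).or_insert(0);
        *entry = f(*entry);
    }
    fn get(&self, key: &str) -> Option<usize> {
        self.0.get(key).copied()
    }
}

fn main() {
    let mut ctx = Context::default();
    let request_id = "abc123";
    // Retry layer: bump the counter each time the request is resent.
    ctx.upsert(SubgraphRequestResendCountKey(request_id).into(), |v| v + 1);
    // Selector: read it back when the response is observed.
    let key: String = SubgraphRequestResendCountKey(request_id).into();
    assert_eq!(ctx.get(&key), Some(1));
}
```
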
diff --git a/apollo-router/src/plugins/telemetry/metrics/apollo/mod.rs b/apollo-router/src/plugins/telemetry/metrics/apollo/mod.rs
index 47a13762d0..3eacc09620 100644
--- a/apollo-router/src/plugins/telemetry/metrics/apollo/mod.rs
+++ b/apollo-router/src/plugins/telemetry/metrics/apollo/mod.rs
@@ -192,6 +192,7 @@ mod test {
     use crate::context::OPERATION_KIND;
     use crate::plugin::Plugin;
     use crate::plugin::PluginInit;
+    use crate::plugin::PluginPrivate;
     use crate::plugins::subscription;
     use crate::plugins::telemetry::apollo;
     use crate::plugins::telemetry::apollo::default_buffer_size;
@@ -364,7 +365,7 @@ mod test {
             request_builder.header("accept", "multipart/mixed;subscriptionSpec=1.0");
         }
         TestHarness::builder()
-            .extra_plugin(plugin)
+            .extra_private_plugin(plugin)
             .extra_plugin(create_subscription_plugin().await?)
             .build_router()
             .await?
@@ -421,7 +422,7 @@ mod test {
     }
 
     async fn create_subscription_plugin() -> Result<subscription::Subscription, BoxError> {
-        subscription::Subscription::new(PluginInit::fake_new(
+        <subscription::Subscription as PluginPrivate>::new(PluginInit::fake_new(
             subscription::SubscriptionConfig::default(),
             Default::default(),
         ))
diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs
index b6a0e7a1a2..49fcbdf935 100644
--- a/apollo-router/src/plugins/telemetry/mod.rs
+++ b/apollo-router/src/plugins/telemetry/mod.rs
@@ -93,8 +93,8 @@ use crate::layers::ServiceBuilderExt;
 use crate::metrics::aggregation::MeterProviderType;
 use crate::metrics::filter::FilterMeterProvider;
 use crate::metrics::meter_provider;
-use crate::plugin::Plugin;
 use crate::plugin::PluginInit;
+use crate::plugin::PluginPrivate;
 use crate::plugins::telemetry::apollo::ForwardHeaders;
 use crate::plugins::telemetry::apollo_exporter::proto::reports::trace::node::Id::ResponseName;
 use crate::plugins::telemetry::apollo_exporter::proto::reports::StatsContext;
@@ -131,9 +131,8 @@ use crate::plugins::telemetry::reload::OPENTELEMETRY_TRACER_HANDLE;
 use crate::plugins::telemetry::tracing::apollo_telemetry::decode_ftv1_trace;
 use crate::plugins::telemetry::tracing::apollo_telemetry::APOLLO_PRIVATE_OPERATION_SIGNATURE;
 use crate::plugins::telemetry::tracing::TracingConfigurator;
-use crate::plugins::telemetry::utils::TracingUtils;
 use crate::query_planner::OperationKind;
-use crate::register_plugin;
+use crate::register_private_plugin;
 use crate::router_factory::Endpoint;
 use crate::services::execution;
 use crate::services::router;
@@ -172,7 +171,7 @@ pub(crate) mod tracing;
 pub(crate) mod utils;
 
 // Tracing consts
-const CLIENT_NAME: &str = "apollo_telemetry::client_name";
+pub(crate) const CLIENT_NAME: &str = "apollo_telemetry::client_name";
 const CLIENT_VERSION: &str = "apollo_telemetry::client_version";
 const SUBGRAPH_FTV1: &str = "apollo_telemetry::subgraph_ftv1";
 pub(crate) const STUDIO_EXCLUDE: &str = "apollo_telemetry::studio::exclude";
@@ -280,7 +279,7 @@ fn create_builtin_instruments(config: &InstrumentsConfig) -> BuiltinInstruments
 }
 
 #[async_trait::async_trait]
-impl Plugin for Telemetry {
+impl PluginPrivate for Telemetry {
     type Config = config::Conf;
 
     async fn new(init: PluginInit<Self::Config>) -> Result<Self, BoxError> {
@@ -853,10 +852,8 @@ impl Plugin for Telemetry {
     fn web_endpoints(&self) -> MultiMap<ListenAddr, Endpoint> {
         self.custom_endpoints.clone()
     }
-}
 
-impl Telemetry {
-    pub(crate) fn activate(&self) {
+    fn activate(&self) {
         let mut activation = self.activation.lock();
         if activation.is_active {
             return;
@@ -910,7 +907,9 @@ impl Telemetry {
         reload_fmt(create_fmt_layer(&self.config));
         activation.is_active = true;
     }
+}
 
+impl Telemetry {
     fn create_propagator(config: &config::Conf) -> TextMapCompositePropagator {
         let propagation = &config.exporters.tracing.propagation;
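
`Telemetry` moves from the public `Plugin` trait to the crate-internal `PluginPrivate` trait, which now also owns the `activate` hook that previously lived in an inherent impl. A schematic sketch of that shape (signatures heavily simplified; not the router's exact trait):

```
trait PluginPrivate: Sized {
    type Config;

    fn new(config: Self::Config) -> Result<Self, String>;

    /// Called once the new router pipeline is ready to take traffic.
    fn activate(&self) {}
}

struct Telemetry;

impl PluginPrivate for Telemetry {
    type Config = ();

    fn new(_config: ()) -> Result<Self, String> {
        Ok(Telemetry)
    }

    fn activate(&self) {
        // swap tracer/meter providers, mark activation done, ...
    }
}
```

Making `activate` a trait method is what lets the plugin machinery (and the test harness's `extra_private_plugin`) drive the lifecycle uniformly instead of special-casing `Telemetry`.
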
@@ -1747,28 +1746,32 @@ impl Telemetry {
     }
 
     fn plugin_metrics(config: &Arc<Conf>) {
-        let metrics_prom_used = config.exporters.metrics.prometheus.enabled;
-        let metrics_otlp_used = MetricsConfigurator::enabled(&config.exporters.metrics.otlp);
-        let tracing_otlp_used = TracingConfigurator::enabled(&config.exporters.tracing.otlp);
-        let tracing_datadog_used = config.exporters.tracing.datadog.enabled();
-        let tracing_jaeger_used = config.exporters.tracing.jaeger.enabled();
-        let tracing_zipkin_used = config.exporters.tracing.zipkin.enabled();
-
-        if metrics_prom_used
-            || metrics_otlp_used
-            || tracing_jaeger_used
-            || tracing_otlp_used
-            || tracing_zipkin_used
-            || tracing_datadog_used
-        {
-            ::tracing::info!(
-                monotonic_counter.apollo.router.operations.telemetry = 1u64,
-                telemetry.metrics.otlp = metrics_otlp_used.or_empty(),
-                telemetry.metrics.prometheus = metrics_prom_used.or_empty(),
-                telemetry.tracing.otlp = tracing_otlp_used.or_empty(),
-                telemetry.tracing.datadog = tracing_datadog_used.or_empty(),
-                telemetry.tracing.jaeger = tracing_jaeger_used.or_empty(),
-                telemetry.tracing.zipkin = tracing_zipkin_used.or_empty(),
+        let mut attributes = Vec::new();
+        if MetricsConfigurator::enabled(&config.exporters.metrics.otlp) {
+            attributes.push(KeyValue::new("telemetry.metrics.otlp", true));
+        }
+        if config.exporters.metrics.prometheus.enabled {
+            attributes.push(KeyValue::new("telemetry.metrics.prometheus", true));
+        }
+        if TracingConfigurator::enabled(&config.exporters.tracing.otlp) {
+            attributes.push(KeyValue::new("telemetry.tracing.otlp", true));
+        }
+        if config.exporters.tracing.datadog.enabled() {
+            attributes.push(KeyValue::new("telemetry.tracing.datadog", true));
+        }
+        if config.exporters.tracing.jaeger.enabled() {
+            attributes.push(KeyValue::new("telemetry.tracing.jaeger", true));
+        }
+        if config.exporters.tracing.zipkin.enabled() {
+            attributes.push(KeyValue::new("telemetry.tracing.zipkin", true));
+        }
+
+        if !attributes.is_empty() {
+            u64_counter!(
+                "apollo.router.operations.telemetry",
+                "Telemetry exporters enabled",
+                1,
+                attributes
             );
         }
     }
@@ -1979,7 +1982,7 @@ fn handle_error_internal
     }
 }
 
-register_plugin!("apollo", "telemetry", Telemetry);
+register_private_plugin!("apollo", "telemetry", Telemetry);
 
 fn request_ftv1(mut req: SubgraphRequest) -> SubgraphRequest {
     if req
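
This diff repeatedly converts `tracing::info!(monotonic_counter...)` calls into the router's `u64_counter!` metrics macro, which takes a name, a description, a value, and attributes. A hand-rolled equivalent using plain OpenTelemetry might look roughly like this (API names per opentelemetry-rust ≤0.23; `.init()` may be `.build()` in newer versions, and the real macro also caches the instrument):

```
use opentelemetry::global;
use opentelemetry::KeyValue;

fn record_retry(subgraph_name: &str) {
    let meter = global::meter("apollo/router");
    let counter = meter
        .u64_counter("apollo_router_http_request_retry_total")
        .with_description("Number of retries for an http request to a subgraph")
        .init();
    counter.add(1, &[KeyValue::new("subgraph", subgraph_name.to_string())]);
}
```
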
diff --git a/apollo-router/src/plugins/test.rs b/apollo-router/src/plugins/test.rs
index ec1d9c509e..8535c9f5f1 100644
--- a/apollo-router/src/plugins/test.rs
+++ b/apollo-router/src/plugins/test.rs
@@ -12,8 +12,8 @@ use tower_service::Service;
 
 use crate::introspection::IntrospectionCache;
 use crate::plugin::DynPlugin;
-use crate::plugin::Plugin;
 use crate::plugin::PluginInit;
+use crate::plugin::PluginPrivate;
 use crate::query_planner::BridgeQueryPlanner;
 use crate::query_planner::PlannerMode;
 use crate::services::execution;
@@ -65,12 +65,12 @@ use crate::Notify;
 /// You can pass in a configuration and a schema to the test harness. If you pass in a schema, the test harness will create a query planner and use the schema to extract subgraph schemas.
 ///
 ///
-pub(crate) struct PluginTestHarness<T: Plugin> {
+pub(crate) struct PluginTestHarness<T: PluginPrivate<Config: Default>> {
     plugin: Box<dyn DynPlugin>,
     phantom: std::marker::PhantomData<T>,
 }
 #[buildstructor::buildstructor]
-impl<T: Plugin> PluginTestHarness<T> {
+impl<T: PluginPrivate<Config: Default> + 'static> PluginTestHarness<T> {
     #[builder]
     pub(crate) async fn new<'a, 'b>(config: Option<&'a str>, schema: Option<&'b str>) -> Self {
         let factory = crate::plugin::plugins()
@@ -217,7 +217,7 @@ impl PluginTestHarness {
 
 impl<T> Deref for PluginTestHarness<T>
 where
-    T: Plugin,
+    T: PluginPrivate<Config: Default>,
 {
     type Target = T;
diff --git a/apollo-router/src/plugins/traffic_shaping/mod.rs b/apollo-router/src/plugins/traffic_shaping/mod.rs
index 4335bfe988..935a670164 100644
--- a/apollo-router/src/plugins/traffic_shaping/mod.rs
+++ b/apollo-router/src/plugins/traffic_shaping/mod.rs
@@ -396,7 +396,6 @@ impl TrafficShaping {
                 config.min_per_sec,
                 config.retry_percent,
                 config.retry_mutations,
-                name.to_string(),
             );
             tower::retry::RetryLayer::new(retry_policy)
         });
diff --git a/apollo-router/src/plugins/traffic_shaping/retry.rs b/apollo-router/src/plugins/traffic_shaping/retry.rs
index 40727cc6dd..d04101dc3e 100644
--- a/apollo-router/src/plugins/traffic_shaping/retry.rs
+++ b/apollo-router/src/plugins/traffic_shaping/retry.rs
@@ -5,6 +5,7 @@ use std::time::Duration;
 use tower::retry::budget::Budget;
 use tower::retry::Policy;
 
+use crate::plugins::telemetry::config_new::attributes::SubgraphRequestResendCountKey;
 use crate::query_planner::OperationKind;
 use crate::services::subgraph;
 
@@ -12,7 +13,6 @@ use crate::services::subgraph;
 pub(crate) struct RetryPolicy {
     budget: Arc<Budget>,
     retry_mutations: bool,
-    subgraph_name: String,
 }
 
 impl RetryPolicy {
@@ -21,7 +21,6 @@ impl RetryPolicy {
         min_per_sec: Option<u32>,
         retry_percent: Option<f32>,
         retry_mutations: Option<bool>,
-        subgraph_name: String,
     ) -> Self {
         Self {
             budget: Arc::new(Budget::new(
@@ -30,17 +29,21 @@ impl RetryPolicy {
                 retry_percent.unwrap_or(0.2),
             )),
             retry_mutations: retry_mutations.unwrap_or(false),
-            subgraph_name,
         }
     }
 }
 
-impl<Res, E> Policy<subgraph::Request, Res, E> for RetryPolicy {
+impl<E> Policy<subgraph::Request, subgraph::Response, E> for RetryPolicy {
     type Future = future::Ready<Self>;
 
-    fn retry(&self, req: &subgraph::Request, result: Result<&Res, &E>) -> Option<Self::Future> {
+    fn retry(
+        &self,
+        req: &subgraph::Request,
+        result: Result<&subgraph::Response, &E>,
+    ) -> Option<Self::Future> {
+        let subgraph_name = req.subgraph_name.clone().unwrap_or_default();
         match result {
-            Ok(_) => {
+            Ok(_resp) => {
                 // Treat all `Response`s as success,
                 // so deposit budget and don't retry...
                 self.budget.deposit();
@@ -53,20 +56,27 @@ impl<E> Policy<subgraph::Request, subgraph::Response, E> for RetryPolicy {
 
                 let withdrew = self.budget.withdraw();
                 if withdrew.is_err() {
-                    tracing::info!(
-                        monotonic_counter.apollo_router_http_request_retry_total = 1u64,
+                    u64_counter!(
+                        "apollo_router_http_request_retry_total",
+                        "Number of retries for an http request to a subgraph",
+                        1u64,
                         status = "aborted",
-                        subgraph = %self.subgraph_name,
+                        subgraph = subgraph_name
                     );
 
                     return None;
                 }
-
-                tracing::info!(
-                    monotonic_counter.apollo_router_http_request_retry_total = 1u64,
-                    subgraph = %self.subgraph_name,
+                u64_counter!(
+                    "apollo_router_http_request_retry_total",
+                    "Number of retries for an http request to a subgraph",
+                    1u64,
+                    subgraph = subgraph_name
                 );
 
+                let _ = req
+                    .context
+                    .upsert::<_, usize>(SubgraphRequestResendCountKey::new(&req.id), |val| val + 1);
+
                 Some(future::ready(self.clone()))
             }
         }
@@ -76,3 +86,69 @@ impl<E> Policy<subgraph::Request, subgraph::Response, E> for RetryPolicy {
         Some(req.clone())
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::error::FetchError;
+    use crate::graphql;
+    use crate::http_ext;
+    use crate::metrics::FutureMetricsExt;
+
+    #[tokio::test]
+    async fn test_retry_with_error() {
+        async {
+            let retry = RetryPolicy::new(
+                Some(Duration::from_secs(10)),
+                Some(10),
+                Some(0.2),
+                Some(false),
+            );
+
+            let subgraph_req = subgraph::Request::fake_builder()
+                .subgraph_name("my_subgraph_name_error")
+                .subgraph_request(
+                    http_ext::Request::fake_builder()
+                        .header("test", "my_value_set")
+                        .body(
+                            graphql::Request::fake_builder()
+                                .query(String::from("query { test }"))
+                                .build(),
+                        )
+                        .build()
+                        .unwrap(),
+                )
+                .build();
+
+            assert!(retry
+                .retry(
+                    &subgraph_req,
+                    Err(&Box::new(FetchError::SubrequestHttpError {
+                        status_code: None,
+                        service: String::from("my_subgraph_name_error"),
+                        reason: String::from("cannot contact the subgraph"),
+                    }))
+                )
+                .is_some());
+
+            assert!(retry
+                .retry(
+                    &subgraph_req,
+                    Err(&Box::new(FetchError::SubrequestHttpError {
+                        status_code: None,
+                        service: String::from("my_subgraph_name_error"),
+                        reason: String::from("cannot contact the subgraph"),
+                    }))
+                )
+                .is_some());
+
+            assert_counter!(
+                "apollo_router_http_request_retry_total",
+                2,
+                "subgraph" = "my_subgraph_name_error"
+            );
+        }
+        .with_metrics()
+        .await;
+    }
+}
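
The retry decision above is gated by `tower`'s retry `Budget`: every success deposits into the budget and each retry must withdraw from it, which bounds retries to a fraction of recent traffic. A condensed, runnable sketch with the router-specific types stripped out (`Budget` is tower's real API; the policy around it is illustrative):

```
use std::sync::Arc;
use std::time::Duration;
use tower::retry::budget::Budget;

#[derive(Clone)]
struct SketchRetryPolicy {
    budget: Arc<Budget>,
}

impl SketchRetryPolicy {
    fn new() -> Self {
        Self {
            // ttl, min_per_sec, retry_percent: successes deposit into the
            // budget, every retry withdraws; withdrawals start failing once
            // retries exceed ~20% of recent traffic.
            budget: Arc::new(Budget::new(Duration::from_secs(10), 10, 0.2)),
        }
    }

    fn should_retry(&self, failed: bool) -> bool {
        if !failed {
            self.budget.deposit();
            return false;
        }
        // Err(Overdrawn) means the budget is exhausted: give up.
        self.budget.withdraw().is_ok()
    }
}
```
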
diff --git a/apollo-router/src/plugins/traffic_shaping/timeout/future.rs b/apollo-router/src/plugins/traffic_shaping/timeout/future.rs
index 8a390b393e..eda4100198 100644
--- a/apollo-router/src/plugins/traffic_shaping/timeout/future.rs
+++ b/apollo-router/src/plugins/traffic_shaping/timeout/future.rs
@@ -49,7 +49,11 @@ where
         match Pin::new(&mut this.sleep).poll(cx) {
             Poll::Pending => Poll::Pending,
             Poll::Ready(_) => {
-                tracing::info!(monotonic_counter.apollo_router_timeout = 1u64,);
+                u64_counter!(
+                    "apollo_router_timeout",
+                    "Number of timed out client requests",
+                    1
+                );
                 Poll::Ready(Err(Elapsed::new().into()))
             }
         }
diff --git a/apollo-router/src/protocols/websocket.rs b/apollo-router/src/protocols/websocket.rs
index bd556232ac..13700e84ce 100644
--- a/apollo-router/src/protocols/websocket.rs
+++ b/apollo-router/src/protocols/websocket.rs
@@ -300,13 +300,10 @@ where
         request: graphql::Request,
         heartbeat_interval: Option<Duration>,
     ) -> Result<GraphqlWebSocket<S>, graphql::Error> {
-        tracing::info!(
-            monotonic_counter
-                .apollo
-                .router
-                .operations
-                .subscriptions
-                .events = 1u64,
+        u64_counter!(
+            "apollo.router.operations.subscriptions.events",
+            "Number of subscription events",
+            1,
             subscriptions.mode = "passthrough"
         );
 
@@ -443,13 +440,10 @@ where
             tracing::trace!("cannot shutdown sink: {err:?}");
         };
 
-        tracing::info!(
-            monotonic_counter
-                .apollo
-                .router
-                .operations
-                .subscriptions
-                .events = 1u64,
+        u64_counter!(
+            "apollo.router.operations.subscriptions.events",
+            "Number of subscription events",
+            1,
             subscriptions.mode = "passthrough",
             subscriptions.complete = true
        );
diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs
index dac65efe45..10edd392ae 100644
--- a/apollo-router/src/query_planner/bridge_query_planner.rs
+++ b/apollo-router/src/query_planner/bridge_query_planner.rs
@@ -2,7 +2,6 @@
 
 use std::collections::HashMap;
 use std::fmt::Debug;
-use std::fmt::Write;
 use std::ops::ControlFlow;
 use std::sync::Arc;
 use std::time::Instant;
@@ -31,6 +30,7 @@ use super::QueryKey;
 use crate::apollo_studio_interop::generate_usage_reporting;
 use crate::compute_job;
 use crate::configuration::QueryPlannerMode;
+use crate::error::FederationErrorBridge;
 use crate::error::PlanErrors;
 use crate::error::QueryPlannerError;
 use crate::error::SchemaError;
@@ -64,7 +64,6 @@ use crate::Configuration;
 
 pub(crate) const RUST_QP_MODE: &str = "rust";
 pub(crate) const JS_QP_MODE: &str = "js";
-const UNSUPPORTED_CONTEXT: &str = "context";
 const UNSUPPORTED_FED1: &str = "fed1";
 const INTERNAL_INIT_ERROR: &str = "internal";
 
@@ -140,6 +139,13 @@ impl PlannerMode {
                     Self::Js(Self::js_planner(&schema.raw_sdl, configuration, old_planner).await?)
                 }
             }
+            QueryPlannerMode::NewBestEffort => {
+                if let Some(rust) = rust_planner {
+                    Self::Rust(rust)
+                } else {
+                    Self::Js(Self::js_planner(&schema.raw_sdl, configuration, old_planner).await?)
+                }
+            }
         })
     }
 
@@ -152,13 +158,15 @@ impl PlannerMode {
             QueryPlannerMode::New | QueryPlannerMode::Both => {
                 Ok(Some(Self::rust(schema, configuration)?))
             }
-            QueryPlannerMode::BothBestEffort => match Self::rust(schema, configuration) {
-                Ok(planner) => Ok(Some(planner)),
-                Err(error) => {
-                    tracing::info!("Falling back to the legacy query planner: {error}");
-                    Ok(None)
+            QueryPlannerMode::BothBestEffort | QueryPlannerMode::NewBestEffort => {
+                match Self::rust(schema, configuration) {
+                    Ok(planner) => Ok(Some(planner)),
+                    Err(error) => {
+                        tracing::info!("Falling back to the legacy query planner: {error}");
+                        Ok(None)
+                    }
                 }
-            },
+            }
         }
     }
 
@@ -170,19 +178,14 @@ impl PlannerMode {
         let result = QueryPlanner::new(schema.federation_supergraph(), config);
 
         match &result {
-            Err(FederationError::SingleFederationError {
-                inner: error,
-                trace: _,
-            }) => match error {
+            Err(FederationError::SingleFederationError(error)) => match error {
                 SingleFederationError::UnsupportedFederationVersion { .. } => {
                     metric_rust_qp_init(Some(UNSUPPORTED_FED1));
                 }
-                SingleFederationError::UnsupportedFeature { message: _, kind } => match kind {
-                    apollo_federation::error::UnsupportedFeatureKind::Context => {
-                        metric_rust_qp_init(Some(UNSUPPORTED_CONTEXT))
-                    }
-                    _ => metric_rust_qp_init(Some(INTERNAL_INIT_ERROR)),
-                },
+                SingleFederationError::UnsupportedFeature {
+                    message: _,
+                    kind: _,
+                } => metric_rust_qp_init(Some(INTERNAL_INIT_ERROR)),
                 _ => {
                     metric_rust_qp_init(Some(INTERNAL_INIT_ERROR));
                 }
@@ -264,8 +267,18 @@ impl PlannerMode {
                     operation,
                     query_plan_options,
                 )
-            })
-            .map_err(|e| QueryPlannerError::FederationError(e.to_string()));
+            });
+            if let Err(FederationError::SingleFederationError(
+                SingleFederationError::InternalUnmergeableFields { .. },
+            )) = &result
+            {
+                u64_counter!(
+                    "apollo.router.operations.query_planner.unmergeable_fields",
+                    "Query planner caught attempting to merge unmergeable fields",
+                    1
+                );
+            }
+            let result = result.map_err(FederationErrorBridge::from);
 
             let elapsed = start.elapsed().as_secs_f64();
             metric_query_planning_plan_duration(RUST_QP_MODE, elapsed);
@@ -789,33 +802,6 @@ pub(super) struct QueryPlan {
     pub(super) node: Option<Arc<PlanNode>>,
 }
 
-// Note: Reexported under `apollo_router::_private`
-pub fn render_diff(differences: &[diff::Result<&str>]) -> String {
-    let mut output = String::new();
-    for diff_line in differences {
-        match diff_line {
-            diff::Result::Left(l) => {
-                let trimmed = l.trim();
-                if !trimmed.starts_with('#') && !trimmed.is_empty() {
-                    writeln!(&mut output, "-{l}").expect("write will never fail");
-                } else {
-                    writeln!(&mut output, " {l}").expect("write will never fail");
-                }
-            }
-            diff::Result::Both(l, _) => {
-                writeln!(&mut output, " {l}").expect("write will never fail");
-            }
-            diff::Result::Right(r) => {
-                let trimmed = r.trim();
-                if trimmed != "---" && !trimmed.is_empty() {
-                    writeln!(&mut output, "+{r}").expect("write will never fail");
-                }
-            }
-        }
-    }
-    output
-}
-
 pub(crate) fn metric_query_planning_plan_duration(planner: &'static str, elapsed: f64) {
     f64_histogram!(
         "apollo.router.query_planning.plan.duration",
@@ -846,9 +832,6 @@ pub(crate) fn metric_rust_qp_init(init_error_kind: Option<&'static str>) {
 
 #[cfg(test)]
 mod tests {
-    use std::fs;
-    use std::path::PathBuf;
-
     use serde_json::json;
     use test_log::test;
     use tower::Service;
@@ -1392,28 +1375,6 @@ mod tests {
             .await
     }
 
-    #[test]
-    fn router_bridge_dependency_is_pinned() {
-        let cargo_manifest: serde_json::Value = basic_toml::from_str(
-            &fs::read_to_string(PathBuf::from(&env!("CARGO_MANIFEST_DIR")).join("Cargo.toml"))
-                .expect("could not read Cargo.toml"),
-        )
-        .expect("could not parse Cargo.toml");
-        let router_bridge_version = cargo_manifest
-            .get("dependencies")
-            .expect("Cargo.toml does not contain dependencies")
-            .as_object()
-            .expect("Cargo.toml dependencies key is not an object")
-            .get("router-bridge")
-            .expect("Cargo.toml dependencies does not have an entry for router-bridge")
-            .as_str()
-            .unwrap_or_default();
-        assert!(
-            router_bridge_version.contains('='),
-            "router-bridge in Cargo.toml is not pinned with a '=' prefix"
-        );
-    }
-
     #[tokio::test]
     async fn test_both_mode() {
         let mut harness = crate::TestHarness::builder()
@@ -1520,13 +1481,6 @@ mod tests {
             1,
             "init.is_success" = true
         );
-        metric_rust_qp_init(Some(UNSUPPORTED_CONTEXT));
-        assert_counter!(
-            "apollo.router.lifecycle.query_planner.init",
-            1,
-            "init.error_kind" = "context",
-            "init.is_success" = false
-        );
         metric_rust_qp_init(Some(UNSUPPORTED_FED1));
         assert_counter!(
             "apollo.router.lifecycle.query_planner.init",
diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs
index 209adcc856..7b1f8c5279 100644
--- a/apollo-router/src/query_planner/caching_query_planner.rs
+++ b/apollo-router/src/query_planner/caching_query_planner.rs
@@ -145,6 +145,12 @@ where
                 ConfigMode::Rust(Arc::new(configuration.rust_query_planner_config()))
                     .hash(&mut hasher);
             }
+            crate::configuration::QueryPlannerMode::NewBestEffort => {
+                "PLANNER-NEW-BEST-EFFORT".hash(&mut hasher);
+                ConfigMode::Js(Arc::new(configuration.js_query_planner_config())).hash(&mut hasher);
+                ConfigMode::Rust(Arc::new(configuration.rust_query_planner_config()))
+                    .hash(&mut hasher);
+            }
         };
         let config_mode_hash = Arc::new(QueryHash(hasher.finalize()));
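
A simplified sketch of the `NewBestEffort` selection added above: use the native planner whenever it initialized, otherwise quietly fall back to the legacy JS planner (types are stand-ins for the router's `PlannerMode` machinery):

```
enum Planner {
    Rust,
    Js,
}

fn choose_planner(rust_planner: Option<Planner>) -> Planner {
    match rust_planner {
        // Native planner initialized successfully: use it.
        Some(rust) => rust,
        // Initialization failed (e.g. an unsupported Federation v1
        // supergraph): fall back to the legacy planner, logging only.
        None => Planner::Js,
    }
}
```
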
@@ -632,10 +638,7 @@ pub(crate) struct CachingQueryKey {
     pub(crate) config_mode: Arc<QueryHash>,
 }
 
-// Update this key every time the cache key or the query plan format has to change.
-// When changed it MUST BE CALLED OUT PROMINENTLY IN THE CHANGELOG.
-const CACHE_KEY_VERSION: usize = 1;
-const FEDERATION_VERSION: &str = std::env!("FEDERATION_VERSION");
+const ROUTER_VERSION: &str = env!("CARGO_PKG_VERSION");
 
 impl std::fmt::Display for CachingQueryKey {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
@@ -654,8 +657,8 @@ impl std::fmt::Display for CachingQueryKey {
 
         write!(
             f,
-            "plan:cache:{}:federation:{}:{}:opname:{}:metadata:{}",
-            CACHE_KEY_VERSION, FEDERATION_VERSION, self.hash, operation, metadata,
+            "plan:router:{}:{}:opname:{}:metadata:{}",
+            ROUTER_VERSION, self.hash, operation, metadata,
         )
     }
 }
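
The distributed query-plan cache key now embeds the router's own version rather than a hand-maintained `CACHE_KEY_VERSION` plus federation version, so every router release automatically gets a fresh cache namespace. A sketch of the resulting key shape (hash/operation/metadata values are placeholders):

```
const ROUTER_VERSION: &str = env!("CARGO_PKG_VERSION");

fn cache_key(hash: &str, operation: &str, metadata: &str) -> String {
    format!(
        "plan:router:{}:{}:opname:{}:metadata:{}",
        ROUTER_VERSION, hash, operation, metadata
    )
}

fn main() {
    // e.g. "plan:router:1.59.0:abc123:opname:MyQuery:metadata:..."
    println!("{}", cache_key("abc123", "MyQuery", "..."));
}
```
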
diff --git a/apollo-router/src/query_planner/convert.rs b/apollo-router/src/query_planner/convert.rs
index 640ecb624f..5d6488025e 100644
--- a/apollo-router/src/query_planner/convert.rs
+++ b/apollo-router/src/query_planner/convert.rs
@@ -310,19 +310,15 @@ impl From<&'_ next::FetchDataPathElement> for crate::json_ext::PathElement {
         match value {
             next::FetchDataPathElement::Key(name, conditions) => Self::Key(
                 name.to_string(),
-                if conditions.is_empty() {
-                    None
-                } else {
-                    Some(conditions.iter().map(|c| c.to_string()).collect())
-                },
+                conditions
+                    .as_ref()
+                    .map(|conditions| conditions.iter().map(|c| c.to_string()).collect()),
+            ),
+            next::FetchDataPathElement::AnyIndex(conditions) => Self::Flatten(
+                conditions
+                    .as_ref()
+                    .map(|conditions| conditions.iter().map(|c| c.to_string()).collect()),
             ),
-            next::FetchDataPathElement::AnyIndex(conditions) => {
-                Self::Flatten(if conditions.is_empty() {
-                    None
-                } else {
-                    Some(conditions.iter().map(|c| c.to_string()).collect())
-                })
-            }
             next::FetchDataPathElement::TypenameEquals(value) => Self::Fragment(value.to_string()),
             next::FetchDataPathElement::Parent => Self::Key("..".to_owned(), None),
         }
diff --git a/apollo-router/src/query_planner/dual_query_planner.rs b/apollo-router/src/query_planner/dual_query_planner.rs
index 9e5250a447..3996d4f22c 100644
--- a/apollo-router/src/query_planner/dual_query_planner.rs
+++ b/apollo-router/src/query_planner/dual_query_planner.rs
@@ -1,40 +1,23 @@
 //! Running two query planner implementations and comparing their results
 
-use std::borrow::Borrow;
-use std::collections::hash_map::HashMap;
-use std::fmt::Write;
-use std::hash::DefaultHasher;
-use std::hash::Hash;
-use std::hash::Hasher;
 use std::sync::Arc;
 use std::sync::OnceLock;
 use std::time::Instant;
 
-use apollo_compiler::ast;
 use apollo_compiler::validation::Valid;
 use apollo_compiler::ExecutableDocument;
 use apollo_compiler::Name;
-use apollo_compiler::Node;
+use apollo_federation::error::FederationError;
 use apollo_federation::query_plan::query_planner::QueryPlanOptions;
 use apollo_federation::query_plan::query_planner::QueryPlanner;
-use apollo_federation::query_plan::QueryPlan;
 
-use super::fetch::FetchNode;
-use super::fetch::SubgraphOperation;
-use super::subscription::SubscriptionNode;
-use super::FlattenNode;
 use crate::error::format_bridge_errors;
-use crate::executable::USING_CATCH_UNWIND;
 use crate::query_planner::bridge_query_planner::metric_query_planning_plan_duration;
 use crate::query_planner::bridge_query_planner::JS_QP_MODE;
 use crate::query_planner::bridge_query_planner::RUST_QP_MODE;
 use crate::query_planner::convert::convert_root_query_plan_node;
-use crate::query_planner::render_diff;
-use crate::query_planner::rewrites::DataRewrite;
-use crate::query_planner::selection::Selection;
-use crate::query_planner::DeferredNode;
-use crate::query_planner::PlanNode;
-use crate::query_planner::Primary;
+use crate::query_planner::plan_compare::diff_plan;
+use crate::query_planner::plan_compare::opt_plan_node_matches;
 use crate::query_planner::QueryPlanResult;
 
 /// Jobs are dropped if this many are already queued
@@ -80,47 +63,23 @@ impl BothModeComparisonJob {
     }
 
     fn execute(self) {
-        // TODO: once the Rust query planner does not use `todo!()` anymore,
-        // remove `USING_CATCH_UNWIND` and this use of `catch_unwind`.
-        let rust_result = std::panic::catch_unwind(|| {
-            let name = self
-                .operation_name
-                .clone()
-                .map(Name::try_from)
-                .transpose()?;
-            USING_CATCH_UNWIND.set(true);
-
-            let start = Instant::now();
+        let start = Instant::now();
 
-            // No question mark operator or macro from here …
-            let result =
+        let rust_result = self
+            .operation_name
+            .as_deref()
+            .map(|n| Name::new(n).map_err(FederationError::from))
+            .transpose()
+            .and_then(|operation| {
                 self.rust_planner
-                    .build_query_plan(&self.document, name, self.plan_options);
-
-            let elapsed = start.elapsed().as_secs_f64();
-            metric_query_planning_plan_duration(RUST_QP_MODE, elapsed);
+                    .build_query_plan(&self.document, operation, self.plan_options)
+            });
 
-            metric_query_planning_plan_both_comparison_duration(RUST_QP_MODE, elapsed);
-            metric_query_planning_plan_both_comparison_duration(JS_QP_MODE, self.js_duration);
+        let elapsed = start.elapsed().as_secs_f64();
+        metric_query_planning_plan_duration(RUST_QP_MODE, elapsed);
 
-            // … to here, so the thread can only eiher reach here or panic.
-            // We unset USING_CATCH_UNWIND in both cases.
-            USING_CATCH_UNWIND.set(false);
-            result
-        })
-        .unwrap_or_else(|panic| {
-            USING_CATCH_UNWIND.set(false);
-            Err(apollo_federation::error::FederationError::internal(
-                format!(
-                    "query planner panicked: {}",
-                    panic
-                        .downcast_ref::<String>()
-                        .map(|s| s.as_str())
-                        .or_else(|| panic.downcast_ref::<&str>().copied())
-                        .unwrap_or_default()
-                ),
-            ))
-        });
+        metric_query_planning_plan_both_comparison_duration(RUST_QP_MODE, elapsed);
+        metric_query_planning_plan_both_comparison_duration(JS_QP_MODE, self.js_duration);
 
         let name = self.operation_name.as_deref();
         let operation_desc = if let Ok(operation) = self.document.operations.get(name) {
@@ -194,1136 +153,6 @@ pub(crate) fn metric_query_planning_plan_both_comparison_duration(
     );
 }
 
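
After this refactor, `execute` plans directly with the Rust planner (the `catch_unwind` scaffolding is gone), records durations, and delegates structural comparison to the helpers that now live in `plan_compare` (`opt_plan_node_matches`, `diff_plan`). A toy, runnable sketch of that flow with stand-in plan types:

```
use std::time::Instant;

// Stand-in plan type; the router compares its real JS/Rust plan structures.
#[derive(Debug, PartialEq)]
struct Plan(String);

fn build_rust_plan(doc: &str) -> Result<Plan, String> {
    Ok(Plan(format!("plan for {doc}")))
}

fn main() {
    let js_plan = Plan("plan for { me { id } }".into());

    let start = Instant::now();
    let rust_result = build_rust_plan("{ me { id } }");
    let elapsed = start.elapsed().as_secs_f64();
    println!("rust planning took {elapsed}s");

    match rust_result {
        Ok(rust_plan) if rust_plan == js_plan => println!("plans match"),
        // The real diff_plan renders a line diff of the two plans for debugging.
        Ok(rust_plan) => println!("plans differ:\n-{:?}\n+{:?}", js_plan, rust_plan),
        Err(e) => println!("rust planner failed: {e}"),
    }
}
```
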
-// Specific comparison functions
-
-pub struct MatchFailure {
-    description: String,
-    backtrace: std::backtrace::Backtrace,
-}
-
-impl MatchFailure {
-    pub fn description(&self) -> String {
-        self.description.clone()
-    }
-
-    pub fn full_description(&self) -> String {
-        format!("{}\n\nBacktrace:\n{}", self.description, self.backtrace)
-    }
-
-    fn new(description: String) -> MatchFailure {
-        MatchFailure {
-            description,
-            backtrace: std::backtrace::Backtrace::force_capture(),
-        }
-    }
-
-    fn add_description(self: MatchFailure, description: &str) -> MatchFailure {
-        MatchFailure {
-            description: format!("{}\n{}", self.description, description),
-            backtrace: self.backtrace,
-        }
-    }
-}
-
-macro_rules! check_match {
-    ($pred:expr) => {
-        if !$pred {
-            return Err(MatchFailure::new(format!(
-                "mismatch at {}",
-                stringify!($pred)
-            )));
-        }
-    };
-}
-
-macro_rules! check_match_eq {
-    ($a:expr, $b:expr) => {
-        if $a != $b {
-            let message = format!(
-                "mismatch between {} and {}:\nleft: {:?}\nright: {:?}",
-                stringify!($a),
-                stringify!($b),
-                $a,
-                $b
-            );
-            return Err(MatchFailure::new(message));
-        }
-    };
-}
-
-fn fetch_node_matches(this: &FetchNode, other: &FetchNode) -> Result<(), MatchFailure> {
-    let FetchNode {
-        service_name,
-        requires,
-        variable_usages,
-        operation,
-        // ignored:
-        // reordered parallel fetches may have different names
-        operation_name: _,
-        operation_kind,
-        id,
-        input_rewrites,
-        output_rewrites,
-        context_rewrites,
-        // ignored
-        schema_aware_hash: _,
-        // ignored:
-        // when running in comparison mode, the rust plan node does not have
-        // the attached cache key metadata for authorisation, since the rust plan is
-        // not going to be the one being executed.
-        authorization: _,
-    } = this;
-
-    check_match_eq!(*service_name, other.service_name);
-    check_match_eq!(*operation_kind, other.operation_kind);
-    check_match_eq!(*id, other.id);
-    check_match!(same_requires(requires, &other.requires));
-    check_match!(vec_matches_sorted(variable_usages, &other.variable_usages));
-    check_match!(same_rewrites(input_rewrites, &other.input_rewrites));
-    check_match!(same_rewrites(output_rewrites, &other.output_rewrites));
-    check_match!(same_rewrites(context_rewrites, &other.context_rewrites));
-    operation_matches(operation, &other.operation)?;
-    Ok(())
-}
-
-fn subscription_primary_matches(
-    this: &SubscriptionNode,
-    other: &SubscriptionNode,
-) -> Result<(), MatchFailure> {
-    let SubscriptionNode {
-        service_name,
-        variable_usages,
-        operation,
-        operation_name: _, // ignored (reordered parallel fetches may have different names)
-        operation_kind,
-        input_rewrites,
-        output_rewrites,
-    } = this;
-    check_match_eq!(*service_name, other.service_name);
-    check_match_eq!(*operation_kind, other.operation_kind);
-    check_match!(vec_matches_sorted(variable_usages, &other.variable_usages));
-    check_match!(same_rewrites(input_rewrites, &other.input_rewrites));
-    check_match!(same_rewrites(output_rewrites, &other.output_rewrites));
-    operation_matches(operation, &other.operation)?;
-    Ok(())
-}
-
-fn operation_matches(
-    this: &SubgraphOperation,
-    other: &SubgraphOperation,
-) -> Result<(), MatchFailure> {
-    document_str_matches(this.as_serialized(), other.as_serialized())
-}
-
-// Compare operation document strings such as query or just selection set.
-fn document_str_matches(this: &str, other: &str) -> Result<(), MatchFailure> {
-    let this_ast = match ast::Document::parse(this, "this_operation.graphql") {
-        Ok(document) => document,
-        Err(_) => {
-            return Err(MatchFailure::new(
-                "Failed to parse this operation".to_string(),
-            ));
-        }
-    };
-    let other_ast = match ast::Document::parse(other, "other_operation.graphql") {
-        Ok(document) => document,
-        Err(_) => {
-            return Err(MatchFailure::new(
-                "Failed to parse other operation".to_string(),
-            ));
-        }
-    };
-    same_ast_document(&this_ast, &other_ast)
-}
-
-fn opt_document_string_matches(
-    this: &Option<String>,
-    other: &Option<String>,
-) -> Result<(), MatchFailure> {
-    match (this, other) {
-        (None, None) => Ok(()),
-        (Some(this_sel), Some(other_sel)) => document_str_matches(this_sel, other_sel),
-        _ => Err(MatchFailure::new(format!(
-            "mismatched at opt_document_string_matches\nleft: {:?}\nright: {:?}",
-            this, other
-        ))),
-    }
-}
-
-// The rest is calling the comparison functions above instead of `PartialEq`,
-// but otherwise behave just like `PartialEq`:
-
-// Note: Reexported under `apollo_router::_private`
-pub fn plan_matches(js_plan: &QueryPlanResult, rust_plan: &QueryPlan) -> Result<(), MatchFailure> {
-    let js_root_node = &js_plan.query_plan.node;
-    let rust_root_node = convert_root_query_plan_node(rust_plan);
-    opt_plan_node_matches(js_root_node, &rust_root_node)
-}
-
-pub fn diff_plan(js_plan: &QueryPlanResult, rust_plan: &QueryPlan) -> String {
-    let js_root_node = &js_plan.query_plan.node;
-    let rust_root_node = convert_root_query_plan_node(rust_plan);
-
-    match (js_root_node, rust_root_node) {
-        (None, None) => String::from(""),
-        (None, Some(rust)) => {
-            let rust = &format!("{rust:#?}");
-            let differences = diff::lines("", rust);
-            render_diff(&differences)
-        }
-        (Some(js), None) => {
-            let js = &format!("{js:#?}");
-            let differences = diff::lines(js, "");
-            render_diff(&differences)
-        }
-        (Some(js), Some(rust)) => {
-            let rust = &format!("{rust:#?}");
-            let js = &format!("{js:#?}");
-            let differences = diff::lines(js, rust);
-            render_diff(&differences)
-        }
-    }
-}
-
-fn opt_plan_node_matches(
-    this: &Option<impl Borrow<PlanNode>>,
-    other: &Option<impl Borrow<PlanNode>>,
-) -> Result<(), MatchFailure> {
-    match (this, other) {
-        (None, None) => Ok(()),
-        (None, Some(_)) | (Some(_), None) => Err(MatchFailure::new(format!(
-            "mismatch at opt_plan_node_matches\nleft: {:?}\nright: {:?}",
-            this.is_some(),
-            other.is_some()
-        ))),
-        (Some(this), Some(other)) => plan_node_matches(this.borrow(), other.borrow()),
-    }
-}
-
-//==================================================================================================
-// Vec comparison functions
-
-fn vec_matches<T>(this: &[T], other: &[T], item_matches: impl Fn(&T, &T) -> bool) -> bool {
-    this.len() == other.len()
-        && std::iter::zip(this, other).all(|(this, other)| item_matches(this, other))
-}
-
-fn vec_matches_result<T>(
-    this: &[T],
-    other: &[T],
-    item_matches: impl Fn(&T, &T) -> Result<(), MatchFailure>,
-) -> Result<(), MatchFailure> {
-    check_match_eq!(this.len(), other.len());
-    std::iter::zip(this, other)
-        .enumerate()
-        .try_fold((), |_acc, (index, (this, other))| {
-            item_matches(this, other)
-                .map_err(|err| err.add_description(&format!("under item[{}]", index)))
-        })?;
-    Ok(())
-}
-
-fn vec_matches_sorted<T: Ord + Clone>(this: &[T], other: &[T]) -> bool {
-    let mut this_sorted = this.to_owned();
-    let mut other_sorted = other.to_owned();
-    this_sorted.sort();
-    other_sorted.sort();
-    vec_matches(&this_sorted, &other_sorted, T::eq)
-}
-
-fn vec_matches_sorted_by<T: Clone>(
-    this: &[T],
-    other: &[T],
-    compare: impl Fn(&T, &T) -> std::cmp::Ordering,
-    item_matches: impl Fn(&T, &T) -> bool,
-) -> bool {
-    let mut this_sorted = this.to_owned();
-    let mut other_sorted = other.to_owned();
-    this_sorted.sort_by(&compare);
-    other_sorted.sort_by(&compare);
-    vec_matches(&this_sorted, &other_sorted, item_matches)
-}
-
-fn vec_matches_result_sorted_by<T: Clone>(
-    this: &[T],
-    other: &[T],
-    compare: impl Fn(&T, &T) -> std::cmp::Ordering,
-    item_matches: impl Fn(&T, &T) -> Result<(), MatchFailure>,
-) -> Result<(), MatchFailure> {
-    check_match_eq!(this.len(), other.len());
-    let mut this_sorted = this.to_owned();
-    let mut other_sorted = other.to_owned();
-    this_sorted.sort_by(&compare);
-    other_sorted.sort_by(&compare);
-    std::iter::zip(&this_sorted, &other_sorted)
-        .try_fold((), |_acc, (this, other)| item_matches(this, other))?;
-    Ok(())
-}
-
-// `this` vector includes `other` vector as a set
-fn vec_includes_as_set<T>(this: &[T], other: &[T], item_matches: impl Fn(&T, &T) -> bool) -> bool {
-    other.iter().all(|other_node| {
-        this.iter()
-            .any(|this_node| item_matches(this_node, other_node))
-    })
-}
-
-// performs a set comparison, ignoring order
-fn vec_matches_as_set<T>(this: &[T], other: &[T], item_matches: impl Fn(&T, &T) -> bool) -> bool {
-    // Set-inclusion test in both directions
-    this.len() == other.len()
-        && vec_includes_as_set(this, other, &item_matches)
-        && vec_includes_as_set(other, this, &item_matches)
-}
-
-// Forward/reverse mappings from one Vec items (indices) to another.
-type VecMapping = (HashMap<usize, usize>, HashMap<usize, usize>);
-
-// performs a set comparison, ignoring order
-// and returns a mapping from `this` to `other`.
-fn vec_matches_as_set_with_mapping<T>(
-    this: &[T],
-    other: &[T],
-    item_matches: impl Fn(&T, &T) -> bool,
-) -> VecMapping {
-    // Set-inclusion test in both directions
-    // - record forward/reverse mapping from this items <-> other items for reporting mismatches
-    let mut forward_map: HashMap<usize, usize> = HashMap::new();
-    let mut reverse_map: HashMap<usize, usize> = HashMap::new();
-    for (this_pos, this_node) in this.iter().enumerate() {
-        if let Some(other_pos) = other
-            .iter()
-            .position(|other_node| item_matches(this_node, other_node))
-        {
-            forward_map.insert(this_pos, other_pos);
-            reverse_map.insert(other_pos, this_pos);
-        }
-    }
-    for (other_pos, other_node) in other.iter().enumerate() {
-        if reverse_map.contains_key(&other_pos) {
-            continue;
-        }
-        if let Some(this_pos) = this
-            .iter()
-            .position(|this_node| item_matches(this_node, other_node))
-        {
-            forward_map.insert(this_pos, other_pos);
-            reverse_map.insert(other_pos, this_pos);
-        }
-    }
-    (forward_map, reverse_map)
-}
-
-// Returns a formatted mismatch message and an optional pair of mismatched positions if the pair
-// are the only remaining unmatched items.
-fn format_mismatch_as_set(
-    this_len: usize,
-    other_len: usize,
-    forward_map: &HashMap<usize, usize>,
-    reverse_map: &HashMap<usize, usize>,
-) -> Result<(String, Option<(usize, usize)>), std::fmt::Error> {
-    let mut ret = String::new();
-    let buf = &mut ret;
-    write!(buf, "- mapping from left to right: [")?;
-    let mut this_missing_pos = None;
-    for this_pos in 0..this_len {
-        if this_pos != 0 {
-            write!(buf, ", ")?;
-        }
-        if let Some(other_pos) = forward_map.get(&this_pos) {
-            write!(buf, "{}", other_pos)?;
-        } else {
-            this_missing_pos = Some(this_pos);
-            write!(buf, "?")?;
-        }
-    }
-    writeln!(buf, "]")?;
-
-    write!(buf, "- left-over on the right: [")?;
-    let mut other_missing_count = 0;
-    let mut other_missing_pos = None;
-    for other_pos in 0..other_len {
-        if reverse_map.get(&other_pos).is_none() {
-            if other_missing_count != 0 {
-                write!(buf, ", ")?;
-            }
-            other_missing_count += 1;
-            other_missing_pos = Some(other_pos);
-            write!(buf, "{}", other_pos)?;
-        }
-    }
-    write!(buf, "]")?;
-    let unmatched_pair = if let (Some(this_missing_pos), Some(other_missing_pos)) =
-        (this_missing_pos, other_missing_pos)
-    {
-        if this_len == 1 + forward_map.len() && other_len == 1 + reverse_map.len() {
-            // Special case: There are only one missing item on each side. They are supposed to
-            // match each other.
-            Some((this_missing_pos, other_missing_pos))
-        } else {
-            None
-        }
-    } else {
-        None
-    };
-    Ok((ret, unmatched_pair))
-}
-
-fn vec_matches_result_as_set<T>(
-    this: &[T],
-    other: &[T],
-    item_matches: impl Fn(&T, &T) -> Result<(), MatchFailure>,
-) -> Result<VecMapping, MatchFailure> {
-    // Set-inclusion test in both directions
-    // - record forward/reverse mapping from this items <-> other items for reporting mismatches
-    let (forward_map, reverse_map) =
-        vec_matches_as_set_with_mapping(this, other, |a, b| item_matches(a, b).is_ok());
-    if forward_map.len() == this.len() && reverse_map.len() == other.len() {
-        Ok((forward_map, reverse_map))
-    } else {
-        // report mismatch
-        let Ok((message, unmatched_pair)) =
-            format_mismatch_as_set(this.len(), other.len(), &forward_map, &reverse_map)
-        else {
-            // Exception: Unable to format mismatch report => fallback to most generic message
-            return Err(MatchFailure::new(
-                "mismatch at vec_matches_result_as_set (failed to format mismatched sets)"
-                    .to_string(),
-            ));
-        };
-        if let Some(unmatched_pair) = unmatched_pair {
-            // found a unique pair to report => use that pair's error message
-            let Err(err) = item_matches(&this[unmatched_pair.0], &other[unmatched_pair.1]) else {
-                // Exception: Unable to format unique pair mismatch error => fallback to overall report
-                return Err(MatchFailure::new(format!(
-                    "mismatched sets (failed to format unique pair mismatch error):\n{}",
-                    message
-                )));
-            };
-            Err(err.add_description(&format!(
-                "under a sole unmatched pair ({} -> {}) in a set comparison",
-                unmatched_pair.0, unmatched_pair.1
-            )))
-        } else {
-            Err(MatchFailure::new(format!("mismatched sets:\n{}", message)))
-        }
-    }
-}
-
-//==================================================================================================
-// PlanNode comparison functions
-
-fn option_to_string(name: Option<impl ToString>) -> String {
-    name.map_or_else(|| "".to_string(), |name| name.to_string())
-}
-
-fn plan_node_matches(this: &PlanNode, other: &PlanNode) -> Result<(), MatchFailure> {
-    match (this, other) {
-        (PlanNode::Sequence { nodes: this }, PlanNode::Sequence { nodes: other }) => {
-            vec_matches_result(this, other, plan_node_matches)
-                .map_err(|err| err.add_description("under Sequence node"))?;
-        }
-        (PlanNode::Parallel { nodes: this }, PlanNode::Parallel { nodes: other }) => {
-            vec_matches_result_as_set(this, other, plan_node_matches)
-                .map_err(|err| err.add_description("under Parallel node"))?;
-        }
-        (PlanNode::Fetch(this), PlanNode::Fetch(other)) => {
-            fetch_node_matches(this, other).map_err(|err| {
-                err.add_description(&format!(
-                    "under Fetch node (operation name: {})",
-                    option_to_string(this.operation_name.as_ref())
-                ))
-            })?;
-        }
-        (PlanNode::Flatten(this), PlanNode::Flatten(other)) => {
-            flatten_node_matches(this, other).map_err(|err| {
-                err.add_description(&format!("under Flatten node (path: {})", this.path))
-            })?;
-        }
-        (
-            PlanNode::Defer { primary, deferred },
-            PlanNode::Defer {
-                primary: other_primary,
-                deferred: other_deferred,
-            },
-        ) => {
-            defer_primary_node_matches(primary, other_primary)?;
-            vec_matches_result(deferred, other_deferred, deferred_node_matches)?;
-        }
-        (
-            PlanNode::Subscription { primary, rest },
-            PlanNode::Subscription {
-                primary: other_primary,
-                rest: other_rest,
-            },
-        ) => {
-            subscription_primary_matches(primary, other_primary)?;
-            opt_plan_node_matches(rest, other_rest)
-                .map_err(|err| err.add_description("under Subscription"))?;
-        }
-        (
-            PlanNode::Condition {
-                condition,
-                if_clause,
-                else_clause,
-            },
-            PlanNode::Condition {
-                condition: other_condition,
-                if_clause: other_if_clause,
-                else_clause: other_else_clause,
-            },
-        ) => {
-            check_match_eq!(condition, other_condition);
-            opt_plan_node_matches(if_clause, other_if_clause)
-                .map_err(|err| err.add_description("under Condition node (if_clause)"))?;
-            opt_plan_node_matches(else_clause, other_else_clause)
-                .map_err(|err| err.add_description("under Condition node (else_clause)"))?;
-        }
-        _ => {
-            return Err(MatchFailure::new(format!(
-                "mismatched plan node types\nleft: {:?}\nright: {:?}",
-                this, other
-            )))
-        }
-    };
-    Ok(())
-}
-
-fn defer_primary_node_matches(this: &Primary, other: &Primary) -> Result<(), MatchFailure> {
-    let Primary { subselection, node } = this;
-    opt_document_string_matches(subselection, &other.subselection)
-        .map_err(|err| err.add_description("under defer primary subselection"))?;
-    opt_plan_node_matches(node, &other.node)
-        .map_err(|err| err.add_description("under defer primary plan node"))
-}
-
-fn deferred_node_matches(this: &DeferredNode, other: &DeferredNode) -> Result<(), MatchFailure> {
-    let DeferredNode {
-        depends,
-        label,
-        query_path,
-        subselection,
-        node,
-    } = this;
-
-    check_match_eq!(*depends, other.depends);
-    check_match_eq!(*label, other.label);
-    check_match_eq!(*query_path, other.query_path);
-    opt_document_string_matches(subselection, &other.subselection)
-        .map_err(|err| err.add_description("under deferred subselection"))?;
-    opt_plan_node_matches(node, &other.node)
-        .map_err(|err| err.add_description("under deferred node"))
-}
-
-fn flatten_node_matches(this: &FlattenNode, other: &FlattenNode) -> Result<(), MatchFailure> {
-    let FlattenNode { path, node } = this;
-    check_match_eq!(*path, other.path);
-    plan_node_matches(node, &other.node)
-}
-
-// Copied and modified from `apollo_federation::operation::SelectionKey`
-#[derive(Debug, Clone, PartialEq, Eq, Hash)]
-enum SelectionKey {
-    Field {
-        /// The field alias (if specified) or field name in the resulting selection set.
-        response_name: Name,
-        directives: ast::DirectiveList,
-    },
-    FragmentSpread {
-        /// The name of the fragment.
-        fragment_name: Name,
-        directives: ast::DirectiveList,
-    },
-    InlineFragment {
-        /// The optional type condition of the fragment.
-        type_condition: Option<Name>,
-        directives: ast::DirectiveList,
-    },
-}
-
-fn get_selection_key(selection: &Selection) -> SelectionKey {
-    match selection {
-        Selection::Field(field) => SelectionKey::Field {
-            response_name: field.response_name().clone(),
-            directives: Default::default(),
-        },
-        Selection::InlineFragment(fragment) => SelectionKey::InlineFragment {
-            type_condition: fragment.type_condition.clone(),
-            directives: Default::default(),
-        },
-    }
-}
-
-fn hash_value<T: Hash>(x: &T) -> u64 {
-    let mut hasher = DefaultHasher::new();
-    x.hash(&mut hasher);
-    hasher.finish()
-}
-
-fn hash_selection_key(selection: &Selection) -> u64 {
-    hash_value(&get_selection_key(selection))
-}
-
-// Note: This `Selection` struct is a limited version used for the `requires` field.
-fn same_selection(x: &Selection, y: &Selection) -> bool {
-    match (x, y) {
-        (Selection::Field(x), Selection::Field(y)) => {
-            x.name == y.name
-                && x.alias == y.alias
-                && match (&x.selections, &y.selections) {
-                    (Some(x), Some(y)) => same_selection_set_sorted(x, y),
-                    (None, None) => true,
-                    _ => false,
-                }
-        }
-        (Selection::InlineFragment(x), Selection::InlineFragment(y)) => {
-            x.type_condition == y.type_condition
-                && same_selection_set_sorted(&x.selections, &y.selections)
-        }
-        _ => false,
-    }
-}
-
-fn same_selection_set_sorted(x: &[Selection], y: &[Selection]) -> bool {
-    fn sorted_by_selection_key(s: &[Selection]) -> Vec<&Selection> {
-        let mut sorted: Vec<&Selection> = s.iter().collect();
-        sorted.sort_by_key(|x| hash_selection_key(x));
-        sorted
-    }
-
-    if x.len() != y.len() {
-        return false;
-    }
-    sorted_by_selection_key(x)
-        .into_iter()
-        .zip(sorted_by_selection_key(y))
-        .all(|(x, y)| same_selection(x, y))
-}
-
-fn same_requires(x: &[Selection], y: &[Selection]) -> bool {
-    vec_matches_as_set(x, y, same_selection)
-}
-
-fn same_rewrites(x: &Option<Vec<DataRewrite>>, y: &Option<Vec<DataRewrite>>) -> bool {
-    match (x, y) {
-        (None, None) => true,
-        (Some(x), Some(y)) => vec_matches_as_set(x, y, |a, b| a == b),
-        _ => false,
-    }
-}
-
-//==================================================================================================
-// AST comparison functions
-
-fn same_ast_document(x: &ast::Document, y: &ast::Document) -> Result<(), MatchFailure> {
-    fn split_definitions(
-        doc: &ast::Document,
-    ) -> (
-        Vec<&ast::OperationDefinition>,
-        Vec<&ast::FragmentDefinition>,
-        Vec<&ast::Definition>,
-    ) {
-        let mut operations: Vec<&ast::OperationDefinition> = Vec::new();
-        let mut fragments: Vec<&ast::FragmentDefinition> = Vec::new();
-        let mut others: Vec<&ast::Definition> = Vec::new();
-        for def in doc.definitions.iter() {
-            match def {
-                ast::Definition::OperationDefinition(op) => operations.push(op),
-                ast::Definition::FragmentDefinition(frag) => fragments.push(frag),
-                _ => others.push(def),
-            }
-        }
-        (operations, fragments, others)
-    }
-
-    let (x_ops, x_frags, x_others) = split_definitions(x);
-    let (y_ops, y_frags, y_others) = split_definitions(y);
-
-    debug_assert!(x_others.is_empty(), "Unexpected definition types");
-    debug_assert!(y_others.is_empty(), "Unexpected definition types");
-    debug_assert!(
-        x_ops.len() == y_ops.len(),
-        "Different number of operation definitions"
-    );
-
-    check_match_eq!(x_frags.len(), y_frags.len());
-    let mut fragment_map: HashMap<Name, Name> = HashMap::new();
-    // Assumption: x_frags and y_frags are topologically sorted.
-    // Thus, we can build the fragment name mapping in a single pass and compare
-    // fragment definitions using the mapping at the same time, since earlier fragments
-    // will never reference later fragments.
- x_frags.iter().try_fold((), |_, x_frag| { - let y_frag = y_frags - .iter() - .find(|y_frag| same_ast_fragment_definition(x_frag, y_frag, &fragment_map).is_ok()); - if let Some(y_frag) = y_frag { - if x_frag.name != y_frag.name { - // record it only if they are not identical - fragment_map.insert(x_frag.name.clone(), y_frag.name.clone()); - } - Ok(()) - } else { - Err(MatchFailure::new(format!( - "mismatch: no matching fragment definition for {}", - x_frag.name - ))) - } - })?; - - check_match_eq!(x_ops.len(), y_ops.len()); - x_ops - .iter() - .zip(y_ops.iter()) - .try_fold((), |_, (x_op, y_op)| { - same_ast_operation_definition(x_op, y_op, &fragment_map) - .map_err(|err| err.add_description("under operation definition")) - })?; - Ok(()) -} - -fn same_ast_operation_definition( - x: &ast::OperationDefinition, - y: &ast::OperationDefinition, - fragment_map: &HashMap, -) -> Result<(), MatchFailure> { - // Note: Operation names are ignored, since parallel fetches may have different names. - check_match_eq!(x.operation_type, y.operation_type); - vec_matches_result_sorted_by( - &x.variables, - &y.variables, - |a, b| a.name.cmp(&b.name), - |a, b| same_variable_definition(a, b), - ) - .map_err(|err| err.add_description("under Variable definition"))?; - check_match_eq!(x.directives, y.directives); - check_match!(same_ast_selection_set_sorted( - &x.selection_set, - &y.selection_set, - fragment_map, - )); - Ok(()) -} - -// `x` may be coerced to `y`. -// - `x` should be a value from JS QP. -// - `y` should be a value from Rust QP. -// - Assume: x and y are already checked not equal. -// Due to coercion differences, we need to compare AST values with special cases. -fn ast_value_maybe_coerced_to(x: &ast::Value, y: &ast::Value) -> bool { - match (x, y) { - // Special case 1: JS QP may convert an enum value into string. - // - In this case, compare them as strings. - (ast::Value::String(ref x), ast::Value::Enum(ref y)) => { - if x == y.as_str() { - return true; - } - } - - // Special case 2: Rust QP expands a object value by filling in its - // default field values. - // - If the Rust QP object value subsumes the JS QP object value, consider it a match. - // - Assuming the Rust QP object value has only default field values. - // - Warning: This is an unsound heuristic. - (ast::Value::Object(ref x), ast::Value::Object(ref y)) => { - if vec_includes_as_set(y, x, |(yy_name, yy_val), (xx_name, xx_val)| { - xx_name == yy_name - && (xx_val == yy_val || ast_value_maybe_coerced_to(xx_val, yy_val)) - }) { - return true; - } - } - - // Special case 3: JS QP may convert string to int for custom scalars, while Rust doesn't. - // - Note: This conversion seems a bit difficult to implement in the `apollo-federation`'s - // `coerce_value` function, since IntValue's constructor is private to the crate. - (ast::Value::Int(ref x), ast::Value::String(ref y)) => { - if x.as_str() == y { - return true; - } - } - - // Recurse into list items. - (ast::Value::List(ref x), ast::Value::List(ref y)) => { - if vec_matches(x, y, |xx, yy| { - xx == yy || ast_value_maybe_coerced_to(xx, yy) - }) { - return true; - } - } - - _ => {} // otherwise, fall through - } - false -} - -// Use this function, instead of `VariableDefinition`'s `PartialEq` implementation, -// due to known differences. 
-fn same_variable_definition( - x: &ast::VariableDefinition, - y: &ast::VariableDefinition, -) -> Result<(), MatchFailure> { - check_match_eq!(x.name, y.name); - check_match_eq!(x.ty, y.ty); - if x.default_value != y.default_value { - if let (Some(x), Some(y)) = (&x.default_value, &y.default_value) { - if ast_value_maybe_coerced_to(x, y) { - return Ok(()); - } - } - - return Err(MatchFailure::new(format!( - "mismatch between default values:\nleft: {:?}\nright: {:?}", - x.default_value, y.default_value - ))); - } - check_match_eq!(x.directives, y.directives); - Ok(()) -} - -fn same_ast_fragment_definition( - x: &ast::FragmentDefinition, - y: &ast::FragmentDefinition, - fragment_map: &HashMap, -) -> Result<(), MatchFailure> { - // Note: Fragment names at definitions are ignored. - check_match_eq!(x.type_condition, y.type_condition); - check_match_eq!(x.directives, y.directives); - check_match!(same_ast_selection_set_sorted( - &x.selection_set, - &y.selection_set, - fragment_map, - )); - Ok(()) -} - -fn same_ast_argument_value(x: &ast::Value, y: &ast::Value) -> bool { - x == y || ast_value_maybe_coerced_to(x, y) -} - -fn same_ast_argument(x: &ast::Argument, y: &ast::Argument) -> bool { - x.name == y.name && same_ast_argument_value(&x.value, &y.value) -} - -fn same_ast_arguments(x: &[Node], y: &[Node]) -> bool { - vec_matches_sorted_by( - x, - y, - |a, b| a.name.cmp(&b.name), - |a, b| same_ast_argument(a, b), - ) -} - -fn same_directives(x: &ast::DirectiveList, y: &ast::DirectiveList) -> bool { - vec_matches_sorted_by( - x, - y, - |a, b| a.name.cmp(&b.name), - |a, b| a.name == b.name && same_ast_arguments(&a.arguments, &b.arguments), - ) -} - -fn get_ast_selection_key( - selection: &ast::Selection, - fragment_map: &HashMap, -) -> SelectionKey { - match selection { - ast::Selection::Field(field) => SelectionKey::Field { - response_name: field.response_name().clone(), - directives: field.directives.clone(), - }, - ast::Selection::FragmentSpread(fragment) => SelectionKey::FragmentSpread { - fragment_name: fragment_map - .get(&fragment.fragment_name) - .unwrap_or(&fragment.fragment_name) - .clone(), - directives: fragment.directives.clone(), - }, - ast::Selection::InlineFragment(fragment) => SelectionKey::InlineFragment { - type_condition: fragment.type_condition.clone(), - directives: fragment.directives.clone(), - }, - } -} - -fn same_ast_selection( - x: &ast::Selection, - y: &ast::Selection, - fragment_map: &HashMap, -) -> bool { - match (x, y) { - (ast::Selection::Field(x), ast::Selection::Field(y)) => { - x.name == y.name - && x.alias == y.alias - && same_ast_arguments(&x.arguments, &y.arguments) - && same_directives(&x.directives, &y.directives) - && same_ast_selection_set_sorted(&x.selection_set, &y.selection_set, fragment_map) - } - (ast::Selection::FragmentSpread(x), ast::Selection::FragmentSpread(y)) => { - let mapped_fragment_name = fragment_map - .get(&x.fragment_name) - .unwrap_or(&x.fragment_name); - *mapped_fragment_name == y.fragment_name - && same_directives(&x.directives, &y.directives) - } - (ast::Selection::InlineFragment(x), ast::Selection::InlineFragment(y)) => { - x.type_condition == y.type_condition - && same_directives(&x.directives, &y.directives) - && same_ast_selection_set_sorted(&x.selection_set, &y.selection_set, fragment_map) - } - _ => false, - } -} - -fn hash_ast_selection_key(selection: &ast::Selection, fragment_map: &HashMap) -> u64 { - hash_value(&get_ast_selection_key(selection, fragment_map)) -} - -// Selections are sorted and compared after renaming x's 
fragment spreads according to the -// fragment_map. -fn same_ast_selection_set_sorted( - x: &[ast::Selection], - y: &[ast::Selection], - fragment_map: &HashMap, -) -> bool { - fn sorted_by_selection_key<'a>( - s: &'a [ast::Selection], - fragment_map: &HashMap, - ) -> Vec<&'a ast::Selection> { - let mut sorted: Vec<&ast::Selection> = s.iter().collect(); - sorted.sort_by_key(|x| hash_ast_selection_key(x, fragment_map)); - sorted - } - - if x.len() != y.len() { - return false; - } - let x_sorted = sorted_by_selection_key(x, fragment_map); // Map fragment spreads - let y_sorted = sorted_by_selection_key(y, &Default::default()); // Don't map fragment spreads - x_sorted - .into_iter() - .zip(y_sorted) - .all(|(x, y)| same_ast_selection(x, y, fragment_map)) -} - -#[cfg(test)] -mod ast_comparison_tests { - use super::*; - - #[test] - fn test_query_variable_decl_order() { - let op_x = r#"query($qv2: String!, $qv1: Int!) { x(arg1: $qv1, arg2: $qv2) }"#; - let op_y = r#"query($qv1: Int!, $qv2: String!) { x(arg1: $qv1, arg2: $qv2) }"#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); - } - - #[test] - fn test_query_variable_decl_enum_value_coercion() { - // Note: JS QP converts enum default values into strings. - let op_x = r#"query($qv1: E! = "default_value") { x(arg1: $qv1) }"#; - let op_y = r#"query($qv1: E! = default_value) { x(arg1: $qv1) }"#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); - } - - #[test] - fn test_query_variable_decl_object_value_coercion_empty_case() { - // Note: Rust QP expands empty object default values by filling in its default field - // values. - let op_x = r#"query($qv1: T! = {}) { x(arg1: $qv1) }"#; - let op_y = - r#"query($qv1: T! = { field1: true, field2: "default_value" }) { x(arg1: $qv1) }"#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); - } - - #[test] - fn test_query_variable_decl_object_value_coercion_non_empty_case() { - // Note: Rust QP expands an object default values by filling in its default field values. - let op_x = r#"query($qv1: T! = {field1: true}) { x(arg1: $qv1) }"#; - let op_y = - r#"query($qv1: T! = { field1: true, field2: "default_value" }) { x(arg1: $qv1) }"#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); - } - - #[test] - fn test_query_variable_decl_list_of_object_value_coercion() { - // Testing a combination of list and object value coercion. - let op_x = r#"query($qv1: [T!]! = [{}]) { x(arg1: $qv1) }"#; - let op_y = - r#"query($qv1: [T!]! = [{field1: true, field2: "default_value"}]) { x(arg1: $qv1) }"#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); - } - - #[test] - fn test_entities_selection_order() { - let op_x = r#" - query subgraph1__1($representations: [_Any!]!) { - _entities(representations: $representations) { x { w } y } - } - "#; - let op_y = r#" - query subgraph1__1($representations: [_Any!]!) 
{ - _entities(representations: $representations) { y x { w } } - } - "#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); - } - - #[test] - fn test_top_level_selection_order() { - let op_x = r#"{ x { w z } y }"#; - let op_y = r#"{ y x { z w } }"#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); - } - - #[test] - fn test_fragment_definition_order() { - let op_x = r#"{ q { ...f1 ...f2 } } fragment f1 on T { x y } fragment f2 on T { w z }"#; - let op_y = r#"{ q { ...f1 ...f2 } } fragment f2 on T { w z } fragment f1 on T { x y }"#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); - } - - #[test] - fn test_selection_argument_is_compared() { - let op_x = r#"{ x(arg1: "one") }"#; - let op_y = r#"{ x(arg1: "two") }"#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_err()); - } - - #[test] - fn test_selection_argument_order() { - let op_x = r#"{ x(arg1: "one", arg2: "two") }"#; - let op_y = r#"{ x(arg2: "two", arg1: "one") }"#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); - } - - #[test] - fn test_selection_directive_order() { - let op_x = r#"{ x @include(if:true) @skip(if:false) }"#; - let op_y = r#"{ x @skip(if:false) @include(if:true) }"#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); - } - - #[test] - fn test_string_to_id_coercion_difference() { - // JS QP coerces strings into integer for ID type, while Rust QP doesn't. - // This tests a special case that same_ast_document accepts this difference. - let op_x = r#"{ x(id: 123) }"#; - let op_y = r#"{ x(id: "123") }"#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); - } - - #[test] - fn test_fragment_definition_different_names() { - let op_x = r#"{ q { ...f1 ...f2 } } fragment f1 on T { x y } fragment f2 on T { w z }"#; - let op_y = r#"{ q { ...g1 ...g2 } } fragment g1 on T { x y } fragment g2 on T { w z }"#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); - } - - #[test] - fn test_fragment_definition_different_names_nested_1() { - // Nested fragments have the same name, only top-level fragments have different names. - let op_x = r#"{ q { ...f2 } } fragment f1 on T { x y } fragment f2 on T { z ...f1 }"#; - let op_y = r#"{ q { ...g2 } } fragment f1 on T { x y } fragment g2 on T { z ...f1 }"#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); - } - - #[test] - fn test_fragment_definition_different_names_nested_2() { - // Nested fragments have different names. 
- let op_x = r#"{ q { ...f2 } } fragment f1 on T { x y } fragment f2 on T { z ...f1 }"#; - let op_y = r#"{ q { ...g2 } } fragment g1 on T { x y } fragment g2 on T { z ...g1 }"#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); - } - - #[test] - fn test_fragment_definition_different_names_nested_3() { - // Nested fragments have different names. - // Also, fragment definitions are in different order. - let op_x = r#"{ q { ...f2 ...f3 } } fragment f1 on T { x y } fragment f2 on T { z ...f1 } fragment f3 on T { w } "#; - let op_y = r#"{ q { ...g2 ...g3 } } fragment g1 on T { x y } fragment g2 on T { w } fragment g3 on T { z ...g1 }"#; - let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); - let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); - assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); - } -} - -#[cfg(test)] -mod qp_selection_comparison_tests { - use serde_json::json; - - use super::*; - - #[test] - fn test_requires_comparison_with_same_selection_key() { - let requires_json = json!([ - { - "kind": "InlineFragment", - "typeCondition": "T", - "selections": [ - { - "kind": "Field", - "name": "id", - }, - ] - }, - { - "kind": "InlineFragment", - "typeCondition": "T", - "selections": [ - { - "kind": "Field", - "name": "id", - }, - { - "kind": "Field", - "name": "job", - } - ] - }, - ]); - - // The only difference between requires1 and requires2 is the order of selections. - // But, their items all have the same SelectionKey. - let requires1: Vec = serde_json::from_value(requires_json).unwrap(); - let requires2: Vec = requires1.iter().rev().cloned().collect(); - - // `same_selection_set_sorted` fails to match, since it doesn't account for - // two items with the same SelectionKey but in different order. - assert!(!same_selection_set_sorted(&requires1, &requires2)); - // `same_requires` should succeed. 
- assert!(same_requires(&requires1, &requires2)); - } -} - #[cfg(test)] mod tests { use std::time::Instant; diff --git a/apollo-router/src/query_planner/execution.rs b/apollo-router/src/query_planner/execution.rs index bd23f549ea..9a5b647350 100644 --- a/apollo-router/src/query_planner/execution.rs +++ b/apollo-router/src/query_planner/execution.rs @@ -82,7 +82,11 @@ impl QueryPlan { ) .await; if !deferred_fetches.is_empty() { - tracing::info!(monotonic_counter.apollo.router.operations.defer = 1u64); + u64_counter!( + "apollo.router.operations.defer", + "Number of requests that request deferred data", + 1 + ); } Response::builder().data(value).errors(errors).build() diff --git a/apollo-router/src/query_planner/fetch.rs b/apollo-router/src/query_planner/fetch.rs index 47069283ca..3ec89c1e95 100644 --- a/apollo-router/src/query_planner/fetch.rs +++ b/apollo-router/src/query_planner/fetch.rs @@ -521,7 +521,11 @@ impl FetchNode { self.response_at_path(parameters.schema, current_dir, paths, response); if let Some(id) = &self.id { if let Some(sender) = parameters.deferred_fetches.get(id.as_str()) { - tracing::info!(monotonic_counter.apollo.router.operations.defer.fetch = 1u64); + u64_counter!( + "apollo.router.operations.defer.fetch", + "Number of deferred responses fetched from subgraphs", + 1 + ); if let Err(e) = sender.clone().send((value.clone(), errors.clone())) { tracing::error!("error sending fetch result at path {} and id {:?} for deferred response building: {}", current_dir, self.id, e); } diff --git a/apollo-router/src/query_planner/mod.rs b/apollo-router/src/query_planner/mod.rs index ad23fbb80e..fb912e1d16 100644 --- a/apollo-router/src/query_planner/mod.rs +++ b/apollo-router/src/query_planner/mod.rs @@ -19,6 +19,7 @@ mod execution; pub(crate) mod fetch; mod labeler; mod plan; +pub(crate) mod plan_compare; pub(crate) mod rewrites; mod selection; mod subgraph_context; diff --git a/apollo-router/src/query_planner/plan_compare.rs b/apollo-router/src/query_planner/plan_compare.rs new file mode 100644 index 0000000000..7367ac2caa --- /dev/null +++ b/apollo-router/src/query_planner/plan_compare.rs @@ -0,0 +1,1303 @@ +// Semantic comparison of JS and Rust query plans + +use std::borrow::Borrow; +use std::collections::hash_map::HashMap; +use std::fmt::Write; +use std::hash::DefaultHasher; +use std::hash::Hash; +use std::hash::Hasher; + +use apollo_compiler::ast; +use apollo_compiler::Name; +use apollo_compiler::Node; +use apollo_federation::query_plan::QueryPlan as NativeQueryPlan; + +use super::convert::convert_root_query_plan_node; +use super::fetch::FetchNode; +use super::fetch::SubgraphOperation; +use super::rewrites::DataRewrite; +use super::selection::Selection; +use super::subscription::SubscriptionNode; +use super::DeferredNode; +use super::FlattenNode; +use super::PlanNode; +use super::Primary; +use super::QueryPlanResult; +use crate::json_ext::Path; +use crate::json_ext::PathElement; + +//================================================================================================== +// Public interface + +pub struct MatchFailure { + description: String, + backtrace: std::backtrace::Backtrace, +} + +impl MatchFailure { + pub fn description(&self) -> String { + self.description.clone() + } + + pub fn full_description(&self) -> String { + format!("{}\n\nBacktrace:\n{}", self.description, self.backtrace) + } + + fn new(description: String) -> MatchFailure { + MatchFailure { + description, + backtrace: std::backtrace::Backtrace::force_capture(), + } + } + + fn add_description(self: 
MatchFailure, description: &str) -> MatchFailure {
+        MatchFailure {
+            description: format!("{}\n{}", self.description, description),
+            backtrace: self.backtrace,
+        }
+    }
+}
+
+macro_rules! check_match {
+    ($pred:expr) => {
+        if !$pred {
+            return Err(MatchFailure::new(format!(
+                "mismatch at {}",
+                stringify!($pred)
+            )));
+        }
+    };
+}
+
+macro_rules! check_match_eq {
+    ($a:expr, $b:expr) => {
+        if $a != $b {
+            let message = format!(
+                "mismatch between {} and {}:\nleft: {:?}\nright: {:?}",
+                stringify!($a),
+                stringify!($b),
+                $a,
+                $b
+            );
+            return Err(MatchFailure::new(message));
+        }
+    };
+}
+
+// Note: Reexported under `apollo_router::_private`
+pub fn plan_matches(
+    js_plan: &QueryPlanResult,
+    rust_plan: &NativeQueryPlan,
+) -> Result<(), MatchFailure> {
+    let js_root_node = &js_plan.query_plan.node;
+    let rust_root_node = convert_root_query_plan_node(rust_plan);
+    opt_plan_node_matches(js_root_node, &rust_root_node)
+}
+
+// Note: Reexported under `apollo_router::_private`
+pub fn diff_plan(js_plan: &QueryPlanResult, rust_plan: &NativeQueryPlan) -> String {
+    let js_root_node = &js_plan.query_plan.node;
+    let rust_root_node = convert_root_query_plan_node(rust_plan);
+
+    match (js_root_node, rust_root_node) {
+        (None, None) => String::from(""),
+        (None, Some(rust)) => {
+            let rust = &format!("{rust:#?}");
+            let differences = diff::lines("", rust);
+            render_diff(&differences)
+        }
+        (Some(js), None) => {
+            let js = &format!("{js:#?}");
+            let differences = diff::lines(js, "");
+            render_diff(&differences)
+        }
+        (Some(js), Some(rust)) => {
+            let rust = &format!("{rust:#?}");
+            let js = &format!("{js:#?}");
+            let differences = diff::lines(js, rust);
+            render_diff(&differences)
+        }
+    }
+}
+
+// Note: Reexported under `apollo_router::_private`
+pub fn render_diff(differences: &[diff::Result<&str>]) -> String {
+    let mut output = String::new();
+    for diff_line in differences {
+        match diff_line {
+            diff::Result::Left(l) => {
+                let trimmed = l.trim();
+                if !trimmed.starts_with('#') && !trimmed.is_empty() {
+                    writeln!(&mut output, "-{l}").expect("write will never fail");
+                } else {
+                    writeln!(&mut output, " {l}").expect("write will never fail");
+                }
+            }
+            diff::Result::Both(l, _) => {
+                writeln!(&mut output, " {l}").expect("write will never fail");
+            }
+            diff::Result::Right(r) => {
+                let trimmed = r.trim();
+                if trimmed != "---" && !trimmed.is_empty() {
+                    writeln!(&mut output, "+{r}").expect("write will never fail");
+                }
+            }
+        }
+    }
+    output
+}
+
+//==================================================================================================
+// Vec comparison functions
+
+fn vec_matches<T>(this: &[T], other: &[T], item_matches: impl Fn(&T, &T) -> bool) -> bool {
+    this.len() == other.len()
+        && std::iter::zip(this, other).all(|(this, other)| item_matches(this, other))
+}
+
+fn vec_matches_result<T>(
+    this: &[T],
+    other: &[T],
+    item_matches: impl Fn(&T, &T) -> Result<(), MatchFailure>,
+) -> Result<(), MatchFailure> {
+    check_match_eq!(this.len(), other.len());
+    std::iter::zip(this, other)
+        .enumerate()
+        .try_fold((), |_acc, (index, (this, other))| {
+            item_matches(this, other)
+                .map_err(|err| err.add_description(&format!("under item[{}]", index)))
+        })?;
+    Ok(())
+}
+
+fn vec_matches_sorted<T: Ord + Clone>(this: &[T], other: &[T]) -> bool {
+    let mut this_sorted = this.to_owned();
+    let mut other_sorted = other.to_owned();
+    this_sorted.sort();
+    other_sorted.sort();
+    vec_matches(&this_sorted, &other_sorted, T::eq)
+}
+
+fn vec_matches_sorted_by<T: Clone>(
+    this: &[T],
+    other: &[T],
+    compare: impl Fn(&T, &T) -> std::cmp::Ordering,
+    item_matches: impl Fn(&T, &T) -> bool,
+) -> bool {
+    let mut this_sorted = this.to_owned();
+    let mut other_sorted = other.to_owned();
+    this_sorted.sort_by(&compare);
+    other_sorted.sort_by(&compare);
+    vec_matches(&this_sorted, &other_sorted, item_matches)
+}
+
+fn vec_matches_result_sorted_by<T: Clone>(
+    this: &[T],
+    other: &[T],
+    compare: impl Fn(&T, &T) -> std::cmp::Ordering,
+    item_matches: impl Fn(&T, &T) -> Result<(), MatchFailure>,
+) -> Result<(), MatchFailure> {
+    check_match_eq!(this.len(), other.len());
+    let mut this_sorted = this.to_owned();
+    let mut other_sorted = other.to_owned();
+    this_sorted.sort_by(&compare);
+    other_sorted.sort_by(&compare);
+    std::iter::zip(&this_sorted, &other_sorted)
+        .try_fold((), |_acc, (this, other)| item_matches(this, other))?;
+    Ok(())
+}
+
+// `this` vector includes `other` vector as a set
+fn vec_includes_as_set<T>(this: &[T], other: &[T], item_matches: impl Fn(&T, &T) -> bool) -> bool {
+    other.iter().all(|other_node| {
+        this.iter()
+            .any(|this_node| item_matches(this_node, other_node))
+    })
+}
+
+// performs a set comparison, ignoring order
+fn vec_matches_as_set<T>(this: &[T], other: &[T], item_matches: impl Fn(&T, &T) -> bool) -> bool {
+    // Set-inclusion test in both directions
+    this.len() == other.len()
+        && vec_includes_as_set(this, other, &item_matches)
+        && vec_includes_as_set(other, this, &item_matches)
+}
+
+// Forward/reverse mappings between the item positions (indices) of one Vec and another.
+type VecMapping = (HashMap<usize, usize>, HashMap<usize, usize>);
+
+// performs a set comparison, ignoring order,
+// and returns a mapping from `this` to `other`.
+fn vec_matches_as_set_with_mapping<T>(
+    this: &[T],
+    other: &[T],
+    item_matches: impl Fn(&T, &T) -> bool,
+) -> VecMapping {
+    // Set-inclusion test in both directions
+    // - record forward/reverse mapping from this items <-> other items for reporting mismatches
+    let mut forward_map: HashMap<usize, usize> = HashMap::new();
+    let mut reverse_map: HashMap<usize, usize> = HashMap::new();
+    for (this_pos, this_node) in this.iter().enumerate() {
+        if let Some(other_pos) = other
+            .iter()
+            .position(|other_node| item_matches(this_node, other_node))
+        {
+            forward_map.insert(this_pos, other_pos);
+            reverse_map.insert(other_pos, this_pos);
+        }
+    }
+    for (other_pos, other_node) in other.iter().enumerate() {
+        if reverse_map.contains_key(&other_pos) {
+            continue;
+        }
+        if let Some(this_pos) = this
+            .iter()
+            .position(|this_node| item_matches(this_node, other_node))
+        {
+            forward_map.insert(this_pos, other_pos);
+            reverse_map.insert(other_pos, this_pos);
+        }
+    }
+    (forward_map, reverse_map)
+}
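+
+// Illustration of the helpers above (not part of the comparison logic itself):
+// `vec_matches` is order-sensitive, while the set-style variants are not. For
+// example, with plain integer items compared by `==`:
+//
+//     vec_matches(&[1, 2, 3], &[3, 1, 2], |a, b| a == b);        // false
+//     vec_matches_as_set(&[1, 2, 3], &[3, 1, 2], |a, b| a == b); // true
+//
+// The set comparison is quadratic, which is assumed acceptable here because the
+// lists being compared (plan nodes, `requires` selections) are small.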
+// Returns a formatted mismatch message and an optional pair of mismatched positions if the pair
+// are the only remaining unmatched items.
+fn format_mismatch_as_set(
+    this_len: usize,
+    other_len: usize,
+    forward_map: &HashMap<usize, usize>,
+    reverse_map: &HashMap<usize, usize>,
+) -> Result<(String, Option<(usize, usize)>), std::fmt::Error> {
+    let mut ret = String::new();
+    let buf = &mut ret;
+    write!(buf, "- mapping from left to right: [")?;
+    let mut this_missing_pos = None;
+    for this_pos in 0..this_len {
+        if this_pos != 0 {
+            write!(buf, ", ")?;
+        }
+        if let Some(other_pos) = forward_map.get(&this_pos) {
+            write!(buf, "{}", other_pos)?;
+        } else {
+            this_missing_pos = Some(this_pos);
+            write!(buf, "?")?;
+        }
+    }
+    writeln!(buf, "]")?;
+
+    write!(buf, "- left-over on the right: [")?;
+    let mut other_missing_count = 0;
+    let mut other_missing_pos = None;
+    for other_pos in 0..other_len {
+        if reverse_map.get(&other_pos).is_none() {
+            if other_missing_count != 0 {
+                write!(buf, ", ")?;
+            }
+            other_missing_count += 1;
+            other_missing_pos = Some(other_pos);
+            write!(buf, "{}", other_pos)?;
+        }
+    }
+    write!(buf, "]")?;
+    let unmatched_pair = if let (Some(this_missing_pos), Some(other_missing_pos)) =
+        (this_missing_pos, other_missing_pos)
+    {
+        if this_len == 1 + forward_map.len() && other_len == 1 + reverse_map.len() {
+            // Special case: There is only one missing item on each side. They are supposed to
+            // match each other.
+            Some((this_missing_pos, other_missing_pos))
+        } else {
+            None
+        }
+    } else {
+        None
+    };
+    Ok((ret, unmatched_pair))
+}
+
+fn vec_matches_result_as_set<T>(
+    this: &[T],
+    other: &[T],
+    item_matches: impl Fn(&T, &T) -> Result<(), MatchFailure>,
+) -> Result<VecMapping, MatchFailure> {
+    // Set-inclusion test in both directions
+    // - record forward/reverse mapping from this items <-> other items for reporting mismatches
+    let (forward_map, reverse_map) =
+        vec_matches_as_set_with_mapping(this, other, |a, b| item_matches(a, b).is_ok());
+    if forward_map.len() == this.len() && reverse_map.len() == other.len() {
+        Ok((forward_map, reverse_map))
+    } else {
+        // report mismatch
+        let Ok((message, unmatched_pair)) =
+            format_mismatch_as_set(this.len(), other.len(), &forward_map, &reverse_map)
+        else {
+            // Exception: Unable to format mismatch report => fall back to the most generic message
+            return Err(MatchFailure::new(
+                "mismatch at vec_matches_result_as_set (failed to format mismatched sets)"
+                    .to_string(),
+            ));
+        };
+        if let Some(unmatched_pair) = unmatched_pair {
+            // found a unique pair to report => use that pair's error message
+            let Err(err) = item_matches(&this[unmatched_pair.0], &other[unmatched_pair.1]) else {
+                // Exception: Unable to format the unique pair's mismatch error => fall back to the overall report
+                return Err(MatchFailure::new(format!(
+                    "mismatched sets (failed to format unique pair mismatch error):\n{}",
+                    message
+                )));
+            };
+            Err(err.add_description(&format!(
+                "under a sole unmatched pair ({} -> {}) in a set comparison",
+                unmatched_pair.0, unmatched_pair.1
+            )))
+        } else {
+            Err(MatchFailure::new(format!("mismatched sets:\n{}", message)))
+        }
+    }
+}
+
+//==================================================================================================
+// PlanNode comparison functions
+
+fn option_to_string(name: Option<impl ToString>) -> String {
+    name.map_or_else(|| "".to_string(), |name| name.to_string())
+}
+
+fn plan_node_matches(this: &PlanNode, other: &PlanNode) -> Result<(), MatchFailure> {
+    match (this, other) {
+        (PlanNode::Sequence { nodes: this }, PlanNode::Sequence { nodes: other }) => {
+            vec_matches_result(this, other, plan_node_matches)
+                .map_err(|err| err.add_description("under Sequence node"))?;
+        }
+        (PlanNode::Parallel { nodes: this }, PlanNode::Parallel { nodes: other }) => {
+            vec_matches_result_as_set(this, other, plan_node_matches)
+                .map_err(|err| err.add_description("under Parallel node"))?;
+        }
+        (PlanNode::Fetch(this), PlanNode::Fetch(other)) => {
+            fetch_node_matches(this, other).map_err(|err| {
+                err.add_description(&format!(
+                    "under Fetch node (operation name: {})",
+                    option_to_string(this.operation_name.as_ref())
+                ))
+            })?;
+        }
+        (PlanNode::Flatten(this), PlanNode::Flatten(other)) => {
+            flatten_node_matches(this, other).map_err(|err| {
+                err.add_description(&format!("under Flatten node (path: {})", this.path))
+            })?;
+        }
+        (
+            PlanNode::Defer { primary, deferred },
+            PlanNode::Defer {
+                primary: other_primary,
+                deferred: other_deferred,
+            },
+        ) => {
+            defer_primary_node_matches(primary, other_primary)?;
+            vec_matches_result(deferred, other_deferred, deferred_node_matches)?;
+        }
+        (
+            PlanNode::Subscription { primary, rest },
+            PlanNode::Subscription {
+                primary: other_primary,
+                rest: other_rest,
+            },
+        ) => {
+            subscription_primary_matches(primary, other_primary)?;
+            opt_plan_node_matches(rest, other_rest)
+                .map_err(|err| err.add_description("under Subscription"))?;
+        }
+        (
+            PlanNode::Condition {
+                condition,
+                if_clause,
+                else_clause,
+            },
+            PlanNode::Condition {
+                condition: other_condition,
+                if_clause: other_if_clause,
+                else_clause: other_else_clause,
+            },
+        ) => {
+            check_match_eq!(condition, other_condition);
+            opt_plan_node_matches(if_clause, other_if_clause)
+                .map_err(|err| err.add_description("under Condition node (if_clause)"))?;
+            opt_plan_node_matches(else_clause, other_else_clause)
+                .map_err(|err| err.add_description("under Condition node (else_clause)"))?;
+        }
+        _ => {
+            return Err(MatchFailure::new(format!(
+                "mismatched plan node types\nleft: {:?}\nright: {:?}",
+                this, other
+            )))
+        }
+    };
+    Ok(())
+}
+
+pub(crate) fn opt_plan_node_matches(
+    this: &Option<impl Borrow<PlanNode>>,
+    other: &Option<impl Borrow<PlanNode>>,
+) -> Result<(), MatchFailure> {
+    match (this, other) {
+        (None, None) => Ok(()),
+        (None, Some(_)) | (Some(_), None) => Err(MatchFailure::new(format!(
+            "mismatch at opt_plan_node_matches\nleft: {:?}\nright: {:?}",
+            this.is_some(),
+            other.is_some()
+        ))),
+        (Some(this), Some(other)) => plan_node_matches(this.borrow(), other.borrow()),
+    }
+}
+
+fn fetch_node_matches(this: &FetchNode, other: &FetchNode) -> Result<(), MatchFailure> {
+    let FetchNode {
+        service_name,
+        requires,
+        variable_usages,
+        operation,
+        // ignored:
+        // reordered parallel fetches may have different names
+        operation_name: _,
+        operation_kind,
+        id,
+        input_rewrites,
+        output_rewrites,
+        context_rewrites,
+        // ignored
+        schema_aware_hash: _,
+        // ignored:
+        // when running in comparison mode, the rust plan node does not have
+        // the attached cache key metadata for authorisation, since the rust plan is
+        // not going to be the one being executed.
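+        //
+        // Note: the exhaustive destructuring of `this` (without a `..` rest
+        // pattern) means this comparison fails to compile when a field is added
+        // to `FetchNode`, so every new field must be explicitly matched or ignored.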
+ authorization: _, + } = this; + + check_match_eq!(*service_name, other.service_name); + check_match_eq!(*operation_kind, other.operation_kind); + check_match_eq!(*id, other.id); + check_match!(same_requires(requires, &other.requires)); + check_match!(vec_matches_sorted(variable_usages, &other.variable_usages)); + check_match!(same_rewrites(input_rewrites, &other.input_rewrites)); + check_match!(same_rewrites(output_rewrites, &other.output_rewrites)); + check_match!(same_rewrites(context_rewrites, &other.context_rewrites)); + operation_matches(operation, &other.operation)?; + Ok(()) +} + +fn subscription_primary_matches( + this: &SubscriptionNode, + other: &SubscriptionNode, +) -> Result<(), MatchFailure> { + let SubscriptionNode { + service_name, + variable_usages, + operation, + operation_name: _, // ignored (reordered parallel fetches may have different names) + operation_kind, + input_rewrites, + output_rewrites, + } = this; + check_match_eq!(*service_name, other.service_name); + check_match_eq!(*operation_kind, other.operation_kind); + check_match!(vec_matches_sorted(variable_usages, &other.variable_usages)); + check_match!(same_rewrites(input_rewrites, &other.input_rewrites)); + check_match!(same_rewrites(output_rewrites, &other.output_rewrites)); + operation_matches(operation, &other.operation)?; + Ok(()) +} + +fn defer_primary_node_matches(this: &Primary, other: &Primary) -> Result<(), MatchFailure> { + let Primary { subselection, node } = this; + opt_document_string_matches(subselection, &other.subselection) + .map_err(|err| err.add_description("under defer primary subselection"))?; + opt_plan_node_matches(node, &other.node) + .map_err(|err| err.add_description("under defer primary plan node")) +} + +fn deferred_node_matches(this: &DeferredNode, other: &DeferredNode) -> Result<(), MatchFailure> { + let DeferredNode { + depends, + label, + query_path, + subselection, + node, + } = this; + + check_match_eq!(*depends, other.depends); + check_match_eq!(*label, other.label); + check_match_eq!(*query_path, other.query_path); + opt_document_string_matches(subselection, &other.subselection) + .map_err(|err| err.add_description("under deferred subselection"))?; + opt_plan_node_matches(node, &other.node) + .map_err(|err| err.add_description("under deferred node")) +} + +fn flatten_node_matches(this: &FlattenNode, other: &FlattenNode) -> Result<(), MatchFailure> { + let FlattenNode { path, node } = this; + check_match!(same_path(path, &other.path)); + plan_node_matches(node, &other.node) +} + +fn same_path(this: &Path, other: &Path) -> bool { + // Ignore the empty key root from the JS query planner + match this.0.split_first() { + Some((PathElement::Key(k, type_conditions), rest)) + if k.is_empty() && type_conditions.is_none() => + { + vec_matches(rest, &other.0, same_path_element) + } + _ => vec_matches(&this.0, &other.0, same_path_element), + } +} + +fn same_path_element(this: &PathElement, other: &PathElement) -> bool { + match (this, other) { + (PathElement::Index(this), PathElement::Index(other)) => this == other, + (PathElement::Fragment(this), PathElement::Fragment(other)) => this == other, + ( + PathElement::Key(this_key, this_type_conditions), + PathElement::Key(other_key, other_type_conditions), + ) => { + this_key == other_key + && same_path_condition(this_type_conditions, other_type_conditions) + } + ( + PathElement::Flatten(this_type_conditions), + PathElement::Flatten(other_type_conditions), + ) => same_path_condition(this_type_conditions, other_type_conditions), + _ => false, 
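+        // Mismatched element kinds (e.g. an Index on one side and a Key on the
+        // other) never match.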
+    }
+}
+
+fn same_path_condition(this: &Option<Vec<String>>, other: &Option<Vec<String>>) -> bool {
+    match (this, other) {
+        (Some(this), Some(other)) => vec_matches_sorted(this, other),
+        (None, None) => true,
+        _ => false,
+    }
+}
+
+// Copied and modified from `apollo_federation::operation::SelectionKey`
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+enum SelectionKey {
+    Field {
+        /// The field alias (if specified) or field name in the resulting selection set.
+        response_name: Name,
+        directives: ast::DirectiveList,
+    },
+    FragmentSpread {
+        /// The name of the fragment.
+        fragment_name: Name,
+        directives: ast::DirectiveList,
+    },
+    InlineFragment {
+        /// The optional type condition of the fragment.
+        type_condition: Option<Name>,
+        directives: ast::DirectiveList,
+    },
+}
+
+fn get_selection_key(selection: &Selection) -> SelectionKey {
+    match selection {
+        Selection::Field(field) => SelectionKey::Field {
+            response_name: field.response_name().clone(),
+            directives: Default::default(),
+        },
+        Selection::InlineFragment(fragment) => SelectionKey::InlineFragment {
+            type_condition: fragment.type_condition.clone(),
+            directives: Default::default(),
+        },
+    }
+}
+
+fn hash_value<T: Hash>(x: &T) -> u64 {
+    let mut hasher = DefaultHasher::new();
+    x.hash(&mut hasher);
+    hasher.finish()
+}
+
+fn hash_selection_key(selection: &Selection) -> u64 {
+    hash_value(&get_selection_key(selection))
+}
+
+// Note: This `Selection` struct is a limited version used for the `requires` field.
+fn same_selection(x: &Selection, y: &Selection) -> bool {
+    match (x, y) {
+        (Selection::Field(x), Selection::Field(y)) => {
+            x.name == y.name
+                && x.alias == y.alias
+                && match (&x.selections, &y.selections) {
+                    (Some(x), Some(y)) => same_selection_set_sorted(x, y),
+                    (None, None) => true,
+                    _ => false,
+                }
+        }
+        (Selection::InlineFragment(x), Selection::InlineFragment(y)) => {
+            x.type_condition == y.type_condition
+                && same_selection_set_sorted(&x.selections, &y.selections)
+        }
+        _ => false,
+    }
+}
+
+fn same_selection_set_sorted(x: &[Selection], y: &[Selection]) -> bool {
+    fn sorted_by_selection_key(s: &[Selection]) -> Vec<&Selection> {
+        let mut sorted: Vec<&Selection> = s.iter().collect();
+        sorted.sort_by_key(|x| hash_selection_key(x));
+        sorted
+    }
+
+    if x.len() != y.len() {
+        return false;
+    }
+    sorted_by_selection_key(x)
+        .into_iter()
+        .zip(sorted_by_selection_key(y))
+        .all(|(x, y)| same_selection(x, y))
+}
+
+fn same_requires(x: &[Selection], y: &[Selection]) -> bool {
+    vec_matches_as_set(x, y, same_selection)
+}
+
+fn same_rewrites(x: &Option<Vec<DataRewrite>>, y: &Option<Vec<DataRewrite>>) -> bool {
+    match (x, y) {
+        (None, None) => true,
+        (Some(x), Some(y)) => vec_matches_as_set(x, y, |a, b| a == b),
+        _ => false,
+    }
+}
+
+fn operation_matches(
+    this: &SubgraphOperation,
+    other: &SubgraphOperation,
+) -> Result<(), MatchFailure> {
+    document_str_matches(this.as_serialized(), other.as_serialized())
+}
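+
+// Worked example: `operation_matches` accepts purely cosmetic differences, e.g.
+//
+//     left:  query($v: Int!) { a(arg: $v) b }
+//     right: query($v: Int!) { b a(arg: $v) }
+//
+// because both sides are parsed and compared AST-to-AST (order-insensitively)
+// rather than as strings.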
+// Compare operation document strings such as a query or just a selection set.
+fn document_str_matches(this: &str, other: &str) -> Result<(), MatchFailure> {
+    let this_ast = match ast::Document::parse(this, "this_operation.graphql") {
+        Ok(document) => document,
+        Err(_) => {
+            return Err(MatchFailure::new(
+                "Failed to parse this operation".to_string(),
+            ));
+        }
+    };
+    let other_ast = match ast::Document::parse(other, "other_operation.graphql") {
+        Ok(document) => document,
+        Err(_) => {
+            return Err(MatchFailure::new(
+                "Failed to parse other operation".to_string(),
+            ));
+        }
+    };
+    same_ast_document(&this_ast, &other_ast)
+}
+
+fn opt_document_string_matches(
+    this: &Option<String>,
+    other: &Option<String>,
+) -> Result<(), MatchFailure> {
+    match (this, other) {
+        (None, None) => Ok(()),
+        (Some(this_sel), Some(other_sel)) => document_str_matches(this_sel, other_sel),
+        _ => Err(MatchFailure::new(format!(
+            "mismatch at opt_document_string_matches\nleft: {:?}\nright: {:?}",
+            this, other
+        ))),
+    }
+}
+
+//==================================================================================================
+// AST comparison functions
+
+fn same_ast_document(x: &ast::Document, y: &ast::Document) -> Result<(), MatchFailure> {
+    fn split_definitions(
+        doc: &ast::Document,
+    ) -> (
+        Vec<&ast::OperationDefinition>,
+        Vec<&ast::FragmentDefinition>,
+        Vec<&ast::Definition>,
+    ) {
+        let mut operations: Vec<&ast::OperationDefinition> = Vec::new();
+        let mut fragments: Vec<&ast::FragmentDefinition> = Vec::new();
+        let mut others: Vec<&ast::Definition> = Vec::new();
+        for def in doc.definitions.iter() {
+            match def {
+                ast::Definition::OperationDefinition(op) => operations.push(op),
+                ast::Definition::FragmentDefinition(frag) => fragments.push(frag),
+                _ => others.push(def),
+            }
+        }
+        (operations, fragments, others)
+    }
+
+    let (x_ops, x_frags, x_others) = split_definitions(x);
+    let (y_ops, y_frags, y_others) = split_definitions(y);
+
+    debug_assert!(x_others.is_empty(), "Unexpected definition types");
+    debug_assert!(y_others.is_empty(), "Unexpected definition types");
+    debug_assert!(
+        x_ops.len() == y_ops.len(),
+        "Different number of operation definitions"
+    );
+
+    check_match_eq!(x_frags.len(), y_frags.len());
+    let mut fragment_map: HashMap<Name, Name> = HashMap::new();
+    // Assumption: x_frags and y_frags are topologically sorted.
+    // Thus, we can build the fragment name mapping in a single pass and compare
+    // fragment definitions using the mapping at the same time, since earlier fragments
+    // will never reference later fragments.
+    x_frags.iter().try_fold((), |_, x_frag| {
+        let y_frag = y_frags
+            .iter()
+            .find(|y_frag| same_ast_fragment_definition(x_frag, y_frag, &fragment_map).is_ok());
+        if let Some(y_frag) = y_frag {
+            if x_frag.name != y_frag.name {
+                // record it only if they are not identical
+                fragment_map.insert(x_frag.name.clone(), y_frag.name.clone());
+            }
+            Ok(())
+        } else {
+            Err(MatchFailure::new(format!(
+                "mismatch: no matching fragment definition for {}",
+                x_frag.name
+            )))
+        }
+    })?;
+
+    check_match_eq!(x_ops.len(), y_ops.len());
+    x_ops
+        .iter()
+        .zip(y_ops.iter())
+        .try_fold((), |_, (x_op, y_op)| {
+            same_ast_operation_definition(x_op, y_op, &fragment_map)
+                .map_err(|err| err.add_description("under operation definition"))
+        })?;
+    Ok(())
+}
+
+fn same_ast_operation_definition(
+    x: &ast::OperationDefinition,
+    y: &ast::OperationDefinition,
+    fragment_map: &HashMap<Name, Name>,
+) -> Result<(), MatchFailure> {
+    // Note: Operation names are ignored, since parallel fetches may have different names.
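+    // For example, `query Fetch_1($v: Int) { f(x: $v) }` and
+    // `query Fetch_2($v: Int) { f(x: $v) }` are considered the same operation.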
+    check_match_eq!(x.operation_type, y.operation_type);
+    vec_matches_result_sorted_by(
+        &x.variables,
+        &y.variables,
+        |a, b| a.name.cmp(&b.name),
+        |a, b| same_variable_definition(a, b),
+    )
+    .map_err(|err| err.add_description("under Variable definition"))?;
+    check_match_eq!(x.directives, y.directives);
+    check_match!(same_ast_selection_set_sorted(
+        &x.selection_set,
+        &y.selection_set,
+        fragment_map,
+    ));
+    Ok(())
+}
+
+// `x` may be coerced to `y`.
+// - `x` should be a value from JS QP.
+// - `y` should be a value from Rust QP.
+// - Assume: x and y are already checked not equal.
+// Due to coercion differences, we need to compare AST values with special cases.
+fn ast_value_maybe_coerced_to(x: &ast::Value, y: &ast::Value) -> bool {
+    match (x, y) {
+        // Special case 1: JS QP may convert an enum value into a string.
+        // - In this case, compare them as strings.
+        (ast::Value::String(ref x), ast::Value::Enum(ref y)) => {
+            if x == y.as_str() {
+                return true;
+            }
+        }
+
+        // Special case 2: Rust QP expands an object value by filling in its
+        // default field values.
+        // - If the Rust QP object value subsumes the JS QP object value, consider it a match.
+        // - Assuming the Rust QP object value has only default field values.
+        // - Warning: This is an unsound heuristic.
+        (ast::Value::Object(ref x), ast::Value::Object(ref y)) => {
+            if vec_includes_as_set(y, x, |(yy_name, yy_val), (xx_name, xx_val)| {
+                xx_name == yy_name
+                    && (xx_val == yy_val || ast_value_maybe_coerced_to(xx_val, yy_val))
+            }) {
+                return true;
+            }
+        }
+
+        // Special case 3: JS QP may convert a string to an int for custom scalars, while Rust doesn't.
+        // - Note: This conversion seems a bit difficult to implement in the `apollo-federation`'s
+        //   `coerce_value` function, since IntValue's constructor is private to the crate.
+        (ast::Value::Int(ref x), ast::Value::String(ref y)) => {
+            if x.as_str() == y {
+                return true;
+            }
+        }
+
+        // Recurse into list items.
+        (ast::Value::List(ref x), ast::Value::List(ref y)) => {
+            if vec_matches(x, y, |xx, yy| {
+                xx == yy || ast_value_maybe_coerced_to(xx, yy)
+            }) {
+                return true;
+            }
+        }
+
+        _ => {} // otherwise, fall through
+    }
+    false
+}
+
+// Use this function, instead of `VariableDefinition`'s `PartialEq` implementation,
+// due to known differences.
+fn same_variable_definition(
+    x: &ast::VariableDefinition,
+    y: &ast::VariableDefinition,
+) -> Result<(), MatchFailure> {
+    check_match_eq!(x.name, y.name);
+    check_match_eq!(x.ty, y.ty);
+    if x.default_value != y.default_value {
+        if let (Some(x), Some(y)) = (&x.default_value, &y.default_value) {
+            if ast_value_maybe_coerced_to(x, y) {
+                return Ok(());
+            }
+        }
+
+        return Err(MatchFailure::new(format!(
+            "mismatch between default values:\nleft: {:?}\nright: {:?}",
+            x.default_value, y.default_value
+        )));
+    }
+    check_match_eq!(x.directives, y.directives);
+    Ok(())
+}
+
+fn same_ast_fragment_definition(
+    x: &ast::FragmentDefinition,
+    y: &ast::FragmentDefinition,
+    fragment_map: &HashMap<Name, Name>,
+) -> Result<(), MatchFailure> {
+    // Note: Fragment names at definitions are ignored.
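+    // For example, `fragment f1 on T { x }` can match `fragment g1 on T { x }`;
+    // the caller (`same_ast_document`) records the `f1 -> g1` pairing in its
+    // fragment_map so that fragment spreads are then compared consistently.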
+    check_match_eq!(x.type_condition, y.type_condition);
+    check_match_eq!(x.directives, y.directives);
+    check_match!(same_ast_selection_set_sorted(
+        &x.selection_set,
+        &y.selection_set,
+        fragment_map,
+    ));
+    Ok(())
+}
+
+fn same_ast_argument_value(x: &ast::Value, y: &ast::Value) -> bool {
+    x == y || ast_value_maybe_coerced_to(x, y)
+}
+
+fn same_ast_argument(x: &ast::Argument, y: &ast::Argument) -> bool {
+    x.name == y.name && same_ast_argument_value(&x.value, &y.value)
+}
+
+fn same_ast_arguments(x: &[Node<ast::Argument>], y: &[Node<ast::Argument>]) -> bool {
+    vec_matches_sorted_by(
+        x,
+        y,
+        |a, b| a.name.cmp(&b.name),
+        |a, b| same_ast_argument(a, b),
+    )
+}
+
+fn same_directives(x: &ast::DirectiveList, y: &ast::DirectiveList) -> bool {
+    vec_matches_sorted_by(
+        x,
+        y,
+        |a, b| a.name.cmp(&b.name),
+        |a, b| a.name == b.name && same_ast_arguments(&a.arguments, &b.arguments),
+    )
+}
+
+fn get_ast_selection_key(
+    selection: &ast::Selection,
+    fragment_map: &HashMap<Name, Name>,
+) -> SelectionKey {
+    match selection {
+        ast::Selection::Field(field) => SelectionKey::Field {
+            response_name: field.response_name().clone(),
+            directives: field.directives.clone(),
+        },
+        ast::Selection::FragmentSpread(fragment) => SelectionKey::FragmentSpread {
+            fragment_name: fragment_map
+                .get(&fragment.fragment_name)
+                .unwrap_or(&fragment.fragment_name)
+                .clone(),
+            directives: fragment.directives.clone(),
+        },
+        ast::Selection::InlineFragment(fragment) => SelectionKey::InlineFragment {
+            type_condition: fragment.type_condition.clone(),
+            directives: fragment.directives.clone(),
+        },
+    }
+}
+
+fn same_ast_selection(
+    x: &ast::Selection,
+    y: &ast::Selection,
+    fragment_map: &HashMap<Name, Name>,
+) -> bool {
+    match (x, y) {
+        (ast::Selection::Field(x), ast::Selection::Field(y)) => {
+            x.name == y.name
+                && x.alias == y.alias
+                && same_ast_arguments(&x.arguments, &y.arguments)
+                && same_directives(&x.directives, &y.directives)
+                && same_ast_selection_set_sorted(&x.selection_set, &y.selection_set, fragment_map)
+        }
+        (ast::Selection::FragmentSpread(x), ast::Selection::FragmentSpread(y)) => {
+            let mapped_fragment_name = fragment_map
+                .get(&x.fragment_name)
+                .unwrap_or(&x.fragment_name);
+            *mapped_fragment_name == y.fragment_name
+                && same_directives(&x.directives, &y.directives)
+        }
+        (ast::Selection::InlineFragment(x), ast::Selection::InlineFragment(y)) => {
+            x.type_condition == y.type_condition
+                && same_directives(&x.directives, &y.directives)
+                && same_ast_selection_set_sorted(&x.selection_set, &y.selection_set, fragment_map)
+        }
+        _ => false,
+    }
+}
+
+fn hash_ast_selection_key(selection: &ast::Selection, fragment_map: &HashMap<Name, Name>) -> u64 {
+    hash_value(&get_ast_selection_key(selection, fragment_map))
+}
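+
+// Illustration: because arguments and directives are compared after sorting by
+// name, `f(a: 1, b: 2) @skip(if: false) @include(if: true)` matches
+// `f(b: 2, a: 1) @include(if: true) @skip(if: false)` in `same_ast_selection`.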
+// Selections are sorted and compared after renaming x's fragment spreads according to the
+// fragment_map.
+fn same_ast_selection_set_sorted(
+    x: &[ast::Selection],
+    y: &[ast::Selection],
+    fragment_map: &HashMap<Name, Name>,
+) -> bool {
+    fn sorted_by_selection_key<'a>(
+        s: &'a [ast::Selection],
+        fragment_map: &HashMap<Name, Name>,
+    ) -> Vec<&'a ast::Selection> {
+        let mut sorted: Vec<&ast::Selection> = s.iter().collect();
+        sorted.sort_by_key(|x| hash_ast_selection_key(x, fragment_map));
+        sorted
+    }
+
+    if x.len() != y.len() {
+        return false;
+    }
+    let x_sorted = sorted_by_selection_key(x, fragment_map); // Map fragment spreads
+    let y_sorted = sorted_by_selection_key(y, &Default::default()); // Don't map fragment spreads
+    x_sorted
+        .into_iter()
+        .zip(y_sorted)
+        .all(|(x, y)| same_ast_selection(x, y, fragment_map))
+}
+
+//==================================================================================================
+// Unit tests
+
+#[cfg(test)]
+mod ast_comparison_tests {
+    use super::*;
+
+    #[test]
+    fn test_query_variable_decl_order() {
+        let op_x = r#"query($qv2: String!, $qv1: Int!) { x(arg1: $qv1, arg2: $qv2) }"#;
+        let op_y = r#"query($qv1: Int!, $qv2: String!) { x(arg1: $qv1, arg2: $qv2) }"#;
+        let ast_x = ast::Document::parse(op_x, "op_x").unwrap();
+        let ast_y = ast::Document::parse(op_y, "op_y").unwrap();
+        assert!(super::same_ast_document(&ast_x, &ast_y).is_ok());
+    }
+
+    #[test]
+    fn test_query_variable_decl_enum_value_coercion() {
+        // Note: JS QP converts enum default values into strings.
+        let op_x = r#"query($qv1: E! = "default_value") { x(arg1: $qv1) }"#;
+        let op_y = r#"query($qv1: E! = default_value) { x(arg1: $qv1) }"#;
+        let ast_x = ast::Document::parse(op_x, "op_x").unwrap();
+        let ast_y = ast::Document::parse(op_y, "op_y").unwrap();
+        assert!(super::same_ast_document(&ast_x, &ast_y).is_ok());
+    }
+
+    #[test]
+    fn test_query_variable_decl_object_value_coercion_empty_case() {
+        // Note: Rust QP expands an empty object default value by filling in its default
+        // field values.
+        let op_x = r#"query($qv1: T! = {}) { x(arg1: $qv1) }"#;
+        let op_y =
+            r#"query($qv1: T! = { field1: true, field2: "default_value" }) { x(arg1: $qv1) }"#;
+        let ast_x = ast::Document::parse(op_x, "op_x").unwrap();
+        let ast_y = ast::Document::parse(op_y, "op_y").unwrap();
+        assert!(super::same_ast_document(&ast_x, &ast_y).is_ok());
+    }
+
+    #[test]
+    fn test_query_variable_decl_object_value_coercion_non_empty_case() {
+        // Note: Rust QP expands an object default value by filling in its default field values.
+        let op_x = r#"query($qv1: T! = {field1: true}) { x(arg1: $qv1) }"#;
+        let op_y =
+            r#"query($qv1: T! = { field1: true, field2: "default_value" }) { x(arg1: $qv1) }"#;
+        let ast_x = ast::Document::parse(op_x, "op_x").unwrap();
+        let ast_y = ast::Document::parse(op_y, "op_y").unwrap();
+        assert!(super::same_ast_document(&ast_x, &ast_y).is_ok());
+    }
+
+    #[test]
+    fn test_query_variable_decl_list_of_object_value_coercion() {
+        // Testing a combination of list and object value coercion.
+        let op_x = r#"query($qv1: [T!]! = [{}]) { x(arg1: $qv1) }"#;
+        let op_y =
+            r#"query($qv1: [T!]! = [{field1: true, field2: "default_value"}]) { x(arg1: $qv1) }"#;
+        let ast_x = ast::Document::parse(op_x, "op_x").unwrap();
+        let ast_y = ast::Document::parse(op_y, "op_y").unwrap();
+        assert!(super::same_ast_document(&ast_x, &ast_y).is_ok());
+    }
+
+    #[test]
+    fn test_entities_selection_order() {
+        let op_x = r#"
+            query subgraph1__1($representations: [_Any!]!) {
+                _entities(representations: $representations) { x { w } y }
+            }
+            "#;
+        let op_y = r#"
+            query subgraph1__1($representations: [_Any!]!)
{ + _entities(representations: $representations) { y x { w } } + } + "#; + let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); + let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); + } + + #[test] + fn test_top_level_selection_order() { + let op_x = r#"{ x { w z } y }"#; + let op_y = r#"{ y x { z w } }"#; + let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); + let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); + } + + #[test] + fn test_fragment_definition_order() { + let op_x = r#"{ q { ...f1 ...f2 } } fragment f1 on T { x y } fragment f2 on T { w z }"#; + let op_y = r#"{ q { ...f1 ...f2 } } fragment f2 on T { w z } fragment f1 on T { x y }"#; + let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); + let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); + } + + #[test] + fn test_selection_argument_is_compared() { + let op_x = r#"{ x(arg1: "one") }"#; + let op_y = r#"{ x(arg1: "two") }"#; + let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); + let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); + assert!(super::same_ast_document(&ast_x, &ast_y).is_err()); + } + + #[test] + fn test_selection_argument_order() { + let op_x = r#"{ x(arg1: "one", arg2: "two") }"#; + let op_y = r#"{ x(arg2: "two", arg1: "one") }"#; + let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); + let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); + } + + #[test] + fn test_selection_directive_order() { + let op_x = r#"{ x @include(if:true) @skip(if:false) }"#; + let op_y = r#"{ x @skip(if:false) @include(if:true) }"#; + let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); + let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); + } + + #[test] + fn test_string_to_id_coercion_difference() { + // JS QP coerces strings into integer for ID type, while Rust QP doesn't. + // This tests a special case that same_ast_document accepts this difference. + let op_x = r#"{ x(id: 123) }"#; + let op_y = r#"{ x(id: "123") }"#; + let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); + let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); + } + + #[test] + fn test_fragment_definition_different_names() { + let op_x = r#"{ q { ...f1 ...f2 } } fragment f1 on T { x y } fragment f2 on T { w z }"#; + let op_y = r#"{ q { ...g1 ...g2 } } fragment g1 on T { x y } fragment g2 on T { w z }"#; + let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); + let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); + } + + #[test] + fn test_fragment_definition_different_names_nested_1() { + // Nested fragments have the same name, only top-level fragments have different names. + let op_x = r#"{ q { ...f2 } } fragment f1 on T { x y } fragment f2 on T { z ...f1 }"#; + let op_y = r#"{ q { ...g2 } } fragment f1 on T { x y } fragment g2 on T { z ...f1 }"#; + let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); + let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); + } + + #[test] + fn test_fragment_definition_different_names_nested_2() { + // Nested fragments have different names. 
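+        // (The fragment_map built for this pair is expected to rename f2 -> g2 and,
+        // transitively, f1 -> g1 before selections are compared.)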
+ let op_x = r#"{ q { ...f2 } } fragment f1 on T { x y } fragment f2 on T { z ...f1 }"#; + let op_y = r#"{ q { ...g2 } } fragment g1 on T { x y } fragment g2 on T { z ...g1 }"#; + let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); + let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); + } + + #[test] + fn test_fragment_definition_different_names_nested_3() { + // Nested fragments have different names. + // Also, fragment definitions are in different order. + let op_x = r#"{ q { ...f2 ...f3 } } fragment f1 on T { x y } fragment f2 on T { z ...f1 } fragment f3 on T { w } "#; + let op_y = r#"{ q { ...g2 ...g3 } } fragment g1 on T { x y } fragment g2 on T { w } fragment g3 on T { z ...g1 }"#; + let ast_x = ast::Document::parse(op_x, "op_x").unwrap(); + let ast_y = ast::Document::parse(op_y, "op_y").unwrap(); + assert!(super::same_ast_document(&ast_x, &ast_y).is_ok()); + } +} + +#[cfg(test)] +mod qp_selection_comparison_tests { + use serde_json::json; + + use super::*; + + #[test] + fn test_requires_comparison_with_same_selection_key() { + let requires_json = json!([ + { + "kind": "InlineFragment", + "typeCondition": "T", + "selections": [ + { + "kind": "Field", + "name": "id", + }, + ] + }, + { + "kind": "InlineFragment", + "typeCondition": "T", + "selections": [ + { + "kind": "Field", + "name": "id", + }, + { + "kind": "Field", + "name": "job", + } + ] + }, + ]); + + // The only difference between requires1 and requires2 is the order of selections. + // But, their items all have the same SelectionKey. + let requires1: Vec = serde_json::from_value(requires_json).unwrap(); + let requires2: Vec = requires1.iter().rev().cloned().collect(); + + // `same_selection_set_sorted` fails to match, since it doesn't account for + // two items with the same SelectionKey but in different order. + assert!(!same_selection_set_sorted(&requires1, &requires2)); + // `same_requires` should succeed. + assert!(same_requires(&requires1, &requires2)); + } +} + +#[cfg(test)] +mod path_comparison_tests { + use serde_json::json; + + use super::*; + + macro_rules! matches_deserialized_path { + ($json:expr, $expected:expr) => { + let path: Path = serde_json::from_value($json).unwrap(); + assert_eq!(path, $expected); + }; + } + + #[test] + fn test_type_condition_deserialization() { + matches_deserialized_path!( + json!(["k"]), + Path(vec![PathElement::Key("k".to_string(), None)]) + ); + matches_deserialized_path!( + json!(["k|[A]"]), + Path(vec![PathElement::Key( + "k".to_string(), + Some(vec!["A".to_string()]) + )]) + ); + matches_deserialized_path!( + json!(["k|[A,B]"]), + Path(vec![PathElement::Key( + "k".to_string(), + Some(vec!["A".to_string(), "B".to_string()]) + )]) + ); + matches_deserialized_path!( + json!(["k|[]"]), + Path(vec![PathElement::Key("k".to_string(), Some(vec![]))]) + ); + } + + macro_rules! assert_path_match { + ($a:expr, $b:expr) => { + let legacy_path: Path = serde_json::from_value($a).unwrap(); + let native_path: Path = serde_json::from_value($b).unwrap(); + assert!(same_path(&legacy_path, &native_path)); + }; + } + + macro_rules! assert_path_differ { + ($a:expr, $b:expr) => { + let legacy_path: Path = serde_json::from_value($a).unwrap(); + let native_path: Path = serde_json::from_value($b).unwrap(); + assert!(!same_path(&legacy_path, &native_path)); + }; + } + + #[test] + fn test_same_path_basic() { + // Basic dis-equality tests. 
+ assert_path_differ!(json!([]), json!(["a"])); + assert_path_differ!(json!(["a"]), json!(["b"])); + assert_path_differ!(json!(["a", "b"]), json!(["a", "b", "c"])); + } + + #[test] + fn test_same_path_ignore_empty_root_key() { + assert_path_match!(json!(["", "k|[A]", "v"]), json!(["k|[A]", "v"])); + } + + #[test] + fn test_same_path_distinguishes_empty_conditions_from_no_conditions() { + // Create paths that use no type conditions and empty type conditions + assert_path_differ!(json!(["k|[]", "v"]), json!(["k", "v"])); + } +} diff --git a/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap b/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap index 16ba934103..7b2e0c912f 100644 --- a/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap +++ b/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap @@ -15,7 +15,7 @@ Fetch( output_rewrites: None, context_rewrites: None, schema_aware_hash: QueryHash( - "5c5036eef33484e505dd5a8666fd0a802e60d830964a4dbbf662526398563ffd", + "65e550250ef331b8dc49d9e2da8f4cd5add979720cbe83ba545a0f78ece8d329", ), authorization: CacheKeyMetadata { is_authenticated: false, diff --git a/apollo-router/src/router/event/mod.rs b/apollo-router/src/router/event/mod.rs index 2645ad4755..aa98558b6b 100644 --- a/apollo-router/src/router/event/mod.rs +++ b/apollo-router/src/router/event/mod.rs @@ -22,6 +22,7 @@ use self::Event::UpdateConfiguration; use self::Event::UpdateLicense; use self::Event::UpdateSchema; use crate::uplink::license_enforcement::LicenseState; +use crate::uplink::schema::SchemaState; use crate::Configuration; /// Messages that are broadcast across the app. @@ -33,7 +34,7 @@ pub(crate) enum Event { NoMoreConfiguration, /// The schema was updated. 
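+    /// (The payload is now a [`SchemaState`] carrying the supergraph SDL plus an
+    /// optional launch id.)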
- UpdateSchema(String), + UpdateSchema(SchemaState), /// There are no more updates to the schema NoMoreSchema, diff --git a/apollo-router/src/router/event/schema.rs b/apollo-router/src/router/event/schema.rs index 229992fa68..f43e3dea4e 100644 --- a/apollo-router/src/router/event/schema.rs +++ b/apollo-router/src/router/event/schema.rs @@ -11,6 +11,7 @@ use url::Url; use crate::router::Event; use crate::router::Event::NoMoreSchema; use crate::router::Event::UpdateSchema; +use crate::uplink::schema::SchemaState; use crate::uplink::schema_stream::SupergraphSdlQuery; use crate::uplink::stream_from_uplink; use crate::uplink::UplinkConfig; @@ -74,9 +75,20 @@ impl SchemaSource { pub(crate) fn into_stream(self) -> impl Stream { match self { SchemaSource::Static { schema_sdl: schema } => { - stream::once(future::ready(UpdateSchema(schema))).boxed() + let update_schema = UpdateSchema(SchemaState { + sdl: schema, + launch_id: None, + }); + stream::once(future::ready(update_schema)).boxed() } - SchemaSource::Stream(stream) => stream.map(UpdateSchema).boxed(), + SchemaSource::Stream(stream) => stream + .map(|sdl| { + UpdateSchema(SchemaState { + sdl, + launch_id: None, + }) + }) + .boxed(), #[allow(deprecated)] SchemaSource::File { path, @@ -100,7 +112,13 @@ impl SchemaSource { let path = path.clone(); async move { match tokio::fs::read_to_string(&path).await { - Ok(schema) => Some(UpdateSchema(schema)), + Ok(schema) => { + let update_schema = UpdateSchema(SchemaState { + sdl: schema, + launch_id: None, + }); + Some(update_schema) + } Err(err) => { tracing::error!(reason = %err, "failed to read supergraph schema"); None @@ -110,7 +128,11 @@ impl SchemaSource { }) .boxed() } else { - stream::once(future::ready(UpdateSchema(schema))).boxed() + let update_schema = UpdateSchema(SchemaState { + sdl: schema, + launch_id: None, + }); + stream::once(future::ready(update_schema)).boxed() } } Err(err) => { @@ -121,10 +143,13 @@ impl SchemaSource { } } SchemaSource::Registry(uplink_config) => { - stream_from_uplink::(uplink_config) + stream_from_uplink::(uplink_config) .filter_map(|res| { future::ready(match res { - Ok(schema) => Some(UpdateSchema(schema)), + Ok(schema) => { + let update_schema = UpdateSchema(schema); + Some(update_schema) + } Err(e) => { tracing::error!("{}", e); None @@ -222,7 +247,13 @@ impl Fetcher { .await { Ok(res) if res.status().is_success() => match res.text().await { - Ok(schema) => return Some(UpdateSchema(schema)), + Ok(schema) => { + let update_schema = UpdateSchema(SchemaState { + sdl: schema, + launch_id: None, + }); + return Some(update_schema); + } Err(err) => { tracing::warn!( url.full = %url, @@ -346,10 +377,10 @@ mod tests { .into_stream(); assert!( - matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema == SCHEMA_1) + matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema.sdl == SCHEMA_1) ); assert!( - matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema == SCHEMA_1) + matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema.sdl == SCHEMA_1) ); } .with_subscriber(assert_snapshot_subscriber!()) @@ -382,10 +413,10 @@ mod tests { .into_stream(); assert!( - matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema == SCHEMA_2) + matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema.sdl == SCHEMA_2) ); assert!( - matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema == SCHEMA_2) + matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema.sdl == SCHEMA_2) ); } 
.with_subscriber(assert_snapshot_subscriber!({ @@ -448,7 +479,7 @@ mod tests { .await; assert!( - matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema == SCHEMA_1) + matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema.sdl == SCHEMA_1) ); drop(success); @@ -468,7 +499,7 @@ mod tests { .await; assert!( - matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema == SCHEMA_1) + matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema.sdl == SCHEMA_1) ); } .with_subscriber(assert_snapshot_subscriber!({ @@ -497,7 +528,7 @@ mod tests { .into_stream(); assert!( - matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema == SCHEMA_1) + matches!(stream.next().await.unwrap(), UpdateSchema(schema) if schema.sdl == SCHEMA_1) ); assert!(matches!(stream.next().await.unwrap(), NoMoreSchema)); } diff --git a/apollo-router/src/router/mod.rs b/apollo-router/src/router/mod.rs index bd5461a046..030e9797eb 100644 --- a/apollo-router/src/router/mod.rs +++ b/apollo-router/src/router/mod.rs @@ -354,6 +354,7 @@ mod tests { use crate::router::Event::UpdateLicense; use crate::router::Event::UpdateSchema; use crate::uplink::license_enforcement::LicenseState; + use crate::uplink::schema::SchemaState; use crate::Configuration; fn init_with_server() -> RouterHttpServer { @@ -417,7 +418,10 @@ mod tests { .await .unwrap(); router_handle - .send_event(UpdateSchema(schema.to_string())) + .send_event(UpdateSchema(SchemaState { + sdl: schema.to_string(), + launch_id: None, + })) .await .unwrap(); router_handle @@ -460,9 +464,10 @@ mod tests { .await .unwrap(); router_handle - .send_event(UpdateSchema( - include_str!("../testdata/supergraph_missing_name.graphql").to_string(), - )) + .send_event(UpdateSchema(SchemaState { + sdl: include_str!("../testdata/supergraph_missing_name.graphql").to_string(), + launch_id: None, + })) .await .unwrap(); router_handle @@ -502,9 +507,10 @@ mod tests { // let's update the schema to add the field router_handle - .send_event(UpdateSchema( - include_str!("../testdata/supergraph.graphql").to_string(), - )) + .send_event(UpdateSchema(SchemaState { + sdl: include_str!("../testdata/supergraph.graphql").to_string(), + launch_id: None, + })) .await .unwrap(); @@ -528,9 +534,10 @@ mod tests { // let's go back and remove the field router_handle - .send_event(UpdateSchema( - include_str!("../testdata/supergraph_missing_name.graphql").to_string(), - )) + .send_event(UpdateSchema(SchemaState { + sdl: include_str!("../testdata/supergraph_missing_name.graphql").to_string(), + launch_id: None, + })) .await .unwrap(); diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index ca40bd0a86..d34eae8b45 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -516,6 +516,7 @@ pub(crate) async fn add_plugin( schema_id: Arc, supergraph_schema: Arc>, subgraph_schemas: Arc>>>, + launch_id: Option>, notify: &crate::notification::Notify, plugin_instances: &mut Plugins, errors: &mut Vec, @@ -528,6 +529,7 @@ pub(crate) async fn add_plugin( .supergraph_schema_id(schema_id) .supergraph_schema(supergraph_schema) .subgraph_schemas(subgraph_schemas) + .launch_id(launch_id) .notify(notify.clone()) .build(), ) @@ -585,6 +587,7 @@ pub(crate) async fn create_plugins( supergraph_schema_id.clone(), supergraph_schema.clone(), subgraph_schemas.clone(), + schema.launch_id.clone(), &configuration.notify.clone(), &mut plugin_instances, &mut errors, @@ -678,6 +681,7 @@ pub(crate) async fn 
create_plugins(
     }
     add_mandatory_apollo_plugin!("limits");
     add_mandatory_apollo_plugin!("traffic_shaping");
+    add_mandatory_apollo_plugin!("fleet_detector");
     add_optional_apollo_plugin!("forbid_mutations");
     add_optional_apollo_plugin!("subscription");
     add_optional_apollo_plugin!("override_subgraph_url");
diff --git a/apollo-router/src/services/external.rs b/apollo-router/src/services/external.rs
index c356ddd39c..58bc24832d 100644
--- a/apollo-router/src/services/external.rs
+++ b/apollo-router/src/services/external.rs
@@ -1,4 +1,5 @@
-#![allow(missing_docs)] // FIXME
+//! Structures for externalised data, communicating the state of the router pipeline at the
+//! different stages.
 
 use std::collections::HashMap;
 use std::fmt::Debug;
diff --git a/apollo-router/src/services/http/service.rs b/apollo-router/src/services/http/service.rs
index d629412f0c..b8bfb38f04 100644
--- a/apollo-router/src/services/http/service.rs
+++ b/apollo-router/src/services/http/service.rs
@@ -233,6 +233,7 @@ impl tower::Service<HttpRequest> for HttpClientService {
         let HttpRequest {
             mut http_request,
             context,
+            ..
         } = request;
 
         let schema_uri = http_request.uri();
diff --git a/apollo-router/src/services/layers/persisted_queries/manifest_poller.rs b/apollo-router/src/services/layers/persisted_queries/manifest_poller.rs
index 7051ec97be..611c68b832 100644
--- a/apollo-router/src/services/layers/persisted_queries/manifest_poller.rs
+++ b/apollo-router/src/services/layers/persisted_queries/manifest_poller.rs
@@ -21,8 +21,25 @@ use crate::uplink::stream_from_uplink_transforming_new_response;
 use crate::uplink::UplinkConfig;
 use crate::Configuration;
 
+/// The full identifier for an operation in a PQ list consists of an operation
+/// ID and an optional client name.
+#[derive(Debug, Clone, Eq, Hash, PartialEq)]
+pub struct FullPersistedQueryOperationId {
+    /// The operation ID (usually a hash).
+    pub operation_id: String,
+    /// The client name associated with the operation; if None, can be any client.
+    pub client_name: Option<String>,
+}
+
 /// An in-memory cache of persisted queries.
-pub(crate) type PersistedQueryManifest = HashMap<String, String>;
+pub type PersistedQueryManifest = HashMap<FullPersistedQueryOperationId, String>;
+
+/// Describes whether the router should allow or deny a given request, and
+/// whether it should log the operation as unknown.
+pub(crate) struct FreeformGraphQLAction {
+    pub(crate) should_allow: bool,
+    pub(crate) should_log: bool,
+}
 
 /// How the router should respond to requests that are not resolved as the IDs
 /// of an operation in the manifest. (For the most part this means "requests
@@ -48,49 +65,43 @@ pub(crate) enum FreeformGraphQLBehavior {
     },
 }
 
-/// Describes what the router should do for a given request: allow it, deny it
-/// with an error, or allow it but log the operation as unknown.
-pub(crate) enum FreeformGraphQLAction {
-    Allow,
-    Deny,
-    AllowAndLog,
-    DenyAndLog,
-}
-
 impl FreeformGraphQLBehavior {
     fn action_for_freeform_graphql(
         &self,
         ast: Result<&ast::Document, &str>,
     ) -> FreeformGraphQLAction {
         match self {
-            FreeformGraphQLBehavior::AllowAll { .. } => FreeformGraphQLAction::Allow,
+            FreeformGraphQLBehavior::AllowAll { .. } => FreeformGraphQLAction {
+                should_allow: true,
+                should_log: false,
+            },
             // Note that this branch doesn't get called in practice, because we catch
             // DenyAll at an earlier phase with never_allows_freeform_graphql.
-            FreeformGraphQLBehavior::DenyAll { log_unknown, .. 
} => { - if *log_unknown { - FreeformGraphQLAction::DenyAndLog - } else { - FreeformGraphQLAction::Deny - } - } + FreeformGraphQLBehavior::DenyAll { log_unknown, .. } => FreeformGraphQLAction { + should_allow: false, + should_log: *log_unknown, + }, FreeformGraphQLBehavior::AllowIfInSafelist { safelist, log_unknown, .. } => { if safelist.is_allowed(ast) { - FreeformGraphQLAction::Allow - } else if *log_unknown { - FreeformGraphQLAction::DenyAndLog + FreeformGraphQLAction { + should_allow: true, + should_log: false, + } } else { - FreeformGraphQLAction::Deny + FreeformGraphQLAction { + should_allow: false, + should_log: *log_unknown, + } } } FreeformGraphQLBehavior::LogUnlessInSafelist { safelist, .. } => { - if safelist.is_allowed(ast) { - FreeformGraphQLAction::Allow - } else { - FreeformGraphQLAction::AllowAndLog + FreeformGraphQLAction { + should_allow: true, + should_log: !safelist.is_allowed(ast), } } } @@ -212,7 +223,7 @@ impl PersistedQueryManifestPoller { if manifest_files.is_empty() { return Err("no local persisted query list files specified".into()); } - let mut manifest: HashMap = PersistedQueryManifest::new(); + let mut manifest = PersistedQueryManifest::new(); for local_pq_list in manifest_files { tracing::info!( @@ -250,7 +261,13 @@ impl PersistedQueryManifestPoller { } for operation in manifest_file.operations { - manifest.insert(operation.id, operation.body); + manifest.insert( + FullPersistedQueryOperationId { + operation_id: operation.id, + client_name: operation.client_name, + }, + operation.body, + ); } } @@ -343,15 +360,35 @@ impl PersistedQueryManifestPoller { } } - pub(crate) fn get_operation_body(&self, persisted_query_id: &str) -> Option { + pub(crate) fn get_operation_body( + &self, + persisted_query_id: &str, + client_name: Option, + ) -> Option { let state = self .state .read() .expect("could not acquire read lock on persisted query manifest state"); - state + if let Some(body) = state .persisted_query_manifest - .get(persisted_query_id) + .get(&FullPersistedQueryOperationId { + operation_id: persisted_query_id.to_string(), + client_name: client_name.clone(), + }) .cloned() + { + Some(body) + } else if client_name.is_some() { + state + .persisted_query_manifest + .get(&FullPersistedQueryOperationId { + operation_id: persisted_query_id.to_string(), + client_name: None, + }) + .cloned() + } else { + None + } } pub(crate) fn get_all_operations(&self) -> Vec { @@ -588,7 +625,13 @@ async fn add_chunk_to_operations( match fetch_chunk(http_client.clone(), chunk_url).await { Ok(chunk) => { for operation in chunk.operations { - operations.insert(operation.id, operation.body); + operations.insert( + FullPersistedQueryOperationId { + operation_id: operation.id, + client_name: operation.client_name, + }, + operation.body, + ); } return Ok(()); } @@ -674,9 +717,11 @@ pub(crate) struct SignedUrlChunk { /// A single operation containing an ID and a body, #[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] pub(crate) struct Operation { pub(crate) id: String, pub(crate) body: String, + pub(crate) client_name: Option, } #[cfg(test)] @@ -701,7 +746,7 @@ mod tests { ) .await .unwrap(); - assert_eq!(manifest_manager.get_operation_body(&id), Some(body)) + assert_eq!(manifest_manager.get_operation_body(&id, None), Some(body)) } #[tokio::test(flavor = "multi_thread")] @@ -734,18 +779,26 @@ mod tests { ) .await .unwrap(); - assert_eq!(manifest_manager.get_operation_body(&id), Some(body)) + assert_eq!(manifest_manager.get_operation_body(&id, None), 
Some(body)) } #[test] fn safelist_body_normalization() { - let safelist = FreeformGraphQLSafelist::new(&PersistedQueryManifest::from([( - "valid-syntax".to_string(), - "fragment A on T { a } query SomeOp { ...A ...B } fragment,,, B on U{b c } # yeah" - .to_string(), - ), ( - "invalid-syntax".to_string(), - "}}}".to_string()), + let safelist = FreeformGraphQLSafelist::new(&PersistedQueryManifest::from([ + ( + FullPersistedQueryOperationId { + operation_id: "valid-syntax".to_string(), + client_name: None, + }, + "fragment A on T { a } query SomeOp { ...A ...B } fragment,,, B on U{b c } # yeah".to_string(), + ), + ( + FullPersistedQueryOperationId { + operation_id: "invalid-syntax".to_string(), + client_name: None, + }, + "}}}".to_string(), + ), ])); let is_allowed = |body: &str| -> bool { @@ -795,6 +848,6 @@ mod tests { ) .await .unwrap(); - assert_eq!(manifest_manager.get_operation_body(&id), Some(body)) + assert_eq!(manifest_manager.get_operation_body(&id, None), Some(body)) } } diff --git a/apollo-router/src/services/layers/persisted_queries/mod.rs b/apollo-router/src/services/layers/persisted_queries/mod.rs index 0444590958..ae849a6170 100644 --- a/apollo-router/src/services/layers/persisted_queries/mod.rs +++ b/apollo-router/src/services/layers/persisted_queries/mod.rs @@ -8,17 +8,22 @@ use http::header::CACHE_CONTROL; use http::HeaderValue; use http::StatusCode; use id_extractor::PersistedQueryIdExtractor; +pub use manifest_poller::FullPersistedQueryOperationId; +pub use manifest_poller::PersistedQueryManifest; pub(crate) use manifest_poller::PersistedQueryManifestPoller; use tower::BoxError; -use self::manifest_poller::FreeformGraphQLAction; use super::query_analysis::ParsedDocument; use crate::graphql::Error as GraphQLError; +use crate::plugins::telemetry::CLIENT_NAME; use crate::services::SupergraphRequest; use crate::services::SupergraphResponse; use crate::Configuration; const DONT_CACHE_RESPONSE_VALUE: &str = "private, no-cache, must-revalidate"; +const PERSISTED_QUERIES_CLIENT_NAME_CONTEXT_KEY: &str = "apollo_persisted_queries::client_name"; +const PERSISTED_QUERIES_SAFELIST_SKIP_ENFORCEMENT_CONTEXT_KEY: &str = + "apollo_persisted_queries::safelist::skip_enforcement"; struct UsedQueryIdFromManifest; @@ -30,6 +35,14 @@ pub(crate) struct PersistedQueryLayer { introspection_enabled: bool, } +fn skip_enforcement(request: &SupergraphRequest) -> bool { + request + .context + .get(PERSISTED_QUERIES_SAFELIST_SKIP_ENFORCEMENT_CONTEXT_KEY) + .unwrap_or_default() + .unwrap_or(false) +} + impl PersistedQueryLayer { /// Create a new [`PersistedQueryLayer`] from CLI options, YAML configuration, /// and optionally, an existing persisted query manifest poller. @@ -65,6 +78,9 @@ impl PersistedQueryLayer { manifest_poller, &persisted_query_id, ) + } else if skip_enforcement(&request) { + // A plugin told us to allow this, so let's skip to require_id check. 
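+            // As a sketch (hypothetical plugin code, not part of this change), a
+            // router_service plugin opts a request out like so:
+            //
+            //     request.context.insert(
+            //         "apollo_persisted_queries::safelist::skip_enforcement",
+            //         true,
+            //     )?;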
+ Ok(request) } else if let Some(log_unknown) = manifest_poller.never_allows_freeform_graphql() { // If we don't have an ID and we require an ID, return an error immediately, if log_unknown { @@ -110,9 +126,21 @@ impl PersistedQueryLayer { } else { // if there is no query, look up the persisted query in the manifest // and put the body on the `supergraph_request` - if let Some(persisted_query_body) = - manifest_poller.get_operation_body(persisted_query_id) - { + if let Some(persisted_query_body) = manifest_poller.get_operation_body( + persisted_query_id, + // Use the first one of these that exists: + // - The PQL-specific context name entry + // `apollo_persisted_queries::client_name` (which can be set + // by router_service plugins) + // - The same name used by telemetry (ie, the value of the + // header named by `telemetry.apollo.client_name_header`, + // which defaults to `apollographql-client-name` by default) + request + .context + .get(PERSISTED_QUERIES_CLIENT_NAME_CONTEXT_KEY) + .unwrap_or_default() + .or_else(|| request.context.get(CLIENT_NAME).unwrap_or_default()), + ) { let body = request.supergraph_request.body_mut(); body.query = Some(persisted_query_body); body.extensions.remove("persistedQuery"); @@ -122,7 +150,11 @@ impl PersistedQueryLayer { .context .extensions() .with_lock(|mut lock| lock.insert(UsedQueryIdFromManifest)); - tracing::info!(monotonic_counter.apollo.router.operations.persisted_queries = 1u64); + u64_counter!( + "apollo.router.operations.persisted_queries", + "Total requests with persisted queries enabled", + 1 + ); Ok(request) } else if manifest_poller.augmenting_apq_with_pre_registration_and_no_safelisting() { // The query ID isn't in our manifest, but we have APQ enabled @@ -131,9 +163,11 @@ impl PersistedQueryLayer { // safelist later for log_unknown!) Ok(request) } else { - tracing::info!( - monotonic_counter.apollo.router.operations.persisted_queries = 1u64, - persisted_quieries.not_found = true + u64_counter!( + "apollo.router.operations.persisted_queries", + "Total requests with persisted queries enabled", + 1, + persisted_queries.not_found = true ); // if APQ is not enabled, return an error indicating the query was not found Err(supergraph_err_operation_not_found( @@ -207,36 +241,39 @@ impl PersistedQueryLayer { return Ok(request); } - match manifest_poller.action_for_freeform_graphql(Ok(&doc.ast)) { - FreeformGraphQLAction::Allow => { - tracing::info!(monotonic_counter.apollo.router.operations.persisted_queries = 1u64,); - Ok(request) - } - FreeformGraphQLAction::Deny => { - tracing::info!( - monotonic_counter.apollo.router.operations.persisted_queries = 1u64, - persisted_queries.safelist.rejected.unknown = false, - ); - Err(supergraph_err_operation_not_in_safelist(request)) - } - // Note that this might even include complaining about an operation that came via APQs. 
- FreeformGraphQLAction::AllowAndLog => { - tracing::info!( - monotonic_counter.apollo.router.operations.persisted_queries = 1u64, - persisted_queries.logged = true - ); - log_unknown_operation(operation_body); - Ok(request) - } - FreeformGraphQLAction::DenyAndLog => { - tracing::info!( - monotonic_counter.apollo.router.operations.persisted_queries = 1u64, - persisted_queries.safelist.rejected.unknown = true, - persisted_queries.logged = true - ); - log_unknown_operation(operation_body); - Err(supergraph_err_operation_not_in_safelist(request)) - } + let mut metric_attributes = vec![]; + let freeform_graphql_action = manifest_poller.action_for_freeform_graphql(Ok(&doc.ast)); + let skip_enforcement = skip_enforcement(&request); + let allow = skip_enforcement || freeform_graphql_action.should_allow; + if !allow { + metric_attributes.push(opentelemetry::KeyValue::new( + "persisted_queries.safelist.rejected.unknown".to_string(), + true, + )); + } else if !freeform_graphql_action.should_allow { + metric_attributes.push(opentelemetry::KeyValue::new( + "persisted_queries.safelist.enforcement_skipped".to_string(), + true, + )); + } + if freeform_graphql_action.should_log { + log_unknown_operation(operation_body); + metric_attributes.push(opentelemetry::KeyValue::new( + "persisted_queries.logged".to_string(), + true, + )); + } + u64_counter!( + "apollo.router.operations.persisted_queries", + "Total requests with persisted queries enabled", + 1, + metric_attributes + ); + + if allow { + Ok(request) + } else { + Err(supergraph_err_operation_not_in_safelist(request)) } } @@ -338,9 +375,10 @@ fn supergraph_err_operation_not_in_safelist(request: SupergraphRequest) -> Super } fn graphql_err_pq_id_required() -> GraphQLError { - graphql_err("PERSISTED_QUERY_ID_REQUIRED", + graphql_err( + "PERSISTED_QUERY_ID_REQUIRED", "This endpoint does not allow freeform GraphQL requests; operations must be sent by ID in the persisted queries GraphQL extension.", - ) + ) } fn supergraph_err_pq_id_required(request: SupergraphRequest) -> SupergraphResponse { @@ -373,17 +411,22 @@ mod tests { use std::collections::HashMap; use std::time::Duration; + use maplit::hashmap; use serde_json::json; + use tracing::instrument::WithSubscriber; use super::*; + use crate::assert_snapshot_subscriber; use crate::configuration::Apq; use crate::configuration::PersistedQueries; use crate::configuration::PersistedQueriesSafelist; use crate::configuration::Supergraph; + use crate::metrics::FutureMetricsExt; use crate::services::layers::persisted_queries::manifest_poller::FreeformGraphQLBehavior; use crate::services::layers::query_analysis::QueryAnalysisLayer; use crate::spec::Schema; use crate::test_harness::mocks::persisted_queries::*; + use crate::Context; #[tokio::test(flavor = "multi_thread")] async fn disabled_pq_layer_has_no_poller() { @@ -463,6 +506,84 @@ mod tests { assert_eq!(request.supergraph_request.body().query, Some(body)); } + #[tokio::test(flavor = "multi_thread")] + async fn enabled_pq_layer_with_client_names() { + let (_mock_guard, uplink_config) = mock_pq_uplink(&hashmap! 
{ + FullPersistedQueryOperationId { + operation_id: "both-plain-and-cliented".to_string(), + client_name: None, + } => "query { bpac_no_client: __typename }".to_string(), + FullPersistedQueryOperationId { + operation_id: "both-plain-and-cliented".to_string(), + client_name: Some("web".to_string()), + } => "query { bpac_web_client: __typename }".to_string(), + FullPersistedQueryOperationId { + operation_id: "only-cliented".to_string(), + client_name: Some("web".to_string()), + } => "query { oc_web_client: __typename }".to_string(), + }) + .await; + + let pq_layer = PersistedQueryLayer::new( + &Configuration::fake_builder() + .persisted_query(PersistedQueries::builder().enabled(true).build()) + .uplink(uplink_config) + .build() + .unwrap(), + ) + .await + .unwrap(); + + let map_to_query = |operation_id: &str, client_name: Option<&str>| -> Option { + let context = Context::new(); + if let Some(client_name) = client_name { + context + .insert( + PERSISTED_QUERIES_CLIENT_NAME_CONTEXT_KEY, + client_name.to_string(), + ) + .unwrap(); + } + + let incoming_request = SupergraphRequest::fake_builder() + .extension( + "persistedQuery", + json!({"version": 1, "sha256Hash": operation_id.to_string()}), + ) + .context(context) + .build() + .unwrap(); + + pq_layer + .supergraph_request(incoming_request) + .ok() + .expect("pq layer returned response instead of putting the query on the request") + .supergraph_request + .body() + .query + .clone() + }; + + assert_eq!( + map_to_query("both-plain-and-cliented", None), + Some("query { bpac_no_client: __typename }".to_string()) + ); + assert_eq!( + map_to_query("both-plain-and-cliented", Some("not-web")), + Some("query { bpac_no_client: __typename }".to_string()) + ); + assert_eq!( + map_to_query("both-plain-and-cliented", Some("web")), + Some("query { bpac_web_client: __typename }".to_string()) + ); + assert_eq!( + map_to_query("only-cliented", Some("web")), + Some("query { oc_web_client: __typename }".to_string()) + ); + assert_eq!(map_to_query("only-cliented", None), None); + assert_eq!(map_to_query("only-cliented", Some("not-web")), None); + } + #[tokio::test(flavor = "multi_thread")] async fn pq_layer_passes_on_to_apq_layer_when_id_not_found() { let (_id, _body, manifest) = fake_manifest(); @@ -610,9 +731,21 @@ mod tests { pq_layer: &PersistedQueryLayer, query_analysis_layer: &QueryAnalysisLayer, body: &str, + skip_enforcement: bool, ) -> SupergraphRequest { + let context = Context::new(); + if skip_enforcement { + context + .insert( + PERSISTED_QUERIES_SAFELIST_SKIP_ENFORCEMENT_CONTEXT_KEY, + true, + ) + .unwrap(); + } + let incoming_request = SupergraphRequest::fake_builder() .query(body) + .context(context) .build() .unwrap(); @@ -635,9 +768,11 @@ mod tests { pq_layer: &PersistedQueryLayer, query_analysis_layer: &QueryAnalysisLayer, body: &str, + log_unknown: bool, + counter_value: u64, ) { let request_with_analyzed_query = - run_first_two_layers(pq_layer, query_analysis_layer, body).await; + run_first_two_layers(pq_layer, query_analysis_layer, body, false).await; let mut supergraph_response = pq_layer .supergraph_request_with_analyzed_query(request_with_analyzed_query) @@ -654,113 +789,208 @@ mod tests { response.errors, vec![graphql_err_operation_not_in_safelist()] ); + let mut metric_attributes = vec![opentelemetry::KeyValue::new( + "persisted_queries.safelist.rejected.unknown".to_string(), + true, + )]; + if log_unknown { + metric_attributes.push(opentelemetry::KeyValue::new( + "persisted_queries.logged".to_string(), + true, + )); + } + 
assert_counter!( + "apollo.router.operations.persisted_queries", + counter_value, + &metric_attributes + ); } async fn allowed_by_safelist( pq_layer: &PersistedQueryLayer, query_analysis_layer: &QueryAnalysisLayer, body: &str, + log_unknown: bool, + skip_enforcement: bool, + counter_value: u64, ) { let request_with_analyzed_query = - run_first_two_layers(pq_layer, query_analysis_layer, body).await; + run_first_two_layers(pq_layer, query_analysis_layer, body, skip_enforcement).await; pq_layer .supergraph_request_with_analyzed_query(request_with_analyzed_query) .await .ok() .expect("pq layer second hook returned error response instead of returning a request"); - } - #[tokio::test(flavor = "multi_thread")] - async fn pq_layer_freeform_graphql_with_safelist() { - let manifest = HashMap::from([( - "valid-syntax".to_string(), - "fragment A on Query { me { id } } query SomeOp { ...A ...B } fragment,,, B on Query{me{name,username} } # yeah" - .to_string(), - ), ( - "invalid-syntax".to_string(), - "}}}".to_string()), - ]); + let mut metric_attributes = vec![]; + if skip_enforcement { + metric_attributes.push(opentelemetry::KeyValue::new( + "persisted_queries.safelist.enforcement_skipped".to_string(), + true, + )); + if log_unknown { + metric_attributes.push(opentelemetry::KeyValue::new( + "persisted_queries.logged".to_string(), + true, + )); + } + } - let (_mock_guard, uplink_config) = mock_pq_uplink(&manifest).await; + assert_counter!( + "apollo.router.operations.persisted_queries", + counter_value, + &metric_attributes + ); + } - let config = Configuration::fake_builder() - .persisted_query( - PersistedQueries::builder() - .enabled(true) - .safelist(PersistedQueriesSafelist::builder().enabled(true).build()) - .build(), - ) - .uplink(uplink_config) - .apq(Apq::fake_builder().enabled(false).build()) - .supergraph(Supergraph::fake_builder().introspection(true).build()) - .build() - .unwrap(); + async fn pq_layer_freeform_graphql_with_safelist(log_unknown: bool) { + async move { + let manifest = HashMap::from([ + ( + FullPersistedQueryOperationId { + operation_id: "valid-syntax".to_string(), + client_name: None, + }, + "fragment A on Query { me { id } } query SomeOp { ...A ...B } fragment,,, B on Query{me{name,username} } # yeah" + .to_string(), + ), + ( + FullPersistedQueryOperationId { + operation_id: "invalid-syntax".to_string(), + client_name: None, + }, + "}}}".to_string(), + ), + ]); - let pq_layer = PersistedQueryLayer::new(&config).await.unwrap(); + let (_mock_guard, uplink_config) = mock_pq_uplink(&manifest).await; - let schema = Arc::new( - Schema::parse( - include_str!("../../../testdata/supergraph.graphql"), - &Default::default(), - ) - .unwrap(), - ); + let config = Configuration::fake_builder() + .persisted_query( + PersistedQueries::builder() + .enabled(true) + .safelist(PersistedQueriesSafelist::builder().enabled(true).build()) + .log_unknown(log_unknown) + .build(), + ) + .uplink(uplink_config) + .apq(Apq::fake_builder().enabled(false).build()) + .supergraph(Supergraph::fake_builder().introspection(true).build()) + .build() + .unwrap(); - let query_analysis_layer = QueryAnalysisLayer::new(schema, Arc::new(config)).await; + let pq_layer = PersistedQueryLayer::new(&config).await.unwrap(); - // A random query is blocked. 
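+            // (denied_by_safelist also checks that the
+            // `apollo.router.operations.persisted_queries` counter was bumped with
+            // `persisted_queries.safelist.rejected.unknown = true`; the trailing
+            // argument is the expected counter value.)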
- denied_by_safelist( - &pq_layer, - &query_analysis_layer, - "query SomeQuery { me { id } }", - ) - .await; + let schema = Arc::new(Schema::parse(include_str!("../../../testdata/supergraph.graphql"), &Default::default()).unwrap()); - // The exact string from the manifest is allowed. - allowed_by_safelist( - &pq_layer, - &query_analysis_layer, - "fragment A on Query { me { id } } query SomeOp { ...A ...B } fragment,,, B on Query{me{name,username} } # yeah", - ) - .await; + let query_analysis_layer = QueryAnalysisLayer::new(schema, Arc::new(config)).await; - // Reordering definitions and reformatting a bit matches. - allowed_by_safelist( + // A random query is blocked. + denied_by_safelist( &pq_layer, &query_analysis_layer, - "#comment\n fragment, B on Query , { me{name username} } query SomeOp { ...A ...B } fragment \nA on Query { me{ id} }" + "query SomeQuery { me { id } }", + log_unknown, + 1, ).await; - // Reordering fields does not match! - denied_by_safelist( + // But it is allowed with skip_enforcement set. + allowed_by_safelist( &pq_layer, &query_analysis_layer, - "fragment A on Query { me { id } } query SomeOp { ...A ...B } fragment,,, B on Query{me{username,name} } # yeah" + "query SomeQuery { me { id } }", + log_unknown, + true, + 1, ).await; - // Introspection queries are allowed (even using fragments and aliases), because - // introspection is enabled. - allowed_by_safelist( - &pq_layer, - &query_analysis_layer, - r#"fragment F on Query { __typename foo: __schema { __typename } } query Q { __type(name: "foo") { name } ...F }"#, - ).await; - - // Multiple spreads of the same fragment are also allowed - // (https://github.com/apollographql/apollo-rs/issues/613) - allowed_by_safelist( - &pq_layer, - &query_analysis_layer, - r#"fragment F on Query { __typename foo: __schema { __typename } } query Q { __type(name: "foo") { name } ...F ...F }"#, - ).await; - - // But adding any top-level non-introspection field is enough to make it not count as introspection. - denied_by_safelist( - &pq_layer, - &query_analysis_layer, - r#"fragment F on Query { __typename foo: __schema { __typename } me { id } } query Q { __type(name: "foo") { name } ...F }"#, - ).await; + // The exact string from the manifest is allowed. + allowed_by_safelist( + &pq_layer, + &query_analysis_layer, + "fragment A on Query { me { id } } query SomeOp { ...A ...B } fragment,,, B on Query{me{name,username} } # yeah", + log_unknown, + false, + 1, + ) + .await; + + // Reordering definitions and reformatting a bit matches. + allowed_by_safelist( + &pq_layer, + &query_analysis_layer, + "#comment\n fragment, B on Query , { me{name username} } query SomeOp { ...A ...B } fragment \nA on Query { me{ id} }", + log_unknown, + false, + 2, + ) + .await; + + // Reordering fields does not match! + denied_by_safelist( + &pq_layer, + &query_analysis_layer, + "fragment A on Query { me { id } } query SomeOp { ...A ...B } fragment,,, B on Query{me{username,name} } # yeah", + log_unknown, + 2, + ) + .await; + + // Introspection queries are allowed (even using fragments and aliases), because + // introspection is enabled. + allowed_by_safelist( + &pq_layer, + &query_analysis_layer, + r#"fragment F on Query { __typename foo: __schema { __typename } } query Q { __type(name: "foo") { name } ...F }"#, + log_unknown, + false, + // Note that introspection queries don't actually interact with the PQ machinery enough + // to update this metric, for better or for worse. 
+ 2, + ) + .await; + + // Multiple spreads of the same fragment are also allowed + // (https://github.com/apollographql/apollo-rs/issues/613) + allowed_by_safelist( + &pq_layer, + &query_analysis_layer, + r#"fragment F on Query { __typename foo: __schema { __typename } } query Q { __type(name: "foo") { name } ...F ...F }"#, + log_unknown, + false, + // Note that introspection queries don't actually interact with the PQ machinery enough + // to update this metric, for better or for worse. + 2, + ) + .await; + + // But adding any top-level non-introspection field is enough to make it not count as introspection. + denied_by_safelist( + &pq_layer, + &query_analysis_layer, + r#"fragment F on Query { __typename foo: __schema { __typename } me { id } } query Q { __type(name: "foo") { name } ...F }"#, + log_unknown, + 3, + ) + .await; + } + .with_metrics() + .await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn pq_layer_freeform_graphql_with_safelist_log_unknown_false() { + pq_layer_freeform_graphql_with_safelist(false).await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn pq_layer_freeform_graphql_with_safelist_log_unknown_true() { + async { + pq_layer_freeform_graphql_with_safelist(true).await; + } + .with_subscriber(assert_snapshot_subscriber!()) + .await } #[tokio::test(flavor = "multi_thread")] @@ -924,6 +1154,22 @@ mod tests { .await .expect("could not get response from pq layer"); assert_eq!(response.errors, vec![graphql_err_pq_id_required()]); + + // Try again skipping enforcement. + let context = Context::new(); + context + .insert( + PERSISTED_QUERIES_SAFELIST_SKIP_ENFORCEMENT_CONTEXT_KEY, + true, + ) + .unwrap(); + let incoming_request = SupergraphRequest::fake_builder() + .query("query { typename }") + .context(context) + .build() + .unwrap(); + assert!(incoming_request.supergraph_request.body().query.is_some()); + assert!(pq_layer.supergraph_request(incoming_request).is_ok()); } #[tokio::test(flavor = "multi_thread")] diff --git a/apollo-router/src/services/layers/persisted_queries/snapshots/apollo_router__services__layers__persisted_queries__tests__pq_layer_freeform_graphql_with_safelist_log_unknown_true@logs.snap b/apollo-router/src/services/layers/persisted_queries/snapshots/apollo_router__services__layers__persisted_queries__tests__pq_layer_freeform_graphql_with_safelist_log_unknown_true@logs.snap new file mode 100644 index 0000000000..f9a850f1c8 --- /dev/null +++ b/apollo-router/src/services/layers/persisted_queries/snapshots/apollo_router__services__layers__persisted_queries__tests__pq_layer_freeform_graphql_with_safelist_log_unknown_true@logs.snap @@ -0,0 +1,21 @@ +--- +source: apollo-router/src/services/layers/persisted_queries/mod.rs +expression: yaml +snapshot_kind: text +--- +- fields: + operation_body: "query SomeQuery { me { id } }" + level: WARN + message: unknown operation +- fields: + operation_body: "query SomeQuery { me { id } }" + level: WARN + message: unknown operation +- fields: + operation_body: "fragment A on Query { me { id } } query SomeOp { ...A ...B } fragment,,, B on Query{me{username,name} } # yeah" + level: WARN + message: unknown operation +- fields: + operation_body: "fragment F on Query { __typename foo: __schema { __typename } me { id } } query Q { __type(name: \"foo\") { name } ...F }" + level: WARN + message: unknown operation diff --git a/apollo-router/src/services/router/service.rs b/apollo-router/src/services/router/service.rs index e5792c1a4d..acc67e332d 100644 --- a/apollo-router/src/services/router/service.rs +++ 
b/apollo-router/src/services/router/service.rs @@ -378,8 +378,10 @@ impl RouterService { Ok(RouterResponse { response, context }) } else { - tracing::info!( - monotonic_counter.apollo.router.graphql_error = 1u64, + u64_counter!( + "apollo.router.graphql_error", + "Number of GraphQL error responses returned by the router", + 1, code = "INVALID_ACCEPT_HEADER" ); // Useful for selector in spans/instruments/events @@ -799,12 +801,18 @@ impl RouterService { for (code, count) in map { match code { None => { - tracing::info!(monotonic_counter.apollo.router.graphql_error = count,); + u64_counter!( + "apollo.router.graphql_error", + "Number of GraphQL error responses returned by the router", + count + ); } Some(code) => { - tracing::info!( - monotonic_counter.apollo.router.graphql_error = count, - code = code + u64_counter!( + "apollo.router.graphql_error", + "Number of GraphQL error responses returned by the router", + count, + code = code.to_string() ); } } diff --git a/apollo-router/src/services/router/snapshots/apollo_router__services__router__tests__escaped_quotes_in_string_literal.snap b/apollo-router/src/services/router/snapshots/apollo_router__services__router__tests__escaped_quotes_in_string_literal.snap index 4c8165c12c..c4471110ea 100644 --- a/apollo-router/src/services/router/snapshots/apollo_router__services__router__tests__escaped_quotes_in_string_literal.snap +++ b/apollo-router/src/services/router/snapshots/apollo_router__services__router__tests__escaped_quotes_in_string_literal.snap @@ -35,13 +35,13 @@ expression: "(graphql_response, &subgraph_query_log)" ( "products", Some( - "query TopProducts__products__0($first:Int){topProducts(first:$first){__typename upc name}}", + "query TopProducts__products__0($first: Int) { topProducts(first: $first) { __typename upc name } }", ), ), ( "reviews", Some( - "query TopProducts__reviews__1($representations:[_Any!]!){_entities(representations:$representations){..._generated_onProduct1_0}}fragment _generated_onProduct1_0 on Product{reviewsForAuthor(authorID:\"\\\"1\\\"\"){body}}", + "query TopProducts__reviews__1($representations: [_Any!]!) 
{ _entities(representations: $representations) { ..._generated_onProduct1_0 } } fragment _generated_onProduct1_0 on Product { reviewsForAuthor(authorID: \"\\\"1\\\"\") { body } }", ), ), ], diff --git a/apollo-router/src/services/router/tests.rs b/apollo-router/src/services/router/tests.rs index 94caafd006..59737d44f3 100644 --- a/apollo-router/src/services/router/tests.rs +++ b/apollo-router/src/services/router/tests.rs @@ -569,5 +569,5 @@ async fn escaped_quotes_in_string_literal() { let subgraph_query = subgraph_query_log[1].1.as_ref().unwrap(); // The string literal made it through unchanged: - assert!(subgraph_query.contains(r#"reviewsForAuthor(authorID:"\"1\"")"#)); + assert!(subgraph_query.contains(r#"reviewsForAuthor(authorID: "\"1\"")"#)); } diff --git a/apollo-router/src/services/subgraph_service.rs b/apollo-router/src/services/subgraph_service.rs index 9dbb9fb773..dc93499143 100644 --- a/apollo-router/src/services/subgraph_service.rs +++ b/apollo-router/src/services/subgraph_service.rs @@ -300,16 +300,20 @@ impl tower::Service for SubgraphService { })?; stream_tx.send(Box::pin(handle.into_stream())).await?; - tracing::info!( - monotonic_counter.apollo.router.operations.subscriptions = 1u64, - subscriptions.mode = %"callback", + u64_counter!( + "apollo.router.operations.subscriptions", + "Total requests with subscription operations", + 1, + subscriptions.mode = "callback", subscriptions.deduplicated = !created, - subgraph.service.name = service_name, + subgraph.service.name = service_name.clone() ); if !created { - tracing::info!( - monotonic_counter.apollo_router_deduplicated_subscriptions_total = 1u64, - mode = %"callback", + u64_counter!( + "apollo_router_deduplicated_subscriptions_total", + "Total deduplicated subscription requests (deprecated)", + 1, + mode = "callback" ); // Dedup happens here return Ok(SubgraphResponse::builder() @@ -507,19 +511,23 @@ async fn call_websocket( let (handle, created) = notify .create_or_subscribe(subscription_hash.clone(), false) .await?; - tracing::info!( - monotonic_counter.apollo.router.operations.subscriptions = 1u64, - subscriptions.mode = %"passthrough", + u64_counter!( + "apollo.router.operations.subscriptions", + "Total requests with subscription operations", + 1, + subscriptions.mode = "passthrough", subscriptions.deduplicated = !created, - subgraph.service.name = service_name, + subgraph.service.name = service_name.clone() ); if !created { subscription_stream_tx .send(Box::pin(handle.into_stream())) .await?; - tracing::info!( - monotonic_counter.apollo_router_deduplicated_subscriptions_total = 1u64, - mode = %"passthrough", + u64_counter!( + "apollo_router_deduplicated_subscriptions_total", + "Total deduplicated subscription requests (deprecated)", + 1, + mode = "passthrough" ); // Dedup happens here @@ -868,9 +876,14 @@ pub(crate) async fn process_batch( subgraph = &service ); - tracing::info!(monotonic_counter.apollo.router.operations.batching = 1u64, - mode = %BatchingMode::BatchHttpLink, // Only supported mode right now - subgraph = &service + u64_counter!( + "apollo.router.operations.batching", + "Total requests with batched operations", + 1, + // XXX(@goto-bus-stop): Should these be `batching.mode`, `batching.subgraph`? + // Also, other metrics use a different convention to report the subgraph name + mode = BatchingMode::BatchHttpLink.to_string(), // Only supported mode right now + subgraph = service.clone() ); // Perform the actual fetch. 
If this fails then we didn't manage to make the call at all, so we can't do anything with it. diff --git a/apollo-router/src/services/supergraph/service.rs b/apollo-router/src/services/supergraph/service.rs index f87c81727f..b059c7aa07 100644 --- a/apollo-router/src/services/supergraph/service.rs +++ b/apollo-router/src/services/supergraph/service.rs @@ -40,7 +40,6 @@ use crate::plugins::telemetry::config_new::events::log_event; use crate::plugins::telemetry::config_new::events::SupergraphEventResponse; use crate::plugins::telemetry::consts::QUERY_PLANNING_SPAN_NAME; use crate::plugins::telemetry::tracing::apollo_telemetry::APOLLO_PRIVATE_DURATION_NS; -use crate::plugins::telemetry::Telemetry; use crate::plugins::telemetry::LOGGING_DISPLAY_BODY; use crate::plugins::traffic_shaping::TrafficShaping; use crate::plugins::traffic_shaping::APOLLO_TRAFFIC_SHAPING; @@ -176,11 +175,15 @@ async fn service_call( body.operation_name.clone(), context.clone(), schema.clone(), + // We cannot assume that the query is present as it may have been modified by coprocessors or plugins. + // There is a deeper issue here in that query analysis is doing a bunch of stuff that it should not and + // places the results in context. Therefore plugins that have modified the query won't actually take effect. + // However, this can't be resolved before looking at the pipeline again. req.supergraph_request .body() .query .clone() - .expect("query presence was checked before"), + .unwrap_or_default(), ) .await { @@ -806,9 +809,7 @@ impl PluggableSupergraphServiceBuilder { // Activate the telemetry plugin. // We must NOT fail to go live with the new router from this point as the telemetry plugin activate interacts with globals. for (_, plugin) in self.plugins.iter() { - if let Some(telemetry) = plugin.as_any().downcast_ref::() { - telemetry.activate(); - } + plugin.activate(); } // We need a non-fallible hook so that once we know we are going live with a pipeline we do final initialization. diff --git a/apollo-router/src/services/supergraph/tests.rs b/apollo-router/src/services/supergraph/tests.rs index ac2dbab21a..059d7baa09 100644 --- a/apollo-router/src/services/supergraph/tests.rs +++ b/apollo-router/src/services/supergraph/tests.rs @@ -3342,119 +3342,6 @@ async fn interface_object_typename() { insta::assert_json_snapshot!(stream.next_response().await.unwrap()); } -#[tokio::test] -async fn fragment_reuse() { - const SCHEMA: &str = r#"schema - @link(url: "https://specs.apollo.dev/link/v1.0") - @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) - { - query: Query - } - directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA - directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE - directive @join__field(graph: join__Graph, requires: join__FieldSet, provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION - directive @join__graph(name: String!, url: String!) on ENUM_VALUE - directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true, isInterfaceObject: Boolean! = false) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR - directive @join__unionMember(graph: join__Graph!, member: String!) repeatable on UNION - directive @join__implements( graph: join__Graph! interface: String!) 
repeatable on OBJECT | INTERFACE - - scalar link__Import - - enum link__Purpose { - SECURITY - EXECUTION - } - scalar join__FieldSet - - enum join__Graph { - USER @join__graph(name: "user", url: "http://localhost:4001/graphql") - ORGA @join__graph(name: "orga", url: "http://localhost:4002/graphql") - } - - type Query - @join__type(graph: ORGA) - @join__type(graph: USER) - { - me: User @join__field(graph: USER) - } - - type User - @join__type(graph: ORGA, key: "id") - @join__type(graph: USER, key: "id") - { - id: ID! - name: String - organizations: [Organization] @join__field(graph: ORGA) - } - type Organization - @join__type(graph: ORGA, key: "id") - { - id: ID - name: String @join__field(graph: ORGA) - }"#; - - let subgraphs = MockedSubgraphs([ - ("user", MockSubgraph::builder().with_json( - serde_json::json!{{ - "query":"query Query__user__0($a:Boolean!=true$b:Boolean!=true){me{name ...on User@include(if:$a){__typename id}...on User@include(if:$b){__typename id}}}", - "operationName": "Query__user__0" - }}, - serde_json::json!{{"data": {"me": { "name": "Ada", "__typename": "User", "id": "1" }}}} - ).build()), - ("orga", MockSubgraph::builder().with_json( - serde_json::json!{{ - "query":"query Query__orga__1($representations:[_Any!]!$a:Boolean!=true$b:Boolean!=true){_entities(representations:$representations){...F@include(if:$a)...F@include(if:$b)}}fragment F on User{organizations{id name}}", - "operationName": "Query__orga__1", - "variables":{"representations":[{"__typename":"User","id":"1"}]} - }}, - serde_json::json!{{"data": {"_entities": [{ "organizations": [{"id": "2", "name": "Apollo"}] }]}}} - ).build()) - ].into_iter().collect()); - - let service = TestHarness::builder() - .configuration_json(serde_json::json!({ - "include_subgraph_errors": { "all": true }, - "supergraph": { - "generate_query_fragments": false, - "experimental_reuse_query_fragments": true, - } - })) - .unwrap() - .schema(SCHEMA) - .extra_plugin(subgraphs) - .build_supergraph() - .await - .unwrap(); - - let request = supergraph::Request::fake_builder() - .query( - r#"query Query($a: Boolean! = true, $b: Boolean! = true) { - me { - name - ...F @include(if: $a) - ...F @include(if: $b) - } - } - fragment F on User { - organizations { - id - name - } - }"#, - ) - .build() - .unwrap(); - let response = service - .oneshot(request) - .await - .unwrap() - .next_response() - .await - .unwrap(); - - insta::assert_json_snapshot!(response); -} - #[tokio::test] async fn abstract_types_in_requires() { let schema = r#"schema diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs index 3ad855870a..460f8cac3d 100644 --- a/apollo-router/src/spec/query.rs +++ b/apollo-router/src/spec/query.rs @@ -324,7 +324,9 @@ impl Query { let operation = Operation::from_hir(&operation, schema, &mut defer_stats, &fragments)?; let mut visitor = - QueryHashVisitor::new(schema.supergraph_schema(), &schema.raw_sdl, document); + QueryHashVisitor::new(schema.supergraph_schema(), &schema.raw_sdl, document).map_err( + |e| SpecError::QueryHashing(format!("could not calculate the query hash: {e}")), + )?; traverse::document(&mut visitor, document, operation_name).map_err(|e| { SpecError::QueryHashing(format!("could not calculate the query hash: {e}")) })?; diff --git a/apollo-router/src/spec/query/change.rs b/apollo-router/src/spec/query/change.rs index 8bca0e025b..73bda2881f 100644 --- a/apollo-router/src/spec/query/change.rs +++ b/apollo-router/src/spec/query/change.rs @@ -1,16 +1,57 @@ +//! 
Schema-aware query hashing algorithm
+//!
+//! This is a query visitor that calculates a hash of all fields, along with all
+//! the relevant types and directives in the schema. It is designed to generate
+//! the same hash for the same query across schema updates if the schema change
+//! would not affect that query. As an example, if a new type is added to the
+//! schema, we know that it will have no impact on an existing query that cannot
+//! be using it.
+//! This algorithm is used in two places:
+//! * in the query planner cache: generating query plans can be expensive, so the
+//! router has a warm-up feature where, upon receiving a new schema, it plans the
+//! most used queries before switching traffic to the new schema. Generating all
+//! of those plans takes a lot of time. With this hashing algorithm, we can detect
+//! that the schema change does not affect the query, which means that we can
+//! reuse the old query plan directly and avoid the expensive planning task.
+//! * in entity caching: the responses returned by subgraphs can change depending
+//! on the schema (example: a field moving from String to Int), so we need to
+//! detect that. One way to do that would be to add the schema hash to the cache
+//! key, but that wipes the cache on every schema update, which would cause
+//! performance and reliability issues. With this hashing algorithm, cached
+//! entries can be kept across schema updates.
+//!
+//! ## Technical details
+//!
+//! ### Query string hashing
+//! A full hash of the query string is added along with the schema-level data. This
+//! technically makes the algorithm less useful, because the same query with
+//! different indentation gets a different hash even though the query plan and the
+//! subgraph responses would be identical. But it ensures that if we ever forget to
+//! hash some part of the query, we still avoid collisions.
+//!
+//! ### Prefixes and suffixes
+//! Across the entire visitor, we add prefixes and suffixes like this:
+//!
+//! ```rust
+//! "^SCHEMA".hash(self);
+//! ```
+//!
+//! This prevents possible collisions when hashing multiple items in sequence. The
+//! `^` character cannot be present in GraphQL names, so it is a good separator.
 use std::collections::HashMap;
 use std::collections::HashSet;
 use std::hash::Hash;
 use std::hash::Hasher;
 
 use apollo_compiler::ast;
-use apollo_compiler::ast::Argument;
 use apollo_compiler::ast::FieldDefinition;
 use apollo_compiler::executable;
 use apollo_compiler::parser::Parser;
 use apollo_compiler::schema;
 use apollo_compiler::schema::DirectiveList;
 use apollo_compiler::schema::ExtendedType;
+use apollo_compiler::schema::InterfaceType;
 use apollo_compiler::validation::Valid;
 use apollo_compiler::Name;
 use apollo_compiler::Node;
@@ -25,6 +66,8 @@ use crate::plugins::progressive_override::JOIN_SPEC_BASE_URL;
 use crate::spec::Schema;
 
 pub(crate) const JOIN_TYPE_DIRECTIVE_NAME: &str = "join__type";
+pub(crate) const CONTEXT_SPEC_BASE_URL: &str = "https://specs.apollo.dev/context";
+pub(crate) const CONTEXT_DIRECTIVE_NAME: &str = "context";
 
 /// Calculates a hash of the query and the schema, but only looking at the parts of the
 /// schema which affect the query.
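A minimal standalone illustration (not part of the patch) of why the separators described above matter. Feeding a hasher the parts `("ab", "c")` and `("a", "bc")` produces identical byte streams, and therefore identical hashes, unless each write is delimited. The sketch assumes only the `sha2` crate that the visitor itself uses, and mirrors the `0xFF` separator added in `QueryHashVisitor::write` later in this diff:

```rust
use sha2::{Digest, Sha256};

fn hash_parts(parts: &[&str], with_separator: bool) -> Vec<u8> {
    let mut hasher = Sha256::new();
    for part in parts {
        if with_separator {
            // Mirrors QueryHashVisitor::write, which prepends 0xFF to every write.
            hasher.update([0xFF_u8]);
        }
        hasher.update(part.as_bytes());
    }
    hasher.finalize().to_vec()
}

fn main() {
    // Without separators, the two sequences produce the same byte stream...
    assert_eq!(hash_parts(&["ab", "c"], false), hash_parts(&["a", "bc"], false));
    // ...with separators, they hash differently.
    assert_ne!(hash_parts(&["ab", "c"], true), hash_parts(&["a", "bc"], true));
}
```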
@@ -33,17 +76,19 @@ pub(crate) const JOIN_TYPE_DIRECTIVE_NAME: &str = "join__type";
 pub(crate) struct QueryHashVisitor<'a> {
     schema: &'a schema::Schema,
     // TODO: remove once introspection has been moved out of query planning
-    // For now, introspection is stiull handled by the planner, so when an
+    // For now, introspection is still handled by the planner, so when an
     // introspection query is hashed, it should take the whole schema into account
     schema_str: &'a str,
     hasher: Sha256,
     fragments: HashMap<&'a Name, &'a Node<executable::Fragment>>,
     hashed_types: HashSet<String>,
-    // name, field
-    hashed_fields: HashSet<(String, String)>,
+    hashed_field_definitions: HashSet<(String, String)>,
     seen_introspection: bool,
     join_field_directive_name: Option<String>,
     join_type_directive_name: Option<String>,
+    context_directive_name: Option<String>,
+    // map from context string to list of type names
+    contexts: HashMap<String, Vec<String>>,
 }
 
 impl<'a> QueryHashVisitor<'a> {
@@ -51,14 +96,14 @@ impl<'a> QueryHashVisitor<'a> {
         schema: &'a schema::Schema,
         schema_str: &'a str,
         executable: &'a executable::ExecutableDocument,
-    ) -> Self {
-        Self {
+    ) -> Result<Self, BoxError> {
+        let mut visitor = Self {
             schema,
             schema_str,
             hasher: Sha256::new(),
             fragments: executable.fragments.iter().collect(),
             hashed_types: HashSet::new(),
-            hashed_fields: HashSet::new(),
+            hashed_field_definitions: HashSet::new(),
             seen_introspection: false,
             // should we just return an error if we do not find those directives?
             join_field_directive_name: Schema::directive_name(
@@ -73,7 +118,30 @@ impl<'a> QueryHashVisitor<'a> {
                 ">=0.1.0",
                 JOIN_TYPE_DIRECTIVE_NAME,
             ),
+            context_directive_name: Schema::directive_name(
+                schema,
+                CONTEXT_SPEC_BASE_URL,
+                ">=0.1.0",
+                CONTEXT_DIRECTIVE_NAME,
+            ),
+            contexts: HashMap::new(),
+        };
+
+        visitor.hash_schema()?;
+
+        Ok(visitor)
+    }
+
+    pub(crate) fn hash_schema(&mut self) -> Result<(), BoxError> {
+        "^SCHEMA".hash(self);
+        for directive_definition in self.schema.directive_definitions.values() {
+            self.hash_directive_definition(directive_definition)?;
         }
+
+        self.hash_directive_list_schema(&self.schema.schema_definition.directives);
+
+        "^SCHEMA-END".hash(self);
+        Ok(())
     }
 
     pub(crate) fn hash_query(
@@ -82,8 +150,9 @@ impl<'a> QueryHashVisitor<'a> {
         executable: &'a executable::ExecutableDocument,
         operation_name: Option<&str>,
     ) -> Result<Vec<u8>, BoxError> {
-        let mut visitor = QueryHashVisitor::new(schema, schema_str, executable);
+        let mut visitor = QueryHashVisitor::new(schema, schema_str, executable)?;
         traverse::document(&mut visitor, executable, operation_name)?;
+        // hash the entire query string to prevent collisions
        executable.to_string().hash(&mut visitor);
         Ok(visitor.finish())
     }
@@ -92,180 +161,317 @@ impl<'a> QueryHashVisitor<'a> {
         self.hasher.finalize().as_slice().into()
     }
 
+    fn hash_directive_definition(
+        &mut self,
+        directive_definition: &Node<ast::DirectiveDefinition>,
+    ) -> Result<(), BoxError> {
+        "^DIRECTIVE_DEFINITION".hash(self);
+        directive_definition.name.as_str().hash(self);
+        "^ARGUMENT_LIST".hash(self);
+        for argument in &directive_definition.arguments {
+            self.hash_input_value_definition(argument)?;
+        }
+        "^ARGUMENT_LIST_END".hash(self);
+
+        "^DIRECTIVE_DEFINITION-END".hash(self);
+
+        Ok(())
+    }
+
+    fn hash_directive_list_schema(&mut self, directive_list: &schema::DirectiveList) {
+        "^DIRECTIVE_LIST".hash(self);
+        for directive in directive_list {
+            self.hash_directive(directive);
+        }
+        "^DIRECTIVE_LIST_END".hash(self);
+    }
+
+    fn hash_directive_list_ast(&mut self, directive_list: &ast::DirectiveList) {
+        "^DIRECTIVE_LIST".hash(self);
+        for directive in directive_list {
+            self.hash_directive(directive);
+        }
+        "^DIRECTIVE_LIST_END".hash(self);
+    }
+
     fn hash_directive(&mut self, directive: &Node<ast::Directive>) {
+        "^DIRECTIVE".hash(self);
         directive.name.as_str().hash(self);
+        "^ARGUMENT_LIST".hash(self);
         for argument in &directive.arguments {
-            self.hash_argument(argument)
+            self.hash_argument(argument);
         }
+        "^ARGUMENT_END".hash(self);
+
+        "^DIRECTIVE-END".hash(self);
     }
 
     fn hash_argument(&mut self, argument: &Node<ast::Argument>) {
+        "^ARGUMENT".hash(self);
         argument.name.hash(self);
         self.hash_value(&argument.value);
+        "^ARGUMENT-END".hash(self);
     }
 
     fn hash_value(&mut self, value: &ast::Value) {
+        "^VALUE".hash(self);
+
         match value {
-            schema::Value::Null => "null".hash(self),
+            schema::Value::Null => "^null".hash(self),
             schema::Value::Enum(e) => {
-                "enum".hash(self);
+                "^enum".hash(self);
                 e.hash(self);
             }
             schema::Value::Variable(v) => {
-                "variable".hash(self);
+                "^variable".hash(self);
                 v.hash(self);
             }
             schema::Value::String(s) => {
-                "string".hash(self);
+                "^string".hash(self);
                 s.hash(self);
             }
             schema::Value::Float(f) => {
-                "float".hash(self);
+                "^float".hash(self);
                 f.hash(self);
             }
             schema::Value::Int(i) => {
-                "int".hash(self);
+                "^int".hash(self);
                 i.hash(self);
             }
             schema::Value::Boolean(b) => {
-                "boolean".hash(self);
+                "^boolean".hash(self);
                 b.hash(self);
             }
             schema::Value::List(l) => {
-                "list[".hash(self);
+                "^list[".hash(self);
                 for v in l.iter() {
                     self.hash_value(v);
                 }
-                "]".hash(self);
+                "^]".hash(self);
             }
             schema::Value::Object(o) => {
-                "object{".hash(self);
+                "^object{".hash(self);
                 for (k, v) in o.iter() {
+                    "^key".hash(self);
+
                     k.hash(self);
-                    ":".hash(self);
+                    "^value:".hash(self);
                     self.hash_value(v);
                 }
-                "}".hash(self);
+                "^}".hash(self);
             }
         }
+        "^VALUE-END".hash(self);
     }
 
-    fn hash_type_by_name(&mut self, t: &str) -> Result<(), BoxError> {
-        if self.hashed_types.contains(t) {
+    fn hash_type_by_name(&mut self, name: &str) -> Result<(), BoxError> {
+        "^TYPE_BY_NAME".hash(self);
+
+        name.hash(self);
+
+        // we need this to avoid an infinite loop when hashing types that refer to each other
+        if self.hashed_types.contains(name) {
             return Ok(());
         }
 
-        self.hashed_types.insert(t.to_string());
+        self.hashed_types.insert(name.to_string());
 
-        if let Some(ty) = self.schema.types.get(t) {
+        if let Some(ty) = self.schema.types.get(name) {
             self.hash_extended_type(ty)?;
         }
+        "^TYPE_BY_NAME-END".hash(self);
+
         Ok(())
     }
 
     fn hash_extended_type(&mut self, t: &'a ExtendedType) -> Result<(), BoxError> {
+        "^EXTENDED_TYPE".hash(self);
+
         match t {
             ExtendedType::Scalar(s) => {
-                for directive in &s.directives {
-                    self.hash_directive(&directive.node);
-                }
+                "^SCALAR".hash(self);
+                self.hash_directive_list_schema(&s.directives);
+                "^SCALAR_END".hash(self);
             }
+            // this only hashes the type-level info, not the fields, because those will be taken from the query;
+            // we will still hash the fields used for the key
             ExtendedType::Object(o) => {
-                for directive in &o.directives {
-                    self.hash_directive(&directive.node);
-                }
+                "^OBJECT".hash(self);
+
+                self.hash_directive_list_schema(&o.directives);
                 self.hash_join_type(&o.name, &o.directives)?;
+
+                self.record_context(&o.name, &o.directives)?;
+
+                "^IMPLEMENTED_INTERFACES_LIST".hash(self);
+                for interface in &o.implements_interfaces {
+                    self.hash_type_by_name(&interface.name)?;
+                }
+                "^IMPLEMENTED_INTERFACES_LIST_END".hash(self);
+                "^OBJECT_END".hash(self);
             }
             ExtendedType::Interface(i) => {
-                for directive in &i.directives {
-                    self.hash_directive(&directive.node);
-                }
+                "^INTERFACE".hash(self);
+
+                self.hash_directive_list_schema(&i.directives);
+
                 self.hash_join_type(&i.name, &i.directives)?;
+
+                self.record_context(&i.name, &i.directives)?;
+
+                "^IMPLEMENTED_INTERFACES_LIST".hash(self);
+                for implementor in &i.implements_interfaces {
+                    self.hash_type_by_name(&implementor.name)?;
+                }
+                "^IMPLEMENTED_INTERFACES_LIST_END".hash(self);
+
+                if let Some(implementers) = self.schema().implementers_map().get(&i.name) {
+                    "^IMPLEMENTER_OBJECT_LIST".hash(self);
+
+                    for object in &implementers.objects {
+                        self.hash_type_by_name(object)?;
+                    }
+                    "^IMPLEMENTER_OBJECT_LIST_END".hash(self);
+
+                    "^IMPLEMENTER_INTERFACE_LIST".hash(self);
+                    for interface in &implementers.interfaces {
+                        self.hash_type_by_name(interface)?;
+                    }
+                    "^IMPLEMENTER_INTERFACE_LIST_END".hash(self);
+                }
+
+                "^INTERFACE_END".hash(self);
             }
             ExtendedType::Union(u) => {
-                for directive in &u.directives {
-                    self.hash_directive(&directive.node);
-                }
+                "^UNION".hash(self);
+
+                self.hash_directive_list_schema(&u.directives);
+                self.record_context(&u.name, &u.directives)?;
+
+                "^MEMBER_LIST".hash(self);
                 for member in &u.members {
                     self.hash_type_by_name(member.as_str())?;
                 }
+                "^MEMBER_LIST_END".hash(self);
+                "^UNION_END".hash(self);
             }
             ExtendedType::Enum(e) => {
-                for directive in &e.directives {
-                    self.hash_directive(&directive.node);
-                }
+                "^ENUM".hash(self);
+                self.hash_directive_list_schema(&e.directives);
+
+                "^ENUM_VALUE_LIST".hash(self);
                 for (value, def) in &e.values {
+                    "^VALUE".hash(self);
+
                     value.hash(self);
-                    for directive in &def.directives {
-                        self.hash_directive(directive);
-                    }
+                    self.hash_directive_list_ast(&def.directives);
+                    "^VALUE_END".hash(self);
                 }
+                "^ENUM_VALUE_LIST_END".hash(self);
+                "^ENUM_END".hash(self);
             }
             ExtendedType::InputObject(o) => {
-                for directive in &o.directives {
-                    self.hash_directive(&directive.node);
-                }
+                "^INPUT_OBJECT".hash(self);
+                self.hash_directive_list_schema(&o.directives);
 
+                "^FIELD_LIST".hash(self);
                 for (name, ty) in &o.fields {
-                    if ty.default_value.is_some() {
-                        name.hash(self);
-                        self.hash_input_value_definition(&ty.node)?;
-                    }
+                    "^NAME".hash(self);
+                    name.hash(self);
+
+                    "^ARGUMENT".hash(self);
+                    self.hash_input_value_definition(&ty.node)?;
                 }
+                "^FIELD_LIST_END".hash(self);
+                "^INPUT_OBJECT_END".hash(self);
             }
         }
+        "^EXTENDED_TYPE-END".hash(self);
+
         Ok(())
     }
 
     fn hash_type(&mut self, t: &ast::Type) -> Result<(), BoxError> {
+        "^TYPE".hash(self);
+
         match t {
-            schema::Type::Named(name) => self.hash_type_by_name(name.as_str()),
+            schema::Type::Named(name) => self.hash_type_by_name(name.as_str())?,
             schema::Type::NonNullNamed(name) => {
                 "!".hash(self);
-                self.hash_type_by_name(name.as_str())
+                self.hash_type_by_name(name.as_str())?;
             }
             schema::Type::List(t) => {
                 "[]".hash(self);
-                self.hash_type(t)
+                self.hash_type(t)?;
             }
             schema::Type::NonNullList(t) => {
                 "[]!".hash(self);
-                self.hash_type(t)
+                self.hash_type(t)?;
             }
         }
+        "^TYPE-END".hash(self);
+
+        Ok(())
     }
 
     fn hash_field(
         &mut self,
-        parent_type: String,
-        type_name: String,
+        parent_type: &str,
         field_def: &FieldDefinition,
-        arguments: &[Node<Argument>],
+        node: &executable::Field,
     ) -> Result<(), BoxError> {
-        if self.hashed_fields.insert((parent_type.clone(), type_name)) {
-            self.hash_type_by_name(&parent_type)?;
+        "^FIELD".hash(self);
+        self.hash_field_definition(parent_type, field_def)?;
+
+        "^ARGUMENT_LIST".hash(self);
+        for argument in &node.arguments {
+            self.hash_argument(argument);
+        }
+        "^ARGUMENT_LIST_END".hash(self);
 
-            field_def.name.hash(self);
+        self.hash_directive_list_ast(&node.directives);
 
-            for argument in &field_def.arguments {
-                self.hash_input_value_definition(argument)?;
-            }
+        node.alias.hash(self);
+        "^FIELD-END".hash(self);
 
-            for argument in arguments {
-                self.hash_argument(argument);
-            }
+        Ok(())
+    }
 
-            self.hash_type(&field_def.ty)?;
+    fn hash_field_definition(
+        &mut self,
+        parent_type: &str,
+        field_def: &FieldDefinition,
+    ) -> Result<(), BoxError> {
+        "^FIELD_DEFINITION".hash(self);
 
-            for directive in &field_def.directives {
-                self.hash_directive(directive);
-            }
+        let field_index = (parent_type.to_string(), field_def.name.as_str().to_string());
+        if self.hashed_field_definitions.contains(&field_index) {
+            return Ok(());
+        }
 
-            self.hash_join_field(&parent_type, &field_def.directives)?;
+        self.hashed_field_definitions.insert(field_index);
+
+        self.hash_type_by_name(parent_type)?;
+
+        field_def.name.hash(self);
+        self.hash_type(&field_def.ty)?;
+
+        // for every field, we also need to look at fields defined in `@requires` because
+        // they will affect the query plan
+        self.hash_join_field(parent_type, &field_def.directives)?;
+
+        self.hash_directive_list_ast(&field_def.directives);
+
+        "^ARGUMENT_DEF_LIST".hash(self);
+        for argument in &field_def.arguments {
+            self.hash_input_value_definition(argument)?;
         }
+        "^ARGUMENT_DEF_LIST_END".hash(self);
+
+        "^FIELD_DEFINITION_END".hash(self);
+
         Ok(())
     }
 
@@ -273,17 +479,23 @@ impl<'a> QueryHashVisitor<'a> {
         &mut self,
         t: &Node<ast::InputValueDefinition>,
     ) -> Result<(), BoxError> {
+        "^INPUT_VALUE".hash(self);
+
         self.hash_type(&t.ty)?;
-        for directive in &t.directives {
-            self.hash_directive(directive);
-        }
+        self.hash_directive_list_ast(&t.directives);
+
         if let Some(value) = t.default_value.as_ref() {
             self.hash_value(value);
+        } else {
+            "^INPUT_VALUE-NO_DEFAULT".hash(self);
         }
+        "^INPUT_VALUE-END".hash(self);
         Ok(())
     }
 
     fn hash_join_type(&mut self, name: &Name, directives: &DirectiveList) -> Result<(), BoxError> {
+        "^JOIN_TYPE".hash(self);
+
         if let Some(dir_name) = self.join_type_directive_name.as_deref() {
             if let Some(dir) = directives.get(dir_name) {
                 if let Some(key) = dir
@@ -306,6 +518,7 @@ impl<'a> QueryHashVisitor<'a> {
                 }
             }
         }
+        "^JOIN_TYPE-END".hash(self);
 
         Ok(())
     }
@@ -315,6 +528,8 @@ impl<'a> QueryHashVisitor<'a> {
         parent_type: &str,
         directives: &ast::DirectiveList,
     ) -> Result<(), BoxError> {
+        "^JOIN_FIELD".hash(self);
+
         if let Some(dir_name) = self.join_field_directive_name.as_deref() {
             if let Some(dir) = directives.get(dir_name) {
                 if let Some(requires) = dir
@@ -338,9 +553,114 @@ impl<'a> QueryHashVisitor<'a> {
                 }
             }
         }
+
+                if let Some(context_arguments) = dir
+                    .specified_argument_by_name("contextArguments")
+                    .and_then(|value| value.as_list())
+                {
+                    for argument in context_arguments {
+                        self.hash_context_argument(argument)?;
+                    }
+                }
+            }
+        }
+        "^JOIN_FIELD-END".hash(self);
+
+        Ok(())
+    }
+
+    fn record_context(
+        &mut self,
+        parent_type: &str,
+        directives: &DirectiveList,
+    ) -> Result<(), BoxError> {
+        if let Some(dir_name) = self.context_directive_name.as_deref() {
+            if let Some(dir) = directives.get(dir_name) {
+                if let Some(name) = dir
+                    .specified_argument_by_name("name")
+                    .and_then(|arg| arg.as_str())
+                {
+                    self.contexts
+                        .entry(name.to_string())
+                        .or_default()
+                        .push(parent_type.to_string());
+                }
+            }
+        }
+        Ok(())
+    }
+
+    /// Hashes the context argument of a field
+    ///
+    /// A contextArgument contains a selection that must be applied to a parent type in the
+    /// query that matches the context name. We store in advance which type names map to
+    /// which contexts, so we can reuse them here when we encounter the selection.
+ fn hash_context_argument(&mut self, argument: &ast::Value) -> Result<(), BoxError> { + if let Some(obj) = argument.as_object() { + let context_name = Name::new("context")?; + let selection_name = Name::new("selection")?; + // the contextArgument input type is defined as follows: + // input join__ContextArgument { + // name: String! + // type: String! + // context: String! + // selection: join__FieldValue! + // } + // and that is checked by schema validation, so the `context` and `selection` fields + // are guaranteed to be present and to be strings. + if let (Some(context), Some(selection)) = ( + obj.iter() + .find(|(k, _)| k == &context_name) + .and_then(|(_, v)| v.as_str()), + obj.iter() + .find(|(k, _)| k == &selection_name) + .and_then(|(_, v)| v.as_str()), + ) { + if let Some(types) = self.contexts.get(context).cloned() { + for ty in types { + if let Ok(parent_type) = Name::new(ty.as_str()) { + let mut parser = Parser::new(); + + // we assume that the selection was already checked by schema validation + if let Ok(field_set) = parser.parse_field_set( + Valid::assume_valid_ref(self.schema), + parent_type.clone(), + selection, + std::path::Path::new("schema.graphql"), + ) { + traverse::selection_set( + self, + parent_type.as_str(), + &field_set.selection_set.selections[..], + )?; + } + } + } + } + } + Ok(()) + } else { + Err("context argument value is not an object".into()) + } + } + + fn hash_interface_implementers( + &mut self, + intf: &InterfaceType, + node: &executable::Field, + ) -> Result<(), BoxError> { + "^INTERFACE_IMPL".hash(self); + + if let Some(implementers) = self.schema.implementers_map().get(&intf.name) { + "^IMPLEMENTER_LIST".hash(self); + for object in &implementers.objects { + self.hash_type_by_name(object)?; + traverse::selection_set(self, object, &node.selection_set.selections)?; } + "^IMPLEMENTER_LIST_END".hash(self); } + "^INTERFACE_IMPL-END".hash(self); Ok(()) } } @@ -351,16 +671,41 @@ impl<'a> Hasher for QueryHashVisitor<'a> { } fn write(&mut self, bytes: &[u8]) { + // byte separator between each part that is hashed + self.hasher.update(&[0xFF][..]); self.hasher.update(bytes); } } impl<'a> Visitor for QueryHashVisitor<'a> { fn operation(&mut self, root_type: &str, node: &executable::Operation) -> Result<(), BoxError> { + "^VISIT_OPERATION".hash(self); + root_type.hash(self); self.hash_type_by_name(root_type)?; + node.operation_type.hash(self); + node.name.hash(self); - traverse::operation(self, root_type, node) + "^VARIABLE_LIST".hash(self); + for variable in &node.variables { + variable.name.hash(self); + self.hash_type(&variable.ty)?; + + if let Some(value) = variable.default_value.as_ref() { + self.hash_value(value); + } else { + "^VISIT_OPERATION-NO_DEFAULT".hash(self); + } + + self.hash_directive_list_ast(&variable.directives); + } + "^VARIABLE_LIST_END".hash(self); + + self.hash_directive_list_ast(&node.directives); + + traverse::operation(self, root_type, node)?; + "^VISIT_OPERATION-END".hash(self); + Ok(()) } fn field( @@ -369,30 +714,44 @@ impl<'a> Visitor for QueryHashVisitor<'a> { field_def: &ast::FieldDefinition, node: &executable::Field, ) -> Result<(), BoxError> { + "^VISIT_FIELD".hash(self); + if !self.seen_introspection && (field_def.name == "__schema" || field_def.name == "__type") { self.seen_introspection = true; self.schema_str.hash(self); } - self.hash_field( - parent_type.to_string(), - field_def.name.as_str().to_string(), - field_def, - &node.arguments, - )?; + self.hash_field(parent_type, field_def, node)?; - traverse::field(self, 
field_def, node) + if let Some(ExtendedType::Interface(intf)) = + self.schema.types.get(field_def.ty.inner_named_type()) + { + self.hash_interface_implementers(intf, node)?; + } + + traverse::field(self, field_def, node)?; + "^VISIT_FIELD_END".hash(self); + Ok(()) } fn fragment(&mut self, node: &executable::Fragment) -> Result<(), BoxError> { + "^VISIT_FRAGMENT".hash(self); + node.name.hash(self); self.hash_type_by_name(node.type_condition())?; - traverse::fragment(self, node) + self.hash_directive_list_ast(&node.directives); + + traverse::fragment(self, node)?; + "^VISIT_FRAGMENT-END".hash(self); + + Ok(()) } fn fragment_spread(&mut self, node: &executable::FragmentSpread) -> Result<(), BoxError> { + "^VISIT_FRAGMENT_SPREAD".hash(self); + node.fragment_name.hash(self); let type_condition = &self .fragments @@ -401,7 +760,12 @@ impl<'a> Visitor for QueryHashVisitor<'a> { .type_condition(); self.hash_type_by_name(type_condition)?; - traverse::fragment_spread(self, node) + self.hash_directive_list_ast(&node.directives); + + traverse::fragment_spread(self, node)?; + "^VISIT_FRAGMENT_SPREAD-END".hash(self); + + Ok(()) } fn inline_fragment( @@ -409,10 +773,16 @@ impl<'a> Visitor for QueryHashVisitor<'a> { parent_type: &str, node: &executable::InlineFragment, ) -> Result<(), BoxError> { + "^VISIT_INLINE_FRAGMENT".hash(self); + if let Some(type_condition) = &node.type_condition { self.hash_type_by_name(type_condition)?; } - traverse::inline_fragment(self, parent_type, node) + self.hash_directive_list_ast(&node.directives); + + traverse::inline_fragment(self, parent_type, node)?; + "^VISIT_INLINE_FRAGMENT-END".hash(self); + Ok(()) } fn schema(&self) -> &apollo_compiler::Schema { @@ -470,7 +840,7 @@ mod tests { .unwrap() .validate(&schema) .unwrap(); - let mut visitor = QueryHashVisitor::new(&schema, schema_str, &exec); + let mut visitor = QueryHashVisitor::new(&schema, schema_str, &exec).unwrap(); traverse::document(&mut visitor, &exec, None).unwrap(); ( @@ -489,7 +859,7 @@ mod tests { .unwrap() .validate(&schema) .unwrap(); - let mut visitor = QueryHashVisitor::new(&schema, schema_str, &exec); + let mut visitor = QueryHashVisitor::new(&schema, schema_str, &exec).unwrap(); traverse::document(&mut visitor, &exec, None).unwrap(); hex::encode(visitor.finish()) @@ -498,10 +868,6 @@ mod tests { #[test] fn me() { let schema1: &str = r#" - schema { - query: Query - } - type Query { me: User customer: User @@ -514,10 +880,6 @@ mod tests { "#; let schema2: &str = r#" - schema { - query: Query - } - type Query { me: User } @@ -546,38 +908,84 @@ mod tests { #[test] fn directive() { let schema1: &str = r#" - schema { - query: Query - } - directive @test on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM + directive @test on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM | UNION | INPUT_OBJECT type Query { me: User customer: User + s: S + u: U + e: E + inp(i: I): ID } type User { id: ID! name: String } + + scalar S + + type A { + a: ID + } + + type B { + b: ID + } + + union U = A | B + + enum E { + A + B + } + + input I { + a: Int = 0 + b: Int + } "#; let schema2: &str = r#" - schema { - query: Query - } - directive @test on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM - + directive @test on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM | UNION | INPUT_OBJECT + type Query { me: User customer: User @test + s: S + u: U + e: E + inp(i: I): ID } - type User { id: ID! 
@test name: String } + + scalar S @test + + type A { + a: ID + } + + type B { + b: ID + } + + union U @test = A | B + + enum E @test { + A + B + } + + + input I @test { + a: Int = 0 + b: Int + } "#; let query = "query { me { name } }"; assert!(hash(schema1, query).equals(&hash(schema2, query))); @@ -587,14 +995,23 @@ mod tests { let query = "query { customer { id } }"; assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + + let query = "query { s }"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + + let query = "query { u { ...on A { a } ...on B { b } } }"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + + let query = "query { e }"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + + let query = "query { inp(i: { b: 0 }) }"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); } #[test] fn interface() { let schema1: &str = r#" - schema { - query: Query - } directive @test on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM type Query { @@ -634,7 +1051,7 @@ mod tests { "#; let query = "query { me { id name } }"; - assert!(hash(schema1, query).equals(&hash(schema2, query))); + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); let query = "query { customer { id } }"; assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); @@ -644,12 +1061,12 @@ mod tests { } #[test] - fn arguments() { + fn arguments_int() { let schema1: &str = r#" type Query { a(i: Int): Int b(i: Int = 1): Int - c(i: Int = 1, j: Int): Int + c(i: Int = 1, j: Int = null): Int } "#; @@ -657,7 +1074,7 @@ mod tests { type Query { a(i: Int!): Int b(i: Int = 2): Int - c(i: Int = 2, j: Int): Int + c(i: Int = 2, j: Int = null): Int } "#; @@ -678,66 +1095,181 @@ mod tests { } #[test] - fn entities() { + fn arguments_float() { let schema1: &str = r#" - schema { - query: Query - } - - scalar _Any - - union _Entity = User - type Query { - _entities(representations: [_Any!]!): [_Entity]! - me: User - customer: User - } - - type User { - id: ID - name: String + a(i: Float): Int + b(i: Float = 1.0): Int + c(i: Float = 1.0, j: Int): Int } "#; let schema2: &str = r#" - schema { - query: Query - } - - scalar _Any - - union _Entity = User - type Query { - _entities(representations: [_Any!]!): [_Entity]! - me: User - } - - - type User { - id: ID! 
- name: String - } + a(i: Float!): Int + b(i: Float = 2.0): Int + c(i: Float = 2.0, j: Int): Int + } "#; - let query1 = r#"query Query1($representations:[_Any!]!){ - _entities(representations:$representations){ - ...on User { - id - name - } - } - }"#; - - println!("query1: {query1}"); + let query = "query { a(i: 0) }"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); - let hash1 = hash_subgraph_query(schema1, query1); - println!("hash1: {hash1}"); + let query = "query { b }"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); - let hash2 = hash_subgraph_query(schema2, query1); - println!("hash2: {hash2}"); + let query = "query { b(i: 0)}"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + let query = "query { c(j: 0)}"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + + let query = "query { c(i:0, j: 0)}"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + } + + #[test] + fn arguments_list() { + let schema1: &str = r#" + type Query { + a(i: [Float]): Int + b(i: [Float] = [1.0]): Int + c(i: [Float] = [1.0], j: Int): Int + } + "#; + + let schema2: &str = r#" + type Query { + a(i: [Float!]): Int + b(i: [Float] = [2.0]): Int + c(i: [Float] = [2.0], j: Int): Int + } + "#; + + let query = "query { a(i: [0]) }"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + + let query = "query { b }"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + + let query = "query { b(i: [0])}"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + + let query = "query { c(j: 0)}"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + + let query = "query { c(i: [0], j: 0)}"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + } + + #[test] + fn arguments_object() { + let schema1: &str = r#" + input T { + d: Int + e: String + } + input U { + c: Int + } + input V { + d: Int = 0 + } + + type Query { + a(i: T): Int + b(i: T = { d: 1, e: "a" }): Int + c(c: U): Int + d(d: V): Int + } + "#; + + let schema2: &str = r#" + input T { + d: Int + e: String + } + input U { + c: Int! + } + input V { + d: Int = 1 + } + + type Query { + a(i: T!): Int + b(i: T = { d: 2, e: "b" }): Int + c(c: U): Int + d(d: V): Int + } + "#; + + let query = "query { a(i: { d: 1, e: \"a\" }) }"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + + let query = "query { b }"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + + let query = "query { b(i: { d: 3, e: \"c\" })}"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + + let query = "query { c(c: { c: 0 }) }"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + + let query = "query { d(d: { }) }"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + + let query = "query { d(d: { d: 2 }) }"; + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + } + + #[test] + fn entities() { + let schema1: &str = r#" + scalar _Any + + union _Entity = User + + type Query { + _entities(representations: [_Any!]!): [_Entity]! + me: User + customer: User + } + + type User { + id: ID + name: String + } + "#; + + let schema2: &str = r#" + scalar _Any + + union _Entity = User + + type Query { + _entities(representations: [_Any!]!): [_Entity]! + me: User + } + + + type User { + id: ID! 
+ name: String + } + "#; + + let query1 = r#"query Query1($representations:[_Any!]!){ + _entities(representations:$representations){ + ...on User { + id + name + } + } + }"#; + + let hash1 = hash_subgraph_query(schema1, query1); + let hash2 = hash_subgraph_query(schema2, query1); assert_ne!(hash1, hash2); let query2 = r#"query Query1($representations:[_Any!]!){ @@ -748,14 +1280,8 @@ mod tests { } }"#; - println!("query2: {query2}"); - let hash1 = hash_subgraph_query(schema1, query2); - println!("hash1: {hash1}"); - let hash2 = hash_subgraph_query(schema2, query2); - println!("hash2: {hash2}"); - assert_eq!(hash1, hash2); } @@ -1041,10 +1567,6 @@ mod tests { #[test] fn fields_with_different_arguments_have_different_hashes() { let schema: &str = r#" - schema { - query: Query - } - type Query { test(arg: Int): String } @@ -1063,19 +1585,35 @@ mod tests { } #[test] - fn fields_with_different_aliases_have_different_hashes() { + fn fields_with_different_arguments_on_nest_field_different_hashes() { let schema: &str = r#" - schema { - query: Query + type Test { + test(arg: Int): String + recursiveLink: Test } - + + type Query { + directLink: Test + } + "#; + + let query_one = "{ directLink { test recursiveLink { test(arg: 1) } } }"; + let query_two = "{ directLink { test recursiveLink { test(arg: 2) } } }"; + + assert!(hash(schema, query_one).from_hash_query != hash(schema, query_two).from_hash_query); + assert!(hash(schema, query_one).from_visitor != hash(schema, query_two).from_visitor); + } + + #[test] + fn fields_with_different_aliases_have_different_hashes() { + let schema: &str = r#" type Query { test(arg: Int): String } "#; - let query_one = "query { a: test }"; - let query_two = "query { b: test }"; + let query_one = "{ a: test }"; + let query_two = "{ b: test }"; // This assertion tests an internal hash function that isn't directly // used for the query hash, and we'll need to make it pass to rely @@ -1084,4 +1622,1035 @@ mod tests { // assert!(hash(schema, query_one).doesnt_match(&hash(schema, query_two))); assert!(hash(schema, query_one).from_hash_query != hash(schema, query_two).from_hash_query); } + + #[test] + fn operations_with_different_names_have_different_hash() { + let schema: &str = r#" + type Query { + test: String + } + "#; + + let query_one = "query Foo { test }"; + let query_two = "query Bar { test }"; + + assert!(hash(schema, query_one).from_hash_query != hash(schema, query_two).from_hash_query); + assert!(hash(schema, query_one).from_visitor != hash(schema, query_two).from_visitor); + } + + #[test] + fn adding_directive_on_operation_changes_hash() { + let schema: &str = r#" + directive @test on QUERY + type Query { + test: String + } + "#; + + let query_one = "query { test }"; + let query_two = "query @test { test }"; + + assert!(hash(schema, query_one).from_hash_query != hash(schema, query_two).from_hash_query); + assert!(hash(schema, query_one).from_visitor != hash(schema, query_two).from_visitor); + } + + #[test] + fn order_of_variables_changes_hash() { + let schema: &str = r#" + type Query { + test1(arg: Int): String + test2(arg: Int): String + } + "#; + + let query_one = "query ($foo: Int, $bar: Int) { test1(arg: $foo) test2(arg: $bar) }"; + let query_two = "query ($foo: Int, $bar: Int) { test1(arg: $bar) test2(arg: $foo) }"; + + assert!(hash(schema, query_one).doesnt_match(&hash(schema, query_two))); + } + + #[test] + fn query_variables_with_different_types_have_different_hash() { + let schema: &str = r#" + type Query { + test(arg: Int): String + } + "#; + + let 
query_one = "query ($var: Int) { test(arg: $var) }"; + let query_two = "query ($var: Int!) { test(arg: $var) }"; + + assert!(hash(schema, query_one).from_hash_query != hash(schema, query_two).from_hash_query); + assert!(hash(schema, query_one).from_visitor != hash(schema, query_two).from_visitor); + } + + #[test] + fn query_variables_with_different_default_values_have_different_hash() { + let schema: &str = r#" + type Query { + test(arg: Int): String + } + "#; + + let query_one = "query ($var: Int = 1) { test(arg: $var) }"; + let query_two = "query ($var: Int = 2) { test(arg: $var) }"; + + assert!(hash(schema, query_one).from_hash_query != hash(schema, query_two).from_hash_query); + assert!(hash(schema, query_one).from_visitor != hash(schema, query_two).from_visitor); + } + + #[test] + fn adding_directive_to_query_variable_change_hash() { + let schema: &str = r#" + directive @test on VARIABLE_DEFINITION + + type Query { + test(arg: Int): String + } + "#; + + let query_one = "query ($var: Int) { test(arg: $var) }"; + let query_two = "query ($var: Int @test) { test(arg: $var) }"; + + assert!(hash(schema, query_one).from_hash_query != hash(schema, query_two).from_hash_query); + assert!(hash(schema, query_one).from_visitor != hash(schema, query_two).from_visitor); + } + + #[test] + fn order_of_directives_change_hash() { + let schema: &str = r#" + directive @foo on FIELD + directive @bar on FIELD + + type Query { + test(arg: Int): String + } + "#; + + let query_one = "{ test @foo @bar }"; + let query_two = "{ test @bar @foo }"; + + assert!(hash(schema, query_one).from_hash_query != hash(schema, query_two).from_hash_query); + assert!(hash(schema, query_one).from_visitor != hash(schema, query_two).from_visitor); + } + + #[test] + fn directive_argument_type_change_hash() { + let schema1: &str = r#" + directive @foo(a: Int) on FIELD + directive @bar on FIELD + + type Query { + test(arg: Int): String + } + "#; + + let schema2: &str = r#" + directive @foo(a: Int!) 
on FIELD + directive @bar on FIELD + + type Query { + test(arg: Int): String + } + "#; + + let query = "{ test @foo(a: 1) }"; + + assert!(hash(schema1, query).from_hash_query != hash(schema2, query).from_hash_query); + assert!(hash(schema1, query).from_visitor != hash(schema2, query).from_visitor); + } + + #[test] + fn adding_directive_on_schema_changes_hash() { + let schema1: &str = r#" + schema { + query: Query + } + + type Query { + foo: String + } + "#; + + let schema2: &str = r#" + directive @test on SCHEMA + schema @test { + query: Query + } + + type Query { + foo: String + } + "#; + + let query = "{ foo }"; + + assert!(hash(schema1, query).from_hash_query != hash(schema2, query).from_hash_query); + assert!(hash(schema1, query).from_visitor != hash(schema2, query).from_visitor); + } + + #[test] + fn changing_type_of_field_changes_hash() { + let schema1: &str = r#" + type Query { + test: Int + } + "#; + + let schema2: &str = r#" + type Query { + test: Float + } + "#; + + let query = "{ test }"; + + assert!(hash(schema1, query).from_hash_query != hash(schema2, query).from_hash_query); + assert!(hash(schema1, query).from_visitor != hash(schema2, query).from_visitor); + } + + #[test] + fn changing_type_to_interface_changes_hash() { + let schema1: &str = r#" + type Query { + foo: Foo + } + + interface Foo { + value: String + } + "#; + + let schema2: &str = r#" + type Query { + foo: Foo + } + + type Foo { + value: String + } + "#; + + let query = "{ foo { value } }"; + + assert!(hash(schema1, query).from_hash_query != hash(schema2, query).from_hash_query); + assert!(hash(schema1, query).from_visitor != hash(schema2, query).from_visitor); + } + + #[test] + fn changing_operation_kind_changes_hash() { + let schema: &str = r#" + schema { + query: Test + mutation: Test + } + + type Test { + test: String + } + "#; + + let query_one = "query { test }"; + let query_two = "mutation { test }"; + + assert_ne!( + hash(schema, query_one).from_hash_query, + hash(schema, query_two).from_hash_query + ); + assert_ne!( + hash(schema, query_one).from_visitor, + hash(schema, query_two).from_visitor + ); + } + + #[test] + fn adding_directive_on_field_should_change_hash() { + let schema: &str = r#" + directive @test on FIELD + + type Query { + test: String + } + "#; + + let query_one = "{ test }"; + let query_two = "{ test @test }"; + + assert_ne!( + hash(schema, query_one).from_hash_query, + hash(schema, query_two).from_hash_query + ); + assert_ne!( + hash(schema, query_one).from_visitor, + hash(schema, query_two).from_visitor + ); + } + + #[test] + fn adding_directive_on_fragment_spread_change_hash() { + let schema: &str = r#" + type Query { + test: String + } + "#; + + let query_one = r#" + { ...Test } + + fragment Test on Query { + test + } + "#; + let query_two = r#" + { ...Test @skip(if: false) } + + fragment Test on Query { + test + } + "#; + + assert_ne!( + hash(schema, query_one).from_hash_query, + hash(schema, query_two).from_hash_query + ); + assert_ne!( + hash(schema, query_one).from_visitor, + hash(schema, query_two).from_visitor + ); + } + + #[test] + fn adding_directive_on_fragment_change_hash() { + let schema: &str = r#" + directive @test on FRAGMENT_DEFINITION + + type Query { + test: String + } + "#; + + let query_one = r#" + { ...Test } + + fragment Test on Query { + test + } + "#; + let query_two = r#" + { ...Test } + + fragment Test on Query @test { + test + } + "#; + + assert_ne!( + hash(schema, query_one).from_hash_query, + hash(schema, query_two).from_hash_query + ); + assert_ne!( + 
hash(schema, query_one).from_visitor, + hash(schema, query_two).from_visitor + ); + } + + #[test] + fn adding_directive_on_inline_fragment_change_hash() { + let schema: &str = r#" + type Query { + test: String + } + "#; + + let query_one = "{ ... { test } }"; + let query_two = "{ ... @skip(if: false) { test } }"; + + assert_ne!( + hash(schema, query_one).from_hash_query, + hash(schema, query_two).from_hash_query + ); + assert_ne!( + hash(schema, query_one).from_visitor, + hash(schema, query_two).from_visitor + ); + } + + #[test] + fn moving_field_changes_hash() { + let schema: &str = r#" + type Query { + me: User + } + + type User { + id: ID + name: String + friend: User + } + "#; + + let query_one = r#" + { + me { + friend { + id + name + } + } + } + "#; + let query_two = r#" + { + me { + friend { + id + } + name + } + } + "#; + + assert_ne!( + hash(schema, query_one).from_hash_query, + hash(schema, query_two).from_hash_query + ); + assert_ne!( + hash(schema, query_one).from_visitor, + hash(schema, query_two).from_visitor + ); + } + + #[test] + fn changing_type_of_fragment_changes_hash() { + let schema: &str = r#" + type Query { + fooOrBar: FooOrBar + } + + type Foo { + id: ID + value: String + } + + type Bar { + id: ID + value: String + } + + union FooOrBar = Foo | Bar + "#; + + let query_one = r#" + { + fooOrBar { + ... on Foo { id } + ... on Bar { id } + ... Test + } + } + + fragment Test on Foo { + value + } + "#; + let query_two = r#" + { + fooOrBar { + ... on Foo { id } + ... on Bar { id } + ... Test + } + } + + fragment Test on Bar { + value + } + "#; + + assert_ne!( + hash(schema, query_one).from_hash_query, + hash(schema, query_two).from_hash_query + ); + assert_ne!( + hash(schema, query_one).from_visitor, + hash(schema, query_two).from_visitor + ); + } + + #[test] + fn changing_interface_implementors_changes_hash() { + let schema1: &str = r#" + type Query { + data: I + } + + interface I { + id: ID + value: String + } + + type Foo implements I { + id: ID + value: String + foo: String + } + + type Bar { + id: ID + value: String + bar: String + } + "#; + + let schema2: &str = r#" + type Query { + data: I + } + + interface I { + id: ID + value: String + } + + type Foo implements I { + id: ID + value: String + foo2: String + } + + type Bar { + id: ID + value: String + bar: String + } + "#; + + let schema3: &str = r#" + type Query { + data: I + } + + interface I { + id: ID + value: String + } + + type Foo implements I { + id: ID + value: String + foo: String + } + + type Bar implements I { + id: ID + value: String + bar: String + } + "#; + + let query = r#" + { + data { + id + value + } + } + "#; + + // changing an unrelated field in implementors does not change the hash + assert_eq!( + hash(schema1, query).from_hash_query, + hash(schema2, query).from_hash_query + ); + assert_eq!( + hash(schema1, query).from_visitor, + hash(schema2, query).from_visitor + ); + + // adding a new implementor changes the hash + assert_ne!( + hash(schema1, query).from_hash_query, + hash(schema3, query).from_hash_query + ); + assert_ne!( + hash(schema1, query).from_visitor, + hash(schema3, query).from_visitor + ); + } + + #[test] + fn changing_interface_directives_changes_hash() { + let schema1: &str = r#" + directive @a(name: String) on INTERFACE + + type Query { + data: I + } + + interface I @a { + id: ID + value: String + } + + type Foo implements I { + id: ID + value: String + foo: String + } + "#; + + let schema2: &str = r#" + directive @a(name: String) on INTERFACE + + type Query { + data: I + } + + 
interface I @a(name: "abc") { + id: ID + value: String + } + + type Foo implements I { + id: ID + value: String + foo2: String + } + + "#; + + let query = r#" + { + data { + id + value + } + } + "#; + + // changing a directive applied on the interface definition changes the hash + assert_ne!( + hash(schema1, query).from_hash_query, + hash(schema2, query).from_hash_query + ); + assert_ne!( + hash(schema1, query).from_visitor, + hash(schema2, query).from_visitor + ); + } + + #[test] + fn it_is_weird_so_i_dont_know_how_to_name_it_change_hash() { + let schema: &str = r#" + type Query { + id: ID + someField: SomeType + test: String + } + + type SomeType { + id: ID + test: String + } + "#; + + let query_one = r#" + { + test + someField { id test } + id + } + "#; + let query_two = r#" + { + ...test + someField { id } + } + + fragment test on Query { + id + } + "#; + + assert_ne!( + hash(schema, query_one).from_hash_query, + hash(schema, query_two).from_hash_query + ); + assert_ne!( + hash(schema, query_one).from_visitor, + hash(schema, query_two).from_visitor + ); + } + + #[test] + fn it_change_directive_location() { + let schema: &str = r#" + directive @foo on QUERY | VARIABLE_DEFINITION + + type Query { + field(arg: String): String + } + "#; + + let query_one = r#" + query Test ($arg: String @foo) { + field(arg: $arg) + } + "#; + let query_two = r#" + query Test ($arg: String) @foo { + field(arg: $arg) + } + "#; + + assert_ne!( + hash(schema, query_one).from_hash_query, + hash(schema, query_two).from_hash_query + ); + assert_ne!( + hash(schema, query_one).from_visitor, + hash(schema, query_two).from_visitor + ); + } + + #[test] + fn it_changes_on_implementors_list_changes() { + let schema_one: &str = r#" + interface SomeInterface { + value: String + } + + type Foo implements SomeInterface { + value: String + } + + type Bar implements SomeInterface { + value: String + } + + union FooOrBar = Foo | Bar + + type Query { + fooOrBar: FooOrBar + } + "#; + let schema_two: &str = r#" + interface SomeInterface { + value: String + } + + type Foo { + value: String # <= This field shouldn't be a part of query plan anymore + } + + type Bar implements SomeInterface { + value: String + } + + union FooOrBar = Foo | Bar + + type Query { + fooOrBar: FooOrBar + } + "#; + + let query = r#" + { + fooOrBar { + ... on SomeInterface { + value + } + } + } + "#; + + assert_ne!( + hash(schema_one, query).from_hash_query, + hash(schema_two, query).from_hash_query + ); + assert_ne!( + hash(schema_one, query).from_visitor, + hash(schema_two, query).from_visitor + ); + } + + #[test] + fn it_changes_on_context_changes() { + let schema_one: &str = r#" + schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link(url: "https://specs.apollo.dev/context/v0.1", for: SECURITY) { + query: Query +} + +directive @context(name: String!) repeatable on INTERFACE | OBJECT | UNION + +directive @context__fromContext(field: String) on ARGUMENT_DEFINITION + +directive @join__directive( + graphs: [join__Graph!] + name: String! + args: join__DirectiveArguments +) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field( + graph: join__Graph + requires: join__FieldSet + provides: join__FieldSet + type: String + external: Boolean + override: String + usedOverridden: Boolean + overrideLabel: String + contextArguments: [join__ContextArgument!] 
+) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements( + graph: join__Graph! + interface: String! +) repeatable on OBJECT | INTERFACE + +directive @join__type( + graph: join__Graph! + key: join__FieldSet + extension: Boolean! = false + resolvable: Boolean! = true + isInterfaceObject: Boolean! = false +) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember( + graph: join__Graph! + member: String! +) repeatable on UNION + +directive @link( + url: String + as: String + for: link__Purpose + import: [link__Import] +) repeatable on SCHEMA + +scalar context__context + +input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! +} + +scalar join__DirectiveArguments + +scalar join__FieldSet + +scalar join__FieldValue + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "Subgraph1", url: "https://Subgraph1") + SUBGRAPH2 @join__graph(name: "Subgraph2", url: "https://Subgraph2") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query @join__type(graph: SUBGRAPH1) { + t: T! @join__field(graph: SUBGRAPH1) +} + + +type T + @join__type(graph: SUBGRAPH1, key: "id") + @context(name: "Subgraph1__context") { + id: ID! + u: U! + uList: [U]! + prop: String! +} + +type U + @join__type(graph: SUBGRAPH1, key: "id") + @join__type(graph: SUBGRAPH2, key: "id") { + id: ID! + b: String! @join__field(graph: SUBGRAPH2) + field: Int! + @join__field( + graph: SUBGRAPH1 + contextArguments: [ + { + context: "Subgraph1__context" + name: "a" + type: "String" + selection: "{ prop }" + } + ] + ) +} + "#; + + // changing T.prop from String! to String + let schema_two: &str = r#" + schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.5", for: EXECUTION) + @link(url: "https://specs.apollo.dev/context/v0.1", for: SECURITY) { + query: Query +} + +directive @context(name: String!) repeatable on INTERFACE | OBJECT | UNION + +directive @context__fromContext(field: String) on ARGUMENT_DEFINITION + +directive @join__directive( + graphs: [join__Graph!] + name: String! + args: join__DirectiveArguments +) repeatable on SCHEMA | OBJECT | INTERFACE | FIELD_DEFINITION + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field( + graph: join__Graph + requires: join__FieldSet + provides: join__FieldSet + type: String + external: Boolean + override: String + usedOverridden: Boolean + overrideLabel: String + contextArguments: [join__ContextArgument!] +) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements( + graph: join__Graph! + interface: String! +) repeatable on OBJECT | INTERFACE + +directive @join__type( + graph: join__Graph! + key: join__FieldSet + extension: Boolean! = false + resolvable: Boolean! = true + isInterfaceObject: Boolean! = false +) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember( + graph: join__Graph! + member: String! 
+) repeatable on UNION + +directive @link( + url: String + as: String + for: link__Purpose + import: [link__Import] +) repeatable on SCHEMA + +scalar context__context + +input join__ContextArgument { + name: String! + type: String! + context: String! + selection: join__FieldValue! +} + +scalar join__DirectiveArguments + +scalar join__FieldSet + +scalar join__FieldValue + +enum join__Graph { + SUBGRAPH1 @join__graph(name: "Subgraph1", url: "https://Subgraph1") + SUBGRAPH2 @join__graph(name: "Subgraph2", url: "https://Subgraph2") +} + +scalar link__Import + +enum link__Purpose { + """ + `SECURITY` features provide metadata necessary to securely resolve fields. + """ + SECURITY + + """ + `EXECUTION` features provide metadata necessary for operation execution. + """ + EXECUTION +} + +type Query @join__type(graph: SUBGRAPH1) { + t: T! @join__field(graph: SUBGRAPH1) +} + + +type T + @join__type(graph: SUBGRAPH1, key: "id") + @context(name: "Subgraph1__context") { + id: ID! + u: U! + uList: [U]! + prop: String +} + +type U + @join__type(graph: SUBGRAPH1, key: "id") + @join__type(graph: SUBGRAPH2, key: "id") { + id: ID! + b: String! @join__field(graph: SUBGRAPH2) + field: Int! + @join__field( + graph: SUBGRAPH1 + contextArguments: [ + { + context: "Subgraph1__context" + name: "a" + type: "String" + selection: "{ prop }" + } + ] + ) +} + "#; + + let query = r#" + query Query { + t { + __typename + id + u { + __typename + field + } + } + } + "#; + + assert_ne!( + hash(schema_one, query).from_hash_query, + hash(schema_two, query).from_hash_query + ); + assert_ne!( + hash(schema_one, query).from_visitor, + hash(schema_two, query).from_visitor + ); + } } diff --git a/apollo-router/src/spec/schema.rs b/apollo-router/src/spec/schema.rs index 2208a5863e..8bfda05e64 100644 --- a/apollo-router/src/spec/schema.rs +++ b/apollo-router/src/spec/schema.rs @@ -20,6 +20,7 @@ use sha2::Sha256; use crate::error::ParseErrors; use crate::error::SchemaError; use crate::query_planner::OperationKind; +use crate::uplink::schema::SchemaState; use crate::Configuration; /// A GraphQL schema. 
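The hunks below thread a new `SchemaState` value (the raw SDL plus an optional launch ID) through `Schema` and the state machine. Its definition in `apollo-router/src/uplink/schema.rs` is truncated at the end of this diff, so here is a hedged sketch of its likely shape, inferred only from its uses in this patch (`raw_sdl.parse::<SchemaState>().unwrap()`, `raw_sdl.sdl`, `raw_sdl.launch_id`, the `SchemaState { sdl, launch_id }` literals in the tests, and the `FromStr`/`Infallible` imports in the new file); the exact field types are assumptions, not the patch itself:

```rust
use std::convert::Infallible;
use std::str::FromStr;

// Assumed shape: SDL string plus the optional GraphOS launch that produced it.
pub struct SchemaState {
    pub sdl: String,
    pub launch_id: Option<String>,
}

impl FromStr for SchemaState {
    type Err = Infallible;

    // Parsing a raw SDL string yields a state with no launch ID, which is why
    // `Schema::parse` can call `raw_sdl.parse::<SchemaState>().unwrap()` safely.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(SchemaState {
            sdl: s.to_string(),
            launch_id: None,
        })
    }
}

fn main() {
    let state: SchemaState = "type Query { me: ID }".parse().unwrap();
    assert!(state.launch_id.is_none());
}
```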
@@ -30,6 +31,7 @@ pub(crate) struct Schema {
     pub(crate) implementers_map: apollo_compiler::collections::HashMap<Name, Implementers>,
     api_schema: ApiSchema,
     pub(crate) schema_id: Arc<String>,
+    pub(crate) launch_id: Option<Arc<String>>,
 }
 
 /// Wrapper type to distinguish from `Schema::definitions` for the supergraph schema
@@ -38,16 +40,16 @@ pub(crate) struct ApiSchema(pub(crate) ValidFederationSchema);
 
 impl Schema {
     pub(crate) fn parse(raw_sdl: &str, config: &Configuration) -> Result<Self, SchemaError> {
-        Self::parse_arc(raw_sdl.to_owned().into(), config)
+        Self::parse_arc(raw_sdl.parse::<SchemaState>().unwrap().into(), config)
     }
 
     pub(crate) fn parse_arc(
-        raw_sdl: Arc<String>,
+        raw_sdl: Arc<SchemaState>,
         config: &Configuration,
     ) -> Result<Self, SchemaError> {
         let start = Instant::now();
         let mut parser = apollo_compiler::parser::Parser::new();
-        let result = parser.parse_ast(raw_sdl.as_ref(), "schema.graphql");
+        let result = parser.parse_ast(&raw_sdl.sdl, "schema.graphql");
 
         // Trace log recursion limit data
         let recursion_limit = parser.recursion_reached();
@@ -110,7 +112,7 @@ impl Schema {
         let implementers_map = definitions.implementers_map();
         let supergraph = Supergraph::from_schema(definitions)?;
 
-        let schema_id = Arc::new(Schema::schema_id(&raw_sdl));
+        let schema_id = Arc::new(Schema::schema_id(&raw_sdl.sdl));
 
         let api_schema = supergraph
             .to_api_schema(ApiSchemaOptions {
@@ -124,7 +126,12 @@ impl Schema {
             })?;
 
         Ok(Schema {
-            raw_sdl,
+            launch_id: raw_sdl
+                .launch_id
+                .as_ref()
+                .map(ToString::to_string)
+                .map(Arc::new),
+            raw_sdl: Arc::new(raw_sdl.sdl.to_string()),
             supergraph,
             subgraphs,
             implementers_map,
@@ -336,7 +343,8 @@ impl std::fmt::Debug for Schema {
             subgraphs,
             implementers_map,
             api_schema: _, // skip
-            schema_id: _,
+            schema_id: _, // skip
+            launch_id: _, // skip
         } = self;
         f.debug_struct("Schema")
             .field("raw_sdl", raw_sdl)
diff --git a/apollo-router/src/state_machine.rs b/apollo-router/src/state_machine.rs
index e3ce6c3a67..669a70d9ef 100644
--- a/apollo-router/src/state_machine.rs
+++ b/apollo-router/src/state_machine.rs
@@ -39,6 +39,7 @@ use crate::spec::Schema;
 use crate::uplink::license_enforcement::LicenseEnforcementReport;
 use crate::uplink::license_enforcement::LicenseState;
 use crate::uplink::license_enforcement::LICENSE_EXPIRED_URL;
+use crate::uplink::schema::SchemaState;
 use crate::ApolloRouterError::NoLicense;
 
 const STATE_CHANGE: &str = "state change";
@@ -54,14 +55,14 @@ pub(crate) struct ListenAddresses {
 enum State {
     Startup {
         configuration: Option<Arc<Configuration>>,
-        schema: Option<Arc<String>>,
+        schema: Option<Arc<SchemaState>>,
         license: Option<LicenseState>,
         listen_addresses_guard: OwnedRwLockWriteGuard<ListenAddresses>,
     },
     Running {
         configuration: Arc<Configuration>,
        _metrics: Option<Metrics>,
-        schema: Arc<String>,
+        schema: Arc<SchemaState>,
         license: LicenseState,
         server_handle: Option<HttpServerHandle>,
         router_service_factory: FA::RouterFactory,
@@ -118,7 +119,7 @@ impl State {
     async fn update_inputs(
         mut self,
         state_machine: &mut StateMachine<S, FA>,
-        new_schema: Option<Arc<String>>,
+        new_schema: Option<Arc<SchemaState>>,
         new_configuration: Option<Arc<Configuration>>,
         new_license: Option<LicenseState>,
     ) -> Self
@@ -308,7 +309,7 @@ impl State {
         server_handle: &mut Option<HttpServerHandle>,
         previous_router_service_factory: Option<&FA::RouterFactory>,
         configuration: Arc<Configuration>,
-        sdl: Arc<String>,
+        schema_state: Arc<SchemaState>,
         license: LicenseState,
         listen_addresses_guard: &mut OwnedRwLockWriteGuard<ListenAddresses>,
         mut all_connections_stopped_signals: Vec>,
     ) -> Result
     where
         S: HttpServerFactory,
         FA: RouterSuperServiceFactory,
     {
         let schema = Arc::new(
-            Schema::parse_arc(sdl.clone(), &configuration)
+            Schema::parse_arc(schema_state.clone(), &configuration)
                 .map_err(|e| ServiceCreationError(e.to_string().into()))?,
         );
         // Check the license
@@ -422,7 +423,7 @@ impl State {
         Ok(Running {
             configuration,
             _metrics: metrics,
-
schema: sdl, + schema: schema_state, license, server_handle: Some(server_handle), router_service_factory, @@ -557,13 +558,20 @@ where #[cfg(test)] self.notify_updated.notify_one(); - tracing::debug!( - monotonic_counter.apollo_router_state_change_total = 1u64, + tracing::info!( event = event_name, state = ?state, previous_state, "state machine transitioned" ); + u64_counter!( + "apollo_router_state_change_total", + "Router state changes", + 1, + event = event_name, + state = format!("{state:?}"), + previous_state = previous_state + ); // If we've errored then exit even if there are potentially more messages if matches!(&state, Stopped | Errored(_)) { @@ -612,11 +620,15 @@ mod tests { use crate::services::new_service::ServiceFactory; use crate::services::router; use crate::services::RouterRequest; + use crate::uplink::schema::SchemaState; type SharedOneShotReceiver = Arc>>>; - fn example_schema() -> String { - include_str!("testdata/supergraph.graphql").to_owned() + fn example_schema() -> SchemaState { + SchemaState { + sdl: include_str!("testdata/supergraph.graphql").to_owned(), + launch_id: None, + } } macro_rules! assert_matches { @@ -870,7 +882,10 @@ mod tests { router_factory, stream::iter(vec![ UpdateConfiguration(Configuration::builder().build().unwrap()), - UpdateSchema(minimal_schema.to_owned()), + UpdateSchema(SchemaState { + sdl: minimal_schema.to_owned(), + launch_id: None + }), UpdateLicense(LicenseState::default()), UpdateSchema(example_schema()), Shutdown @@ -893,9 +908,15 @@ mod tests { router_factory, stream::iter(vec![ UpdateConfiguration(Configuration::builder().build().unwrap()), - UpdateSchema(minimal_schema.to_owned()), + UpdateSchema(SchemaState { + sdl: minimal_schema.to_owned(), + launch_id: None + }), UpdateLicense(LicenseState::default()), - UpdateSchema(minimal_schema.to_owned()), + UpdateSchema(SchemaState { + sdl: minimal_schema.to_owned(), + launch_id: None + }), Shutdown ]) ) @@ -916,7 +937,10 @@ mod tests { router_factory, stream::iter(vec![ UpdateConfiguration(Configuration::builder().build().unwrap()), - UpdateSchema(minimal_schema.to_owned()), + UpdateSchema(SchemaState { + sdl: minimal_schema.to_owned(), + launch_id: None + }), UpdateLicense(LicenseState::default()), UpdateLicense(LicenseState::Licensed), Shutdown @@ -1039,7 +1063,10 @@ mod tests { UpdateConfiguration(Configuration::builder().build().unwrap()), UpdateSchema(example_schema()), UpdateLicense(LicenseState::default()), - UpdateSchema(minimal_schema.to_owned()), + UpdateSchema(SchemaState { + sdl: minimal_schema.to_owned(), + launch_id: None + }), Shutdown ]) ) @@ -1097,7 +1124,10 @@ mod tests { .build() .unwrap() ), - UpdateSchema(minimal_schema.to_owned()), + UpdateSchema(SchemaState { + sdl: minimal_schema.to_owned(), + launch_id: None + }), Shutdown ]), ) diff --git a/apollo-router/src/test_harness.rs b/apollo-router/src/test_harness.rs index 516048e3d7..2eb49be5f2 100644 --- a/apollo-router/src/test_harness.rs +++ b/apollo-router/src/test_harness.rs @@ -176,7 +176,7 @@ impl<'a> TestHarness<'a> { ), }; - self.extra_plugins.push((name, Box::new(plugin))); + self.extra_plugins.push((name, plugin.into())); self } diff --git a/apollo-router/src/test_harness/mocks/persisted_queries.rs b/apollo-router/src/test_harness/mocks/persisted_queries.rs index cdf8b0e5f9..7d513305bf 100644 --- a/apollo-router/src/test_harness/mocks/persisted_queries.rs +++ b/apollo-router/src/test_harness/mocks/persisted_queries.rs @@ -14,14 +14,21 @@ use wiremock::Mock; use wiremock::MockServer; use 
 use wiremock::ResponseTemplate;
 
+pub use crate::services::layers::persisted_queries::FullPersistedQueryOperationId;
+pub use crate::services::layers::persisted_queries::PersistedQueryManifest;
 use crate::uplink::Endpoints;
 use crate::uplink::UplinkConfig;
 
 /// Get a query ID, body, and a PQ manifest with that ID and body.
-pub fn fake_manifest() -> (String, String, HashMap<String, String>) {
+pub fn fake_manifest() -> (String, String, PersistedQueryManifest) {
     let id = "1234".to_string();
     let body = r#"query { typename }"#.to_string();
-    let manifest = hashmap! { id.to_string() => body.to_string() };
+    let manifest = hashmap! {
+        FullPersistedQueryOperationId {
+            operation_id: id.to_string(),
+            client_name: None,
+        } => body.to_string()
+    };
 
     (id, body, manifest)
 }
 
@@ -32,7 +39,7 @@ pub async fn mock_empty_pq_uplink() -> (UplinkMockGuard, UplinkConfig) {
 
 /// Mocks an uplink server with a persisted query list with a delay.
 pub async fn mock_pq_uplink_with_delay(
-    manifest: &HashMap<String, String>,
+    manifest: &PersistedQueryManifest,
     delay: Duration,
 ) -> (UplinkMockGuard, UplinkConfig) {
     let (guard, url) = mock_pq_uplink_one_endpoint(manifest, Some(delay)).await;
 
@@ -43,7 +50,7 @@ pub async fn mock_pq_uplink_with_delay(
 }
 
 /// Mocks an uplink server with a persisted query list containing operations passed to this function.
-pub async fn mock_pq_uplink(manifest: &HashMap<String, String>) -> (UplinkMockGuard, UplinkConfig) {
+pub async fn mock_pq_uplink(manifest: &PersistedQueryManifest) -> (UplinkMockGuard, UplinkConfig) {
     let (guard, url) = mock_pq_uplink_one_endpoint(manifest, None).await;
     (
         guard,
@@ -58,22 +65,29 @@ pub struct UplinkMockGuard {
 }
 
 #[derive(Deserialize, Serialize)]
+#[serde(rename_all = "camelCase")]
 struct Operation {
     id: String,
     body: String,
+    #[serde(skip_serializing_if = "Option::is_none", default)]
+    client_name: Option<String>,
 }
 
 /// Mocks an uplink server; returns a single Url rather than a full UplinkConfig, so you
 /// can combine it with another one to test failover.
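An aside on the manifest change above: `PersistedQueryManifest` is keyed by `FullPersistedQueryOperationId` — an operation id plus an optional client name — rather than by a bare id string. Below is a minimal, self-contained sketch of that keying scheme and the client-name fallback the sample tests later in this patch exercise; the standalone types here are illustrative stand-ins for the router's re-exported ones, not its actual definitions:

```rust
use std::collections::HashMap;

// Illustrative stand-ins for the router's re-exported manifest types;
// the real definitions live in apollo-router's persisted_queries layer.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct FullPersistedQueryOperationId {
    operation_id: String,
    client_name: Option<String>,
}

type PersistedQueryManifest = HashMap<FullPersistedQueryOperationId, String>;

/// Prefer a client-specific entry; fall back to the entry registered
/// without a client name.
fn lookup<'a>(
    manifest: &'a PersistedQueryManifest,
    id: &str,
    client: Option<&str>,
) -> Option<&'a String> {
    client
        .and_then(|c| {
            manifest.get(&FullPersistedQueryOperationId {
                operation_id: id.to_string(),
                client_name: Some(c.to_string()),
            })
        })
        .or_else(|| {
            manifest.get(&FullPersistedQueryOperationId {
                operation_id: id.to_string(),
                client_name: None,
            })
        })
}

fn main() {
    let mut manifest = PersistedQueryManifest::new();
    manifest.insert(
        FullPersistedQueryOperationId {
            operation_id: "GetMyNameID".into(),
            client_name: None,
        },
        "query GetMyName { me { name } }".into(),
    );
    manifest.insert(
        FullPersistedQueryOperationId {
            operation_id: "GetMyNameID".into(),
            client_name: Some("mobile".into()),
        },
        "query GetMyName { me { mobileAlias: name } }".into(),
    );

    // The "mobile" client gets its dedicated operation ...
    assert!(lookup(&manifest, "GetMyNameID", Some("mobile"))
        .unwrap()
        .contains("mobileAlias"));
    // ... while any other client falls back to the plain entry.
    assert!(!lookup(&manifest, "GetMyNameID", Some("web"))
        .unwrap()
        .contains("mobileAlias"));
}
```

This is the same fallback behavior the `persisted-queries/basic` sample test later in this patch asserts: a matching client name selects the dedicated operation, and an unknown client name falls back to the entry without one.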
 pub async fn mock_pq_uplink_one_endpoint(
-    manifest: &HashMap<String, String>,
+    manifest: &PersistedQueryManifest,
     delay: Option<Duration>,
 ) -> (UplinkMockGuard, Url) {
     let operations: Vec<Operation> = manifest
         // clone the manifest so the caller can still make assertions about it
         .clone()
         .drain()
-        .map(|(id, body)| Operation { id, body })
+        .map(|(full_id, body)| Operation {
+            id: full_id.operation_id,
+            body,
+            client_name: full_id.client_name,
+        })
         .collect();
 
     let mock_gcs_server = MockServer::start().await;
diff --git a/apollo-router/src/uplink/mod.rs b/apollo-router/src/uplink/mod.rs
index 6a8974699e..6bc9508efe 100644
--- a/apollo-router/src/uplink/mod.rs
+++ b/apollo-router/src/uplink/mod.rs
@@ -17,6 +17,7 @@ use url::Url;
 pub(crate) mod license_enforcement;
 pub(crate) mod license_stream;
 pub(crate) mod persisted_queries_manifest_stream;
+pub(crate) mod schema;
 pub(crate) mod schema_stream;
 
 const GCP_URL: &str = "https://uplink.api.apollographql.com";
@@ -210,7 +211,7 @@ where
     Response: Send + 'static + Debug,
     TransformedResponse: Send + 'static + Debug,
 {
-    let query = query_name::<Query>();
+    let query_name = query_name::<Query>();
     let (sender, receiver) = channel(2);
     let client = match reqwest::Client::builder()
         .no_gzip()
@@ -245,10 +246,12 @@ where
         .await
         {
             Ok(response) => {
-                tracing::info!(
-                    monotonic_counter.apollo_router_uplink_fetch_count_total = 1u64,
+                u64_counter!(
+                    "apollo_router_uplink_fetch_count_total",
+                    "Total number of requests to Apollo Uplink",
+                    1u64,
                     status = "success",
-                    query
+                    query = query_name
                 );
                 match response {
                     UplinkResponse::New {
@@ -294,10 +297,12 @@ where
                 }
             }
             Err(err) => {
-                tracing::info!(
-                    monotonic_counter.apollo_router_uplink_fetch_count_total = 1u64,
+                u64_counter!(
+                    "apollo_router_uplink_fetch_count_total",
+                    "Total number of requests to Apollo Uplink",
+                    1u64,
                     status = "failure",
-                    query
+                    query = query_name
                 );
                 if let Err(e) = sender.send(Err(err)).await {
                     tracing::debug!("failed to send error to uplink stream. This is likely to be because the router is shutting down: {e}");
diff --git a/apollo-router/src/uplink/schema.rs b/apollo-router/src/uplink/schema.rs
new file mode 100644
index 0000000000..57cce6ba0c
--- /dev/null
+++ b/apollo-router/src/uplink/schema.rs
@@ -0,0 +1,20 @@
+use std::convert::Infallible;
+use std::str::FromStr;
+
+/// Represents the new state of a schema after an update.
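The `SchemaState` type whose definition continues just below carries the raw SDL plus an optional `launch_id`, and its `FromStr` impl is infallible — which is what lets `Schema::parse` earlier in this patch call `raw_sdl.parse::<SchemaState>().unwrap()` without risking a panic. A minimal sketch of the same pattern, using a stand-in type rather than the router's own:

```rust
use std::convert::Infallible;
use std::str::FromStr;

// Stand-in mirroring the `SchemaState` defined below: the raw SDL plus an
// optional launch id (only populated when the schema comes from Uplink).
#[derive(Debug, Eq, PartialEq)]
struct SchemaState {
    sdl: String,
    launch_id: Option<String>,
}

impl FromStr for SchemaState {
    type Err = Infallible;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Self {
            sdl: s.to_string(),
            launch_id: None,
        })
    }
}

fn main() {
    // Because `Err = Infallible`, this `unwrap` can never panic; a schema
    // parsed from a plain string simply has no launch id.
    let state: SchemaState = "type Query { hello: String }".parse().unwrap();
    assert!(state.launch_id.is_none());
}
```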
+#[derive(Eq, PartialEq, Debug)]
+pub(crate) struct SchemaState {
+    pub(crate) sdl: String,
+    pub(crate) launch_id: Option<String>,
+}
+
+impl FromStr for SchemaState {
+    type Err = Infallible;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(Self {
+            sdl: s.to_string(),
+            launch_id: None,
+        })
+    }
+}
diff --git a/apollo-router/src/uplink/schema_stream.rs b/apollo-router/src/uplink/schema_stream.rs
index ee1dcbda27..376f600e74 100644
--- a/apollo-router/src/uplink/schema_stream.rs
+++ b/apollo-router/src/uplink/schema_stream.rs
@@ -5,6 +5,7 @@
 use graphql_client::GraphQLQuery;
 
+use super::schema::SchemaState;
 use crate::uplink::schema_stream::supergraph_sdl_query::FetchErrorCode;
 use crate::uplink::schema_stream::supergraph_sdl_query::SupergraphSdlQueryRouterConfig;
 use crate::uplink::UplinkRequest;
@@ -63,6 +64,41 @@ impl From<supergraph_sdl_query::ResponseData> for UplinkResponse<String> {
     }
 }
 
+impl From<supergraph_sdl_query::ResponseData> for UplinkResponse<SchemaState> {
+    fn from(response: supergraph_sdl_query::ResponseData) -> Self {
+        match response.router_config {
+            SupergraphSdlQueryRouterConfig::RouterConfigResult(result) => UplinkResponse::New {
+                response: SchemaState {
+                    sdl: result.supergraph_sdl,
+                    launch_id: Some(result.id.clone()),
+                },
+                id: result.id,
+                // this will truncate the number of seconds to under u64::MAX, which should be
+                // a large enough delay anyway
+                delay: result.min_delay_seconds as u64,
+            },
+            SupergraphSdlQueryRouterConfig::Unchanged(response) => UplinkResponse::Unchanged {
+                id: Some(response.id),
+                delay: Some(response.min_delay_seconds as u64),
+            },
+            SupergraphSdlQueryRouterConfig::FetchError(err) => UplinkResponse::Error {
+                retry_later: err.code == FetchErrorCode::RETRY_LATER,
+                code: match err.code {
+                    FetchErrorCode::AUTHENTICATION_FAILED => "AUTHENTICATION_FAILED".to_string(),
+                    FetchErrorCode::ACCESS_DENIED => "ACCESS_DENIED".to_string(),
+                    FetchErrorCode::UNKNOWN_REF => "UNKNOWN_REF".to_string(),
+                    FetchErrorCode::RETRY_LATER => "RETRY_LATER".to_string(),
+                    FetchErrorCode::NOT_IMPLEMENTED_ON_THIS_INSTANCE => {
+                        "NOT_IMPLEMENTED_ON_THIS_INSTANCE".to_string()
+                    }
+                    FetchErrorCode::Other(other) => other,
+                },
+                message: err.message,
+            },
+        }
+    }
+}
+
 #[cfg(test)]
 mod test {
     use std::str::FromStr;
diff --git a/apollo-router/tests/integration/batching.rs b/apollo-router/tests/integration/batching.rs
index 7998a21528..15dfd38de2 100644
--- a/apollo-router/tests/integration/batching.rs
+++ b/apollo-router/tests/integration/batching.rs
@@ -445,7 +445,7 @@ async fn it_handles_single_request_cancelled_by_rhai() -> Result<(), BoxError> {
         assert_eq!(
             request.query,
             Some(format!(
-                "query op{index}__b__0{{entryB(count:{REQUEST_COUNT}){{index}}}}",
+                "query op{index}__b__0 {{ entryB(count: {REQUEST_COUNT}) {{ index }} }}",
             ))
         );
     }
@@ -683,7 +683,7 @@ async fn it_handles_single_request_cancelled_by_coprocessor() -> Result<(), BoxE
         assert_eq!(
             request.query,
             Some(format!(
-                "query op{index}__a__0{{entryA(count:{REQUEST_COUNT}){{index}}}}",
+                "query op{index}__a__0 {{ entryA(count: {REQUEST_COUNT}) {{ index }} }}",
             ))
         );
     }
@@ -785,7 +785,7 @@ async fn it_handles_single_invalid_graphql() -> Result<(), BoxError> {
         assert_eq!(
             request.query,
             Some(format!(
-                "query op{index}__a__0{{entryA(count:{REQUEST_COUNT}){{index}}}}",
+                "query op{index}__a__0 {{ entryA(count: {REQUEST_COUNT}) {{ index }} }}",
             ))
         );
     }
@@ -927,7 +927,7 @@ mod helper {
         // Extract info about this operation
         let (subgraph, count): (String, usize) = {
-            let re = regex::Regex::new(r"entry([AB])\(count:([0-9]+)\)").unwrap();
+            let re = regex::Regex::new(r"entry([AB])\(count: ?([0-9]+)\)").unwrap();
             let
captures = re.captures(requests[0].query.as_ref().unwrap()).unwrap(); (captures[1].to_string(), captures[2].parse().unwrap()) @@ -943,7 +943,7 @@ mod helper { assert_eq!( request.query, Some(format!( - "query op{index}__{}__0{{entry{}(count:{count}){{index}}}}", + "query op{index}__{}__0 {{ entry{}(count: {count}) {{ index }} }}", subgraph.to_lowercase(), subgraph )) @@ -971,7 +971,7 @@ mod helper { // Extract info about this operation let (subgraph, count): (String, usize) = { - let re = regex::Regex::new(r"entry([AB])\(count:([0-9]+)\)").unwrap(); + let re = regex::Regex::new(r"entry([AB])\(count: ?([0-9]+)\)").unwrap(); let captures = re.captures(requests[0].query.as_ref().unwrap()).unwrap(); (captures[1].to_string(), captures[2].parse().unwrap()) @@ -1010,7 +1010,7 @@ mod helper { // Extract info about this operation let (_, count): (String, usize) = { - let re = regex::Regex::new(r"entry([AB])\(count:([0-9]+)\)").unwrap(); + let re = regex::Regex::new(r"entry([AB])\(count: ?([0-9]+)\)").unwrap(); let captures = re.captures(requests[0].query.as_ref().unwrap()).unwrap(); (captures[1].to_string(), captures[2].parse().unwrap()) diff --git a/apollo-router/tests/integration/coprocessor.rs b/apollo-router/tests/integration/coprocessor.rs index d1492fbc87..d9ce741892 100644 --- a/apollo-router/tests/integration/coprocessor.rs +++ b/apollo-router/tests/integration/coprocessor.rs @@ -83,6 +83,127 @@ async fn test_coprocessor_limit_payload() -> Result<(), BoxError> { Ok(()) } +#[tokio::test(flavor = "multi_thread")] +async fn test_coprocessor_response_handling() -> Result<(), BoxError> { + if !graph_os_enabled() { + return Ok(()); + } + test_full_pipeline(400, "RouterRequest", empty_body_string).await; + test_full_pipeline(200, "RouterResponse", empty_body_string).await; + test_full_pipeline(500, "SupergraphRequest", empty_body_string).await; + test_full_pipeline(500, "SupergraphResponse", empty_body_string).await; + test_full_pipeline(200, "SubgraphRequest", empty_body_string).await; + test_full_pipeline(200, "SubgraphResponse", empty_body_string).await; + test_full_pipeline(500, "ExecutionRequest", empty_body_string).await; + test_full_pipeline(500, "ExecutionResponse", empty_body_string).await; + + test_full_pipeline(500, "RouterRequest", empty_body_object).await; + test_full_pipeline(500, "RouterResponse", empty_body_object).await; + test_full_pipeline(200, "SupergraphRequest", empty_body_object).await; + test_full_pipeline(200, "SupergraphResponse", empty_body_object).await; + test_full_pipeline(200, "SubgraphRequest", empty_body_object).await; + test_full_pipeline(200, "SubgraphResponse", empty_body_object).await; + test_full_pipeline(200, "ExecutionRequest", empty_body_object).await; + test_full_pipeline(200, "ExecutionResponse", empty_body_object).await; + + test_full_pipeline(200, "RouterRequest", remove_body).await; + test_full_pipeline(200, "RouterResponse", remove_body).await; + test_full_pipeline(200, "SupergraphRequest", remove_body).await; + test_full_pipeline(200, "SupergraphResponse", remove_body).await; + test_full_pipeline(200, "SubgraphRequest", remove_body).await; + test_full_pipeline(200, "SubgraphResponse", remove_body).await; + test_full_pipeline(200, "ExecutionRequest", remove_body).await; + test_full_pipeline(200, "ExecutionResponse", remove_body).await; + + test_full_pipeline(500, "RouterRequest", null_out_response).await; + test_full_pipeline(500, "RouterResponse", null_out_response).await; + test_full_pipeline(500, "SupergraphRequest", null_out_response).await; + 
test_full_pipeline(500, "SupergraphResponse", null_out_response).await;
+    test_full_pipeline(200, "SubgraphRequest", null_out_response).await;
+    test_full_pipeline(200, "SubgraphResponse", null_out_response).await;
+    test_full_pipeline(500, "ExecutionRequest", null_out_response).await;
+    test_full_pipeline(500, "ExecutionResponse", null_out_response).await;
+    Ok(())
+}
+
+fn empty_body_object(mut body: serde_json::Value) -> serde_json::Value {
+    *body
+        .as_object_mut()
+        .expect("body")
+        .get_mut("body")
+        .expect("body") = serde_json::Value::Object(serde_json::Map::new());
+    body
+}
+
+fn empty_body_string(mut body: serde_json::Value) -> serde_json::Value {
+    *body
+        .as_object_mut()
+        .expect("body")
+        .get_mut("body")
+        .expect("body") = serde_json::Value::String("".to_string());
+    body
+}
+
+fn remove_body(mut body: serde_json::Value) -> serde_json::Value {
+    body.as_object_mut().expect("body").remove("body");
+    body
+}
+
+fn null_out_response(_body: serde_json::Value) -> serde_json::Value {
+    serde_json::Value::String("".to_string())
+}
+
+async fn test_full_pipeline(
+    response_status: u16,
+    stage: &'static str,
+    coprocessor: impl Fn(serde_json::Value) -> serde_json::Value + Send + Sync + 'static,
+) {
+    let mock_server = wiremock::MockServer::start().await;
+    let coprocessor_address = mock_server.uri();
+
+    // Expect a small query
+    Mock::given(method("POST"))
+        .and(path("/"))
+        .respond_with(move |req: &wiremock::Request| {
+            let mut body = req.body_json::<serde_json::Value>().expect("body");
+            if body
+                .as_object()
+                .unwrap()
+                .get("stage")
+                .unwrap()
+                .as_str()
+                .unwrap()
+                == stage
+            {
+                body = coprocessor(body);
+            }
+            ResponseTemplate::new(200).set_body_json(body)
+        })
+        .mount(&mock_server)
+        .await;
+
+    let mut router = IntegrationTest::builder()
+        .config(
+            include_str!("fixtures/coprocessor.router.yaml")
+                .replace("<replace>", &coprocessor_address),
+        )
+        .build()
+        .await;
+
+    router.start().await;
+    router.assert_started().await;
+
+    let (_trace_id, response) = router.execute_default_query().await;
+    assert_eq!(
+        response.status(),
+        response_status,
+        "Failed at stage {}",
+        stage
+    );
+
+    router.graceful_shutdown().await;
+}
+
 #[tokio::test(flavor = "multi_thread")]
 async fn test_coprocessor_demand_control_access() -> Result<(), BoxError> {
     if !graph_os_enabled() {
diff --git a/apollo-router/tests/integration/file_upload.rs b/apollo-router/tests/integration/file_upload.rs
index fd272f9d28..d179f39b66 100644
--- a/apollo-router/tests/integration/file_upload.rs
+++ b/apollo-router/tests/integration/file_upload.rs
@@ -68,14 +68,14 @@ async fn it_uploads_file_to_subgraph() -> Result<(), BoxError> {
     assert_eq!(operations_field.name(), Some("operations"));
     let operations: helper::Operation =
         serde_json::from_slice(&operations_field.bytes().await.unwrap()).unwrap();
-    insta::assert_json_snapshot!(operations, @r#"
+    insta::assert_json_snapshot!(operations, @r###"
     {
-      "query": "mutation SomeMutation__uploads__0($file:Upload){file:singleUpload(file:$file){filename body}}",
+      "query": "mutation SomeMutation__uploads__0($file: Upload) { file: singleUpload(file: $file) { filename body } }",
       "variables": {
         "file": null
       }
     }
-    "#);
+    "###);
 
     let map_field = multipart
         .next_field()
diff --git a/apollo-router/tests/integration/fixtures/coprocessor.router.yaml b/apollo-router/tests/integration/fixtures/coprocessor.router.yaml
new file mode 100644
index 0000000000..a408b3a203
--- /dev/null
+++ b/apollo-router/tests/integration/fixtures/coprocessor.router.yaml
@@ -0,0 +1,24 @@
+# This coprocessor doesn't point to anything
+coprocessor:
+  url: "<replace>"
+  router:
+    request:
+      body: true
+    response:
+      body: true
+  supergraph:
+    request:
+      body: true
+    response:
+      body: true
+  subgraph:
+    all:
+      request:
+        body: true
+      response:
+        body: true
+  execution:
+    request:
+      body: true
+    response:
+      body: true
\ No newline at end of file
diff --git a/apollo-router/tests/integration/fixtures/query_planner_redis_config_update_reuse_query_fragments.router.yaml b/apollo-router/tests/integration/fixtures/query_planner_redis_config_update_reuse_query_fragments.router.yaml
index 14136a0268..6f24cebc9a 100644
--- a/apollo-router/tests/integration/fixtures/query_planner_redis_config_update_reuse_query_fragments.router.yaml
+++ b/apollo-router/tests/integration/fixtures/query_planner_redis_config_update_reuse_query_fragments.router.yaml
@@ -8,4 +8,5 @@ supergraph:
       - redis://localhost:6379
     ttl: 10s
   experimental_reuse_query_fragments: true
-
+  generate_query_fragments: false
+experimental_query_planner_mode: legacy
\ No newline at end of file
diff --git a/apollo-router/tests/integration/query_planner.rs b/apollo-router/tests/integration/query_planner.rs
index 5bf0ca799c..d2834bfd84 100644
--- a/apollo-router/tests/integration/query_planner.rs
+++ b/apollo-router/tests/integration/query_planner.rs
@@ -10,6 +10,7 @@ const LEGACY_QP: &str = "experimental_query_planner_mode: legacy";
 const NEW_QP: &str = "experimental_query_planner_mode: new";
 const BOTH_QP: &str = "experimental_query_planner_mode: both";
 const BOTH_BEST_EFFORT_QP: &str = "experimental_query_planner_mode: both_best_effort";
+const NEW_BEST_EFFORT_QP: &str = "experimental_query_planner_mode: new_best_effort";
 
 #[tokio::test(flavor = "multi_thread")]
 async fn fed1_schema_with_legacy_qp() {
@@ -81,6 +82,27 @@ async fn fed1_schema_with_both_best_effort_qp() {
     router.graceful_shutdown().await;
 }
 
+#[tokio::test(flavor = "multi_thread")]
+async fn fed1_schema_with_new_best_effort_qp() {
+    let mut router = IntegrationTest::builder()
+        .config(NEW_BEST_EFFORT_QP)
+        .supergraph("../examples/graphql/supergraph-fed1.graphql")
+        .build()
+        .await;
+    router.start().await;
+    router
+        .assert_log_contains(
+            "Falling back to the legacy query planner: \
+            failed to initialize the query planner: \
+            Supergraphs composed with federation version 1 are not supported. \
+            Please recompose your supergraph with federation version 2 or greater",
+        )
+        .await;
+    router.assert_started().await;
+    router.execute_default_query().await;
+    router.graceful_shutdown().await;
+}
+
 #[tokio::test(flavor = "multi_thread")]
 async fn fed1_schema_with_legacy_qp_reload_to_new_keep_previous_config() {
     let config = format!("{PROMETHEUS_METRICS_CONFIG}\n{LEGACY_QP}");
diff --git a/apollo-router/tests/integration/redis.rs b/apollo-router/tests/integration/redis.rs
index 955cbc9290..f5e3da3438 100644
--- a/apollo-router/tests/integration/redis.rs
+++ b/apollo-router/tests/integration/redis.rs
@@ -51,7 +51,10 @@ async fn query_planner_cache() -> Result<(), BoxError> {
     }
     // If this test fails and the cache key format changed you'll need to update the key here.
     // Look at the top of the file for instructions on getting the new cache key.
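An aside on the assertions that follow: the query plan cache keys change from the `plan:cache:1:federation:v2.9.3:...` format to `plan:router:<crate version>:...`, with the version injected via `env!("CARGO_PKG_VERSION")`, so every router release naturally writes into a fresh cache namespace. A hypothetical sketch of that key shape — the digest arguments are placeholders, not real hashes computed by the router:

```rust
// Sketch of the key shape only; the hex digests below are placeholder
// strings, not digests the router would actually produce.
fn plan_cache_key(schema_hash: &str, opname_hash: &str, metadata_hash: &str) -> String {
    format!(
        "plan:router:{}:{}:opname:{}:metadata:{}",
        env!("CARGO_PKG_VERSION"), // ties every cache entry to the release that wrote it
        schema_hash,
        opname_hash,
        metadata_hash
    )
}

fn main() {
    let key = plan_cache_key("8c0b4bfb…", "3973e022…", "924b36a9…");
    assert!(key.starts_with("plan:router:"));
    println!("{key}");
}
```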
- let known_cache_key = "plan:cache:1:federation:v2.9.3:70f115ebba5991355c17f4f56ba25bb093c519c4db49a30f3b10de279a4e3fa4:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:1cfc840090ac76a98f8bd51442f41fd6ca4c8d918b3f8d87894170745acf0734"; + let known_cache_key = &format!( + "plan:router:{}:8c0b4bfb4630635c2b5748c260d686ddb301d164e5818c63d6d9d77e13631676:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:924b36a9ae6af4ff198220b1302b14b6329c4beb7c022fd31d6fef82eaad7ccb", + env!("CARGO_PKG_VERSION") + ); let config = RedisConfig::from_url("redis://127.0.0.1:6379").unwrap(); let client = RedisClient::new(config, None, None, None); @@ -448,14 +451,15 @@ async fn entity_cache_basic() -> Result<(), BoxError> { .unwrap(); insta::assert_json_snapshot!(response); + // if this is failing due to a cache key change, hook up redis-cli with the MONITOR command to see the keys being set let s:String = client - .get("version:1.0:subgraph:products:type:Query:hash:0b4d791a3403d76643db0a9e4a8d304b1cd1f8c4ab68cb58ab7ccdc116a1da1c:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") + .get("version:1.0:subgraph:products:type:Query:hash:5e8ac155fe1fb5b3b69292f89b7df818a39d88a3bf77031a6bd60c22eeb4b242:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") .await .unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); insta::assert_json_snapshot!(v.as_object().unwrap().get("data").unwrap()); - let s: String = client.get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:04c47a3b857394fb0feef5b999adc073b8ab7416e3bc871f54c0b885daae8359:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c").await.unwrap(); + let s: String = client.get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:50354623eb0a347d47a62f002fae74c0f579ee693af1fdb9a1e4744b4723dd2c:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c").await.unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); insta::assert_json_snapshot!(v.as_object().unwrap().get("data").unwrap()); @@ -567,7 +571,7 @@ async fn entity_cache_basic() -> Result<(), BoxError> { insta::assert_json_snapshot!(response); let s:String = client - .get("version:1.0:subgraph:reviews:type:Product:entity:d9a4cd73308dd13ca136390c10340823f94c335b9da198d2339c886c738abf0d:hash:04c47a3b857394fb0feef5b999adc073b8ab7416e3bc871f54c0b885daae8359:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") + .get("version:1.0:subgraph:reviews:type:Product:entity:d9a4cd73308dd13ca136390c10340823f94c335b9da198d2339c886c738abf0d:hash:50354623eb0a347d47a62f002fae74c0f579ee693af1fdb9a1e4744b4723dd2c:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") .await .unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); @@ -796,7 +800,7 @@ async fn entity_cache_authorization() -> Result<(), BoxError> { insta::assert_json_snapshot!(response); let s:String = client - .get("version:1.0:subgraph:products:type:Query:hash:0b4d791a3403d76643db0a9e4a8d304b1cd1f8c4ab68cb58ab7ccdc116a1da1c:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") + .get("version:1.0:subgraph:products:type:Query:hash:5e8ac155fe1fb5b3b69292f89b7df818a39d88a3bf77031a6bd60c22eeb4b242:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") .await .unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); @@ 
-817,7 +821,7 @@ async fn entity_cache_authorization() -> Result<(), BoxError> { ); let s: String = client - .get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:04c47a3b857394fb0feef5b999adc073b8ab7416e3bc871f54c0b885daae8359:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") + .get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:50354623eb0a347d47a62f002fae74c0f579ee693af1fdb9a1e4744b4723dd2c:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") .await .unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); @@ -861,7 +865,7 @@ async fn entity_cache_authorization() -> Result<(), BoxError> { insta::assert_json_snapshot!(response); let s:String = client - .get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:f7d6d3af2706afe346e3d5fd353e61bd186d2fc64cb7b3c13a62162189519b5f:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") + .get("version:1.0:subgraph:reviews:type:Product:entity:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:hash:2253830e3b366dcfdfa4e1acf6afa9e05d3c80ff50171243768a3e416536c89b:data:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") .await .unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); @@ -973,10 +977,24 @@ async fn connection_failure_blocks_startup() { #[tokio::test(flavor = "multi_thread")] async fn query_planner_redis_update_query_fragments() { + // If this test fails and the cache key format changed you'll need to update + // the key here. Look at the top of the file for instructions on getting + // the new cache key. + // + // You first need to follow the process and update the key in + // `test_redis_query_plan_config_update`, and then update the key in this + // test. + // + // This test requires graphos license, so make sure you have + // "TEST_APOLLO_KEY" and "TEST_APOLLO_GRAPH_REF" env vars set, otherwise the + // test just passes locally. test_redis_query_plan_config_update( // This configuration turns the fragment generation option *off*. include_str!("fixtures/query_planner_redis_config_update_query_fragments.router.yaml"), - "plan:cache:1:federation:v2.9.3:e15b4f5cd51b8cc728e3f5171611073455601e81196cd3cbafc5610d9769a370:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:0ade8e18db172d9d51b36a2112513c15032d103100644df418a50596de3adfba", + &format!( + "plan:router:{}:5938623f2155169070684a48be1e0b8468d0f2c662b5527a2247f683173f7d05:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:28acec2bebc3922cd261ed3c8a13b26d53b49e891797a199e3e1ce8089e813e6", + env!("CARGO_PKG_VERSION") + ), ) .await; } @@ -1006,7 +1024,10 @@ async fn query_planner_redis_update_defer() { // test just passes locally. 
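As the comments around these tests note, the enterprise Redis tests gate themselves on GraphOS credentials and simply pass when those are absent. A minimal sketch of that guard, mirroring the `graph_os_enabled()` helper used in the coprocessor tests and the env-var check in `samples_tests.rs`:

```rust
/// Mirrors the `graph_os_enabled()` guard used by these integration tests:
/// without GraphOS credentials the enterprise assertions are skipped and
/// the test trivially passes.
fn graph_os_enabled() -> bool {
    std::env::var("TEST_APOLLO_KEY").is_ok() && std::env::var("TEST_APOLLO_GRAPH_REF").is_ok()
}

fn main() {
    if !graph_os_enabled() {
        println!("TEST_APOLLO_KEY / TEST_APOLLO_GRAPH_REF unset; skipping enterprise checks");
        return;
    }
    println!("running enterprise checks");
}
```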
test_redis_query_plan_config_update( include_str!("fixtures/query_planner_redis_config_update_defer.router.yaml"), - "plan:cache:1:federation:v2.9.3:e15b4f5cd51b8cc728e3f5171611073455601e81196cd3cbafc5610d9769a370:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:066f41523274aed2428e0f08c9de077ee748a1d8470ec31edb5224030a198f3b", + &format!( + "plan:router:{}:5938623f2155169070684a48be1e0b8468d0f2c662b5527a2247f683173f7d05:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:af3139ddd647c755d2eab5e6a177dc443030a528db278c19ad7b45c5c0324378", + env!("CARGO_PKG_VERSION") + ), ) .await; } @@ -1028,11 +1049,15 @@ async fn query_planner_redis_update_type_conditional_fetching() { include_str!( "fixtures/query_planner_redis_config_update_type_conditional_fetching.router.yaml" ), - "plan:cache:1:federation:v2.9.3:e15b4f5cd51b8cc728e3f5171611073455601e81196cd3cbafc5610d9769a370:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:b31d320db1af4015998cc89027f0ede2305dcc61724365e9b76d4252f90c7677", + &format!( + "plan:router:{}:5938623f2155169070684a48be1e0b8468d0f2c662b5527a2247f683173f7d05:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:8b87abe2e45d38df4712af966aa540f33dbab6fc2868a409f2dbb6a5a4fb2d08", + env!("CARGO_PKG_VERSION") + ), ) .await; } +// TODO drop this test once we remove the JS QP #[tokio::test(flavor = "multi_thread")] async fn query_planner_redis_update_reuse_query_fragments() { // If this test fails and the cache key format changed you'll need to update @@ -1050,7 +1075,10 @@ async fn query_planner_redis_update_reuse_query_fragments() { include_str!( "fixtures/query_planner_redis_config_update_reuse_query_fragments.router.yaml" ), - "plan:cache:1:federation:v2.9.3:e15b4f5cd51b8cc728e3f5171611073455601e81196cd3cbafc5610d9769a370:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:d54414eeede3a1bf631d88a84a1e3a354683be87746e79a69769cf18d919cc01", + &format!( + "plan:router:{}:5938623f2155169070684a48be1e0b8468d0f2c662b5527a2247f683173f7d05:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:9af18c8afd568c197050fc1a60c52a8c98656f1775016110516fabfbedc135fe", + env!("CARGO_PKG_VERSION") + ), ) .await; } @@ -1074,7 +1102,11 @@ async fn test_redis_query_plan_config_update(updated_config: &str, new_cache_key router.assert_started().await; router.clear_redis_cache().await; - let starting_key = "plan:cache:1:federation:v2.9.3:e15b4f5cd51b8cc728e3f5171611073455601e81196cd3cbafc5610d9769a370:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:1cfc840090ac76a98f8bd51442f41fd6ca4c8d918b3f8d87894170745acf0734"; + // If the tests above are failing, this is the key that needs to be changed first. + let starting_key = &format!( + "plan:router:{}:5938623f2155169070684a48be1e0b8468d0f2c662b5527a2247f683173f7d05:opname:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:metadata:924b36a9ae6af4ff198220b1302b14b6329c4beb7c022fd31d6fef82eaad7ccb", + env!("CARGO_PKG_VERSION") + ); assert_ne!(starting_key, new_cache_key, "starting_key (cache key for the initial config) and new_cache_key (cache key with the updated config) should not be equal. 
This either means that the cache key is not being generated correctly, or that the test is not actually checking the updated key."); router.execute_default_query().await; diff --git a/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__query_planner_cache.snap b/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__query_planner_cache.snap index d7330676f2..7f7fd862db 100644 --- a/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__query_planner_cache.snap +++ b/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__query_planner_cache.snap @@ -6,14 +6,14 @@ expression: query_plan "kind": "Fetch", "serviceName": "products", "variableUsages": [], - "operation": "{topProducts{name name2:name}}", + "operation": "{ topProducts { name name2: name } }", "operationName": null, "operationKind": "query", "id": null, "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "d38dcce02eea33b3834447eefedabb09d3b14f3b01ad512e881f9e65137f0565", + "schemaAwareHash": "b86f4d9d705538498ec90551f9d90f9eee4386be36ad087638932dad3f44bf66", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/integration/typename.rs b/apollo-router/tests/integration/typename.rs index 782e90adb6..15b2363503 100644 --- a/apollo-router/tests/integration/typename.rs +++ b/apollo-router/tests/integration/typename.rs @@ -106,11 +106,7 @@ async fn aliased() { "###); } -// FIXME: bellow test panic because of bug in query planner, failing with: -// "value retrieval failed: empty query plan. This behavior is unexpected and we suggest opening an issue to apollographql/router with a reproduction." -// See: https://github.com/apollographql/router/issues/6154 #[tokio::test] -#[should_panic] async fn inside_inline_fragment() { let request = Request::fake_builder() .query("{ ... 
{ __typename } }") @@ -120,14 +116,13 @@ async fn inside_inline_fragment() { insta::assert_json_snapshot!(response, @r###" { "data": { - "n": "MyQuery" + "__typename": "MyQuery" } } "###); } #[tokio::test] -#[should_panic] // See above FIXME async fn inside_fragment() { let query = r#" { ...SomeFragment } @@ -141,14 +136,13 @@ async fn inside_fragment() { insta::assert_json_snapshot!(response, @r###" { "data": { - "n": "MyQuery" + "__typename": "MyQuery" } } "###); } #[tokio::test] -#[should_panic] // See above FIXME async fn deeply_nested_inside_fragments() { let query = r#" { ...SomeFragment } @@ -168,7 +162,7 @@ async fn deeply_nested_inside_fragments() { insta::assert_json_snapshot!(response, @r###" { "data": { - "n": "MyQuery" + "__typename": "MyQuery" } } "###); diff --git a/apollo-router/tests/integration_tests.rs b/apollo-router/tests/integration_tests.rs index 378a20ddd5..4346ba2f71 100644 --- a/apollo-router/tests/integration_tests.rs +++ b/apollo-router/tests/integration_tests.rs @@ -8,6 +8,7 @@ use std::ffi::OsStr; use std::sync::Arc; use std::sync::Mutex; +use apollo_router::_private::create_test_service_factory_from_yaml; use apollo_router::graphql; use apollo_router::plugin::Plugin; use apollo_router::plugin::PluginInit; @@ -17,7 +18,6 @@ use apollo_router::services::supergraph; use apollo_router::test_harness::mocks::persisted_queries::*; use apollo_router::Configuration; use apollo_router::Context; -use apollo_router::_private::create_test_service_factory_from_yaml; use futures::StreamExt; use http::header::ACCEPT; use http::header::CONTENT_TYPE; @@ -401,137 +401,6 @@ async fn automated_persisted_queries() { assert_eq!(registry.totals(), expected_service_hits); } -#[tokio::test(flavor = "multi_thread")] -async fn persisted_queries() { - use hyper::header::HeaderValue; - use serde_json::json; - - /// Construct a persisted query request from an ID. - fn pq_request(persisted_query_id: &str) -> router::Request { - supergraph::Request::fake_builder() - .extension( - "persistedQuery", - json!({ - "version": 1, - "sha256Hash": persisted_query_id - }), - ) - .build() - .expect("expecting valid request") - .try_into() - .expect("could not convert supergraph::Request to router::Request") - } - - // set up a PQM with one query - const PERSISTED_QUERY_ID: &str = "GetMyNameID"; - const PERSISTED_QUERY_BODY: &str = "query GetMyName { me { name } }"; - let expected_data = serde_json_bytes::json!({ - "me": { - "name": "Ada Lovelace" - } - }); - - let (_mock_guard, uplink_config) = mock_pq_uplink( - &hashmap! { PERSISTED_QUERY_ID.to_string() => PERSISTED_QUERY_BODY.to_string() }, - ) - .await; - - let config = serde_json::json!({ - "persisted_queries": { - "enabled": true - }, - "apq": { - "enabled": false - } - }); - - let mut config: Configuration = serde_json::from_value(config).unwrap(); - config.uplink = Some(uplink_config); - let (router, registry) = setup_router_and_registry_with_config(config).await.unwrap(); - - // Successfully run a persisted query. - let actual = query_with_router(router.clone(), pq_request(PERSISTED_QUERY_ID)).await; - assert!(actual.errors.is_empty()); - assert_eq!(actual.data.as_ref(), Some(&expected_data)); - assert_eq!(registry.totals(), hashmap! {"accounts".to_string() => 1}); - - // Error on unpersisted query. 
- const UNKNOWN_QUERY_ID: &str = "unknown_query"; - const UNPERSISTED_QUERY_BODY: &str = "query GetYourName { you: me { name } }"; - let expected_data = serde_json_bytes::json!({ - "you": { - "name": "Ada Lovelace" - } - }); - let actual = query_with_router(router.clone(), pq_request(UNKNOWN_QUERY_ID)).await; - assert_eq!( - actual.errors, - vec![apollo_router::graphql::Error::builder() - .message(format!( - "Persisted query '{UNKNOWN_QUERY_ID}' not found in the persisted query list" - )) - .extension_code("PERSISTED_QUERY_NOT_IN_LIST") - .build()] - ); - assert_eq!(actual.data, None); - assert_eq!(registry.totals(), hashmap! {"accounts".to_string() => 1}); - - // We didn't break normal GETs. - let actual = query_with_router( - router.clone(), - supergraph::Request::fake_builder() - .query(UNPERSISTED_QUERY_BODY) - .method(Method::GET) - .build() - .unwrap() - .try_into() - .unwrap(), - ) - .await; - assert!(actual.errors.is_empty()); - assert_eq!(actual.data.as_ref(), Some(&expected_data)); - assert_eq!(registry.totals(), hashmap! {"accounts".to_string() => 2}); - - // We didn't break normal POSTs. - let actual = query_with_router( - router.clone(), - supergraph::Request::fake_builder() - .query(UNPERSISTED_QUERY_BODY) - .method(Method::POST) - .build() - .unwrap() - .try_into() - .unwrap(), - ) - .await; - assert!(actual.errors.is_empty()); - assert_eq!(actual.data, Some(expected_data)); - assert_eq!(registry.totals(), hashmap! {"accounts".to_string() => 3}); - - // Proper error when sending malformed request body - let actual = query_with_router( - router.clone(), - http::Request::builder() - .uri("http://default") - .method(Method::POST) - .header( - CONTENT_TYPE, - HeaderValue::from_static(APPLICATION_JSON.essence_str()), - ) - .body(router::Body::empty()) - .unwrap() - .into(), - ) - .await; - assert_eq!(actual.errors.len(), 1); - - assert_eq!(actual.errors[0].message, "Invalid GraphQL request"); - assert_eq!( - actual.errors[0].extensions["code"], - "INVALID_GRAPHQL_REQUEST" - ); -} - #[tokio::test(flavor = "multi_thread")] async fn missing_variables() { let request = supergraph::Request::fake_builder() diff --git a/apollo-router/tests/samples/README.md b/apollo-router/tests/samples/README.md index c37c65e06b..28aa1a149c 100644 --- a/apollo-router/tests/samples/README.md +++ b/apollo-router/tests/samples/README.md @@ -4,7 +4,7 @@ This folder contains a series of Router integration tests that can be defined en ## How to write a test -One test is recognized as a folder containing a `plan.json` file. Any number of subfolders is accepted, and the test name will be the path to the test folder. If the folder contains a `README.md` file, it will be added to the captured output of the test, and displayed if the test failed. +One test is recognized as a folder containing a `plan.json` (or `plan.yaml`) file. Any number of subfolders is accepted, and the test name will be the path to the test folder. If the folder contains a `README.md` file, it will be added to the captured output of the test, and displayed if the test failed. The `plan.json` file contains a top level JSON object with an `actions` field, containing an array of possible actions, that will be executed one by one: @@ -119,4 +119,25 @@ Stops the Router. If the Router does not stop correctly, then this action will f { "type": "Stop" } -``` \ No newline at end of file +``` + +## Troubleshooting + +### Query planning related + +When execution does something unexpected, checking the generated query plan can help. 
+Make sure the YAML Router configuration enables the _expose query plan_ plugin: + +```yaml +plugins: + experimental.expose_query_plan: true +``` + +In a `"type": "Request"` step of `plan.json`, temporarily add the header to ask +for the response to include `extensions.apolloQueryPlan`: + +```json +"headers": { + "Apollo-Expose-Query-Plan": "true" +}, +``` diff --git a/apollo-router/tests/samples/basic/interface-object/plan.json b/apollo-router/tests/samples/basic/interface-object/plan.json index 91a5690a0c..f50fbc4589 100644 --- a/apollo-router/tests/samples/basic/interface-object/plan.json +++ b/apollo-router/tests/samples/basic/interface-object/plan.json @@ -10,7 +10,7 @@ { "request": { "body": { - "query": "query TestItf__accounts__0{i{__typename id x ...on A{a}...on B{b}}}", + "query": "query TestItf__accounts__0 { i { __typename id x ... on A { a } ... on B { b } } }", "operationName": "TestItf__accounts__0" } }, @@ -55,7 +55,7 @@ { "request": { "body": { - "query": "query TestItf__products__1($representations:[_Any!]!){_entities(representations:$representations){...on I{y}}}", + "query": "query TestItf__products__1($representations: [_Any!]!) { _entities(representations: $representations) { ... on I { y } } }", "operationName": "TestItf__products__1", "variables": { "representations": [ @@ -143,7 +143,7 @@ { "request": { "body": { - "query": "query TestItf2__accounts__0{req{__typename id i{__typename id x}}}", + "query": "query TestItf2__accounts__0 { req { __typename id i { __typename id x } } }", "operationName": "TestItf2__accounts__0" } }, @@ -170,7 +170,7 @@ { "request": { "body": { - "query": "query TestItf2__products__1($representations:[_Any!]!){_entities(representations:$representations){...on I{y}}}", + "query": "query TestItf2__products__1($representations: [_Any!]!) { _entities(representations: $representations) { ... on I { y } } }", "operationName": "TestItf2__products__1", "variables": { "representations": [ @@ -201,7 +201,7 @@ { "request": { "body": { - "query": "query TestItf2__reviews__2($representations:[_Any!]!){_entities(representations:$representations){...on C{c}}}", + "query": "query TestItf2__reviews__2($representations: [_Any!]!) { _entities(representations: $representations) { ... on C { c } } }", "operationName": "TestItf2__reviews__2", "variables": { "representations": [ diff --git a/apollo-router/tests/samples/core/defer/plan.json b/apollo-router/tests/samples/core/defer/plan.json index 72dd5efed0..5ea73b76e1 100644 --- a/apollo-router/tests/samples/core/defer/plan.json +++ b/apollo-router/tests/samples/core/defer/plan.json @@ -10,7 +10,7 @@ { "request": { "body": { - "query": "{me{__typename name id}}" + "query": "{ me { __typename name id } }" } }, "response": { @@ -32,7 +32,7 @@ { "request": { "body": { - "query": "query($representations:[_Any!]!){_entities(representations:$representations){..._generated_onUser1_0}}fragment _generated_onUser1_0 on User{reviews{body}}", + "query": "query($representations: [_Any!]!) 
{ _entities(representations: $representations) { ..._generated_onUser1_0 } } fragment _generated_onUser1_0 on User { reviews { body } }", "variables": { "representations": [ { @@ -103,4 +103,4 @@ "type": "Stop" } ] -} +} \ No newline at end of file diff --git a/apollo-router/tests/samples/core/query1/plan.json b/apollo-router/tests/samples/core/query1/plan.json index d8fe2c400d..d451236d33 100644 --- a/apollo-router/tests/samples/core/query1/plan.json +++ b/apollo-router/tests/samples/core/query1/plan.json @@ -9,7 +9,7 @@ "requests": [ { "request": { - "body": {"query":"{me{name}}"} + "body": {"query":"{ me { name } }"} }, "response": { "body": {"data": { "me": { "name": "test" } } } @@ -17,7 +17,7 @@ }, { "request": { - "body": {"query":"{me{nom:name}}"} + "body": {"query":"{ me { nom: name } }"} }, "response": { "body": {"data": { "me": { "nom": "test" } } } diff --git a/apollo-router/tests/samples/enterprise/entity-cache/defer/plan.json b/apollo-router/tests/samples/enterprise/entity-cache/defer/plan.json index 83a777d329..eb223d567a 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/defer/plan.json +++ b/apollo-router/tests/samples/enterprise/entity-cache/defer/plan.json @@ -12,7 +12,7 @@ { "request": { "body": { - "query": "query CacheDefer__cache_defer_accounts__0{me{__typename name id}}", + "query": "query CacheDefer__cache_defer_accounts__0 { me { __typename name id } }", "operationName": "CacheDefer__cache_defer_accounts__0" } }, @@ -39,7 +39,7 @@ { "request": { "body": { - "query": "query CacheDefer__cache_defer_reviews__1($representations:[_Any!]!){_entities(representations:$representations){..._generated_onUser1_0}}fragment _generated_onUser1_0 on User{reviews{body}}", + "query": "query CacheDefer__cache_defer_reviews__1($representations: [_Any!]!) { _entities(representations: $representations) { ..._generated_onUser1_0 } } fragment _generated_onUser1_0 on User { reviews { body } }", "operationName": "CacheDefer__cache_defer_reviews__1", "variables": { "representations": [ @@ -110,4 +110,4 @@ "type": "Stop" } ] -} +} \ No newline at end of file diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/plan.json b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/plan.json index c8588ed2b7..fd55c50c8e 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/plan.json +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-entity-key/plan.json @@ -12,7 +12,7 @@ { "request": { "body": { - "query":"query InvalidationEntityKey__invalidation_entity_key_products__0{topProducts{__typename upc}}", + "query":"query InvalidationEntityKey__invalidation_entity_key_products__0 { topProducts { __typename upc } }", "operationName": "InvalidationEntityKey__invalidation_entity_key_products__0" } }, @@ -31,7 +31,7 @@ { "request": { "body": { - "query":"query InvalidationEntityKey__invalidation_entity_key_reviews__1($representations:[_Any!]!){_entities(representations:$representations){..._generated_onProduct1_0}}fragment _generated_onProduct1_0 on Product{reviews{body}}", + "query":"query InvalidationEntityKey__invalidation_entity_key_reviews__1($representations: [_Any!]!) 
{ _entities(representations: $representations) { ..._generated_onProduct1_0 } } fragment _generated_onProduct1_0 on Product { reviews { body } }", "operationName": "InvalidationEntityKey__invalidation_entity_key_reviews__1", "variables":{"representations":[{"upc":"0","__typename":"Product"},{"upc":"1","__typename":"Product"}]} } @@ -90,7 +90,8 @@ { "request": { "body": { - "query":"mutation InvalidationEntityKey__invalidation_entity_key_reviews__0{invalidateProductReview}" + "query":"mutation InvalidationEntityKey__invalidation_entity_key_reviews__0 { invalidateProductReview }", + "operationName": "InvalidationEntityKey__invalidation_entity_key_reviews__0" } }, "response": { @@ -115,7 +116,8 @@ { "request": { "body": { - "query":"query InvalidationEntityKey__invalidation_entity_key_reviews__1($representations:[_Any!]!){_entities(representations:$representations){..._generated_onProduct1_0}}fragment _generated_onProduct1_0 on Product{reviews{body}}", + "query": "query InvalidationEntityKey__invalidation_entity_key_reviews__1($representations: [_Any!]!) { _entities(representations: $representations) { ..._generated_onProduct1_0 } } fragment _generated_onProduct1_0 on Product { reviews { body } }", + "operationName": "InvalidationEntityKey__invalidation_entity_key_reviews__1", "variables":{"representations":[{"upc":"1","__typename":"Product"}]} } }, @@ -202,7 +204,8 @@ { "request": { "body": { - "query":"query InvalidationEntityKey__invalidation_entity_key_reviews__1($representations:[_Any!]!){_entities(representations:$representations){..._generated_onProduct1_0}}fragment _generated_onProduct1_0 on Product{reviews{body}}", + "query":"query InvalidationEntityKey__invalidation_entity_key_reviews__1($representations: [_Any!]!) { _entities(representations: $representations) { ..._generated_onProduct1_0 } } fragment _generated_onProduct1_0 on Product { reviews { body } }", + "operationName": "InvalidationEntityKey__invalidation_entity_key_reviews__1", "variables":{"representations":[{"upc":"1","__typename":"Product"}]} } }, diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/plan.json b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/plan.json index 9bbbd1d90c..b60d008b38 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/plan.json +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-name/plan.json @@ -11,7 +11,7 @@ "requests": [ { "request": { - "body": {"query":"query InvalidationSubgraphName__invalidation_subgraph_name_accounts__0{me{name}}"} + "body": {"query":"query InvalidationSubgraphName__invalidation_subgraph_name_accounts__0 { me { name } }"} }, "response": { "headers": { @@ -45,7 +45,7 @@ "requests": [ { "request": { - "body": {"query":"mutation InvalidationSubgraphName__invalidation_subgraph_name_accounts__0{updateMyAccount{name}}"} + "body": {"query":"mutation InvalidationSubgraphName__invalidation_subgraph_name_accounts__0 { updateMyAccount { name } }"} }, "response": { "headers": { @@ -99,7 +99,7 @@ "requests": [ { "request": { - "body": {"query":"query InvalidationSubgraphName__invalidation_subgraph_name_accounts__0{me{name}}"} + "body": {"query":"query InvalidationSubgraphName__invalidation_subgraph_name_accounts__0 { me { name } }"} }, "response": { "headers": { diff --git a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/plan.json 
b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/plan.json index 72e39a7b80..5c2a63dd6d 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/plan.json +++ b/apollo-router/tests/samples/enterprise/entity-cache/invalidation-subgraph-type/plan.json @@ -11,7 +11,10 @@ "requests": [ { "request": { - "body": {"query":"query InvalidationSubgraphType__invalidation_subgraph_type_accounts__0{me{name id}}","operationName":"InvalidationSubgraphType__invalidation_subgraph_type_accounts__0"} + "body": { + "query": "query InvalidationSubgraphType__invalidation_subgraph_type_accounts__0 { me { name id } }", + "operationName": "InvalidationSubgraphType__invalidation_subgraph_type_accounts__0" + } }, "response": { "headers": { @@ -83,7 +86,10 @@ "requests": [ { "request": { - "body": {"query":"query InvalidationSubgraphType__invalidation_subgraph_type_accounts__0{me{name id}}", "operationName":"InvalidationSubgraphType__invalidation_subgraph_type_accounts__0"} + "body": { + "query": "query InvalidationSubgraphType__invalidation_subgraph_type_accounts__0 { me { name id } }", + "operationName": "InvalidationSubgraphType__invalidation_subgraph_type_accounts__0" + } }, "response": { "headers": { diff --git a/apollo-router/tests/samples/enterprise/entity-cache/private/plan.json b/apollo-router/tests/samples/enterprise/entity-cache/private/plan.json index b466291766..ecb3b32e71 100644 --- a/apollo-router/tests/samples/enterprise/entity-cache/private/plan.json +++ b/apollo-router/tests/samples/enterprise/entity-cache/private/plan.json @@ -11,7 +11,7 @@ "requests": [ { "request": { - "body": {"query":"query private__accounts__0{me{name}}"} + "body": {"query":"query private__accounts__0 { me { name } }"} }, "response": { "headers": { @@ -48,7 +48,7 @@ "requests": [ { "request": { - "body": {"query":"query private__accounts__0{me{name}}"} + "body": {"query":"query private__accounts__0 { me { name } }"} }, "response": { "headers": { diff --git a/apollo-router/tests/samples/enterprise/persisted-queries/basic/README.md b/apollo-router/tests/samples/enterprise/persisted-queries/basic/README.md new file mode 100644 index 0000000000..09cba4f996 --- /dev/null +++ b/apollo-router/tests/samples/enterprise/persisted-queries/basic/README.md @@ -0,0 +1,3 @@ +# Persisted Queries + +This tests Persisted Query Lists: https://www.apollographql.com/docs/graphos/routing/security/persisted-queries diff --git a/apollo-router/tests/samples/enterprise/persisted-queries/basic/configuration.yaml b/apollo-router/tests/samples/enterprise/persisted-queries/basic/configuration.yaml new file mode 100644 index 0000000000..8d7c0e2439 --- /dev/null +++ b/apollo-router/tests/samples/enterprise/persisted-queries/basic/configuration.yaml @@ -0,0 +1,14 @@ +persisted_queries: + enabled: true + experimental_local_manifests: + - tests/samples/enterprise/persisted-queries/basic/persisted-query-manifest.json +apq: + enabled: false +telemetry: + apollo: + client_name_header: custom-client-name +rhai: + scripts: "tests/samples/enterprise/persisted-queries/basic/rhai" + main: "main.rhai" +include_subgraph_errors: + all: true diff --git a/apollo-router/tests/samples/enterprise/persisted-queries/basic/persisted-query-manifest.json b/apollo-router/tests/samples/enterprise/persisted-queries/basic/persisted-query-manifest.json new file mode 100644 index 0000000000..1659290d24 --- /dev/null +++ b/apollo-router/tests/samples/enterprise/persisted-queries/basic/persisted-query-manifest.json @@ -0,0 
+1,15 @@ +{ + "format": "apollo-persisted-query-manifest", + "version": 1, + "operations": [ + { + "id": "GetMyNameID", + "body": "query GetMyName { me { name } }" + }, + { + "id": "GetMyNameID", + "clientName": "mobile", + "body": "query GetMyName { me { mobileAlias: name } }" + } + ] +} diff --git a/apollo-router/tests/samples/enterprise/persisted-queries/basic/plan.yaml b/apollo-router/tests/samples/enterprise/persisted-queries/basic/plan.yaml new file mode 100644 index 0000000000..82c7a680fc --- /dev/null +++ b/apollo-router/tests/samples/enterprise/persisted-queries/basic/plan.yaml @@ -0,0 +1,129 @@ +enterprise: true + +actions: +- type: Start + schema_path: ./supergraph.graphql + configuration_path: ./configuration.yaml + subgraphs: + accounts: + requests: + - request: + body: + query: "query GetMyName__accounts__0 { me { name } }" + response: + body: + data: + me: + name: "Ada Lovelace" + - request: + body: + query: "query GetMyName__accounts__0 { me { mobileAlias: name } }" + response: + body: + data: + me: + mobileAlias: "Ada Lovelace" + - request: + body: + query: "query GetYourName__accounts__0 { you: me { name } }" + response: + body: + data: + you: + name: "Ada Lovelace" + +# Successfully run a persisted query. +- type: Request + description: "Run a persisted query" + request: + extensions: + persistedQuery: + version: 1 + sha256Hash: "GetMyNameID" + expected_response: + data: + me: + name: "Ada Lovelace" + +# Successfully run a persisted query with client name that has its own +# operation, using the client name header configured in +# `telemetry.apollo.client_name_header`. +- type: Request + description: "Run a persisted query with client_name_header" + request: + extensions: + persistedQuery: + version: 1 + sha256Hash: "GetMyNameID" + headers: + custom-client-name: mobile + expected_response: + data: + me: + mobileAlias: "Ada Lovelace" + +# Successfully run a persisted query with client name that has its own +# operation, setting the client name via context in a Rhai plugin. +- type: Request + description: "Run a persisted query with plugin-set client name" + request: + extensions: + persistedQuery: + version: 1 + sha256Hash: "GetMyNameID" + headers: + plugin-client-name: mobile + expected_response: + data: + me: + mobileAlias: "Ada Lovelace" + +# Successfully run a persisted query with random client name falling back to the +# version without client name. 
+- type: Request + description: "Run a persisted query with fallback client name" + request: + extensions: + persistedQuery: + version: 1 + sha256Hash: "GetMyNameID" + headers: + custom-client-name: something-not-mobile + expected_response: + data: + me: + name: "Ada Lovelace" + +- type: Request + description: "Unknown persisted query ID" + request: + extensions: + persistedQuery: + version: 1 + sha256Hash: "unknown_query" + expected_response: + errors: + - message: "Persisted query 'unknown_query' not found in the persisted query list" + extensions: + code: PERSISTED_QUERY_NOT_IN_LIST + +- type: Request + description: "Normal non-PQ POSTs work" + request: + query: "query GetYourName { you: me { name } }" + expected_response: + data: + you: + name: "Ada Lovelace" + +- type: Request + description: "Proper error when sending malformed request body" + request: "" + expected_response: + errors: + - message: "Invalid GraphQL request" + extensions: + code: INVALID_GRAPHQL_REQUEST + details: 'failed to deserialize the request body into JSON: invalid type: string "", expected a GraphQL request at line 1 column 2' + +- type: Stop diff --git a/apollo-router/tests/samples/enterprise/persisted-queries/basic/rhai/main.rhai b/apollo-router/tests/samples/enterprise/persisted-queries/basic/rhai/main.rhai new file mode 100644 index 0000000000..b31e749479 --- /dev/null +++ b/apollo-router/tests/samples/enterprise/persisted-queries/basic/rhai/main.rhai @@ -0,0 +1,7 @@ +fn router_service(service) { + service.map_request(|request| { + if (request.headers.contains("plugin-client-name")) { + request.context["apollo_persisted_queries::client_name"] = request.headers["plugin-client-name"]; + } + }); +} diff --git a/apollo-router/tests/samples/enterprise/persisted-queries/basic/supergraph.graphql b/apollo-router/tests/samples/enterprise/persisted-queries/basic/supergraph.graphql new file mode 100644 index 0000000000..c5a920730a --- /dev/null +++ b/apollo-router/tests/samples/enterprise/persisted-queries/basic/supergraph.graphql @@ -0,0 +1,124 @@ +schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) { + query: Query + mutation: Mutation +} + +directive @inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + +directive @tag( + name: String! +) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + +directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + +directive @join__field( + graph: join__Graph + requires: join__FieldSet + provides: join__FieldSet + type: String + external: Boolean + override: String + usedOverridden: Boolean +) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + +directive @join__graph(name: String!, url: String!) on ENUM_VALUE + +directive @join__implements( + graph: join__Graph! + interface: String! +) repeatable on OBJECT | INTERFACE + +directive @join__type( + graph: join__Graph! + key: join__FieldSet + extension: Boolean! = false + resolvable: Boolean! = true + isInterfaceObject: Boolean! = false +) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + +directive @join__unionMember( + graph: join__Graph! + member: String! 
+) repeatable on UNION + +directive @link( + url: String + as: String + for: link__Purpose + import: [link__Import] +) repeatable on SCHEMA + +scalar join__FieldSet +scalar link__Import + +enum join__Graph { + ACCOUNTS + @join__graph(name: "accounts", url: "https://accounts.demo.starstuff.dev/") + INVENTORY + @join__graph( + name: "inventory" + url: "https://inventory.demo.starstuff.dev/" + ) + PRODUCTS + @join__graph(name: "products", url: "https://products.demo.starstuff.dev/") + REVIEWS + @join__graph(name: "reviews", url: "https://reviews.demo.starstuff.dev/") +} + +enum link__Purpose { + SECURITY + EXECUTION +} + +type Mutation @join__type(graph: PRODUCTS) @join__type(graph: REVIEWS) { + createProduct(upc: ID!, name: String): Product @join__field(graph: PRODUCTS) + createReview(upc: ID!, id: ID!, body: String): Review + @join__field(graph: REVIEWS) +} + +type Product + @join__type(graph: INVENTORY, key: "upc") + @join__type(graph: PRODUCTS, key: "upc") + @join__type(graph: REVIEWS, key: "upc") { + inStock: Boolean + @join__field(graph: INVENTORY) + @tag(name: "private") + @inaccessible + name: String @join__field(graph: PRODUCTS) + weight: Int @join__field(graph: INVENTORY, external: true) @join__field(graph: PRODUCTS) + price: Int @join__field(graph: INVENTORY, external: true) @join__field(graph: PRODUCTS) + reviews: [Review] @join__field(graph: REVIEWS) + reviewsForAuthor(authorID: ID!): [Review] @join__field(graph: REVIEWS) + shippingEstimate: Int @join__field(graph: INVENTORY, requires: "price weight") + upc: String! +} + +type Query + @join__type(graph: ACCOUNTS) + @join__type(graph: INVENTORY) + @join__type(graph: PRODUCTS) + @join__type(graph: REVIEWS) { + me: User @join__field(graph: ACCOUNTS) + topProducts(first: Int = 5): [Product] @join__field(graph: PRODUCTS) +} + +type Review @join__type(graph: REVIEWS, key: "id") { + id: ID! + author: User @join__field(graph: REVIEWS, provides: "username") + body: String @join__field(graph: REVIEWS) + product: Product @join__field(graph: REVIEWS) +} + +type User + @join__type(graph: ACCOUNTS, key: "id") + @join__type(graph: REVIEWS, key: "id") { + id: ID! 
+  name: String @join__field(graph: ACCOUNTS)
+  username: String
+    @join__field(graph: ACCOUNTS)
+    @join__field(graph: REVIEWS, external: true)
+  reviews: [Review] @join__field(graph: REVIEWS)
+}
diff --git a/apollo-router/tests/samples/enterprise/query-planning-redis/plan.json b/apollo-router/tests/samples/enterprise/query-planning-redis/plan.json
index a864862620..c25a95d031 100644
--- a/apollo-router/tests/samples/enterprise/query-planning-redis/plan.json
+++ b/apollo-router/tests/samples/enterprise/query-planning-redis/plan.json
@@ -11,7 +11,7 @@
     "requests": [
       {
         "request": {
-          "body": {"query":"{me{name}}"}
+          "body": {"query":"{ me { name } }"}
         },
         "response": {
           "body": {"data": { "me": { "name": "test" } } }
@@ -19,7 +19,7 @@
       },
       {
         "request": {
-          "body": {"query":"{me{nom:name}}"}
+          "body": {"query":"{ me { nom: name } }"}
         },
         "response": {
           "body": {"data": { "me": { "nom": "test" } } }
diff --git a/apollo-router/tests/samples_tests.rs b/apollo-router/tests/samples_tests.rs
index 8159a40179..7f06f1d5cc 100644
--- a/apollo-router/tests/samples_tests.rs
+++ b/apollo-router/tests/samples_tests.rs
@@ -56,7 +56,7 @@ fn lookup_dir(
         path.file_name().unwrap().to_str().unwrap()
     );
-    if path.join("plan.json").exists() {
+    let plan: Option<Plan> = if path.join("plan.json").exists() {
         let mut file = File::open(path.join("plan.json")).map_err(|e| {
             format!(
                 "could not open file at path '{:?}': {e}",
                 &path.join("plan.json")
             )
         })?;
@@ -71,8 +71,8 @@
             )
         })?;
-        let plan: Plan = match serde_json::from_str(&s) {
-            Ok(data) => data,
+        match serde_json::from_str(&s) {
+            Ok(data) => Some(data),
             Err(e) => {
                 return Err(format!(
                     "could not deserialize test plan at {}: {e}",
@@ -80,8 +80,37 @@
                 )
                 .into());
             }
-        };
+        }
+    } else if path.join("plan.yaml").exists() {
+        let mut file = File::open(path.join("plan.yaml")).map_err(|e| {
+            format!(
+                "could not open file at path '{:?}': {e}",
+                &path.join("plan.yaml")
+            )
+        })?;
+        let mut s = String::new();
+        file.read_to_string(&mut s).map_err(|e| {
+            format!(
+                "could not read file at path: '{:?}': {e}",
+                &path.join("plan.yaml")
+            )
+        })?;
+        match serde_yaml::from_str(&s) {
+            Ok(data) => Some(data),
+            Err(e) => {
+                return Err(format!(
+                    "could not deserialize test plan at {}: {e}",
+                    path.display()
+                )
+                .into());
+            }
+        }
+    } else {
+        None
+    };
+
+    if let Some(plan) = plan {
         if plan.enterprise
             && !(std::env::var("TEST_APOLLO_KEY").is_ok()
                 && std::env::var("TEST_APOLLO_GRAPH_REF").is_ok())
@@ -172,6 +201,7 @@ impl TestExecution {
                 .await
             }
             Action::Request {
+                description,
                 request,
                 query_path,
                 headers,
@@ -179,6 +209,7 @@
             } => {
                 self.request(
+                    description.clone(),
                     request.clone(),
                     query_path.as_deref(),
                     headers,
@@ -429,6 +460,7 @@ impl TestExecution {
     #[allow(clippy::too_many_arguments)]
     async fn request(
         &mut self,
+        description: Option<String>,
         mut request: Value,
         query_path: Option<&str>,
         headers: &HashMap<String, String>,
@@ -456,6 +488,11 @@
             }
         }
+        writeln!(out).unwrap();
+        if let Some(description) = description {
+            writeln!(out, "description: {description}").unwrap();
+        }
+
         writeln!(out, "query: {}\n", serde_json::to_string(&request).unwrap()).unwrap();
         writeln!(out, "header: {:?}\n", headers).unwrap();
@@ -482,6 +519,7 @@
             }
         }
         if failed {
+            self.print_received_requests(out).await;
             let f: Failed = out.clone().into();
             return Err(f);
         }
@@ -560,22 +598,7 @@
         };
         if expected_response != &graphql_response {
-            if let Some(requests) = self
-                .subgraphs_server
-                .as_ref()
-                .unwrap()
-                .received_requests()
-                .await
-            {
-                writeln!(out, "subgraphs received requests:").unwrap();
-                for request in requests {
-                    writeln!(out, "\tmethod: {}", request.method).unwrap();
-                    writeln!(out, "\tpath: {}", request.url).unwrap();
-                    writeln!(out, "\t{}\n", std::str::from_utf8(&request.body).unwrap()).unwrap();
-                }
-            } else {
-                writeln!(out, "subgraphs received no requests").unwrap();
-            }
+            self.print_received_requests(out).await;
             writeln!(out, "assertion `left == right` failed").unwrap();
             writeln!(
@@ -596,6 +619,25 @@
         Ok(())
     }
+    async fn print_received_requests(&mut self, out: &mut String) {
+        if let Some(requests) = self
+            .subgraphs_server
+            .as_ref()
+            .unwrap()
+            .received_requests()
+            .await
+        {
+            writeln!(out, "subgraphs received requests:").unwrap();
+            for request in requests {
+                writeln!(out, "\tmethod: {}", request.method).unwrap();
+                writeln!(out, "\tpath: {}", request.url).unwrap();
+                writeln!(out, "\t{}\n", std::str::from_utf8(&request.body).unwrap()).unwrap();
+            }
+        } else {
+            writeln!(out, "subgraphs received no requests").unwrap();
+        }
+    }
+
     async fn endpoint_request(
         &mut self,
         url: &url::Url,
@@ -660,6 +702,7 @@ fn check_path(path: &Path, out: &mut String) -> Result<(), Failed> {
 #[derive(Deserialize)]
 #[allow(dead_code)]
+#[serde(deny_unknown_fields)]
 struct Plan {
     #[serde(default)]
     enterprise: bool,
@@ -669,7 +712,7 @@ struct Plan {
 }
 #[derive(Deserialize)]
-#[serde(tag = "type")]
+#[serde(tag = "type", deny_unknown_fields)]
 enum Action {
     Start {
         schema_path: String,
@@ -689,6 +732,7 @@ enum Action {
         update_url_overrides: bool,
     },
     Request {
+        description: Option<String>,
         request: Value,
         query_path: Option<String>,
         #[serde(default)]
@@ -705,17 +749,20 @@ enum Action {
 }
 #[derive(Clone, Debug, Deserialize)]
+#[serde(deny_unknown_fields)]
 struct Subgraph {
     requests: Vec<SubgraphRequestMock>,
 }
 #[derive(Clone, Debug, Deserialize)]
+#[serde(deny_unknown_fields)]
 struct SubgraphRequestMock {
     request: HttpRequest,
     response: HttpResponse,
 }
 #[derive(Clone, Debug, Deserialize)]
+#[serde(deny_unknown_fields)]
 struct HttpRequest {
     method: Option<String>,
     path: Option<String>,
@@ -725,6 +772,7 @@ struct HttpRequest {
 }
 #[derive(Clone, Debug, Deserialize)]
+#[serde(deny_unknown_fields)]
 struct HttpResponse {
     status: Option<u16>,
     #[serde(default)]
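The test-plan structs above now carry `#[serde(deny_unknown_fields)]`, and `lookup_dir` accepts a `plan.yaml` alongside `plan.json`, so a misspelled or stray key in a sample plan now fails deserialization with a serde error instead of being silently ignored. A minimal sketch of the shape the `Action::Request` variant accepts after this change follows; the operation, header, and response values are invented for illustration, not taken from the repository:

```yaml
# Hypothetical plan.yaml action entry; the field names mirror Action::Request
# above (type, description, request, headers, expected_response), but the
# values are illustrative only.
- type: Request
  description: "Plain query with a custom header"
  request:
    query: "query Example { me { name } }"
  headers:
    custom-header: example-value
  expected_response:
    data:
      me:
        name: "Ada Lovelace"
```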
diff --git a/apollo-router/tests/set_context.rs b/apollo-router/tests/set_context.rs
index dc3f366fcf..96086bbe4c 100644
--- a/apollo-router/tests/set_context.rs
+++ b/apollo-router/tests/set_context.rs
@@ -51,6 +51,7 @@ fn get_configuration(rust_qp: bool) -> serde_json::Value {
         }};
     }
     json!
{{ + "experimental_query_planner_mode": "legacy", "experimental_type_conditioned_fetching": true, // will make debugging easier "plugins": { diff --git a/apollo-router/tests/snapshots/set_context__set_context.snap b/apollo-router/tests/snapshots/set_context__set_context.snap index 18bfcbfcc9..3630f5d86a 100644 --- a/apollo-router/tests/snapshots/set_context__set_context.snap +++ b/apollo-router/tests/snapshots/set_context__set_context.snap @@ -34,7 +34,7 @@ expression: response "operationKind": "query", "operationName": "Query__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "0163c552923b61fbde6dbcd879ffc2bb887175dc41bbf75a272875524e664e8d", + "schemaAwareHash": "b45f75d11c91f90d616e0786fe9a1a675f4f478a6688aa38b9809b3416b66507", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -80,7 +80,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "e64d79913c52a4a8b95bfae44986487a1ac73118f27df3b602972a5cbb1f360a", + "schemaAwareHash": "02dbfc4ce65b1eb8ee39c37f09a88b56ee4671bbcdc935f3ec2a7e25e36c2931", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure.snap b/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure.snap index 67390167e7..c0c964b497 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure.snap @@ -43,7 +43,7 @@ expression: response "operationKind": "query", "operationName": "Query_fetch_dependent_failure__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "6bcaa7a2d52a416d5278eaef6be102427f328b6916075f193c87459516a7fb6d", + "schemaAwareHash": "e8671657b38c13454f18f2bf8df9ebbeb80235c50592f72a2c4141803fe6db59", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -89,7 +89,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "0e56752501c8cbf53429c5aa2df95765ea2c7cba95db9213ce42918699232651", + "schemaAwareHash": "8499a69f5ac2e4ce2e0acc76b38b7839b89b6ccba9142494d1a82dd17dd0e5f2", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure_rust_qp.snap b/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure_rust_qp.snap index 5caea23420..8e555063f8 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure_rust_qp.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_dependent_fetch_failure_rust_qp.snap @@ -43,7 +43,7 @@ expression: response "operationKind": "query", "operationName": "Query_fetch_dependent_failure__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "6b659295c8e5aff7b3d7146b878e848b43ad58fba3f4dfce2988530631c3448a", + "schemaAwareHash": "dfb0f7a17a089f11d0c95f8e9acb3a17fa4fb21216843913bc3a6c62ce2b7fbd", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -89,7 +89,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "3bc84712c95d01c4e9118cc1f8179e071662862a04cef56d39a0ac6a621daf36", + "schemaAwareHash": "f5ae7b50fe8d94eedfb385d91e561de06e3a3256fedca901c0b50ae689b5d630", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_list.snap b/apollo-router/tests/snapshots/set_context__set_context_list.snap index d6dd312f0a..fa0a57fc15 100644 --- 
a/apollo-router/tests/snapshots/set_context__set_context_list.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_list.snap @@ -40,7 +40,7 @@ expression: response "operationKind": "query", "operationName": "Query__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "805348468cefee0e3e745cb1bcec0ab4bd44ba55f6ddb91e52e0bc9b437c2dee", + "schemaAwareHash": "50ba3d7291f38802f222251fe79055e06345e62252e74eba9e01bbec34510cea", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -86,7 +86,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "e64d79913c52a4a8b95bfae44986487a1ac73118f27df3b602972a5cbb1f360a", + "schemaAwareHash": "02dbfc4ce65b1eb8ee39c37f09a88b56ee4671bbcdc935f3ec2a7e25e36c2931", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_list_of_lists.snap b/apollo-router/tests/snapshots/set_context__set_context_list_of_lists.snap index c390c1db88..13e0282397 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_list_of_lists.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_list_of_lists.snap @@ -44,7 +44,7 @@ expression: response "operationKind": "query", "operationName": "QueryLL__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "53e85332dda78d566187c8886c207b81acfe3ab5ea0cafd3d71fb0b153026d80", + "schemaAwareHash": "589a7dec7f09fdedd06128f1e7396c727740ac1f84ad936ea9c61c3cf96d3ee4", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -90,7 +90,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "8ed6f85b6a77c293c97171b4a98f7dd563e98a737d4c3a9f5c54911248498ec7", + "schemaAwareHash": "0c966292093d13acca6c8ebb257a146a46840e5a04c9cbaede12e08df98cd489", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_list_of_lists_rust_qp.snap b/apollo-router/tests/snapshots/set_context__set_context_list_of_lists_rust_qp.snap index 0945ccf058..6dabc52d4a 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_list_of_lists_rust_qp.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_list_of_lists_rust_qp.snap @@ -44,7 +44,7 @@ expression: response "operationKind": "query", "operationName": "QueryLL__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "0a6255094b34a44c5addf88a5a9bb37847f19ecf10370be675ba55a1330b4ac7", + "schemaAwareHash": "560ba34c3cdda6c435aaab55e21528b252f44caabc6c082117e4e9fcc935af5f", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -90,7 +90,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "71e6d73b679197d0e979c07446c670bad69897d77bd280dc9c39276fde6e8d99", + "schemaAwareHash": "b97924736c4f71e4b6e80e2a9e2661130363820bd3df5b2e38000be4a4fb47b5", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_list_rust_qp.snap b/apollo-router/tests/snapshots/set_context__set_context_list_rust_qp.snap index ab643923c0..2cee07ad33 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_list_rust_qp.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_list_rust_qp.snap @@ -40,7 +40,7 @@ expression: response "operationKind": "query", "operationName": "set_context_list_rust_qp__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "fd215e24828b3a7296abe901f843f68b525d8eaf35a019ac34a2198738c91230", + "schemaAwareHash": 
"f89e82a2730898d4c37766615534224fe8f569b4786a3946e652572a1b99117d", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -86,7 +86,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "96a8bf16a86cbddab93bb46364f8b0e63635a928924dcb681dc2371b810eee02", + "schemaAwareHash": "57d42b319499942de11c8eaf8bedb3618079a21fb01792b1a0a1ca8a1157d04c", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_no_typenames.snap b/apollo-router/tests/snapshots/set_context__set_context_no_typenames.snap index e9743a7902..0b45046ad7 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_no_typenames.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_no_typenames.snap @@ -32,7 +32,7 @@ expression: response "operationKind": "query", "operationName": "Query__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "0163c552923b61fbde6dbcd879ffc2bb887175dc41bbf75a272875524e664e8d", + "schemaAwareHash": "b45f75d11c91f90d616e0786fe9a1a675f4f478a6688aa38b9809b3416b66507", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -78,7 +78,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "e64d79913c52a4a8b95bfae44986487a1ac73118f27df3b602972a5cbb1f360a", + "schemaAwareHash": "02dbfc4ce65b1eb8ee39c37f09a88b56ee4671bbcdc935f3ec2a7e25e36c2931", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_no_typenames_rust_qp.snap b/apollo-router/tests/snapshots/set_context__set_context_no_typenames_rust_qp.snap index 6a47496428..d110b1b332 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_no_typenames_rust_qp.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_no_typenames_rust_qp.snap @@ -32,7 +32,7 @@ expression: response "operationKind": "query", "operationName": "set_context_no_typenames_rust_qp__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "9c1d7c67821fc43d63e8a217417fbe600a9100e1a43ba50e2f961d4fd4974144", + "schemaAwareHash": "9b5e7d0de84a6e670d5235682e776eb0ebcd753c955403c7159adea338813a93", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -78,7 +78,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "5fdc56a38428bad98d0c5e46f096c0179886815509ffc1918f5c6e0a784e2547", + "schemaAwareHash": "0b5a9920448d114be6250a0b85f3092f5dfbce80dc89e26880fb28a5ea684d3b", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_rust_qp.snap b/apollo-router/tests/snapshots/set_context__set_context_rust_qp.snap index fecc966894..7e7088d73f 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_rust_qp.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_rust_qp.snap @@ -34,7 +34,7 @@ expression: response "operationKind": "query", "operationName": "set_context_rust_qp__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "7fb5b477b89d2dcf76239dd30abcf6210462e144376a6b1b589ceb603edd55cd", + "schemaAwareHash": "d9094fb75802583731265ab088bd914c2c10ad3d2f7e835cbe13d58811ab797f", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -80,7 +80,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "fef499e9ca815057242c5a03e9f0960d5c50d6958b0ac7329fc23b5a6e714eab", + "schemaAwareHash": "971f94cb09cb7a1a5564661ae4345da5e709f3ae16b81215db80ae61a740e8d2", "serviceName": "Subgraph1", "variableUsages": [ 
"contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_type_mismatch.snap b/apollo-router/tests/snapshots/set_context__set_context_type_mismatch.snap index 3208b9bf0a..2c9c27bc14 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_type_mismatch.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_type_mismatch.snap @@ -32,7 +32,7 @@ expression: response "operationKind": "query", "operationName": "Query_type_mismatch__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "34c8f7c0f16220c5d4b589c8da405f49510e092756fa98629c73dea06fd7c243", + "schemaAwareHash": "f47b7620f3ba24d2c15a2978451bd7b59f462e63dc3259a244efe1d971979bfa", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -78,7 +78,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "feb578fd1831280f376d8961644e670dd8c3508d0a18fcf69a6de651e25e9ca8", + "schemaAwareHash": "a6ff3cddbf800b647fdb15f6da6d5e68a71979be93d51852bd289f047202d8ac", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_type_mismatch_rust_qp.snap b/apollo-router/tests/snapshots/set_context__set_context_type_mismatch_rust_qp.snap index 516c42f5fe..abacde1e41 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_type_mismatch_rust_qp.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_type_mismatch_rust_qp.snap @@ -32,7 +32,7 @@ expression: response "operationKind": "query", "operationName": "Query_type_mismatch__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "29f5e6a254fac05382ddc3e4aac47368dc9847abe711ecf17dbfca7945097faf", + "schemaAwareHash": "0ca90df94a895d97f403b550a05e72808aee80cbfc6f2f3aea8d32ae0d73d2cd", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -78,7 +78,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "864f2eecd06e2c450e48f2cb551d4e95946575eb7e537a17a04c9a1716c0a482", + "schemaAwareHash": "8ea1a4dce3d934c98814d56884f9e7dad9045562a072216ea903570e01c04680", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_union.snap b/apollo-router/tests/snapshots/set_context__set_context_union.snap index 6c995c1e8b..50c3e865df 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_union.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_union.snap @@ -31,7 +31,7 @@ expression: response "operationKind": "query", "operationName": "QueryUnion__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "3e768a1879f4ced427937721980688052b471dbfee0d653b212c85f2732591cc", + "schemaAwareHash": "b6ed60b7e69ed10f45f85aba713969cd99b0e1a832464ba3f225fdf055706424", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -80,7 +80,7 @@ expression: response "typeCondition": "V" } ], - "schemaAwareHash": "0c190d5db5b15f89fa45de844d2cec59725986e44fcb0dbdb9ab870a197cf026", + "schemaAwareHash": "99faa73249f207ea11b1b5064d77f278367398cfee881b2fc3a8a9ebe53f44fe", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_1" @@ -134,7 +134,7 @@ expression: response "typeCondition": "V" } ], - "schemaAwareHash": "2d7376a8d1f7f2a929361e838bb0435ed4c4a6194fa8754af52d4b6dc7140508", + "schemaAwareHash": "e925299b31ea9338d10257fd150ec7ece230f55117105dd631181f4b2a33075a", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_1" diff --git 
a/apollo-router/tests/snapshots/set_context__set_context_union_rust_qp.snap b/apollo-router/tests/snapshots/set_context__set_context_union_rust_qp.snap index 650849369c..c19a7222e8 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_union_rust_qp.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_union_rust_qp.snap @@ -31,7 +31,7 @@ expression: response "operationKind": "query", "operationName": "QueryUnion__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "eae4d791b0314c4e2509735ad3d0dd0ca5de8ee4c7f315513931df6e4cb5102d", + "schemaAwareHash": "967a248e156212f72f5abb27c01fe3d5e8bb4db154e0f7015db551ee0fe46877", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -80,7 +80,7 @@ expression: response "typeCondition": "V" } ], - "schemaAwareHash": "b1ba6dd8a0e2edc415efd084401bfa01ecbaaa76a0f7896c27c431bed8c20a08", + "schemaAwareHash": "77647255b7cbdfcb4a47ab2492c0c5252c4f6d06dd4008b104d8770a584a1e32", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_1" @@ -133,7 +133,7 @@ expression: response "typeCondition": "V" } ], - "schemaAwareHash": "45785998d1610758abe68519f9bc100828afa2ba56c7e55b9d18ad69f3ad27eb", + "schemaAwareHash": "4a608fbd27c2498c1f31bf737143990d8a2f31e72682542d3169fe2fac6d5834", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_1" diff --git a/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure.snap b/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure.snap index ead3b10258..b80bb0c853 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure.snap @@ -37,7 +37,7 @@ expression: response "operationKind": "query", "operationName": "Query_fetch_failure__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "84a7305d62d79b5bbca976c5522d6b32c5bbcbf76b495e4430f9cdcb51c80a57", + "schemaAwareHash": "d321568b33e32986df6d30a82443ebb919949617ffc33affe8b413658af52b8a", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -76,7 +76,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "acb960692b01a756fcc627cafef1c47ead8afa60fa70828e5011ba9f825218ab", + "schemaAwareHash": "a9aa68bb30f2040298629fc2fe72dc8438ce16bcdfdbe1a16ff088cf61d38719", "serviceName": "Subgraph2", "variableUsages": [] }, @@ -128,7 +128,7 @@ expression: response "typeCondition": "U" } ], - "schemaAwareHash": "9fd65f6f213899810bce20180de6754354a25dc3c1bc97d0b7214a177cf8b0bb", + "schemaAwareHash": "da0e31f9990723a68dbd1e1bb164a068342da5561db1a28679693a406429d09a", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" diff --git a/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure_rust_qp.snap b/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure_rust_qp.snap index 8194cfc237..77341c5367 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure_rust_qp.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_unrelated_fetch_failure_rust_qp.snap @@ -1,7 +1,6 @@ --- source: apollo-router/tests/set_context.rs expression: response -snapshot_kind: text --- { "data": null, @@ -38,7 +37,7 @@ snapshot_kind: text "operationKind": "query", "operationName": "Query_fetch_failure__Subgraph1__0", "outputRewrites": null, - "schemaAwareHash": "d3f1ad875170d008059583ca6074e732a178f74474ac31de3bb4397c5080020d", + "schemaAwareHash": 
"03786f26d73a1ad1bfa3fed40f657316857018dc1105b2da578904373b7e1882", "serviceName": "Subgraph1", "variableUsages": [] }, @@ -87,7 +86,7 @@ snapshot_kind: text "typeCondition": "U" } ], - "schemaAwareHash": "05dc59a826cec26ad8263101508c298dd8d31d79d36f18194dd4cf8cd5f02dc3", + "schemaAwareHash": "f615a413abdf99efaf7e760e1246371aa5dd0f2330820cf295335ed48cc077ed", "serviceName": "Subgraph1", "variableUsages": [ "contextualArgument_1_0" @@ -130,7 +129,7 @@ snapshot_kind: text "typeCondition": "U" } ], - "schemaAwareHash": "a3c7e6df6f9c93b228f16a937b7159ccf1294fec50a92f60ba004dbebbb64b50", + "schemaAwareHash": "0e71b293b5c0b6143252865b2c97338cd72a558897b0a478dd0cd8b027f9a5a3", "serviceName": "Subgraph2", "variableUsages": [] }, diff --git a/apollo-router/tests/snapshots/set_context__set_context_with_null.snap b/apollo-router/tests/snapshots/set_context__set_context_with_null.snap index badc32bc8a..f4c5eb3898 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_with_null.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_with_null.snap @@ -29,7 +29,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "4c0c9f83a57e9a50ff1f6dd601ec0a1588f1485d5cfb1015822af4017263e807", + "schemaAwareHash": "73819a48542fc2a3eb3a831b27ab5cc0b1286a73c2750279d8886fc529ba9e9e", "authorization": { "is_authenticated": false, "scopes": [], @@ -82,7 +82,7 @@ expression: response "renameKeyTo": "contextualArgument_1_0" } ], - "schemaAwareHash": "8db802e78024d406645f1ddc8972255e917bc738bfbed281691a45e34c92debb", + "schemaAwareHash": "042955e454618e67e75f3c86c9b8c71e2da866f1c40d0dc462d52053e1861803", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/set_context__set_context_with_null_rust_qp.snap b/apollo-router/tests/snapshots/set_context__set_context_with_null_rust_qp.snap index 6f775414e3..fcd539eeb1 100644 --- a/apollo-router/tests/snapshots/set_context__set_context_with_null_rust_qp.snap +++ b/apollo-router/tests/snapshots/set_context__set_context_with_null_rust_qp.snap @@ -29,7 +29,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "4fc423a49bbddcc8869c014934dfd128dd61a1760c4eb619940ad46f614c843b", + "schemaAwareHash": "5a33cc9574d930882310fe1f9ddae8f262a448a50ac9a899e71896a339fa0f85", "authorization": { "is_authenticated": false, "scopes": [], @@ -81,7 +81,7 @@ expression: response "renameKeyTo": "contextualArgument_1_0" } ], - "schemaAwareHash": "d863b0ef9ef616faaade4c73b2599395e074ec1521ec07634471894145e97f44", + "schemaAwareHash": "4f6eeca0e601bbf183b759aa785df84eb0c435a266a36568238e8d721dc8fc3c", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_disabled-2.snap b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_disabled-2.snap index 390a7c79df..4d61ae95b4 100644 --- a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_disabled-2.snap +++ b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_disabled-2.snap @@ -79,7 +79,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "70ca85b28e861b24a7749862930a5f965c4c6e8074d60a87a3952d596fe7cc36", + "schemaAwareHash": "1d2d8b1ab80b4b1293e9753a915997835e6ff5bc54ba4c9b400abe7fa4661386", "authorization": { "is_authenticated": false, "scopes": [], @@ -137,7 
+137,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "317a722a677563080aeac92f60ac2257d9288ca6851a0e8980fcf18f58b462a8", + "schemaAwareHash": "66a2bd39c499f1edd8c3ec1bfbc170cb995c6f9e23427b5486b633decd2da08b", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_disabled.snap b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_disabled.snap index 687028717f..de7f4d2827 100644 --- a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_disabled.snap +++ b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_disabled.snap @@ -79,7 +79,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "0e1644746fe4beab7def35ec8cc12bde39874c6bb8b9dfd928456196b814a111", + "schemaAwareHash": "18631e67bb0c6b514cb51e8dff155a2900c8000ad319ea4784e5ca8b1275aca2", "authorization": { "is_authenticated": false, "scopes": [], @@ -137,7 +137,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "6510f6b9672829bd9217618b78ef6f329fbddb125f88184d04e6faaa982ff8bb", + "schemaAwareHash": "7f3ec4c2c644d43e54d95da83790166d87ab6bfcb31fe5692d8262199bff6d3f", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled-2.snap b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled-2.snap index ed94ed7a85..c89cab9c7d 100644 --- a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled-2.snap +++ b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled-2.snap @@ -79,7 +79,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "70ca85b28e861b24a7749862930a5f965c4c6e8074d60a87a3952d596fe7cc36", + "schemaAwareHash": "1d2d8b1ab80b4b1293e9753a915997835e6ff5bc54ba4c9b400abe7fa4661386", "authorization": { "is_authenticated": false, "scopes": [], @@ -140,7 +140,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "1d21a65a3b5a31e17f7834750ef5b37fb49d99d0a1e2145f00a62d43c5f8423a", + "schemaAwareHash": "31465e7b7e358ea9407067188249b51fd7342088e6084360ed0df28199cef5cc", "authorization": { "is_authenticated": false, "scopes": [], @@ -199,7 +199,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "df321f6532c2c9eda0d8c042e5f08073c24e558dd0cae01054886b79416a6c08", + "schemaAwareHash": "550ad525da9bb9497fb0d51bf7a64b7d5d73ade5ee7d2e425573dc7e2e248e99", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled.snap b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled.snap index 08a9782c85..39b73eabbc 100644 --- a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled.snap +++ b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled.snap @@ -79,7 +79,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "0e1644746fe4beab7def35ec8cc12bde39874c6bb8b9dfd928456196b814a111", + "schemaAwareHash": 
"18631e67bb0c6b514cb51e8dff155a2900c8000ad319ea4784e5ca8b1275aca2", "authorization": { "is_authenticated": false, "scopes": [], @@ -141,7 +141,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "6510f6b9672829bd9217618b78ef6f329fbddb125f88184d04e6faaa982ff8bb", + "schemaAwareHash": "7f3ec4c2c644d43e54d95da83790166d87ab6bfcb31fe5692d8262199bff6d3f", "authorization": { "is_authenticated": false, "scopes": [], @@ -201,7 +201,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "6bc34c108f7cf81896971bffad76dc5275d46231b4dfe492ccc205dda9a4aa16", + "schemaAwareHash": "3874fd9db4a0302422701b93506f42a5de5604355be7093fa2abe23f440161f9", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_generate_query_fragments-2.snap b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_generate_query_fragments-2.snap index ed94ed7a85..c89cab9c7d 100644 --- a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_generate_query_fragments-2.snap +++ b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_generate_query_fragments-2.snap @@ -79,7 +79,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "70ca85b28e861b24a7749862930a5f965c4c6e8074d60a87a3952d596fe7cc36", + "schemaAwareHash": "1d2d8b1ab80b4b1293e9753a915997835e6ff5bc54ba4c9b400abe7fa4661386", "authorization": { "is_authenticated": false, "scopes": [], @@ -140,7 +140,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "1d21a65a3b5a31e17f7834750ef5b37fb49d99d0a1e2145f00a62d43c5f8423a", + "schemaAwareHash": "31465e7b7e358ea9407067188249b51fd7342088e6084360ed0df28199cef5cc", "authorization": { "is_authenticated": false, "scopes": [], @@ -199,7 +199,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "df321f6532c2c9eda0d8c042e5f08073c24e558dd0cae01054886b79416a6c08", + "schemaAwareHash": "550ad525da9bb9497fb0d51bf7a64b7d5d73ade5ee7d2e425573dc7e2e248e99", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_generate_query_fragments.snap b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_generate_query_fragments.snap index 08a9782c85..39b73eabbc 100644 --- a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_generate_query_fragments.snap +++ b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_generate_query_fragments.snap @@ -79,7 +79,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "0e1644746fe4beab7def35ec8cc12bde39874c6bb8b9dfd928456196b814a111", + "schemaAwareHash": "18631e67bb0c6b514cb51e8dff155a2900c8000ad319ea4784e5ca8b1275aca2", "authorization": { "is_authenticated": false, "scopes": [], @@ -141,7 +141,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "6510f6b9672829bd9217618b78ef6f329fbddb125f88184d04e6faaa982ff8bb", + "schemaAwareHash": "7f3ec4c2c644d43e54d95da83790166d87ab6bfcb31fe5692d8262199bff6d3f", "authorization": { "is_authenticated": 
false, "scopes": [], @@ -201,7 +201,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "6bc34c108f7cf81896971bffad76dc5275d46231b4dfe492ccc205dda9a4aa16", + "schemaAwareHash": "3874fd9db4a0302422701b93506f42a5de5604355be7093fa2abe23f440161f9", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list-2.snap b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list-2.snap index fc5007829d..371fd3496e 100644 --- a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list-2.snap +++ b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list-2.snap @@ -141,7 +141,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "ff18ff586aee784ec507117854cb4b64f9693d528df1ee69c922b5d75ae637fb", + "schemaAwareHash": "b74616ae898acf3abefb83e24bde5faf0de0f9475d703b105b60c18c7372ab13", "authorization": { "is_authenticated": false, "scopes": [], @@ -203,7 +203,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "1d21a65a3b5a31e17f7834750ef5b37fb49d99d0a1e2145f00a62d43c5f8423a", + "schemaAwareHash": "31465e7b7e358ea9407067188249b51fd7342088e6084360ed0df28199cef5cc", "authorization": { "is_authenticated": false, "scopes": [], @@ -263,7 +263,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "df321f6532c2c9eda0d8c042e5f08073c24e558dd0cae01054886b79416a6c08", + "schemaAwareHash": "550ad525da9bb9497fb0d51bf7a64b7d5d73ade5ee7d2e425573dc7e2e248e99", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list.snap b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list.snap index 4c219874d6..ec86110080 100644 --- a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list.snap +++ b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list.snap @@ -141,7 +141,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "70b62e564b3924984694d90de2b10947a2f5c14ceb76d154f43bb3c638c4830b", + "schemaAwareHash": "65c1648beef44b81ac988224191b18ff469c641fd33032ef0c84165245018b62", "authorization": { "is_authenticated": false, "scopes": [], @@ -204,7 +204,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "6510f6b9672829bd9217618b78ef6f329fbddb125f88184d04e6faaa982ff8bb", + "schemaAwareHash": "7f3ec4c2c644d43e54d95da83790166d87ab6bfcb31fe5692d8262199bff6d3f", "authorization": { "is_authenticated": false, "scopes": [], @@ -265,7 +265,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "6bc34c108f7cf81896971bffad76dc5275d46231b4dfe492ccc205dda9a4aa16", + "schemaAwareHash": "3874fd9db4a0302422701b93506f42a5de5604355be7093fa2abe23f440161f9", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list_of_list-2.snap 
b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list_of_list-2.snap index 5cc97759df..354bd034a9 100644 --- a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list_of_list-2.snap +++ b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list_of_list-2.snap @@ -145,7 +145,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "cb374f6eaa19cb529eeae258f2b136dbc751e3784fdc279954e59622cfb1edde", + "schemaAwareHash": "dc1df8e8d701876c6ea7d25bbeab92a5629a82e55660ccc48fc37e12d5157efa", "authorization": { "is_authenticated": false, "scopes": [], @@ -208,7 +208,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "1d21a65a3b5a31e17f7834750ef5b37fb49d99d0a1e2145f00a62d43c5f8423a", + "schemaAwareHash": "31465e7b7e358ea9407067188249b51fd7342088e6084360ed0df28199cef5cc", "authorization": { "is_authenticated": false, "scopes": [], @@ -269,7 +269,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "df321f6532c2c9eda0d8c042e5f08073c24e558dd0cae01054886b79416a6c08", + "schemaAwareHash": "550ad525da9bb9497fb0d51bf7a64b7d5d73ade5ee7d2e425573dc7e2e248e99", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list_of_list.snap b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list_of_list.snap index 593bd573f6..e4ba5927a3 100644 --- a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list_of_list.snap +++ b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_list_of_list_of_list.snap @@ -145,7 +145,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "26ae1da614855e4edee344061c0fc95ec4613a99e012de1f33207cb5318487b8", + "schemaAwareHash": "f2466229a91f69cadfa844a20343b03668b7f85fd1310a4b20ba9382ffa2f5e7", "authorization": { "is_authenticated": false, "scopes": [], @@ -209,7 +209,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "6510f6b9672829bd9217618b78ef6f329fbddb125f88184d04e6faaa982ff8bb", + "schemaAwareHash": "7f3ec4c2c644d43e54d95da83790166d87ab6bfcb31fe5692d8262199bff6d3f", "authorization": { "is_authenticated": false, "scopes": [], @@ -271,7 +271,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "6bc34c108f7cf81896971bffad76dc5275d46231b4dfe492ccc205dda9a4aa16", + "schemaAwareHash": "3874fd9db4a0302422701b93506f42a5de5604355be7093fa2abe23f440161f9", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_shouldnt_make_article_fetch-2.snap b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_shouldnt_make_article_fetch-2.snap index 41a6433f9f..8811454f74 100644 --- a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_shouldnt_make_article_fetch-2.snap +++ b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_shouldnt_make_article_fetch-2.snap @@ -54,7 +54,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, 
"contextRewrites": null, - "schemaAwareHash": "587c887350ef75eaf4b647be94fd682616bcd33909e15fb797cee226e95fa36a", + "schemaAwareHash": "446c1a72168f736a89e4f56799333e05b26092d36fc55e22c2e92828061c787b", "authorization": { "is_authenticated": false, "scopes": [], @@ -115,7 +115,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "a0bf36d3a611df53c3a60b9b124a2887f2d266858221c606ace0985d101d64bd", + "schemaAwareHash": "f9052a9ce97a084006a1f2054b7e0fba8734f24bb53cf0f7e0ba573c7e709b98", "authorization": { "is_authenticated": false, "scopes": [], @@ -174,7 +174,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "3e84a53f967bf40d4c08254a94f3fa32a828ab3ad8184a22bb3439c596ecaaf4", + "schemaAwareHash": "027cac0584184439636aea68757da18f3e0e18142948e3b8625724f93e8720fc", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_shouldnt_make_article_fetch.snap b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_shouldnt_make_article_fetch.snap index c8fe1fb487..8ae0b59f67 100644 --- a/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_shouldnt_make_article_fetch.snap +++ b/apollo-router/tests/snapshots/type_conditions___test_type_conditions_enabled_shouldnt_make_article_fetch.snap @@ -54,7 +54,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "5201830580c9c5fadd9c59aea072878f84465c1ae9d905207fa281aa7c1d5340", + "schemaAwareHash": "cc52bb826d3c06b3ccbc421340fe3f49a81dc2b71dcb6a931a9a769745038e3f", "authorization": { "is_authenticated": false, "scopes": [], @@ -116,7 +116,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "62ff891f6971184d3e42b98f8166be72027b5479f9ec098af460a48ea6f6cbf4", + "schemaAwareHash": "6e83e0a67b509381f1a0c2dfe84db92d0dd6bf4bb23fe4c97ccd3d871364c9f4", "authorization": { "is_authenticated": false, "scopes": [], @@ -176,7 +176,7 @@ expression: response "inputRewrites": null, "outputRewrites": null, "contextRewrites": null, - "schemaAwareHash": "7e6f6850777335eb1421a30a45f6888bb9e5d0acf8f55d576d55d1c4b7d23ec7", + "schemaAwareHash": "67834874c123139d942b140fb9ff00ed4e22df25228c3e758eeb44b28d3847eb", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/deny.toml b/deny.toml index 55cd3fb0f0..8e79104c33 100644 --- a/deny.toml +++ b/deny.toml @@ -30,6 +30,7 @@ git-fetch-with-cli = true ignore = [ "RUSTSEC-2023-0071", "RUSTSEC-2024-0376", # we do not use tonic::transport::Server + "RUSTSEC-2024-0421" # we only resolve trusted subgraphs ] # This section is considered when running `cargo deny check licenses` @@ -54,6 +55,7 @@ allow = [ "MPL-2.0", "Elastic-2.0", "Unicode-DFS-2016", + "Unicode-3.0", "Zlib" ] copyleft = "warn" diff --git a/dev-docs/logging.md b/dev-docs/logging.md index abf7ef32c1..bb2517d74b 100644 --- a/dev-docs/logging.md +++ b/dev-docs/logging.md @@ -106,7 +106,7 @@ expression: yaml - fields: alg: ES256 reason: "invalid type: string \"Hmm\", expected a sequence" - index: 5 + index: 5 level: WARN message: "ignoring a key since it is not valid, enable debug logs to full content" ``` @@ -130,7 +130,7 @@ Use `with_subscriber` to attach a subscriber to an async block. 
```rust #[tokio::test] async fn test_async() { - async{...}.with_subscriber(assert_snapshot_subscriber!()) + async{...}.with_subscriber(assert_snapshot_subscriber!()).await } ``` diff --git a/dev-docs/metrics.md b/dev-docs/metrics.md index 34530201ef..37b8431d71 100644 --- a/dev-docs/metrics.md +++ b/dev-docs/metrics.md @@ -136,7 +136,7 @@ Make sure to use `.with_metrics()` method on the async block to ensure that the // Multi-threaded runtime needs to use a tokio task local to avoid tests interfering with each other async { u64_counter!("test", "test description", 1, "attr" => "val"); - assert_counter!("test", 1, "attr" => "val"); + assert_counter!("test", 1, "attr" = "val"); } .with_metrics() .await; @@ -147,7 +147,7 @@ Make sure to use `.with_metrics()` method on the async block to ensure that the async { // It's a single threaded tokio runtime, so we can still use a thread local u64_counter!("test", "test description", 1, "attr" => "val"); - assert_counter!("test", 1, "attr" => "val"); + assert_counter!("test", 1, "attr" = "val"); } .with_metrics() .await; diff --git a/dockerfiles/tracing/datadog-subgraph/package-lock.json b/dockerfiles/tracing/datadog-subgraph/package-lock.json index 849552a1fd..b2e4649c45 100644 --- a/dockerfiles/tracing/datadog-subgraph/package-lock.json +++ b/dockerfiles/tracing/datadog-subgraph/package-lock.json @@ -16,7 +16,7 @@ "graphql": "^16.5.0" }, "devDependencies": { - "typescript": "5.5.3" + "typescript": "5.7.2" } }, "node_modules/@apollo/cache-control-types": { @@ -770,9 +770,10 @@ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" }, "node_modules/body-parser": { - "version": "1.20.2", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", - "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", + "license": "MIT", "dependencies": { "bytes": "3.1.2", "content-type": "~1.0.5", @@ -782,7 +783,7 @@ "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", - "qs": "6.11.0", + "qs": "6.13.0", "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" @@ -792,6 +793,21 @@ "npm": "1.2.8000 || >= 1.4.16" } }, + "node_modules/body-parser/node_modules/qs": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/bytes": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", @@ -801,12 +817,19 @@ } }, "node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "license": "MIT", "dependencies": { - "function-bind": "^1.1.1", - 
"get-intrinsic": "^1.0.2" + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -943,6 +966,23 @@ "ms": "2.0.0" } }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/delay": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/delay/-/delay-5.0.0.tgz", @@ -1000,6 +1040,27 @@ "node": ">= 0.8" } }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/escape-html": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", @@ -1019,36 +1080,37 @@ "integrity": "sha512-8qz9nOz5VeD2z96elrEKD2U433+L3DWdUdDkOINLGOJvx1GsMBbMn0aCeu28y8/e85A6mCigBiFlYMnTBEGlSw==" }, "node_modules/express": { - "version": "4.19.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", - "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/express/-/express-4.20.0.tgz", + "integrity": "sha512-pLdae7I6QqShF5PnNTCVn4hI91Dx0Grkn2+IAsMTgMIKuQVte2dN9PeGSSAME2FR8anOhVA62QDIUaWVfEXVLw==", + "license": "MIT", "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.2", + "body-parser": "1.20.3", "content-disposition": "0.5.4", "content-type": "~1.0.4", "cookie": "0.6.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "etag": "~1.8.1", "finalhandler": "1.2.0", "fresh": "0.5.2", "http-errors": "2.0.0", - "merge-descriptors": "1.0.1", + "merge-descriptors": "1.0.3", "methods": "~1.1.2", "on-finished": "2.4.1", "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", + "path-to-regexp": "0.1.10", "proxy-addr": "~2.0.7", "qs": "6.11.0", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", - "send": "0.18.0", - "serve-static": "1.15.0", + "send": "0.19.0", + "serve-static": "1.16.0", "setprototypeof": "1.2.0", "statuses": "2.0.1", "type-is": "~1.6.18", @@ -1059,6 +1121,54 @@ "node": ">= 0.10.0" } }, + "node_modules/express/node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": 
"sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/express/node_modules/send": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/express/node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, "node_modules/finalhandler": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", @@ -1106,19 +1216,43 @@ } }, "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/get-intrinsic": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.1.tgz", - "integrity": "sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "license": "MIT", "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", "has-proto": "^1.0.1", - "has-symbols": "^1.0.3" + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gopd": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.1.0.tgz", + "integrity": "sha512-FQoVQnqcdk4hVM4JN1eromaun4iuS34oStkdlLENLdpULsuQcTyXj8w7ayhuUfPwEYZ1ZOooOTT6fdA9Vmx/RA==", + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -1133,21 +1267,26 @@ "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" } }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": 
"sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "license": "MIT", "dependencies": { - "function-bind": "^1.1.1" + "es-define-property": "^1.0.0" }, - "engines": { - "node": ">= 0.4.0" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/has-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", - "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.1.0.tgz", + "integrity": "sha512-QLdzI9IIO1Jg7f9GT1gXpPpXArAn6cS31R1eEZqz08Gc+uQ8/XiqHWt17Fiw+2p6oTTIq5GXEpQkAlA88YRl/Q==", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7" + }, "engines": { "node": ">= 0.4" }, @@ -1159,6 +1298,7 @@ "version": "1.0.3", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "license": "MIT", "engines": { "node": ">= 0.4" }, @@ -1166,6 +1306,18 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/http-errors": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", @@ -1325,9 +1477,13 @@ } }, "node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } }, "node_modules/methods": { "version": "1.1.2", @@ -1447,9 +1603,13 @@ } }, "node_modules/object-inspect": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", - "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", + "version": "1.13.3", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.3.tgz", + "integrity": "sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -1496,9 +1656,10 @@ } }, "node_modules/path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + 
"version": "0.1.10", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz", + "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==", + "license": "MIT" }, "node_modules/pprof-format": { "version": "2.1.0", @@ -1665,9 +1826,10 @@ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }, "node_modules/serve-static": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", - "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "version": "1.16.0", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.0.tgz", + "integrity": "sha512-pDLK8zwl2eKaYrs8mrPZBJua4hMplRWJ1tIFksVC3FtBEBnl8dxgeHtsaMS8DhS9i4fLObaon6ABoc4/hQGdPA==", + "license": "MIT", "dependencies": { "encodeurl": "~1.0.2", "escape-html": "~1.0.3", @@ -1678,6 +1840,23 @@ "node": ">= 0.8.0" } }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/setprototypeof": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", @@ -1704,13 +1883,18 @@ } }, "node_modules/side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "license": "MIT", "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -1768,9 +1952,9 @@ } }, "node_modules/typescript": { - "version": "5.5.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.3.tgz", - "integrity": "sha512-/hreyEujaB0w76zKo6717l3L0o/qEUtRgdvUBvlkhoWeOVMjMuHNHk0BRBzikzuGDqNmPQbg5ifMEqsHLiIUcQ==", + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.2.tgz", + "integrity": "sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg==", "dev": true, "license": "Apache-2.0", "bin": { diff --git a/dockerfiles/tracing/datadog-subgraph/package.json b/dockerfiles/tracing/datadog-subgraph/package.json index 0d38223e9d..58aa9aa002 100644 --- a/dockerfiles/tracing/datadog-subgraph/package.json +++ b/dockerfiles/tracing/datadog-subgraph/package.json @@ -18,6 +18,6 @@ "graphql": "^16.5.0" }, "devDependencies": { - "typescript": "5.5.3" + "typescript": "5.7.2" } } diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml 
index adb40c2e47..4eec369eb4 100644
--- a/dockerfiles/tracing/docker-compose.datadog.yml
+++ b/dockerfiles/tracing/docker-compose.datadog.yml
@@ -3,7 +3,7 @@ services:
   apollo-router:
     container_name: apollo-router
-    image: ghcr.io/apollographql/router:v1.58.1
+    image: ghcr.io/apollographql/router:v1.59.0
     volumes:
       - ./supergraph.graphql:/etc/config/supergraph.graphql
       - ./router/datadog.router.yaml:/etc/config/configuration.yaml
diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml
index 9ea992be09..72a2857397 100644
--- a/dockerfiles/tracing/docker-compose.jaeger.yml
+++ b/dockerfiles/tracing/docker-compose.jaeger.yml
@@ -4,7 +4,7 @@ services:
   apollo-router:
     container_name: apollo-router
     #build: ./router
-    image: ghcr.io/apollographql/router:v1.58.1
+    image: ghcr.io/apollographql/router:v1.59.0
     volumes:
       - ./supergraph.graphql:/etc/config/supergraph.graphql
       - ./router/jaeger.router.yaml:/etc/config/configuration.yaml
diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml
index fd60819066..0ebdfb9ce7 100644
--- a/dockerfiles/tracing/docker-compose.zipkin.yml
+++ b/dockerfiles/tracing/docker-compose.zipkin.yml
@@ -4,7 +4,7 @@ services:
   apollo-router:
     container_name: apollo-router
     build: ./router
-    image: ghcr.io/apollographql/router:v1.58.1
+    image: ghcr.io/apollographql/router:v1.59.0
    volumes:
       - ./supergraph.graphql:/etc/config/supergraph.graphql
       - ./router/zipkin.router.yaml:/etc/config/configuration.yaml
@@ -36,6 +36,6 @@ services:
   zipkin:
     container_name: zipkin
-    image: openzipkin/zipkin:3.0.6
+    image: openzipkin/zipkin:3.4.3
     ports:
       - 9411:9411
diff --git a/dockerfiles/tracing/jaeger-subgraph/package-lock.json b/dockerfiles/tracing/jaeger-subgraph/package-lock.json
index 3687cb6aa4..6918784db6 100644
--- a/dockerfiles/tracing/jaeger-subgraph/package-lock.json
+++ b/dockerfiles/tracing/jaeger-subgraph/package-lock.json
@@ -18,7 +18,7 @@
         "opentracing": "^0.14.7"
       },
       "devDependencies": {
-        "typescript": "5.5.3"
+        "typescript": "5.7.2"
       }
     },
     "node_modules/@apollo/cache-control-types": {
@@ -610,9 +610,9 @@
       "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="
     },
     "node_modules/body-parser": {
-      "version": "1.20.2",
-      "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz",
-      "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==",
+      "version": "1.20.3",
+      "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz",
+      "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==",
       "dependencies": {
         "bytes": "3.1.2",
         "content-type": "~1.0.5",
@@ -622,7 +622,7 @@
         "http-errors": "2.0.0",
         "iconv-lite": "0.4.24",
         "on-finished": "2.4.1",
-        "qs": "6.11.0",
+        "qs": "6.13.0",
         "raw-body": "2.5.2",
         "type-is": "~1.6.18",
         "unpipe": "1.0.0"
@@ -632,6 +632,20 @@
         "npm": "1.2.8000 || >= 1.4.16"
       }
     },
+    "node_modules/body-parser/node_modules/qs": {
+      "version": "6.13.0",
+      "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz",
+      "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==",
+      "dependencies": {
+        "side-channel": "^1.0.6"
+      },
+      "engines": {
+        "node": ">=0.6"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
     "node_modules/bufrw": {
       "version": "1.3.0",
       "resolved": "https://registry.npmjs.org/bufrw/-/bufrw-1.3.0.tgz",
@@
-655,12 +669,18 @@ } }, "node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -729,6 +749,22 @@ "ms": "2.0.0" } }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", @@ -776,6 +812,25 @@ "xtend": "~4.0.0" } }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/escape-html": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", @@ -790,36 +845,36 @@ } }, "node_modules/express": { - "version": "4.19.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", - "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/express/-/express-4.20.0.tgz", + "integrity": "sha512-pLdae7I6QqShF5PnNTCVn4hI91Dx0Grkn2+IAsMTgMIKuQVte2dN9PeGSSAME2FR8anOhVA62QDIUaWVfEXVLw==", "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.2", + "body-parser": "1.20.3", "content-disposition": "0.5.4", "content-type": "~1.0.4", "cookie": "0.6.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "etag": "~1.8.1", "finalhandler": "1.2.0", "fresh": "0.5.2", "http-errors": "2.0.0", - "merge-descriptors": "1.0.1", + "merge-descriptors": "1.0.3", "methods": "~1.1.2", "on-finished": "2.4.1", "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", + "path-to-regexp": "0.1.10", "proxy-addr": "~2.0.7", "qs": "6.11.0", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", - "send": "0.18.0", - "serve-static": "1.15.0", + "send": "0.19.0", + "serve-static": 
"1.16.0", "setprototypeof": "1.2.0", "statuses": "2.0.1", "type-is": "~1.6.18", @@ -830,6 +885,50 @@ "node": ">= 0.10.0" } }, + "node_modules/express/node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/express/node_modules/send": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/express/node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, "node_modules/finalhandler": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", @@ -877,19 +976,40 @@ } }, "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/get-intrinsic": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.1.tgz", - "integrity": "sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", "has-proto": "^1.0.1", - "has-symbols": "^1.0.3" + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gopd": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.1.0.tgz", + "integrity": "sha512-FQoVQnqcdk4hVM4JN1eromaun4iuS34oStkdlLENLdpULsuQcTyXj8w7ayhuUfPwEYZ1ZOooOTT6fdA9Vmx/RA==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -917,21 +1037,24 @@ "graphql": "^0.9.0 || ^0.10.0 || ^0.11.0 || 
^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0" } }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", "dependencies": { - "function-bind": "^1.1.1" + "es-define-property": "^1.0.0" }, - "engines": { - "node": ">= 0.4.0" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/has-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", - "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.1.0.tgz", + "integrity": "sha512-QLdzI9IIO1Jg7f9GT1gXpPpXArAn6cS31R1eEZqz08Gc+uQ8/XiqHWt17Fiw+2p6oTTIq5GXEpQkAlA88YRl/Q==", + "dependencies": { + "call-bind": "^1.0.7" + }, "engines": { "node": ">= 0.4" }, @@ -950,6 +1073,17 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/hexer": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/hexer/-/hexer-1.5.0.tgz", @@ -1065,9 +1199,12 @@ } }, "node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } }, "node_modules/methods": { "version": "1.1.2", @@ -1163,9 +1300,12 @@ } }, "node_modules/object-inspect": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", - "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", + "version": "1.13.3", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.3.tgz", + "integrity": "sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA==", + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -1198,9 +1338,9 @@ } }, "node_modules/path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + "version": "0.1.10", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz", + "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==" }, 
"node_modules/process": { "version": "0.10.1", @@ -1319,9 +1459,9 @@ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }, "node_modules/serve-static": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", - "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "version": "1.16.0", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.0.tgz", + "integrity": "sha512-pDLK8zwl2eKaYrs8mrPZBJua4hMplRWJ1tIFksVC3FtBEBnl8dxgeHtsaMS8DhS9i4fLObaon6ABoc4/hQGdPA==", "dependencies": { "encodeurl": "~1.0.2", "escape-html": "~1.0.3", @@ -1332,6 +1472,22 @@ "node": ">= 0.8.0" } }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/setprototypeof": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", @@ -1350,13 +1506,17 @@ } }, "node_modules/side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -1430,9 +1590,9 @@ } }, "node_modules/typescript": { - "version": "5.5.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.3.tgz", - "integrity": "sha512-/hreyEujaB0w76zKo6717l3L0o/qEUtRgdvUBvlkhoWeOVMjMuHNHk0BRBzikzuGDqNmPQbg5ifMEqsHLiIUcQ==", + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.2.tgz", + "integrity": "sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg==", "dev": true, "bin": { "tsc": "bin/tsc", @@ -1990,9 +2150,9 @@ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" }, "body-parser": { - "version": "1.20.2", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", - "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", "requires": { "bytes": "3.1.2", "content-type": "~1.0.5", @@ -2002,10 +2162,20 @@ "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", - "qs": "6.11.0", + "qs": "6.13.0", "raw-body": "2.5.2", 
"type-is": "~1.6.18", "unpipe": "1.0.0" + }, + "dependencies": { + "qs": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "requires": { + "side-channel": "^1.0.6" + } + } } }, "bufrw": { @@ -2025,12 +2195,15 @@ "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==" }, "call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", "requires": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" } }, "combined-stream": { @@ -2081,6 +2254,16 @@ "ms": "2.0.0" } }, + "define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "requires": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + } + }, "delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", @@ -2115,6 +2298,19 @@ "xtend": "~4.0.0" } }, + "es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "requires": { + "get-intrinsic": "^1.2.4" + } + }, + "es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==" + }, "escape-html": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", @@ -2126,41 +2322,80 @@ "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==" }, "express": { - "version": "4.19.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", - "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/express/-/express-4.20.0.tgz", + "integrity": "sha512-pLdae7I6QqShF5PnNTCVn4hI91Dx0Grkn2+IAsMTgMIKuQVte2dN9PeGSSAME2FR8anOhVA62QDIUaWVfEXVLw==", "requires": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.2", + "body-parser": "1.20.3", "content-disposition": "0.5.4", "content-type": "~1.0.4", "cookie": "0.6.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "etag": "~1.8.1", "finalhandler": "1.2.0", "fresh": "0.5.2", "http-errors": "2.0.0", - "merge-descriptors": "1.0.1", + "merge-descriptors": "1.0.3", "methods": "~1.1.2", "on-finished": "2.4.1", "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", + 
"path-to-regexp": "0.1.10", "proxy-addr": "~2.0.7", "qs": "6.11.0", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", - "send": "0.18.0", - "serve-static": "1.15.0", + "send": "0.19.0", + "serve-static": "1.16.0", "setprototypeof": "1.2.0", "statuses": "2.0.1", "type-is": "~1.6.18", "utils-merge": "1.0.1", "vary": "~1.1.2" + }, + "dependencies": { + "encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==" + }, + "ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "send": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", + "requires": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "dependencies": { + "encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==" + } + } + } } }, "finalhandler": { @@ -2198,19 +2433,28 @@ "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==" }, "function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==" }, "get-intrinsic": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.1.tgz", - "integrity": "sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", "requires": { - "function-bind": "^1.1.1", - "has": "^1.0.3", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", "has-proto": "^1.0.1", - "has-symbols": "^1.0.3" + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + } + }, + "gopd": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.1.0.tgz", + "integrity": "sha512-FQoVQnqcdk4hVM4JN1eromaun4iuS34oStkdlLENLdpULsuQcTyXj8w7ayhuUfPwEYZ1ZOooOTT6fdA9Vmx/RA==", + "requires": { + "get-intrinsic": "^1.2.4" } }, "graphql": { @@ -2226,24 +2470,35 @@ "tslib": "^2.1.0" } }, - "has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "has-property-descriptors": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", "requires": { - "function-bind": "^1.1.1" + "es-define-property": "^1.0.0" } }, "has-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", - "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==" + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.1.0.tgz", + "integrity": "sha512-QLdzI9IIO1Jg7f9GT1gXpPpXArAn6cS31R1eEZqz08Gc+uQ8/XiqHWt17Fiw+2p6oTTIq5GXEpQkAlA88YRl/Q==", + "requires": { + "call-bind": "^1.0.7" + } }, "has-symbols": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==" }, + "hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "requires": { + "function-bind": "^1.1.2" + } + }, "hexer": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/hexer/-/hexer-1.5.0.tgz", @@ -2328,9 +2583,9 @@ "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==" }, "merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==" }, "methods": { "version": "1.1.2", @@ -2394,9 +2649,9 @@ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==" }, "object-inspect": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", - "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==" + "version": "1.13.3", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.3.tgz", + "integrity": "sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA==" }, "on-finished": { "version": "2.4.1", @@ -2417,9 +2672,9 @@ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==" }, "path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + "version": "0.1.10", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz", + "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==" }, "process": { "version": "0.10.1", @@ -2502,9 +2757,9 @@ } }, "serve-static": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", - "integrity": 
"sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "version": "1.16.0", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.0.tgz", + "integrity": "sha512-pDLK8zwl2eKaYrs8mrPZBJua4hMplRWJ1tIFksVC3FtBEBnl8dxgeHtsaMS8DhS9i4fLObaon6ABoc4/hQGdPA==", "requires": { "encodeurl": "~1.0.2", "escape-html": "~1.0.3", @@ -2512,6 +2767,19 @@ "send": "0.18.0" } }, + "set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "requires": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + } + }, "setprototypeof": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", @@ -2527,13 +2795,14 @@ } }, "side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", "requires": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" } }, "statuses": { @@ -2588,9 +2857,9 @@ } }, "typescript": { - "version": "5.5.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.3.tgz", - "integrity": "sha512-/hreyEujaB0w76zKo6717l3L0o/qEUtRgdvUBvlkhoWeOVMjMuHNHk0BRBzikzuGDqNmPQbg5ifMEqsHLiIUcQ==", + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.2.tgz", + "integrity": "sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg==", "dev": true }, "unpipe": { diff --git a/dockerfiles/tracing/jaeger-subgraph/package.json b/dockerfiles/tracing/jaeger-subgraph/package.json index 2be04d726e..c5c24dfa63 100644 --- a/dockerfiles/tracing/jaeger-subgraph/package.json +++ b/dockerfiles/tracing/jaeger-subgraph/package.json @@ -19,6 +19,6 @@ "opentracing": "^0.14.7" }, "devDependencies": { - "typescript": "5.5.3" + "typescript": "5.7.2" } } diff --git a/dockerfiles/tracing/zipkin-subgraph/package-lock.json b/dockerfiles/tracing/zipkin-subgraph/package-lock.json index dbfad73245..cba9a4fb28 100644 --- a/dockerfiles/tracing/zipkin-subgraph/package-lock.json +++ b/dockerfiles/tracing/zipkin-subgraph/package-lock.json @@ -19,7 +19,7 @@ "zipkin-javascript-opentracing": "^3.0.0" }, "devDependencies": { - "typescript": "5.5.3" + "typescript": "5.7.2" } }, "node_modules/@apollo/cache-control-types": { @@ -631,9 +631,9 @@ "peer": true }, "node_modules/body-parser": { - "version": "1.20.2", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", - "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": 
"sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", "dependencies": { "bytes": "3.1.2", "content-type": "~1.0.5", @@ -643,7 +643,7 @@ "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", - "qs": "6.11.0", + "qs": "6.13.0", "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" @@ -653,6 +653,20 @@ "npm": "1.2.8000 || >= 1.4.16" } }, + "node_modules/body-parser/node_modules/qs": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "dependencies": { + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/bufrw": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/bufrw/-/bufrw-1.3.0.tgz", @@ -676,12 +690,18 @@ } }, "node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -750,6 +770,22 @@ "ms": "2.0.0" } }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", @@ -797,6 +833,25 @@ "xtend": "~4.0.0" } }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/escape-html": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", @@ -811,36 +866,36 @@ } }, "node_modules/express": { - "version": "4.19.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", - "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", + "version": "4.20.0", + "resolved": 
"https://registry.npmjs.org/express/-/express-4.20.0.tgz", + "integrity": "sha512-pLdae7I6QqShF5PnNTCVn4hI91Dx0Grkn2+IAsMTgMIKuQVte2dN9PeGSSAME2FR8anOhVA62QDIUaWVfEXVLw==", "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.2", + "body-parser": "1.20.3", "content-disposition": "0.5.4", "content-type": "~1.0.4", "cookie": "0.6.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "etag": "~1.8.1", "finalhandler": "1.2.0", "fresh": "0.5.2", "http-errors": "2.0.0", - "merge-descriptors": "1.0.1", + "merge-descriptors": "1.0.3", "methods": "~1.1.2", "on-finished": "2.4.1", "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", + "path-to-regexp": "0.1.10", "proxy-addr": "~2.0.7", "qs": "6.11.0", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", - "send": "0.18.0", - "serve-static": "1.15.0", + "send": "0.19.0", + "serve-static": "1.16.0", "setprototypeof": "1.2.0", "statuses": "2.0.1", "type-is": "~1.6.18", @@ -851,6 +906,50 @@ "node": ">= 0.10.0" } }, + "node_modules/express/node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/express/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/express/node_modules/send": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/express/node_modules/send/node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, "node_modules/finalhandler": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz", @@ -898,19 +997,40 @@ } }, "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/get-intrinsic": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.1.tgz", - "integrity": "sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw==", + "version": "1.2.4", + 
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", "has-proto": "^1.0.1", - "has-symbols": "^1.0.3" + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gopd": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.1.0.tgz", + "integrity": "sha512-FQoVQnqcdk4hVM4JN1eromaun4iuS34oStkdlLENLdpULsuQcTyXj8w7ayhuUfPwEYZ1ZOooOTT6fdA9Vmx/RA==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -938,21 +1058,24 @@ "graphql": "^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0" } }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", "dependencies": { - "function-bind": "^1.1.1" + "es-define-property": "^1.0.0" }, - "engines": { - "node": ">= 0.4.0" + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/has-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", - "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.1.0.tgz", + "integrity": "sha512-QLdzI9IIO1Jg7f9GT1gXpPpXArAn6cS31R1eEZqz08Gc+uQ8/XiqHWt17Fiw+2p6oTTIq5GXEpQkAlA88YRl/Q==", + "dependencies": { + "call-bind": "^1.0.7" + }, "engines": { "node": ">= 0.4" }, @@ -971,6 +1094,17 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/hexer": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/hexer/-/hexer-1.5.0.tgz", @@ -1092,9 +1226,12 @@ } }, "node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==", + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } }, "node_modules/methods": { "version": "1.1.2", @@ -1190,9 +1327,12 @@ } }, "node_modules/object-inspect": { - "version": "1.12.3", - "resolved": 
"https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", - "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", + "version": "1.13.3", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.3.tgz", + "integrity": "sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA==", + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -1225,9 +1365,9 @@ } }, "node_modules/path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + "version": "0.1.10", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz", + "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==" }, "node_modules/process": { "version": "0.10.1", @@ -1346,9 +1486,9 @@ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" }, "node_modules/serve-static": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", - "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "version": "1.16.0", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.0.tgz", + "integrity": "sha512-pDLK8zwl2eKaYrs8mrPZBJua4hMplRWJ1tIFksVC3FtBEBnl8dxgeHtsaMS8DhS9i4fLObaon6ABoc4/hQGdPA==", "dependencies": { "encodeurl": "~1.0.2", "escape-html": "~1.0.3", @@ -1359,6 +1499,22 @@ "node": ">= 0.8.0" } }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/setprototypeof": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", @@ -1377,13 +1533,17 @@ } }, "node_modules/side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -1457,9 +1617,9 @@ } }, "node_modules/typescript": { - "version": "5.5.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.3.tgz", - "integrity": "sha512-/hreyEujaB0w76zKo6717l3L0o/qEUtRgdvUBvlkhoWeOVMjMuHNHk0BRBzikzuGDqNmPQbg5ifMEqsHLiIUcQ==", + "version": "5.7.2", + "resolved": 
"https://registry.npmjs.org/typescript/-/typescript-5.7.2.tgz", + "integrity": "sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg==", "dev": true, "bin": { "tsc": "bin/tsc", @@ -2055,9 +2215,9 @@ "peer": true }, "body-parser": { - "version": "1.20.2", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", - "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", + "version": "1.20.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", + "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", "requires": { "bytes": "3.1.2", "content-type": "~1.0.5", @@ -2067,10 +2227,20 @@ "http-errors": "2.0.0", "iconv-lite": "0.4.24", "on-finished": "2.4.1", - "qs": "6.11.0", + "qs": "6.13.0", "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" + }, + "dependencies": { + "qs": { + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", + "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "requires": { + "side-channel": "^1.0.6" + } + } } }, "bufrw": { @@ -2090,12 +2260,15 @@ "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==" }, "call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", "requires": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" } }, "combined-stream": { @@ -2146,6 +2319,16 @@ "ms": "2.0.0" } }, + "define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "requires": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + } + }, "delayed-stream": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", @@ -2180,6 +2363,19 @@ "xtend": "~4.0.0" } }, + "es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "requires": { + "get-intrinsic": "^1.2.4" + } + }, + "es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==" + }, "escape-html": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", @@ -2191,41 +2387,80 @@ "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==" }, "express": { - "version": "4.19.2", - "resolved": 
"https://registry.npmjs.org/express/-/express-4.19.2.tgz", - "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", + "version": "4.20.0", + "resolved": "https://registry.npmjs.org/express/-/express-4.20.0.tgz", + "integrity": "sha512-pLdae7I6QqShF5PnNTCVn4hI91Dx0Grkn2+IAsMTgMIKuQVte2dN9PeGSSAME2FR8anOhVA62QDIUaWVfEXVLw==", "requires": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.2", + "body-parser": "1.20.3", "content-disposition": "0.5.4", "content-type": "~1.0.4", "cookie": "0.6.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "etag": "~1.8.1", "finalhandler": "1.2.0", "fresh": "0.5.2", "http-errors": "2.0.0", - "merge-descriptors": "1.0.1", + "merge-descriptors": "1.0.3", "methods": "~1.1.2", "on-finished": "2.4.1", "parseurl": "~1.3.3", - "path-to-regexp": "0.1.7", + "path-to-regexp": "0.1.10", "proxy-addr": "~2.0.7", "qs": "6.11.0", "range-parser": "~1.2.1", "safe-buffer": "5.2.1", - "send": "0.18.0", - "serve-static": "1.15.0", + "send": "0.19.0", + "serve-static": "1.16.0", "setprototypeof": "1.2.0", "statuses": "2.0.1", "type-is": "~1.6.18", "utils-merge": "1.0.1", "vary": "~1.1.2" + }, + "dependencies": { + "encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==" + }, + "ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "send": { + "version": "0.19.0", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", + "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", + "requires": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "dependencies": { + "encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==" + } + } + } } }, "finalhandler": { @@ -2263,19 +2498,28 @@ "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==" }, "function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==" }, "get-intrinsic": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.1.tgz", - "integrity": "sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": 
"sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", "requires": { - "function-bind": "^1.1.1", - "has": "^1.0.3", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", "has-proto": "^1.0.1", - "has-symbols": "^1.0.3" + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + } + }, + "gopd": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.1.0.tgz", + "integrity": "sha512-FQoVQnqcdk4hVM4JN1eromaun4iuS34oStkdlLENLdpULsuQcTyXj8w7ayhuUfPwEYZ1ZOooOTT6fdA9Vmx/RA==", + "requires": { + "get-intrinsic": "^1.2.4" } }, "graphql": { @@ -2291,24 +2535,35 @@ "tslib": "^2.1.0" } }, - "has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", "requires": { - "function-bind": "^1.1.1" + "es-define-property": "^1.0.0" } }, "has-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", - "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==" + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.1.0.tgz", + "integrity": "sha512-QLdzI9IIO1Jg7f9GT1gXpPpXArAn6cS31R1eEZqz08Gc+uQ8/XiqHWt17Fiw+2p6oTTIq5GXEpQkAlA88YRl/Q==", + "requires": { + "call-bind": "^1.0.7" + } }, "has-symbols": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==" }, + "hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "requires": { + "function-bind": "^1.1.2" + } + }, "hexer": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/hexer/-/hexer-1.5.0.tgz", @@ -2399,9 +2654,9 @@ "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==" }, "merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz", + "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==" }, "methods": { "version": "1.1.2", @@ -2465,9 +2720,9 @@ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==" }, "object-inspect": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", - "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==" + "version": "1.13.3", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.3.tgz", + "integrity": "sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA==" }, 
"on-finished": { "version": "2.4.1", @@ -2488,9 +2743,9 @@ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==" }, "path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ==" + "version": "0.1.10", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz", + "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==" }, "process": { "version": "0.10.1", @@ -2573,9 +2828,9 @@ } }, "serve-static": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz", - "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==", + "version": "1.16.0", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.0.tgz", + "integrity": "sha512-pDLK8zwl2eKaYrs8mrPZBJua4hMplRWJ1tIFksVC3FtBEBnl8dxgeHtsaMS8DhS9i4fLObaon6ABoc4/hQGdPA==", "requires": { "encodeurl": "~1.0.2", "escape-html": "~1.0.3", @@ -2583,6 +2838,19 @@ "send": "0.18.0" } }, + "set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "requires": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + } + }, "setprototypeof": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", @@ -2598,13 +2866,14 @@ } }, "side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", "requires": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" } }, "statuses": { @@ -2659,9 +2928,9 @@ } }, "typescript": { - "version": "5.5.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.3.tgz", - "integrity": "sha512-/hreyEujaB0w76zKo6717l3L0o/qEUtRgdvUBvlkhoWeOVMjMuHNHk0BRBzikzuGDqNmPQbg5ifMEqsHLiIUcQ==", + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.7.2.tgz", + "integrity": "sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg==", "dev": true }, "unpipe": { diff --git a/dockerfiles/tracing/zipkin-subgraph/package.json b/dockerfiles/tracing/zipkin-subgraph/package.json index de7de6f96b..96688dcfa9 100644 --- a/dockerfiles/tracing/zipkin-subgraph/package.json +++ b/dockerfiles/tracing/zipkin-subgraph/package.json @@ -20,6 +20,6 @@ "zipkin-javascript-opentracing": "^3.0.0" }, "devDependencies": { - "typescript": "5.5.3" + "typescript": "5.7.2" } } diff --git a/docs/source/images/router/datadog-apm-ops-example.png 
b/docs/source/images/router/datadog-apm-ops-example.png new file mode 100644 index 0000000000..6d92a5e2d5 Binary files /dev/null and b/docs/source/images/router/datadog-apm-ops-example.png differ diff --git a/docs/source/reference/router/configuration.mdx b/docs/source/reference/router/configuration.mdx index fcb888892c..481ff80cb6 100644 --- a/docs/source/reference/router/configuration.mdx +++ b/docs/source/reference/router/configuration.mdx @@ -106,7 +106,7 @@ This reference lists and describes the options supported by the `router` binary. The [supergraph schema](/federation/federated-types/overview#supergraph-schema) of a router. Specified by absolute or relative path (`-s` / `--supergraph `, or `APOLLO_ROUTER_SUPERGRAPH_PATH`), or a comma-separated list of URLs (`APOLLO_ROUTER_SUPERGRAPH_URLS`). -> 💡 Avoid embedding tokens in `APOLLO_ROUTER_SUPERGRAPH_URLS` because the URLs may appear in log messages. +> 💡 Avoid embedding tokens in `APOLLO_ROUTER_SUPERGRAPH_URLS` because the URLs may appear in log messages. Setting this option disables polling from Apollo Uplink to fetch the latest supergraph schema. @@ -176,7 +176,7 @@ If set, a router runs in dev mode to help with local development. -If set, the router watches for changes to its configuration file and any supergraph file passed with `--supergraph` and reloads them automatically without downtime. This setting only affects local files provided to the router. The supergraph and configuration provided from GraphOS via Launches (and delivered via Uplink) are _always_ loaded automatically, regardless of this setting. +If set, the router watches for changes to its configuration file and any supergraph file passed with `--supergraph` and reloads them automatically without downtime. This setting only affects local files provided to the router. The supergraph and configuration provided from GraphOS via Launches (and delivered via Uplink) are _always_ loaded automatically, regardless of this setting. @@ -301,7 +301,6 @@ If set, the listen address of the router. - @@ -445,7 +444,7 @@ supergraph: supergraph: # The socket address and port to listen on. # Note that this must be quoted to avoid interpretation as an array in YAML. - listen: '[::1]:4000' + listen: "[::1]:4000" ``` #### Unix socket @@ -511,39 +510,39 @@ The router can serve any of the following landing pages to browsers that visit i - A basic landing page that displays an example query `curl` command (default) - ```yaml title="router.yaml" - # This is the default behavior. You don't need to include this config. - homepage: - enabled: true - ``` + ```yaml title="router.yaml" + # This is the default behavior. You don't need to include this config. + homepage: + enabled: true + ``` - _No_ landing page - ```yaml title="router.yaml" - homepage: - enabled: false - ``` + ```yaml title="router.yaml" + homepage: + enabled: false + ``` - [Apollo Sandbox](/graphos/explorer/sandbox), which enables you to explore your schema and compose operations against it using the Explorer - Note the additional configuration required to use Sandbox: + Note the additional configuration required to use Sandbox: - ```yaml title="router.yaml" - sandbox: - enabled: true + ```yaml title="router.yaml" + sandbox: + enabled: true - # Sandbox uses introspection to obtain your router's schema. - supergraph: - introspection: true + # Sandbox uses introspection to obtain your router's schema. + supergraph: + introspection: true - # Sandbox requires the default landing page to be disabled. 
- homepage: - enabled: false - ``` + # Sandbox requires the default landing page to be disabled. + homepage: + enabled: false + ``` - **Do not enable Sandbox in production.** Sandbox requires enabling introspection, which is strongly discouraged in production environments. + **Do not enable Sandbox in production.** Sandbox requires enabling introspection, which is strongly discouraged in production environments. @@ -559,7 +558,7 @@ override_subgraph_url: accounts: "${env.ACCOUNTS_SUBGRAPH_HOST_URL}" ``` -In this example, the `organizations` subgraph URL is overridden to point to `http://localhost:8080`, and the `accounts` subgraph URL is overridden to point to a new URL using [variable expansion](#variable-expansion). The URL specified in the supergraph schema is ignored. +In this example, the `organizations` subgraph URL is overridden to point to `http://localhost:8080`, and the `accounts` subgraph URL is overridden to point to a new URL using [variable expansion](#variable-expansion). The URL specified in the supergraph schema is ignored. Any subgraphs that are _omitted_ from `override_subgraph_url` continue to use the routing URL specified in the supergraph schema. @@ -575,7 +574,8 @@ By default, the router stores the following data in its in-memory cache to impro You can configure certain caching behaviors for generated query plans and APQ (but not introspection responses). For details, see [In-Memory Caching in the router](/router/configuration/in-memory-caching/). -**If you have a GraphOS Enterprise plan:** +**If you have a GraphOS Enterprise plan:** + - You can configure a Redis-backed _distributed_ cache that enables multiple router instances to share cached values. For details, see [Distributed caching in the GraphOS Router](/router/configuration/distributed-caching/). - You can configure a Redis-backed _entity_ cache that enables a client query to retrieve cached entity data split between subgraph responses. For details, see [Subgraph entity caching in the GraphOS Router](/router/configuration/entity-caching/). @@ -589,17 +589,12 @@ Starting with v1.49.0, the router can run a Rust-native query planner. This nati -Starting with v1.57.0, to run the most performant and resource-efficient native query planner and to disable the V8 JavaScript runtime in the router, set the following options in your `router.yaml`: - -```yaml title="router.yaml" -experimental_query_planner_mode: new -``` +Starting with v1.59.0, the native query planner is GA and is run by default. -You can also improve throughput by reducing the size of queries sent to subgraphs with the following option: +If you need to run the deprecated JavaScript-based implementation, configure your router's query planner mode to `legacy`: ```yaml title="router.yaml" -supergraph: - generate_query_fragments: true +experimental_query_planner_mode: legacy ``` @@ -612,8 +607,7 @@ Learn more in [Native Query Planner](/router/executing-operations/native-query-p - + You can improve the performance of the router's query planner by configuring parallelized query planning. @@ -624,12 +618,13 @@ To resolve such blocking scenarios, you can enable parallel query planning. Conf ```yaml title="router.yaml" supergraph: query_planning: - experimental_parallelism: auto # number of available cpus + experimental_parallelism: auto # number of available cpus ``` -The value of `experimental_parallelism` is the number of query planners in the router's _query planner pool_.
A query planner pool is a preallocated set of query planners from which the router can use to plan operations. The total number of pools is the maximum number of query planners that can run in parallel and therefore the maximum number of operations that can be worked on simultaneously. +The value of `experimental_parallelism` is the number of query planners in the router's _query planner pool_. A query planner pool is a preallocated set of query planners that the router can use to plan operations. The total number of pools is the maximum number of query planners that can run in parallel and therefore the maximum number of operations that can be worked on simultaneously. Valid values of `experimental_parallelism`: + - Any integer starting from `1` - The special value `auto`, which sets the number of query planners equal to the number of available CPUs on the router's host machine @@ -661,7 +656,7 @@ It also includes other improvements that make it more likely that two operations Configure enhanced operation signature normalization in `router.yaml` with the `telemetry.apollo.signature_normalization_algorithm` option: ```yaml title="router.yaml" -telemetry: +telemetry: apollo: signature_normalization_algorithm: enhanced # Default is legacy ``` @@ -678,14 +673,14 @@ Given the following example operation: ```graphql showLineNumbers=false query InlineInputTypeQuery { inputTypeQuery( - input: { - inputString: "foo", - inputInt: 42, - inputBoolean: null, - nestedType: { someFloat: 4.2 }, - enumInput: SOME_VALUE_1, - nestedTypeList: [ { someFloat: 4.2, someNullableFloat: null } ], - listInput: [1, 2, 3] + input: { + inputString: "foo" + inputInt: 42 + inputBoolean: null + nestedType: { someFloat: 4.2 } + enumInput: SOME_VALUE_1 + nestedTypeList: [{ someFloat: 4.2, someNullableFloat: null }] + listInput: [1, 2, 3] } ) { enumResponse
@@ -891,6 +887,7 @@ You can [configure checks to ignore default values changes](/graphos/platform/sc ##### Nullable input object field removal + Removing a nullable input object field is always considered a breaking change. Removing a nullable input object field is only considered a breaking change if the nullable field is present in historical operations. If the nullable field is always omitted in historical operations, its removal isn't considered a breaking change. @@ -904,10 +901,10 @@ You can [configure checks to ignore default values changes](/graphos/platform/sc Changing a nullable input object field to non-nullable is considered a breaking change. Changing a nullable input object field to non-nullable is only considered a breaking change if the field had a null value in historical operations. If the field was always a non-null value in historical operations, changing it to non-nullable isn't considered a breaking change. + - You won't see an immediate change in checks behavior when you first turn on extended reference reporting. @@ -982,28 +979,31 @@ TLS support is configured in the `tls` section, under the `supergraph` key for t The list of supported TLS versions and algorithms is static, it cannot be configured. Supported TLS versions: -* TLS 1.2 -* TLS 1.3 + +- TLS 1.2 +- TLS 1.3 Supported cipher suites: -* TLS13_AES_256_GCM_SHA384 -* TLS13_AES_128_GCM_SHA256 -* TLS13_CHACHA20_POLY1305_SHA256 -* TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 -* TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 -* TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 -* TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 -* TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 -* TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 + +- TLS13_AES_256_GCM_SHA384 +- TLS13_AES_128_GCM_SHA256 +- TLS13_CHACHA20_POLY1305_SHA256 +- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 +- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 Supported key exchange groups: -* X25519 -* SECP256R1 -* SECP384R1 + +- X25519 +- SECP256R1 +- SECP384R1 #### TLS termination -Clients can connect to the router directly over HTTPS, without terminating TLS in an intermediary. You can configure this in the `tls` configuration section: +Clients can connect to the router directly over HTTPS, without terminating TLS in an intermediary. You can configure this in the `tls` configuration section: ```yaml tls: @@ -1013,7 +1013,7 @@ tls: key: ${file./path/to/key.pem} ``` -To set the file paths in your configuration with Unix-style expansion, you can follow the examples in the [variable expansion](#variable-expansion) guide. +To set the file paths in your configuration with Unix-style expansion, you can follow the examples in the [variable expansion](#variable-expansion) guide. The router expects the file referenced in the `certificate_chain` value to be a combination of several PEM certificates concatenated together into a single file (as is commonplace with Apache TLS configuration). @@ -1092,7 +1092,7 @@ apq: router: cache: redis: - urls: [ "rediss://redis.example.com:6379" ] + urls: ["rediss://redis.example.com:6379"] #highlight-start tls: certificate_authorities: ${file./path/to/ca.crt} @@ -1162,6 +1162,7 @@ Limit the maximum buffer size for the HTTP1 connection. Default is ~400kib. 
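An illustrative sketch only: the option name and the ~400kib default come from this page, while the placement under the `limits` key and the human-readable size notation are assumptions to verify against the limits reference.

```yaml title="router.yaml"
limits:
  # Assumed placement and value notation; the default buffer is ~400kib.
  http1_request_max_buf_size: 800kib
```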
Note for Rust Crate Users: If you are using the Router as a Rust crate, the `http1_request_max_buf_size` option requires the `hyper_header_limits` feature and also necessitates using Apollo's fork of the Hyper crate until the [changes are merged upstream](https://github.com/hyperium/hyper/pull/3523). You can include this fork by adding the following patch to your Cargo.toml file: + ```toml [patch.crates-io] "hyper" = { git = "https://github.com/apollographql/hyper.git", tag = "header-customizations-20241108" } @@ -1185,26 +1186,29 @@ In the example below, the `GetProducts` operation has a recursion of three, and ```graphql query GetProducts { - allProducts { #1 + allProducts { + #1 ...productVariation - delivery { #2 + delivery { + #2 fastestDelivery #3 } } } fragment ProductVariation on Product { - variation { #1 + variation { + #1 name #2 } } ``` -Note that the router calculates the recursion depth for each operation and fragment _separately_. Even if a fragment is included in an operation, that fragment's recursion depth does not contribute to the _operation's_ recursion depth. +Note that the router calculates the recursion depth for each operation and fragment _separately_. Even if a fragment is included in an operation, that fragment's recursion depth does not contribute to the _operation's_ recursion depth. ### Demand control -See [Demand Control](/router/executing-operations/demand-control) to learn how to analyze the cost of operations and to reject requests with operations that exceed customizable cost limits. +See [Demand Control](/router/executing-operations/demand-control) to learn how to analyze the cost of operations and to reject requests with operations that exceed customizable cost limits. ### Early cancel @@ -1223,7 +1227,6 @@ supergraph: experimental_log_on_broken_pipe: true ``` - ### Plugins You can customize the router's behavior with [plugins](/router/customizations/overview). Each plugin can have its own section in the configuration file with arbitrary values: @@ -1251,6 +1254,7 @@ The router uses Unix-style expansion. Here are some examples: Variable expansions are valid only for YAML _values_, not keys: + ```yaml supergraph: listen: "${env.MY_LISTEN_ADDRESS}" #highlight-line @@ -1259,37 +1263,38 @@ example: ``` - -### Fragment generation and reuse + + +### Automatic fragment generation By default, the router compresses subgraph requests by generating fragment definitions based on the shape of the subgraph operation. In many cases this significantly reduces the size of the query sent to subgraphs. -The router also supports an experimental algorithm that attempts to reuse fragments -from the original operation while forming subgraph requests. This experimental feature -used to be enabled by default, but is still available to support subgraphs that rely -on the specific shape of fragments in an operation: +You can explicitly opt-out of this behavior by specifying: ```yaml supergraph: generate_query_fragments: false - experimental_reuse_query_fragments: true ``` -Note that `generate_query_fragments` and `experimental_reuse_query_fragments` are -mutually exclusive; if both are explicitly set to `true`, `generate_query_fragments` -will take precedence. - -In the future, the `generate_query_fragments` option will be the only option for handling fragments. - - +The legacy query planner still supports an experimental algorithm that attempts to +reuse fragments from the original operation while forming subgraph requests. 
The +legacy query planner has to be explicitly enabled. This experimental feature used to +be enabled by default, but is still available to support subgraphs that rely on the +specific shape of fragments in an operation: - +```yaml +supergraph: + generate_query_fragments: false + experimental_reuse_query_fragments: true +``` -In the future, the `generate_query_fragments` option will be the only option for handling fragments. +Note that `generate_query_fragments` and `experimental_reuse_query_fragments` are +mutually exclusive; if both are explicitly set to `true`, `generate_query_fragments` +will take precedence. @@ -1338,8 +1343,8 @@ New releases of the router might introduce breaking changes to the [YAML config 1. The router emits a warning on startup. 2. The router attempts to translate your provided configuration to the new expected format. - - If the translation succeeds without errors, the router starts up as usual. - - If the translation fails, the router terminates. + - If the translation succeeds without errors, the router starts up as usual. + - If the translation fails, the router terminates. If you encounter this warning, you can use the `router config upgrade` command to see the new expected format for your existing configuration file: @@ -1355,4 +1360,4 @@ You can also view a diff of exactly which changes are necessary to upgrade your ## Related topics -* [Checklist for configuring the router for production](/technotes/TN0008-production-readiness-checklist/#apollo-router) +- [Checklist for configuring the router for production](/technotes/TN0008-production-readiness-checklist/#apollo-router) diff --git a/docs/source/reference/router/telemetry/instrumentation/selectors.mdx b/docs/source/reference/router/telemetry/instrumentation/selectors.mdx index 580ed124ca..828ee39b65 100644 --- a/docs/source/reference/router/telemetry/instrumentation/selectors.mdx +++ b/docs/source/reference/router/telemetry/instrumentation/selectors.mdx @@ -84,11 +84,13 @@ The subgraph service executes multiple times during query execution, with each e | `subgraph_request_header` | Yes | | The name of a subgraph request header | | `subgraph_response_header` | Yes | | The name of a subgraph response header | | `subgraph_response_status` | Yes | `code`\|`reason` | The status of a subgraph response | -| `subgraph_on_graphql_error` | No | `true`\|`false` | Boolean set to true if the subgraph response payload contains a GraphQL error | +| `subgraph_on_graphql_error` | No | `true`\|`false` | Boolean set to true if the subgraph response payload contains a GraphQL error | | `supergraph_operation_name` | Yes | `string`\|`hash` | The operation name from the supergraph query | | `supergraph_operation_kind` | Yes | `string` | The operation kind from the supergraph query | | `supergraph_query` | Yes | `string` | The graphql query to the supergraph | | `supergraph_query_variable` | Yes | | The name of a supergraph query variable | +| `supergraph_request_header` | Yes | | The name of a supergraph request header | +| `subgraph_resend_count` | Yes | `true`\|`false` | Number of retries for an http request to a subgraph | | `request_context` | Yes | | The name of a request context key | | `response_context` | Yes | | The name of a response context key | | `baggage` | Yes | | The name of a baggage item | diff --git a/docs/source/reference/router/telemetry/instrumentation/standard-attributes.mdx b/docs/source/reference/router/telemetry/instrumentation/standard-attributes.mdx index b1440ca115..eb264481e3 100644 --- 
a/docs/source/reference/router/telemetry/instrumentation/standard-attributes.mdx +++ b/docs/source/reference/router/telemetry/instrumentation/standard-attributes.mdx @@ -108,3 +108,4 @@ Standard attributes of the `subgraph` service: | `subgraph.graphql.operation.name` | | The operation name from the subgraph query (need `spec_compliant` [mode](/router/configuration/telemetry/instrumentation/spans/#mode) to disable it) | | `subgraph.graphql.operation.type` | `query`\|`mutation`\|`subscription` | The operation kind from the subgraph query | | `subgraph.graphql.document` | | The GraphQL query to the subgraph (need `spec_compliant` [mode](/router/configuration/telemetry/instrumentation/spans/#mode) to disable it) | +| `http.request.resend_count` | `true`\|`false` | Number of retries for an http request to a subgraph | diff --git a/docs/source/reference/router/telemetry/instrumentation/standard-instruments.mdx b/docs/source/reference/router/telemetry/instrumentation/standard-instruments.mdx index da9b655036..499e6cc043 100644 --- a/docs/source/reference/router/telemetry/instrumentation/standard-instruments.mdx +++ b/docs/source/reference/router/telemetry/instrumentation/standard-instruments.mdx @@ -16,14 +16,10 @@ These instruments can be consumed by configuring a [metrics exporter](/router/co - `apollo_router_http_request_duration_seconds_bucket` - HTTP subgraph request duration, attributes: - `subgraph`: (Optional) The subgraph being queried - `apollo_router_http_requests_total` - Total number of HTTP requests by HTTP status -- `apollo_router_timeout` - Number of triggered timeouts -- `apollo_router_http_request_retry_total` - Number of subgraph requests retried, attributes: - - `subgraph`: The subgraph being queried - - `status` : If the retry was aborted (`aborted`) ### GraphQL -- `apollo_router_graphql_error` - counts GraphQL errors in responses, attributes: +- `apollo.router.graphql_error` - counts GraphQL errors in responses, attributes: - `code`: error code ### Session @@ -125,4 +121,10 @@ The initial call to Uplink during router startup is not reflected in metrics. The following metrics have been deprecated and should not be used. -- `apollo_router_span` - **Deprecated**—use `apollo_router_processing_time` instead. +- `apollo_router_span` - **Deprecated**: use `apollo_router_processing_time` instead. +- `apollo_router_deduplicated_subscriptions_total` - **Deprecated**: use the `apollo.router.operations.subscriptions` metric's `subscriptions.deduplicated` attribute. +- `apollo_authentication_failure_count` - **Deprecated**: use the `apollo.router.operations.authentication.jwt` metric's `authentication.jwt.failed` attribute. +- `apollo_authentication_success_count` - **Deprecated**: use the `apollo.router.operations.authentication.jwt` metric instead. If the `authentication.jwt.failed` attribute is *absent* or `false`, the authentication succeeded. +- `apollo_require_authentication_failure_count` - **Deprecated**: use the `http.server.request.duration` metric's `http.response.status_code` attribute. Requests with authentication failures have HTTP status code 401. +- `apollo_router_timeout` - **Deprecated**: this metric conflates timed-out requests from the client to the router, and requests from the router to subgraphs. Timed-out requests have HTTP status code 504.
Use the `http.response.status_code` attribute on the `http.server.request.duration` metric to identify timed-out router requests, and the same attribute on the `http.client.request.duration` metric to identify timed-out subgraph requests. +- `apollo_router_http_request_retry_total` - **Deprecated**: use the `http.client.request.duration` metric's `http.request.resend_count` attribute. Requests with retries will contain `http.request.resend_count` set to the number of retries. \ No newline at end of file diff --git a/docs/source/reference/router/telemetry/metrics-exporters/datadog.mdx b/docs/source/reference/router/telemetry/metrics-exporters/datadog.mdx index 4aa7850a76..4526ab38d4 100644 --- a/docs/source/reference/router/telemetry/metrics-exporters/datadog.mdx +++ b/docs/source/reference/router/telemetry/metrics-exporters/datadog.mdx @@ -8,27 +8,13 @@ Enable and configure the [OTLP exporter](/router/configuration/telemetry/exporte For general tracing configuration, refer to [Router Metrics Configuration](/router/configuration/telemetry/exporters/metrics/overview). -## Datadog configuration +## Configuration -To export metrics to Datadog, you must both: - -- Configure the Datadog agent to accept OpenTelemetry Protocol (OTLP) metrics, and -- Configure the router to send traces to the Datadog agent. - -### Datadog agent configuration - -To configure the Datadog agent, add OTLP configuration (`otlp_config`) to your `datadog.yaml`. For example: - -```yaml title="datadog.yaml" -otlp_config: - receiver: - protocols: - grpc: - endpoint: :4317 -``` +To export metrics to Datadog, you must configure both the router to send traces to the Datadog agent and the Datadog agent to accept OpenTelemetry Protocol (OTLP) metrics. ### Router configuration -To configure the router, enable the [OTLP exporter](/router/configuration/telemetry/exporters/metrics/otlp#configuration) and set both `temporality: delta` and `endpoint: `. For example: + +You should enable the [OTLP exporter](/router/configuration/telemetry/exporters/metrics/otlp#configuration) and set both `temporality: delta` and `endpoint: `. For example: ```yaml title="router.yaml" telemetry: @@ -44,9 +30,20 @@ telemetry: -**You must set `temporality: delta`**, otherwise the router generates incorrect metrics. +You must set `temporality: delta`, otherwise the router generates incorrect metrics. -For more details about Datadog configuration, see [Datadog's docs on Agent configuration](https://docs.datadoghq.com/opentelemetry/otlp_ingest_in_the_agent/?tab=host). ### Datadog agent configuration + +To configure the Datadog agent, add OTLP configuration (`otlp_config`) to your `datadog.yaml`. For example: +```yaml title="datadog.yaml" +otlp_config: + receiver: + protocols: + grpc: + endpoint: :4317 +``` + +For more details about Datadog configuration, see [Datadog's docs on Agent configuration](https://docs.datadoghq.com/opentelemetry/otlp_ingest_in_the_agent/?tab=host). diff --git a/docs/source/reference/router/telemetry/trace-exporters/datadog.mdx b/docs/source/reference/router/telemetry/trace-exporters/datadog.mdx index e1b105d338..23cd378332 100644 --- a/docs/source/reference/router/telemetry/trace-exporters/datadog.mdx +++ b/docs/source/reference/router/telemetry/trace-exporters/datadog.mdx @@ -10,6 +10,52 @@ Enable and configure the [Datadog](https://www.datadoghq.com/) exporter for trac For general tracing configuration, refer to [Router Tracing Configuration](/router/configuration/telemetry/exporters/tracing/overview).
+## Attributes for Datadog APM UI + +The router should set attributes that Datadog uses to organize its APM view and other UI: + +- `otel.name`: span name that's fixed for Datadog +- `resource.name`: Datadog resource name that's displayed in traces +- `operation.name`: Datadog operation name that populates a dropdown menu in the Datadog service page + +You should add these attributes to your `router.yaml` configuration file. The example below sets these attributes for the `router`, `supergraph`, and `subgraph` stages of the router's request lifecycle: + +```yaml title="router.yaml" +telemetry: + instrumentation: + spans: + mode: spec_compliant + router: + attributes: + otel.name: router + operation.name: "router" + resource.name: + request_method: true + + supergraph: + attributes: + otel.name: supergraph + operation.name: "supergraph" + resource.name: + operation_name: string + + subgraph: + attributes: + otel.name: subgraph + operation.name: "subgraph" + resource.name: + subgraph_operation_name: string +``` + +Consequently, you can filter for these operations in Datadog APM: + +Datadog APM showing operations set with example attributes set in router.yaml + ## OTLP configuration To export traces to Datadog via OTLP, you must do the following: diff --git a/docs/source/routing/about-router.mdx b/docs/source/routing/about-router.mdx index 47c71d0de6..b1bbaf2caf 100644 --- a/docs/source/routing/about-router.mdx +++ b/docs/source/routing/about-router.mdx @@ -1,34 +1,38 @@ --- title: Supergraph Routing with GraphOS Router -subtitle: Learn the basics about router features and deployment types +subtitle: Learn the basics about router features and deployment types description: Apollo provides cloud and self-hosted GraphOS Router options. The router acts as an entry point to your GraphQL APIs and provides a unified interface for clients to interact with. redirectFrom: - - /graphos/routing + - /graphos/routing --- ## What is GraphOS Router? GraphOS Router is the runtime of the GraphOS platform. It executes client operations by planning and executing subgraph queries, then merging them into client responses. It's also the single entry point and gateway to your federated GraphQL API. - - + + -### Runtime of GraphOS platform +### Runtime of GraphOS platform As the runtime of the [GraphOS platform](/graphos/get-started/concepts/graphos), a GraphOS Router gets the supergraph schema—the blueprint of the federated graphs—from the GraphOS control plane. It then executes incoming client operations based on that schema. Unlike API gateways that offer capabilities to manage API endpoints, the router isn't based on URLs or REST endpoints. Rather, the router is a GraphQL-native solution for handling client APIs. -### Subgraph query planner +### Subgraph query planner Whenever your router receives an incoming GraphQL operation, it needs to figure out how to use your subgraphs to populate data for each of that operation's fields. To do this, the router generates a _query plan_: - - + + A query plan is a blueprint for dividing a single incoming operation into one or more operations that are each resolvable by a single subgraph. Some of these operations depend on the results of other operations, so the query plan also defines any required ordering for their execution. The router's query planner determines the optimal set of subgraph queries for each client operation, then it merges the subgraph responses into a single response for the client.
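For intuition, a plan for an operation that spans two subgraphs might look like the following sketch. The `products` and `reviews` subgraphs and the operation shape are hypothetical, and the exact nodes and rendering depend on your schema and router version:

```graphql
QueryPlan {
  Sequence {
    # Resolve the root field from the products subgraph first,
    # including the key fields needed for the later join.
    Fetch(service: "products") {
      { topProducts { __typename upc name } }
    },
    # Then resolve each product's reviews from the reviews subgraph.
    Flatten(path: "topProducts.@") {
      Fetch(service: "reviews") {
        ... on Product { reviews { rating } }
      },
    },
  },
}
```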
You can use the following tools for inspecting query plans: + - Use the [Explorer IDE](/graphos/platform/explorer/) to view dynamically calculated example query plans for your operations in its right-hand panel. - Use the [Apollo Solutions command line utility](https://github.com/apollosolutions/generate-query-plan) for generating a query plan locally. @@ -46,8 +50,11 @@ As the entry point to your supergraph, a GraphOS Router must be able to process You can choose for Apollo to provision and manage the runtime infrastructure for your routers. Apollo hosts and deploys each instance of router in the cloud. Each _cloud-hosted router_ instance is fully integrated and configurable within GraphOS. - - + + @@ -59,8 +66,11 @@ While cloud routers are hosted in the cloud, GraphQL subgraph servers are still You can choose to manage the runtime infrastructure for your routers by yourself. Using container images of router, you can host and deploy your router instances from your own infrastructure. These _self-hosted router_ instances allow you full control over their deployment. - - + + ### Common router core @@ -110,11 +120,12 @@ Apollo offers the following router options, in increasing order of configurabili You host and manage the router on your own infrastructure. Highly configurable and customizable, including all options for Cloud - Dedicated routers and additional [customization options](/graphos/routing/customization/overview). + Dedicated routers and additional [customization + options](/graphos/routing/customization/overview). - The Apollo Router Core is available as a free and source-available router. - Connecting your self-hosted router to GraphOS requires an{' '} + The Apollo Router Core is available as a free and source-available + router. Connecting your self-hosted router to GraphOS requires an{" "} Enterprise plan. @@ -123,7 +134,11 @@ Apollo offers the following router options, in increasing order of configurabili -**We've paused new sign-ups for Serverless and Dedicated plans while we improve our offerings based on user feedback. This means cloud routing is temporarily unavailable to new users. In the meantime, you can explore other GraphOS features with a [free trial](https://studio.apollographql.com/signup?referrer=docs-content). +**We've paused new sign-ups for Serverless and Dedicated plans while +we improve our offerings based on user feedback. This means cloud routing is +temporarily unavailable to new users. In the meantime, you can explore other +GraphOS features with a [free +trial](https://studio.apollographql.com/signup?referrer=docs-content). @@ -133,7 +148,6 @@ Although powered by the source-available Apollo Router Core binary, GraphOS Rout Cloud-hosted routers automatically have access to additional GraphOS Router features, while self-hosted routers must be authenticated with a GraphOS Enterprise license to gain access to these features. Refer to the [pricing page](https://www.apollographql.com/pricing#graphos-router) to compare GraphOS Router features across plan types. 
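As a concrete sketch, a minimal self-hosted deployment from the official container image might look like this (it assumes `APOLLO_KEY` and `APOLLO_GRAPH_REF` are already exported in your shell, and pins the tag to this release):

```bash
# Minimal sketch: run a self-hosted router and connect it to GraphOS.
docker run -p 4000:4000 \
  --env APOLLO_KEY \
  --env APOLLO_GRAPH_REF \
  ghcr.io/apollographql/router:v1.59.0
```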
- ## Next steps - Learn more about Apollo-managed routers in [cloud-hosted router](/graphos/routing/cloud/) @@ -146,4 +160,3 @@ Cloud-hosted routers automatically have access to additional GraphOS Router feat - To learn more about the intricacies of query plans, see the [example graph](/graphos/reference/federation/query-plans#example-graph) and [query plan](/graphos/reference/federation/query-plans#example-graph) in reference docs -- For the most performant query planning, configure and use the [Rust-native query planner](/graphos/routing/query-planning/native-query-planner). diff --git a/docs/source/routing/observability/client-awareness.mdx b/docs/source/routing/observability/client-awareness.mdx deleted file mode 100644 index f7660f85a8..0000000000 --- a/docs/source/routing/observability/client-awareness.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Client Awareness -subtitle: Configure client awareness in the router -description: Configure client awareness in the Apollo GraphOS Router or Apollo Router Core to separate the metrics and operations per client. ---- - -import { Link } from "gatsby"; - -The GraphOS Router and Apollo Router Core support [client awareness](/graphos/metrics/client-awareness/) by default. If the client sets the headers `apollographql-client-name` and `apollographql-client-version` in its HTTP requests, GraphOS Studio can separate the metrics and operations per client. - -## Overriding client awareness headers - -Different header names can be used by updating the configuration file. If those headers will be sent by a browser, they must be allowed in the [CORS (Cross Origin Resource Sharing) configuration](/router/configuration/cors), as follows: - -```yaml title="router.yaml" -telemetry: - apollo: - # defaults to apollographql-client-name - client_name_header: MyClientHeaderName - # defaults to apollographql-client-version - client_version_header: MyClientHeaderVersion -cors: - # The headers to allow. - # (Defaults to [ Content-Type ], which is required for GraphOS Studio) - allow_headers: [ Content-Type, MyClientHeaderName, MyClientHeaderVersion] -``` diff --git a/docs/source/routing/observability/client-id-enforcement.mdx b/docs/source/routing/observability/client-id-enforcement.mdx index 474405547e..15d63fc691 100644 --- a/docs/source/routing/observability/client-id-enforcement.mdx +++ b/docs/source/routing/observability/client-id-enforcement.mdx @@ -1,5 +1,5 @@ --- -title: Client ID Enforcement +title: Client Awareness and Enforcement subtitle: Require client details and operation names to help monitor schema usage description: Improve GraphQL operation monitoring by tagging operations with client details. See code examples for Apollo GraphOS Router and Apollo Server. published: 2022-05-31 @@ -7,19 +7,61 @@ id: TN0001 tags: [server, observability, router] redirectFrom: - /technotes/TN0001-client-id-enforcement/ + - /graphos/routing/observability/client-awareness --- -As part of GraphOS Studio metrics reporting, servers can [tag reported operations with the requesting client's name and version](/graphos/metrics/client-awareness). This helps graph maintainers understand which clients are using which fields in the schema.
+As part of GraphOS Studio metrics reporting, servers can [tag reported operations with the requesting client's name and version](/graphos/metrics/client-awareness). +This **client awareness** helps graph maintainers understand which clients are using which fields in the schema. -Clients can (and should) also [name their GraphQL operations](/react/data/operation-best-practices/#name-all-operations), which provides more context around how and where data is being used. +Apollo's GraphOS Router and Apollo Server can enable client awareness by requiring metadata about requesting clients. +The router supports client awareness by default. If the client sets its name and version with the headers `apollographql-client-name` and `apollographql-client-version` in its HTTP requests, GraphOS Studio can separate the metrics and operations per client. -Together, these pieces of information help teams monitor their graph and make changes to it safely. We strongly encourage that your GraphQL gateway require client details and operation names from all requesting clients. + + +The client name is also used by the persisted queries feature. + + + + +Clients should [name their GraphQL operations](/react/data/operation-best-practices/#name-all-operations) to provide more context around how and where data is being used. + +## Why enforce client reporting? + +Client metadata enables better insights into schema usage, such as: + +- **Identifying which clients use which fields**: This facilitates usage monitoring and safe deprecation of fields. +- **Understanding traffic patterns**: This helps optimize schema design based on real-world client behavior. +- **Improving operation-level observability**: This provides details for debugging and performance improvements. + +Apollo strongly recommends requiring client name, client version, and operation names in all incoming GraphQL requests. ## Enforcing in GraphOS Router -The GraphOS Router supports client awareness by default if the client sets the `apollographql-client-name` and `apollographql-client-id` in their requests. These values can be overridden using the [router configuration file](/router/managed-federation/client-awareness/) directly. +The GraphOS Router supports client awareness by default if the client sets the `apollographql-client-name` and `apollographql-client-version` in their requests. +These values can be overridden using the [router configuration file](/router/managed-federation/client-awareness/) directly. +You can use a Rhai script to _enforce_ that clients include metadata. + +### Customizing client awareness headers + +If headers with customized names need to be sent by a browser, they must be allowed in the [CORS (Cross Origin Resource Sharing) configuration](/router/configuration/cors), as follows: + +```yaml title="router.yaml" +telemetry: + apollo: + # defaults to apollographql-client-name + client_name_header: MyClientHeaderName + # defaults to apollographql-client-version + client_version_header: MyClientHeaderVersion +cors: + # The headers to allow. + # (Defaults to [ Content-Type ], which is required for GraphOS Studio) + allow_headers: [ Content-Type, MyClientHeaderName, MyClientHeaderVersion] +``` + +### Enforcing via Rhai script -Client headers can also be enforced using a [Rhai script](/graphos/routing/customization/rhai) on every incoming request. +Client headers can be enforced using a [Rhai script](/graphos/routing/customization/rhai) on every incoming request.
```rhai title="client-id.rhai" fn supergraph_service(service) { diff --git a/docs/source/routing/query-planning/native-query-planner.mdx b/docs/source/routing/query-planning/native-query-planner.mdx index 7da2d5a099..2f6cb487ce 100644 --- a/docs/source/routing/query-planning/native-query-planner.mdx +++ b/docs/source/routing/query-planning/native-query-planner.mdx @@ -7,17 +7,13 @@ redirectFrom: - /router/executing-operations/native-query-planner --- - - -Learn to run the GraphOS Router with the Rust-native query planner and improve your query planning performance and scalability. +Learn about the Rust-native query planner in GraphOS Router. The planner is GA as of v1.59.0. ## Background about query planner implementations In v1.49.0 the router introduced a [query planner](/graphos/routing/about-router#query-planning) implemented natively in Rust. This native query planner improves the overall performance and resource utilization of query planning. It exists alongside the legacy JavaScript implementation that uses the V8 JavaScript engine, and it will eventually replace the legacy implementation. -### Comparing query planner implementations - -As part of the effort to ensure correctness and stability of the new query planner, starting in v1.53.0 the router enables both the new and legacy planners and runs them in parallel to compare their results by default. After their comparison, the router discards the native query planner's results and uses only the legacy planner to execute requests. The native query planner uses a single thread in the cold path of the router. It has a bounded queue of ten queries. If the queue is full, the router simply does not run the comparison to avoid excessive resource consumption. +As of v1.59.0, the native query planner is the default planner in the router. As part of this, Deno, and by extension v8, are no longer initialized at router startup. The legacy implementation is deprecated, but it is still possible to configure the router to run with the legacy query planner. ## Configuring query planning @@ -25,55 +21,36 @@ You can configure the `experimental_query_planner_mode` option in your `router.y The `experimental_query_planner_mode` option has the following supported modes: -- `new`- enables only the new Rust-native query planner -- `legacy` - enables only the legacy JavaScript query planner -- `both_best_effort` (default) - enables both new and legacy query planners for comparison. The legacy query planner is used for execution. - - - -## Optimize native query planner - - - -To run the native query planner with the best performance and resource utilization, configure your router with the following options: - -```yaml title="router.yaml" -experimental_query_planner_mode: new -``` - - - -In router v1.56, running the native query planner with the best performance and resource utilization also requires setting `experimental_introspection_mode: new`. +- `new` (default) - enables only the new Rust-native query planner +- `legacy` - enables only the legacy JavaScript query planner. The legacy planner is deprecated and will be removed in the next router release. +- `both_best_effort` - enables both new and legacy query planners for comparison. The legacy query planner is used for execution. - +`experimental_query_planner_mode` will be removed in the next router release. -Setting `experimental_query_planner_mode: new` not only enables native query planning and schema introspection, it also disables the V8 JavaScript runtime used by the legacy query planner. 
Disabling V8 frees up CPU and memory and improves native query planning performance. - -Additionally, to enable more optimal native query planning and faster throughput by reducing the size of queries sent to subgraphs, you can enable query fragment generation with the following option: - -```yaml title="router.yaml" -supergraph: - generate_query_fragments: true -``` - - - -Regarding [fragment reuse and generation](/router/configuration/overview#fragment-reuse-and-generation), in the future the `generate_query_fragments` option will be the only option for handling fragments. ### Comparing query planner implementations As part of the effort to ensure correctness and stability of the new query planner, starting in v1.53.0 the router enables both the new and legacy planners and runs them in parallel to compare their results by default. After their comparison, the router discards the native query planner's results and uses only the legacy planner to execute requests. The native query planner uses a single thread in the cold path of the router. It has a bounded queue of ten queries. If the queue is full, the router simply does not run the comparison to avoid excessive resource consumption. -## Metrics for native query planner +### Metrics for native query planner When running both query planners for comparison with `experimental_query_planner_mode: both_best_effort`, the following metrics track mismatches and errors: - `apollo.router.operations.query_planner.both` with the following attributes: + + - `generation.is_matched` (bool) + - `generation.js_error` (bool) + - `generation.rust_error` (bool) - `apollo.router.query_planning.plan.duration` with the following attributes to differentiate between planners: - - `planner` (rust | js) + - `planner` (rust | js) + +## Federation v1 composed supergraphs -## Limitations of native query planner +The native query planner does not support _supergraphs_ composed with Federation v1, so the router will fall back to the legacy planner for any variants still using a Federation v1 supergraph in v1.59.0. Users are highly encouraged to [recompose with Federation v2](https://www.apollographql.com/docs/graphos/reference/migration/to-federation-version-2#step-2-configure-your-composition-method). +Federation v1 _subgraphs_ continue to be supported. -The native query planner doesn't implement `@context`. This is planned to be implemented in a future router release. +Customers on a Federation v1 composed supergraph should see an info-level log like this on startup: + +```bash +2024-12-05T10:13:39.760333Z INFO Falling back to the legacy query planner: failed to initialize the query planner: Supergraphs composed with federation version 1 are not supported. Please recompose your supergraph with federation version 2 or greater +``` diff --git a/docs/source/routing/security/persisted-queries.mdx b/docs/source/routing/security/persisted-queries.mdx index 5befd7071c..ea7d1e92b4 100644 --- a/docs/source/routing/security/persisted-queries.mdx +++ b/docs/source/routing/security/persisted-queries.mdx @@ -64,7 +64,7 @@ persisted_queries: log_unknown: true ``` -If used with the [`safelist`](#safelist) option, the router logs unregistered and rejected operations. With [`safelist.required_id`](#require_id) off, the only rejected operations are unregistered ones.
If [`safelist.required_id`](#require_id) is turned on, operations can be rejected even when registered because they use operation IDs rather than operation strings. +If used with the [`safelist`](#safelist) option, the router logs unregistered and rejected operations. With [`safelist.require_id`](#require_id) off, the only rejected operations are unregistered ones. If [`safelist.require_id`](#require_id) is turned on, operations can be rejected even when registered because they use operation IDs rather than operation strings. #### `experimental_prewarm_query_plan_cache` @@ -114,7 +114,7 @@ To enable safelisting, you _must_ turn off [automatic persisted queries](/router -By default, the [`require_id`](#required_id) suboption is `false`, meaning the router accepts both operation IDs and operation strings as long as the operation is registered. +By default, the [`require_id`](#require_id) suboption is `false`, meaning the router accepts both operation IDs and operation strings as long as the operation is registered. #### `require_id` @@ -138,6 +138,30 @@ To enable safelisting, you _must_ turn off [automatic persisted queries](/router +### Customization via request context + +GraphOS Router can be [customized](/graphos/routing/customization/overview) via several mechanisms such as [Rhai scripts](/graphos/routing/customization/rhai) and [coprocessors](/graphos/routing/customization/coprocessor). These plugins can affect your router's persisted query processing by writing to the request context. + +#### `apollo_persisted_queries::client_name` + +When publishing operations to a PQL, you can specify a client name associated with the operation (by including a `clientName` field in the individual operation in your [manifest](/graphos/platform/security/persisted-queries#per-operation-properties), or by including the `--for-client-name` option to `rover persisted-queries publish`). If an operation has a client name, it will only be executed by requests that specify that client name. (Your PQL can contain multiple operations with the same ID and different client names.) + +Your customization (Rhai script, coprocessor, etc.) can examine a request during the [Router Service stage](/graphos/routing/customization/overview#request-path) of the request path and set the `apollo_persisted_queries::client_name` value in the request context to the request's client name. + +If this context value is not set by a customization, your router will use the same client name used for [client awareness](/graphos/routing/observability/client-awareness) in observability. This client name is read from an HTTP header specified by `telemetry.apollo.client_name_header`, or `apollographql-client-name` by default. + +If your request specifies an ID and a client name but there is no operation in the PQL with that ID and client name, your router will look to see if there is an operation with that ID and no client name specified, and use that if it finds it. + +#### `apollo_persisted_queries::safelist::skip_enforcement` + +If safelisting is enabled, you can still opt out of safelist enforcement on a per-request basis. + +Your customization (Rhai script, coprocessor, etc.) can examine a request during the [Router Service stage](/graphos/routing/customization/overview#request-path) of the request path and set the `apollo_persisted_queries::safelist::skip_enforcement` value in the request context to the boolean value `true`.
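As a sketch, a Rhai customization at the router service stage could set both context keys. The `x-internal-tool` header below is a hypothetical trust signal, and copying the client-awareness header is shown only for illustration, since that is already the default fallback:

```rhai
fn router_service(service) {
    const request_callback = Fn("set_pq_context");
    service.map_request(request_callback);
}

fn set_pq_context(request) {
    // Illustrative only: mirrors the default client name fallback.
    let client_name = request.headers["apollographql-client-name"];
    if client_name != () {
        request.context["apollo_persisted_queries::client_name"] = client_name;
    }
    // Hypothetical policy: internal tooling bypasses safelist enforcement.
    if request.headers["x-internal-tool"] == "true" {
        request.context["apollo_persisted_queries::safelist::skip_enforcement"] = true;
    }
}
```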
+ +For any request where you set this value, Router will skip safelist enforcement: requests with a full operation string will be allowed even if they are not in the safelist, and even if [`safelist.require_id`](#require_id) is enabled. + +This does not affect the behavior of the [`log_unknown` option](#log_unknown): unknown operations will still be logged if that option is set. + ## Limitations * **Unsupported with offline license**. A GraphOS Router using an [offline Enterprise license](/router/enterprise-features/#offline-enterprise-license) cannot use safelisting with persisted queries. The feature relies on Apollo Uplink to fetch persisted query manifests, so it doesn't work as designed when the router is disconnected from Uplink. diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index cbdd08e61b..95cf5805eb 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.58.1 +version: 1.59.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "v1.58.1" +appVersion: "v1.59.0" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index f26ac4d052..a9a5af6b71 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.58.1](https://img.shields.io/badge/Version-1.58.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.58.1](https://img.shields.io/badge/AppVersion-v1.58.1-informational?style=flat-square) +![Version: 1.59.0](https://img.shields.io/badge/Version-1.59.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.59.0](https://img.shields.io/badge/AppVersion-v1.59.0-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.58.1 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.59.0 ``` ## Install Chart @@ -19,7 +19,7 @@ **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.58.1 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.59.0 --values my-values.yaml ``` _See [configuration](#configuration) below._ @@ -81,7 +81,7 @@ helm show values oci://ghcr.io/apollographql/helm-charts/router | resources | object | `{}` | | | restartPolicy | string | `"Always"` | Sets the restart policy of pods | | rollingUpdate | object | `{}` | Sets the [rolling update strategy parameters](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment). Can take absolute values or % values.
-| router | object | `{"args":["--hot-reload"],"configuration":{"health_check":{"listen":"0.0.0.0:8088"},"supergraph":{"listen":"0.0.0.0:4000"}}}` | See https://www.apollographql.com/docs/router/configuration/overview/#yaml-config-file for yaml structure |
+| router | object | `{"args":["--hot-reload"],"configuration":{"health_check":{"listen":"0.0.0.0:8088"},"supergraph":{"listen":"0.0.0.0:4000"}}}` | See https://www.apollographql.com/docs/graphos/reference/router/configuration#yaml-config-file for yaml structure |
 | securityContext | object | `{}` |  |
 | service.annotations | object | `{}` |  |
 | service.port | int | `80` |  |
@@ -98,4 +98,4 @@ helm show values oci://ghcr.io/apollographql/helm-charts/router
 | virtualservice.enabled | bool | `false` |  |

 ----------------------------------------------
-Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)
+Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1)
diff --git a/helm/chart/router/values.yaml b/helm/chart/router/values.yaml
index 35f45618a7..d497aef33e 100644
--- a/helm/chart/router/values.yaml
+++ b/helm/chart/router/values.yaml
@@ -4,7 +4,7 @@

 replicaCount: 1

-# -- See https://www.apollographql.com/docs/router/configuration/overview/#yaml-config-file for yaml structure
+# -- See https://www.apollographql.com/docs/graphos/reference/router/configuration#yaml-config-file for yaml structure
 router:
   configuration:
     supergraph:
diff --git a/licenses.html b/licenses.html
index ee38302396..3b497f5e45 100644
--- a/licenses.html
+++ b/licenses.html
@@ -44,11 +44,12 @@

Third Party Licenses

Overview of licenses:

@@ -2319,6 +2323,18 @@ 

Used by:

Apache License 2.0

Used by:

+
+  • Apache License 2.0
+
+    Used by:
+
+                                 Apache License
    +                           Version 2.0, January 2004
    +                        http://www.apache.org/licenses/
    +
    +   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +   1. Definitions.
    +
    +      "License" shall mean the terms and conditions for use, reproduction,
    +      and distribution as defined by Sections 1 through 9 of this document.
    +
    +      "Licensor" shall mean the copyright owner or entity authorized by
    +      the copyright owner that is granting the License.
    +
    +      "Legal Entity" shall mean the union of the acting entity and all
    +      other entities that control, are controlled by, or are under common
    +      control with that entity. For the purposes of this definition,
    +      "control" means (i) the power, direct or indirect, to cause the
    +      direction or management of such entity, whether by contract or
    +      otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +      outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +      "You" (or "Your") shall mean an individual or Legal Entity
    +      exercising permissions granted by this License.
    +
    +      "Source" form shall mean the preferred form for making modifications,
    +      including but not limited to software source code, documentation
    +      source, and configuration files.
    +
    +      "Object" form shall mean any form resulting from mechanical
    +      transformation or translation of a Source form, including but
    +      not limited to compiled object code, generated documentation,
    +      and conversions to other media types.
    +
    +      "Work" shall mean the work of authorship, whether in Source or
    +      Object form, made available under the License, as indicated by a
    +      copyright notice that is included in or attached to the work
    +      (an example is provided in the Appendix below).
    +
    +      "Derivative Works" shall mean any work, whether in Source or Object
    +      form, that is based on (or derived from) the Work and for which the
    +      editorial revisions, annotations, elaborations, or other modifications
    +      represent, as a whole, an original work of authorship. For the purposes
    +      of this License, Derivative Works shall not include works that remain
    +      separable from, or merely link (or bind by name) to the interfaces of,
    +      the Work and Derivative Works thereof.
    +
    +      "Contribution" shall mean any work of authorship, including
    +      the original version of the Work and any modifications or additions
    +      to that Work or Derivative Works thereof, that is intentionally
    +      submitted to Licensor for inclusion in the Work by the copyright owner
    +      or by an individual or Legal Entity authorized to submit on behalf of
    +      the copyright owner. For the purposes of this definition, "submitted"
    +      means any form of electronic, verbal, or written communication sent
    +      to the Licensor or its representatives, including but not limited to
    +      communication on electronic mailing lists, source code control systems,
    +      and issue tracking systems that are managed by, or on behalf of, the
    +      Licensor for the purpose of discussing and improving the Work, but
    +      excluding communication that is conspicuously marked or otherwise
    +      designated in writing by the copyright owner as "Not a Contribution."
    +
    +      "Contributor" shall mean Licensor and any individual or Legal Entity
    +      on behalf of whom a Contribution has been received by Licensor and
    +      subsequently incorporated within the Work.
    +
    +   2. Grant of Copyright License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      copyright license to reproduce, prepare Derivative Works of,
    +      publicly display, publicly perform, sublicense, and distribute the
    +      Work and such Derivative Works in Source or Object form.
    +
    +   3. Grant of Patent License. Subject to the terms and conditions of
    +      this License, each Contributor hereby grants to You a perpetual,
    +      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +      (except as stated in this section) patent license to make, have made,
    +      use, offer to sell, sell, import, and otherwise transfer the Work,
    +      where such license applies only to those patent claims licensable
    +      by such Contributor that are necessarily infringed by their
    +      Contribution(s) alone or by combination of their Contribution(s)
    +      with the Work to which such Contribution(s) was submitted. If You
    +      institute patent litigation against any entity (including a
    +      cross-claim or counterclaim in a lawsuit) alleging that the Work
    +      or a Contribution incorporated within the Work constitutes direct
    +      or contributory patent infringement, then any patent licenses
    +      granted to You under this License for that Work shall terminate
    +      as of the date such litigation is filed.
    +
    +   4. Redistribution. You may reproduce and distribute copies of the
    +      Work or Derivative Works thereof in any medium, with or without
    +      modifications, and in Source or Object form, provided that You
    +      meet the following conditions:
    +
    +      (a) You must give any other recipients of the Work or
    +          Derivative Works a copy of this License; and
    +
    +      (b) You must cause any modified files to carry prominent notices
    +          stating that You changed the files; and
    +
    +      (c) You must retain, in the Source form of any Derivative Works
    +          that You distribute, all copyright, patent, trademark, and
    +          attribution notices from the Source form of the Work,
    +          excluding those notices that do not pertain to any part of
    +          the Derivative Works; and
    +
    +      (d) If the Work includes a "NOTICE" text file as part of its
    +          distribution, then any Derivative Works that You distribute must
    +          include a readable copy of the attribution notices contained
    +          within such NOTICE file, excluding those notices that do not
    +          pertain to any part of the Derivative Works, in at least one
    +          of the following places: within a NOTICE text file distributed
    +          as part of the Derivative Works; within the Source form or
    +          documentation, if provided along with the Derivative Works; or,
    +          within a display generated by the Derivative Works, if and
    +          wherever such third-party notices normally appear. The contents
    +          of the NOTICE file are for informational purposes only and
    +          do not modify the License. You may add Your own attribution
    +          notices within Derivative Works that You distribute, alongside
    +          or as an addendum to the NOTICE text from the Work, provided
    +          that such additional attribution notices cannot be construed
    +          as modifying the License.
    +
    +      You may add Your own copyright statement to Your modifications and
    +      may provide additional or different license terms and conditions
    +      for use, reproduction, or distribution of Your modifications, or
    +      for any such Derivative Works as a whole, provided Your use,
    +      reproduction, and distribution of the Work otherwise complies with
    +      the conditions stated in this License.
    +
    +   5. Submission of Contributions. Unless You explicitly state otherwise,
    +      any Contribution intentionally submitted for inclusion in the Work
    +      by You to the Licensor shall be under the terms and conditions of
    +      this License, without any additional terms or conditions.
    +      Notwithstanding the above, nothing herein shall supersede or modify
    +      the terms of any separate license agreement you may have executed
    +      with Licensor regarding such Contributions.
    +
    +   6. Trademarks. This License does not grant permission to use the trade
    +      names, trademarks, service marks, or product names of the Licensor,
    +      except as required for reasonable and customary use in describing the
    +      origin of the Work and reproducing the content of the NOTICE file.
    +
    +   7. Disclaimer of Warranty. Unless required by applicable law or
    +      agreed to in writing, Licensor provides the Work (and each
    +      Contributor provides its Contributions) on an "AS IS" BASIS,
    +      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +      implied, including, without limitation, any warranties or conditions
    +      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +      PARTICULAR PURPOSE. You are solely responsible for determining the
    +      appropriateness of using or redistributing the Work and assume any
    +      risks associated with Your exercise of permissions under this License.
    +
    +   8. Limitation of Liability. In no event and under no legal theory,
    +      whether in tort (including negligence), contract, or otherwise,
    +      unless required by applicable law (such as deliberate and grossly
    +      negligent acts) or agreed to in writing, shall any Contributor be
    +      liable to You for damages, including any direct, indirect, special,
    +      incidental, or consequential damages of any character arising as a
    +      result of this License or out of the use or inability to use the
    +      Work (including but not limited to damages for loss of goodwill,
    +      work stoppage, computer failure or malfunction, or any and all
    +      other commercial damages or losses), even if such Contributor
    +      has been advised of the possibility of such damages.
    +
    +   9. Accepting Warranty or Additional Liability. While redistributing
    +      the Work or Derivative Works thereof, You may choose to offer,
    +      and charge a fee for, acceptance of support, warranty, indemnity,
    +      or other liability obligations and/or rights consistent with this
    +      License. However, in accepting such obligations, You may act only
    +      on Your own behalf and on Your sole responsibility, not on behalf
    +      of any other Contributor, and only if You agree to indemnify,
    +      defend, and hold each Contributor harmless for any liability
    +      incurred by, or claims asserted against, such Contributor by reason
    +      of your accepting any such warranty or additional liability.
    +
    +   END OF TERMS AND CONDITIONS
    +
    +   APPENDIX: How to apply the Apache License to your work.
    +
    +      To apply the Apache License to your work, attach the following
    +      boilerplate notice, with the fields enclosed by brackets "[]"
    +      replaced with your own identifying information. (Don't include
    +      the brackets!)  The text should be enclosed in the appropriate
    +      comment syntax for the file format. We also recommend that a
    +      file or class name and description of purpose be included on the
    +      same "printed page" as the copyright notice for easier
    +      identification within third-party archives.
    +
    +   Copyright [yyyy] [name of copyright owner]
    +
    +   Licensed under the Apache License, Version 2.0 (the "License");
    +   you may not use this file except in compliance with the License.
    +   You may obtain a copy of the License at
    +
    +       http://www.apache.org/licenses/LICENSE-2.0
    +
    +   Unless required by applicable law or agreed to in writing, software
    +   distributed under the License is distributed on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +   See the License for the specific language governing permissions and
    +   limitations under the License.
    +   
  • Apache License 2.0

    @@ -4676,6 +4901,215 @@

    Used by:

    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
+
+  • Apache License 2.0
+
+    Used by:
+
+                                 Apache License
    +                           Version 2.0, January 2004
    +                        http://www.apache.org/licenses/
    +
    +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +1.  Definitions.
    +
    +    "License" shall mean the terms and conditions for use, reproduction,
    +    and distribution as defined by Sections 1 through 9 of this document.
    +
    +    "Licensor" shall mean the copyright owner or entity authorized by
    +    the copyright owner that is granting the License.
    +
    +    "Legal Entity" shall mean the union of the acting entity and all
    +    other entities that control, are controlled by, or are under common
    +    control with that entity. For the purposes of this definition,
    +    "control" means (i) the power, direct or indirect, to cause the
    +    direction or management of such entity, whether by contract or
    +    otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +    outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +    "You" (or "Your") shall mean an individual or Legal Entity
    +    exercising permissions granted by this License.
    +
    +    "Source" form shall mean the preferred form for making modifications,
    +    including but not limited to software source code, documentation
    +    source, and configuration files.
    +
    +    "Object" form shall mean any form resulting from mechanical
    +    transformation or translation of a Source form, including but
    +    not limited to compiled object code, generated documentation,
    +    and conversions to other media types.
    +
    +    "Work" shall mean the work of authorship, whether in Source or
    +    Object form, made available under the License, as indicated by a
    +    copyright notice that is included in or attached to the work
    +    (an example is provided in the Appendix below).
    +
    +    "Derivative Works" shall mean any work, whether in Source or Object
    +    form, that is based on (or derived from) the Work and for which the
    +    editorial revisions, annotations, elaborations, or other modifications
    +    represent, as a whole, an original work of authorship. For the purposes
    +    of this License, Derivative Works shall not include works that remain
    +    separable from, or merely link (or bind by name) to the interfaces of,
    +    the Work and Derivative Works thereof.
    +
    +    "Contribution" shall mean any work of authorship, including
    +    the original version of the Work and any modifications or additions
    +    to that Work or Derivative Works thereof, that is intentionally
    +    submitted to Licensor for inclusion in the Work by the copyright owner
    +    or by an individual or Legal Entity authorized to submit on behalf of
    +    the copyright owner. For the purposes of this definition, "submitted"
    +    means any form of electronic, verbal, or written communication sent
    +    to the Licensor or its representatives, including but not limited to
    +    communication on electronic mailing lists, source code control systems,
    +    and issue tracking systems that are managed by, or on behalf of, the
    +    Licensor for the purpose of discussing and improving the Work, but
    +    excluding communication that is conspicuously marked or otherwise
    +    designated in writing by the copyright owner as "Not a Contribution."
    +
    +    "Contributor" shall mean Licensor and any individual or Legal Entity
    +    on behalf of whom a Contribution has been received by Licensor and
    +    subsequently incorporated within the Work.
    +
    +2.  Grant of Copyright License. Subject to the terms and conditions of
    +    this License, each Contributor hereby grants to You a perpetual,
    +    worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +    copyright license to reproduce, prepare Derivative Works of,
    +    publicly display, publicly perform, sublicense, and distribute the
    +    Work and such Derivative Works in Source or Object form.
    +
    +3.  Grant of Patent License. Subject to the terms and conditions of
    +    this License, each Contributor hereby grants to You a perpetual,
    +    worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +    (except as stated in this section) patent license to make, have made,
    +    use, offer to sell, sell, import, and otherwise transfer the Work,
    +    where such license applies only to those patent claims licensable
    +    by such Contributor that are necessarily infringed by their
    +    Contribution(s) alone or by combination of their Contribution(s)
    +    with the Work to which such Contribution(s) was submitted. If You
    +    institute patent litigation against any entity (including a
    +    cross-claim or counterclaim in a lawsuit) alleging that the Work
    +    or a Contribution incorporated within the Work constitutes direct
    +    or contributory patent infringement, then any patent licenses
    +    granted to You under this License for that Work shall terminate
    +    as of the date such litigation is filed.
    +
    +4.  Redistribution. You may reproduce and distribute copies of the
    +    Work or Derivative Works thereof in any medium, with or without
    +    modifications, and in Source or Object form, provided that You
    +    meet the following conditions:
    +
    +    (a) You must give any other recipients of the Work or
    +    Derivative Works a copy of this License; and
    +
    +    (b) You must cause any modified files to carry prominent notices
    +    stating that You changed the files; and
    +
    +    (c) You must retain, in the Source form of any Derivative Works
    +    that You distribute, all copyright, patent, trademark, and
    +    attribution notices from the Source form of the Work,
    +    excluding those notices that do not pertain to any part of
    +    the Derivative Works; and
    +
    +    (d) If the Work includes a "NOTICE" text file as part of its
    +    distribution, then any Derivative Works that You distribute must
    +    include a readable copy of the attribution notices contained
    +    within such NOTICE file, excluding those notices that do not
    +    pertain to any part of the Derivative Works, in at least one
    +    of the following places: within a NOTICE text file distributed
    +    as part of the Derivative Works; within the Source form or
    +    documentation, if provided along with the Derivative Works; or,
    +    within a display generated by the Derivative Works, if and
    +    wherever such third-party notices normally appear. The contents
    +    of the NOTICE file are for informational purposes only and
    +    do not modify the License. You may add Your own attribution
    +    notices within Derivative Works that You distribute, alongside
    +    or as an addendum to the NOTICE text from the Work, provided
    +    that such additional attribution notices cannot be construed
    +    as modifying the License.
    +
    +    You may add Your own copyright statement to Your modifications and
    +    may provide additional or different license terms and conditions
    +    for use, reproduction, or distribution of Your modifications, or
    +    for any such Derivative Works as a whole, provided Your use,
    +    reproduction, and distribution of the Work otherwise complies with
    +    the conditions stated in this License.
    +
    +5.  Submission of Contributions. Unless You explicitly state otherwise,
    +    any Contribution intentionally submitted for inclusion in the Work
    +    by You to the Licensor shall be under the terms and conditions of
    +    this License, without any additional terms or conditions.
    +    Notwithstanding the above, nothing herein shall supersede or modify
    +    the terms of any separate license agreement you may have executed
    +    with Licensor regarding such Contributions.
    +
    +6.  Trademarks. This License does not grant permission to use the trade
    +    names, trademarks, service marks, or product names of the Licensor,
    +    except as required for reasonable and customary use in describing the
    +    origin of the Work and reproducing the content of the NOTICE file.
    +
    +7.  Disclaimer of Warranty. Unless required by applicable law or
    +    agreed to in writing, Licensor provides the Work (and each
    +    Contributor provides its Contributions) on an "AS IS" BASIS,
    +    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +    implied, including, without limitation, any warranties or conditions
    +    of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +    PARTICULAR PURPOSE. You are solely responsible for determining the
    +    appropriateness of using or redistributing the Work and assume any
    +    risks associated with Your exercise of permissions under this License.
    +
    +8.  Limitation of Liability. In no event and under no legal theory,
    +    whether in tort (including negligence), contract, or otherwise,
    +    unless required by applicable law (such as deliberate and grossly
    +    negligent acts) or agreed to in writing, shall any Contributor be
    +    liable to You for damages, including any direct, indirect, special,
    +    incidental, or consequential damages of any character arising as a
    +    result of this License or out of the use or inability to use the
    +    Work (including but not limited to damages for loss of goodwill,
    +    work stoppage, computer failure or malfunction, or any and all
    +    other commercial damages or losses), even if such Contributor
    +    has been advised of the possibility of such damages.
    +
    +9.  Accepting Warranty or Additional Liability. While redistributing
    +    the Work or Derivative Works thereof, You may choose to offer,
    +    and charge a fee for, acceptance of support, warranty, indemnity,
    +    or other liability obligations and/or rights consistent with this
    +    License. However, in accepting such obligations, You may act only
    +    on Your own behalf and on Your sole responsibility, not on behalf
    +    of any other Contributor, and only if You agree to indemnify,
    +    defend, and hold each Contributor harmless for any liability
    +    incurred by, or claims asserted against, such Contributor by reason
    +    of your accepting any such warranty or additional liability.
    +
    +END OF TERMS AND CONDITIONS
    +
    +APPENDIX: How to apply the Apache License to your work.
    +
    +      To apply the Apache License to your work, attach the following
    +      boilerplate notice, with the fields enclosed by brackets "[]"
    +      replaced with your own identifying information. (Don't include
    +      the brackets!)  The text should be enclosed in the appropriate
    +      comment syntax for the file format. We also recommend that a
    +      file or class name and description of purpose be included on the
    +      same "printed page" as the copyright notice for easier
    +      identification within third-party archives.
    +
    +Copyright [yyyy] [name of copyright owner]
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +       http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
     
@@ -5896,15 +6330,225 @@

    Used by:

    Apache License 2.0

    Used by:

+
+                              Apache License
    +                        Version 2.0, January 2004
    +                     http://www.apache.org/licenses/
    +
    +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    +
    +1. Definitions.
    +
    +   "License" shall mean the terms and conditions for use, reproduction,
    +   and distribution as defined by Sections 1 through 9 of this document.
    +
    +   "Licensor" shall mean the copyright owner or entity authorized by
    +   the copyright owner that is granting the License.
    +
    +   "Legal Entity" shall mean the union of the acting entity and all
    +   other entities that control, are controlled by, or are under common
    +   control with that entity. For the purposes of this definition,
    +   "control" means (i) the power, direct or indirect, to cause the
    +   direction or management of such entity, whether by contract or
    +   otherwise, or (ii) ownership of fifty percent (50%) or more of the
    +   outstanding shares, or (iii) beneficial ownership of such entity.
    +
    +   "You" (or "Your") shall mean an individual or Legal Entity
    +   exercising permissions granted by this License.
    +
    +   "Source" form shall mean the preferred form for making modifications,
    +   including but not limited to software source code, documentation
    +   source, and configuration files.
    +
    +   "Object" form shall mean any form resulting from mechanical
    +   transformation or translation of a Source form, including but
    +   not limited to compiled object code, generated documentation,
    +   and conversions to other media types.
    +
    +   "Work" shall mean the work of authorship, whether in Source or
    +   Object form, made available under the License, as indicated by a
    +   copyright notice that is included in or attached to the work
    +   (an example is provided in the Appendix below).
    +
    +   "Derivative Works" shall mean any work, whether in Source or Object
    +   form, that is based on (or derived from) the Work and for which the
    +   editorial revisions, annotations, elaborations, or other modifications
    +   represent, as a whole, an original work of authorship. For the purposes
    +   of this License, Derivative Works shall not include works that remain
    +   separable from, or merely link (or bind by name) to the interfaces of,
    +   the Work and Derivative Works thereof.
    +
    +   "Contribution" shall mean any work of authorship, including
    +   the original version of the Work and any modifications or additions
    +   to that Work or Derivative Works thereof, that is intentionally
    +   submitted to Licensor for inclusion in the Work by the copyright owner
    +   or by an individual or Legal Entity authorized to submit on behalf of
    +   the copyright owner. For the purposes of this definition, "submitted"
    +   means any form of electronic, verbal, or written communication sent
    +   to the Licensor or its representatives, including but not limited to
    +   communication on electronic mailing lists, source code control systems,
    +   and issue tracking systems that are managed by, or on behalf of, the
    +   Licensor for the purpose of discussing and improving the Work, but
    +   excluding communication that is conspicuously marked or otherwise
    +   designated in writing by the copyright owner as "Not a Contribution."
    +
    +   "Contributor" shall mean Licensor and any individual or Legal Entity
    +   on behalf of whom a Contribution has been received by Licensor and
    +   subsequently incorporated within the Work.
    +
    +2. Grant of Copyright License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   copyright license to reproduce, prepare Derivative Works of,
    +   publicly display, publicly perform, sublicense, and distribute the
    +   Work and such Derivative Works in Source or Object form.
    +
    +3. Grant of Patent License. Subject to the terms and conditions of
    +   this License, each Contributor hereby grants to You a perpetual,
    +   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    +   (except as stated in this section) patent license to make, have made,
    +   use, offer to sell, sell, import, and otherwise transfer the Work,
    +   where such license applies only to those patent claims licensable
    +   by such Contributor that are necessarily infringed by their
    +   Contribution(s) alone or by combination of their Contribution(s)
    +   with the Work to which such Contribution(s) was submitted. If You
    +   institute patent litigation against any entity (including a
    +   cross-claim or counterclaim in a lawsuit) alleging that the Work
    +   or a Contribution incorporated within the Work constitutes direct
    +   or contributory patent infringement, then any patent licenses
    +   granted to You under this License for that Work shall terminate
    +   as of the date such litigation is filed.
    +
    +4. Redistribution. You may reproduce and distribute copies of the
    +   Work or Derivative Works thereof in any medium, with or without
    +   modifications, and in Source or Object form, provided that You
    +   meet the following conditions:
    +
    +   (a) You must give any other recipients of the Work or
    +       Derivative Works a copy of this License; and
    +
    +   (b) You must cause any modified files to carry prominent notices
    +       stating that You changed the files; and
    +
    +   (c) You must retain, in the Source form of any Derivative Works
    +       that You distribute, all copyright, patent, trademark, and
    +       attribution notices from the Source form of the Work,
    +       excluding those notices that do not pertain to any part of
    +       the Derivative Works; and
    +
    +   (d) If the Work includes a "NOTICE" text file as part of its
    +       distribution, then any Derivative Works that You distribute must
    +       include a readable copy of the attribution notices contained
    +       within such NOTICE file, excluding those notices that do not
    +       pertain to any part of the Derivative Works, in at least one
    +       of the following places: within a NOTICE text file distributed
    +       as part of the Derivative Works; within the Source form or
    +       documentation, if provided along with the Derivative Works; or,
    +       within a display generated by the Derivative Works, if and
    +       wherever such third-party notices normally appear. The contents
    +       of the NOTICE file are for informational purposes only and
    +       do not modify the License. You may add Your own attribution
    +       notices within Derivative Works that You distribute, alongside
    +       or as an addendum to the NOTICE text from the Work, provided
    +       that such additional attribution notices cannot be construed
    +       as modifying the License.
    +
    +   You may add Your own copyright statement to Your modifications and
    +   may provide additional or different license terms and conditions
    +   for use, reproduction, or distribution of Your modifications, or
    +   for any such Derivative Works as a whole, provided Your use,
    +   reproduction, and distribution of the Work otherwise complies with
    +   the conditions stated in this License.
    +
    +5. Submission of Contributions. Unless You explicitly state otherwise,
    +   any Contribution intentionally submitted for inclusion in the Work
    +   by You to the Licensor shall be under the terms and conditions of
    +   this License, without any additional terms or conditions.
    +   Notwithstanding the above, nothing herein shall supersede or modify
    +   the terms of any separate license agreement you may have executed
    +   with Licensor regarding such Contributions.
    +
    +6. Trademarks. This License does not grant permission to use the trade
    +   names, trademarks, service marks, or product names of the Licensor,
    +   except as required for reasonable and customary use in describing the
    +   origin of the Work and reproducing the content of the NOTICE file.
    +
    +7. Disclaimer of Warranty. Unless required by applicable law or
    +   agreed to in writing, Licensor provides the Work (and each
    +   Contributor provides its Contributions) on an "AS IS" BASIS,
    +   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    +   implied, including, without limitation, any warranties or conditions
    +   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    +   PARTICULAR PURPOSE. You are solely responsible for determining the
    +   appropriateness of using or redistributing the Work and assume any
    +   risks associated with Your exercise of permissions under this License.
    +
    +8. Limitation of Liability. In no event and under no legal theory,
    +   whether in tort (including negligence), contract, or otherwise,
    +   unless required by applicable law (such as deliberate and grossly
    +   negligent acts) or agreed to in writing, shall any Contributor be
    +   liable to You for damages, including any direct, indirect, special,
    +   incidental, or consequential damages of any character arising as a
    +   result of this License or out of the use or inability to use the
    +   Work (including but not limited to damages for loss of goodwill,
    +   work stoppage, computer failure or malfunction, or any and all
    +   other commercial damages or losses), even if such Contributor
    +   has been advised of the possibility of such damages.
    +
    +9. Accepting Warranty or Additional Liability. While redistributing
    +   the Work or Derivative Works thereof, You may choose to offer,
    +   and charge a fee for, acceptance of support, warranty, indemnity,
    +   or other liability obligations and/or rights consistent with this
    +   License. However, in accepting such obligations, You may act only
    +   on Your own behalf and on Your sole responsibility, not on behalf
    +   of any other Contributor, and only if You agree to indemnify,
    +   defend, and hold each Contributor harmless for any liability
    +   incurred by, or claims asserted against, such Contributor by reason
    +   of your accepting any such warranty or additional liability.
    +
    +END OF TERMS AND CONDITIONS
    +
    +APPENDIX: How to apply the Apache License to your work.
    +
    +   To apply the Apache License to your work, attach the following
    +   boilerplate notice, with the fields enclosed by brackets "[]"
    +   replaced with your own identifying information. (Don't include
    +   the brackets!)  The text should be enclosed in the appropriate
    +   comment syntax for the file format. We also recommend that a
    +   file or class name and description of purpose be included on the
    +   same "printed page" as the copyright notice for easier
    +   identification within third-party archives.
    +
    +Copyright (c) 2016 Alex Crichton
    +Copyright (c) 2017 The Tokio Authors
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +	http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +
+
+  • Apache License 2.0
+
+    Used by:
+
                                  Apache License
                             Version 2.0, January 2004
    @@ -6094,8 +6738,7 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright (c) 2016 Alex Crichton -Copyright (c) 2017 The Tokio Authors +Copyright 2014 Paho Lurie-Gregg Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -6107,14 +6750,13 @@

    Used by:

    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
-limitations under the License.
-
+limitations under the License.
  • Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -6304,7 +6946,7 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2014 Paho Lurie-Gregg +Copyright 2016 Sean McArthur Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -6316,13 +6958,14 @@

    Used by:

    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
-limitations under the License.
+limitations under the License.
+
  • Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -6512,7 +7155,7 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2016 Sean McArthur +Copyright 2017 Sergio Benitez Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -6531,7 +7174,7 @@

    Used by:

    Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -6722,6 +7365,7 @@ 

    Used by:

    identification within third-party archives.

    Copyright 2017 Sergio Benitez
+Copyright 2014 Alex Chricton

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
@@ -6740,7 +7384,8 @@

    Used by:

    Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -6930,8 +7575,7 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2017 Sergio Benitez -Copyright 2014 Alex Chricton +Copyright 2017 http-rs authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -6950,8 +7594,7 @@

    Used by:

    Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -7141,7 +7784,7 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2017 http-rs authors +Copyright 2017 quininer kel Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -7160,7 +7803,7 @@

    Used by:

    Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -7350,7 +7993,7 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2017 quininer kel +Copyright 2018 The pin-utils authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -7369,7 +8012,7 @@

    Used by:

    Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -7559,13 +8202,13 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2018 The pin-utils authors +Copyright 2019 The CryptoCorrosion Contributors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -7578,7 +8221,8 @@

    Used by:

    Apache License 2.0

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -7768,13 +8412,13 @@ 

    Used by:

    same "printed page" as the copyright notice for easier identification within third-party archives. -Copyright 2019 The CryptoCorrosion Contributors +Copyright 2020 Andrew Straw Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -8441,6 +9085,8 @@

    Used by:

  • core-foundation-sys
  • countme
  • crossbeam-channel
+  • crossbeam-deque
+  • crossbeam-epoch
  • crossbeam-utils
  • debugid
  • derivative
@@ -8477,6 +9123,7 @@

    Used by:

  • hyper-timeout
  • idna
  • idna
+  • idna_adapter
  • if_chain
  • indexmap
  • indexmap
@@ -8531,6 +9178,8 @@

    Used by:

  • prost-types
  • prost-types
  • proteus
+  • rayon
+  • rayon-core
  • regex
  • regex-automata
  • regex-lite
@@ -10637,6 +11286,7 @@

    Used by:

                                  Apache License
                             Version 2.0, January 2004
    @@ -11945,6 +12595,7 @@ 

    Used by:

  • async-graphql-derive
  • async-graphql-parser
  • async-graphql-value
+  • chrono
  • deno-proc-macro-rules
  • deno-proc-macro-rules-macros
  • dunce
@@ -12042,6 +12693,27 @@

    Used by:

        http://www.apache.org/licenses/LICENSE-2.0

+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+  • Apache License 2.0
+
+    Used by:
+
+Copyright 2016 Nicolas Silva
+
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
     Unless required by applicable law or agreed to in writing, software
     distributed under the License is distributed on an "AS IS" BASIS,
     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    @@ -12901,6 +13573,107 @@ 

    Used by:

    **trademark** means trademarks, service marks, and similar rights.

--------------------------------------------------------------------------------
+
+  • Elastic License 2.0
+
+    Used by:
+
+Elastic License 2.0
+
    +URL: https://www.elastic.co/licensing/elastic-license
    +
    +## Acceptance
    +
    +By using the software, you agree to all of the terms and conditions below.
    +
    +## Copyright License
    +
    +The licensor grants you a non-exclusive, royalty-free, worldwide,
    +non-sublicensable, non-transferable license to use, copy, distribute, make
    +available, and prepare derivative works of the software, in each case subject to
    +the limitations and conditions below.
    +
    +## Limitations
    +
    +You may not provide the software to third parties as a hosted or managed
    +service, where the service provides users with access to any substantial set of
    +the features or functionality of the software.
    +
    +You may not move, change, disable, or circumvent the license key functionality
    +in the software, and you may not remove or obscure any functionality in the
    +software that is protected by the license key.
    +
    +You may not alter, remove, or obscure any licensing, copyright, or other notices
    +of the licensor in the software. Any use of the licensorā€™s trademarks is subject
    +to applicable law.
    +
    +## Patents
    +
    +The licensor grants you a license, under any patent claims the licensor can
    +license, or becomes able to license, to make, have made, use, sell, offer for
    +sale, import and have imported the software, in each case subject to the
    +limitations and conditions in this license. This license does not cover any
    +patent claims that you cause to be infringed by modifications or additions to
    +the software. If you or your company make any written claim that the software
    +infringes or contributes to infringement of any patent, your patent license for
    +the software granted under these terms ends immediately. If your company makes
    +such a claim, your patent license ends immediately for work on behalf of your
    +company.
    +
    +## Notices
    +
    +You must ensure that anyone who gets a copy of any part of the software from you
    +also gets a copy of these terms.
    +
    +If you modify the software, you must include in any modified copies of the
    +software prominent notices stating that you have modified the software.
    +
    +## No Other Rights
    +
    +These terms do not imply any licenses other than those expressly granted in
    +these terms.
    +
    +## Termination
    +
    +If you use the software in violation of these terms, such use is not licensed,
    +and your licenses will automatically terminate. If the licensor provides you
    +with a notice of your violation, and you cease all violation of this license no
    +later than 30 days after you receive that notice, your licenses will be
    +reinstated retroactively. However, if you violate these terms after such
    +reinstatement, any additional violation of these terms will cause your licenses
    +to terminate automatically and permanently.
    +
    +## No Liability
    +
    +*As far as the law allows, the software comes as is, without any warranty or
    +condition, and the licensor will not be liable to you for any damages arising
    +out of these terms or the use or nature of the software, under any kind of
    +legal claim.*
    +
    +## Definitions
    +
    +The **licensor** is the entity offering these terms, and the **software** is the
    +software the licensor makes available under these terms, including any portion
    +of it.
    +
    +**you** refers to the individual or entity agreeing to these terms.
    +
    +**your company** is any legal entity, sole proprietorship, or other kind of
    +organization that you work for, plus all organizations that have control over,
    +are under the control of, or are under common control with that
    +organization. **control** means ownership of substantially all the assets of an
    +entity, or the power to direct its management and policies by vote, contract, or
    +otherwise. Control can be direct or indirect.
    +
    +**your licenses** are all the licenses granted to you for the software under
    +these terms.
    +
    +**use** means anything you do with the software requiring one of your licenses.
    +
    +**trademark** means trademarks, service marks, and similar rights.
    +
  • ISC License

    @@ -14153,6 +14926,21 @@

    Used by:

    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+  • MIT License
+
+    Used by:
+
+Copyright 2016 Nika Layzell
+
    +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     
@@ -15251,6 +16039,36 @@

    Used by:

    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.
+
+  • MIT License
+
+    Used by:
+
+The MIT License (MIT)
+
    +Copyright (c) 2015 Guillaume Gomez
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +SOFTWARE.
    +
     
@@ -16918,6 +17736,71 @@

    Used by:

    * Hudson (tjh@cryptsoft.com).
    *
    */
+
+  • Unicode License v3
+
+    Used by:
+
+UNICODE LICENSE V3
+
    +COPYRIGHT AND PERMISSION NOTICE
    +
    +Copyright Ā© 1991-2023 Unicode, Inc.
    +
    +NOTICE TO USER: Carefully read the following legal agreement. BY
    +DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING DATA FILES, AND/OR
    +SOFTWARE, YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
    +TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE, DO NOT
    +DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE THE DATA FILES OR SOFTWARE.
    +
    +Permission is hereby granted, free of charge, to any person obtaining a
    +copy of data files and any associated documentation (the "Data Files") or
    +software and any associated documentation (the "Software") to deal in the
    +Data Files or Software without restriction, including without limitation
    +the rights to use, copy, modify, merge, publish, distribute, and/or sell
    +copies of the Data Files or Software, and to permit persons to whom the
    +Data Files or Software are furnished to do so, provided that either (a)
    +this copyright and permission notice appear with all copies of the Data
    +Files or Software, or (b) this copyright and permission notice appear in
    +associated Documentation.
    +
    +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
    +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
    +THIRD PARTY RIGHTS.
    +
    +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE
    +BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES,
    +OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
    +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
    +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THE DATA
    +FILES OR SOFTWARE.
    +
    +Except as contained in this notice, the name of a copyright holder shall
    +not be used in advertising or otherwise to promote the sale, use or other
    +dealings in these Data Files or Software without prior written
    +authorization of the copyright holder.
    +
  • Unicode License Agreement - Data Files and Software (2016)

diff --git a/scripts/install.sh b/scripts/install.sh
index d9e413f445..4403b9866e 100755
--- a/scripts/install.sh
+++ b/scripts/install.sh
@@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa

 # Router version defined in apollo-router's Cargo.toml
 # Note: Change this line manually during the release steps.
-PACKAGE_VERSION="v1.58.1"
+PACKAGE_VERSION="v1.59.0"

 download_binary() {
     downloader --check
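
As a usage note: this install script is normally consumed via the download one-liner shown in the router README; a sketch of fetching the release pinned above, assuming the `v1.59.0` path segment maps to this release tag:

```console
# Download and run the installer for a specific router release
curl -sSL https://router.apollo.dev/download/nix/v1.59.0 | sh
```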